Example #1
0
    def fetch_deltas(self, root_url, output_dir=None):
        if output_dir is None:
            output_dir = os.path.join(portage.settings['DISTDIR'], 'patches')
        mysettings = portage.config(clone=portage.settings)
        mysettings['DISTDIR'] = output_dir
        urls = []
        for record in self.dbrecords:
            urls.append(posixpath.join(root_url, record.delta.fname))
        if 'distpatch' in mysettings.features:
            mysettings.features.remove('distpatch')
        if not fetch(urls, mysettings):
            raise PatchException('Failed to fetch deltas: %s' % urls)
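Note: a self-contained sketch of the same pattern, hedged rather than authoritative. It assumes a recent Portage where fetch() is importable from portage.package.ebuild.fetch; the fetch_into() helper name and its arguments are placeholders, not part of the example above.

import portage
from portage.package.ebuild.fetch import fetch

def fetch_into(urls, output_dir):
    # Clone the global config so the DISTDIR override stays local.
    mysettings = portage.config(clone=portage.settings)
    mysettings['DISTDIR'] = output_dir  # fetch() downloads into DISTDIR
    # fetch() accepts a plain sequence of URIs and returns a truthy
    # value on success.
    if not fetch(urls, mysettings):
        raise RuntimeError('Failed to fetch: %s' % urls)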
Example #2
0
	def _run(self):
		# Force consistent color output, in case we are capturing fetch
		# output through a normal pipe due to unavailability of ptys.
		portage.output.havecolor = self._settings.get('NOCOLOR') \
			not in ('yes', 'true')

		rval = 1
		allow_missing = self._get_manifest().allow_missing
		if fetch(self._uri_map, self._settings, fetchonly=self.fetchonly,
			digests=copy.deepcopy(self._get_digests()),
			allow_missing_digests=allow_missing):
			rval = os.EX_OK
		return rval
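Note: the core of _run() reduces to the call below. This is a hedged sketch in which uri_map, settings, digests, and allow_missing are assumed to come from an EbuildFetcher-like context, and run_fetch() is a hypothetical name.

import copy
import os
from portage.package.ebuild.fetch import fetch

def run_fetch(uri_map, settings, digests, allow_missing, fetchonly=False):
    # fetch() returns a truthy value on success; map that to an exit status.
    if fetch(uri_map, settings, fetchonly=fetchonly,
             digests=copy.deepcopy(digests),
             allow_missing_digests=allow_missing):
        return os.EX_OK
    return 1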
Example #3
0
    def _spawn(self, args, fd_pipes=None, **kwargs):
        """
        Fork a subprocess, apply local settings, and call fetch().
        """

        pid = os.fork()
        if pid != 0:
            if not isinstance(pid, int):
                raise AssertionError(
                    "fork returned non-integer: %s" % (repr(pid), ))
            portage.process.spawned_pids.append(pid)
            return [pid]

        # TODO: Find out why PyPy 1.8 with close_fds=True triggers
        # "[Errno 9] Bad file descriptor" in subprocesses. It could
        # be due to garbage collection of file objects that were not
        # closed before going out of scope, since PyPy's garbage
        # collector does not support the refcounting semantics that
        # CPython does.
        close_fds = platform.python_implementation() != 'PyPy'
        portage.process._setup_pipes(fd_pipes, close_fds=close_fds)

        # Use default signal handlers in order to avoid problems
        # killing subprocesses as reported in bug #353239.
        signal.signal(signal.SIGINT, signal.SIG_DFL)
        signal.signal(signal.SIGTERM, signal.SIG_DFL)

        # Force consistent color output, in case we are capturing fetch
        # output through a normal pipe due to unavailability of ptys.
        portage.output.havecolor = self._settings.get('NOCOLOR') \
         not in ('yes', 'true')

        rval = 1
        allow_missing = self._get_manifest().allow_missing
        try:
            if fetch(
                    self._uri_map,
                    self._settings,
                    fetchonly=self.fetchonly,
                    digests=copy.deepcopy(self._get_digests()),
                    allow_missing_digests=allow_missing):
                rval = os.EX_OK
        except SystemExit:
            raise
        except:
            traceback.print_exc()
        finally:
            # Call os._exit() from finally block, in order to suppress any
            # finally blocks from earlier in the call stack. See bug #345289.
            os._exit(rval)
Example #4
0
    def _spawn(self, args, fd_pipes=None, **kwargs):
        """
        Fork a subprocess, apply local settings, and call fetch().
        """

        pid = os.fork()
        if pid != 0:
            if not isinstance(pid, int):
                raise AssertionError("fork returned non-integer: %s" %
                                     (repr(pid), ))
            portage.process.spawned_pids.append(pid)
            return [pid]

        # TODO: Find out why PyPy 1.8 with close_fds=True triggers
        # "[Errno 9] Bad file descriptor" in subprocesses. It could
        # be due to garbage collection of file objects that were not
        # closed before going out of scope, since PyPy's garbage
        # collector does not support the refcounting semantics that
        # CPython does.
        close_fds = platform.python_implementation() != 'PyPy'
        portage.process._setup_pipes(fd_pipes, close_fds=close_fds)

        # Use default signal handlers in order to avoid problems
        # killing subprocesses as reported in bug #353239.
        signal.signal(signal.SIGINT, signal.SIG_DFL)
        signal.signal(signal.SIGTERM, signal.SIG_DFL)

        # Force consistent color output, in case we are capturing fetch
        # output through a normal pipe due to unavailability of ptys.
        portage.output.havecolor = self._settings.get('NOCOLOR') \
         not in ('yes', 'true')

        rval = 1
        allow_missing = self._get_manifest().allow_missing
        try:
            if fetch(self._uri_map,
                     self._settings,
                     fetchonly=self.fetchonly,
                     digests=copy.deepcopy(self._get_digests()),
                     allow_missing_digests=allow_missing):
                rval = os.EX_OK
        except SystemExit:
            raise
        except:
            traceback.print_exc()
        finally:
            # Call os._exit() from finally block, in order to suppress any
            # finally blocks from earlier in the call stack. See bug #345289.
            os._exit(rval)
Example #5
0
    def fetch(self, myfile=None):
        mysettings = portage.config(clone=portage.settings)
        mysettings['O'] = os.path.dirname(dbapi.findname(self.cpv))
        available_files = self.src_uri_map
        if myfile is None:
            files = available_files
        else:
            if myfile not in available_files:
                raise EbuildException('Invalid distfile: %s' % myfile)
            files = OrderedDict()
            files[myfile] = available_files[myfile]
        if 'distpatch' in mysettings.features:
            mysettings.features.remove('distpatch')
        if not fetch(files, mysettings, allow_missing_digests=False):
            raise EbuildException('Failed to fetch distfiles for %s' % self.cpv)
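Note: the single-distfile case above can be isolated as the hedged sketch below; fetch_one() and its arguments are illustrative only. Passing allow_missing_digests=False makes fetch() fail when the Manifest has no digests for the file.

from collections import OrderedDict
from portage.package.ebuild.fetch import fetch

def fetch_one(src_uri_map, myfile, settings):
    if myfile not in src_uri_map:
        raise ValueError('Invalid distfile: %s' % myfile)
    # Restrict the fetch to the single requested file.
    files = OrderedDict([(myfile, src_uri_map[myfile])])
    return bool(fetch(files, settings, allow_missing_digests=False))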
Example #6
0
    def _run(self):
        # Force consistent color output, in case we are capturing fetch
        # output through a normal pipe due to unavailability of ptys.
        portage.output.havecolor = self._settings.get('NOCOLOR') \
         not in ('yes', 'true')

        rval = 1
        allow_missing = self._get_manifest().allow_missing
        if fetch(self._uri_map,
                 self._settings,
                 fetchonly=self.fetchonly,
                 digests=copy.deepcopy(self._get_digests()),
                 allow_missing_digests=allow_missing):
            rval = os.EX_OK
        return rval
Example #7
0
def fetch_metadata_xsd(metadata_xsd, repoman_settings):
    """
    Fetch metadata.xsd if it doesn't exist or the ctime is older than
    metadata_xsd_ctime_interval.
    @rtype: bool
    @return: True if successful, otherwise False
    """

    must_fetch = True
    metadata_xsd_st = None
    current_time = int(time.time())
    try:
        metadata_xsd_st = os.stat(metadata_xsd)
    except EnvironmentError as e:
        if e.errno not in (errno.ENOENT, errno.ESTALE):
            raise
        del e
    else:
        # Trigger fetch if metadata.xsd mtime is old or clock is wrong.
        if abs(current_time - metadata_xsd_st.st_ctime) \
         < metadata_xsd_ctime_interval:
            must_fetch = False

    if must_fetch:
        print()
        print("%s the local copy of metadata.xsd "
              "needs to be refetched, doing that now" % green("***"))
        print()

        if not fetch(
            [metadata_xsd_uri], repoman_settings, force=1, try_mirrors=0):
            logging.error("failed to fetch metadata.xsd from '%s'" %
                          metadata_xsd_uri)
            return False

        try:
            portage.util.apply_secpass_permissions(
                metadata_xsd,
                gid=portage.data.portage_gid,
                mode=0o664,
                mask=0o2)
        except portage.exception.PortageException:
            pass

    return True
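Note: the force-refetch call used above, isolated as a hedged sketch (refetch_uri() is a hypothetical helper): force=1 refetches even when a local copy exists, and try_mirrors=0 bypasses the configured mirrors and uses only the given URI.

from portage.package.ebuild.fetch import fetch

def refetch_uri(uri, settings):
    # force=1: replace any existing local file; try_mirrors=0: no mirror rewriting.
    return bool(fetch([uri], settings, force=1, try_mirrors=0))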
Example #8
0
    def _spawn(self, args, fd_pipes=None, **kwargs):
        """
        Fork a subprocess, apply local settings, and call fetch().
        """

        pid = os.fork()
        if pid != 0:
            if not isinstance(pid, int):
                raise AssertionError("fork returned non-integer: %s" %
                                     (repr(pid), ))
            portage.process.spawned_pids.append(pid)
            return [pid]

        portage.locks._close_fds()
        # Disable close_fds since we don't exec (see _setup_pipes docstring).
        portage.process._setup_pipes(fd_pipes, close_fds=False)

        # Use default signal handlers in order to avoid problems
        # killing subprocesses as reported in bug #353239.
        signal.signal(signal.SIGINT, signal.SIG_DFL)
        signal.signal(signal.SIGTERM, signal.SIG_DFL)

        # Force consistent color output, in case we are capturing fetch
        # output through a normal pipe due to unavailability of ptys.
        portage.output.havecolor = self._settings.get('NOCOLOR') \
         not in ('yes', 'true')

        rval = 1
        allow_missing = self._get_manifest().allow_missing
        try:
            if fetch(self._uri_map,
                     self._settings,
                     fetchonly=self.fetchonly,
                     digests=copy.deepcopy(self._get_digests()),
                     allow_missing_digests=allow_missing):
                rval = os.EX_OK
        except SystemExit:
            raise
        except:
            traceback.print_exc()
        finally:
            # Call os._exit() from finally block, in order to suppress any
            # finally blocks from earlier in the call stack. See bug #345289.
            os._exit(rval)
Example #9
0
	def _run(self):
		# Force consistent color output, in case we are capturing fetch
		# output through a normal pipe due to unavailability of ptys.
		portage.output.havecolor = self._settings.get('NOCOLOR') \
			not in ('yes', 'true')

		# For userfetch, drop privileges for the entire fetch call, in
		# order to handle DISTDIR on NFS with root_squash for bug 601252.
		if _want_userfetch(self._settings):
			_drop_privs_userfetch(self._settings)

		rval = 1
		allow_missing = self._get_manifest().allow_missing or \
			'digest' in self._settings.features
		if fetch(self._uri_map, self._settings, fetchonly=self.fetchonly,
			digests=copy.deepcopy(self._get_digests()),
			allow_missing_digests=allow_missing):
			rval = os.EX_OK
		return rval
Example #10
0
	def _spawn(self, args, fd_pipes=None, **kwargs):
		"""
		Fork a subprocess, apply local settings, and call fetch().
		"""

		pid = os.fork()
		if pid != 0:
			if not isinstance(pid, int):
				raise AssertionError(
					"fork returned non-integer: %s" % (repr(pid),))
			portage.process.spawned_pids.append(pid)
			return [pid]

		portage.locks._close_fds()
		# Disable close_fds since we don't exec (see _setup_pipes docstring).
		portage.process._setup_pipes(fd_pipes, close_fds=False)

		# Use default signal handlers in order to avoid problems
		# killing subprocesses as reported in bug #353239.
		signal.signal(signal.SIGINT, signal.SIG_DFL)
		signal.signal(signal.SIGTERM, signal.SIG_DFL)

		# Force consistent color output, in case we are capturing fetch
		# output through a normal pipe due to unavailability of ptys.
		portage.output.havecolor = self._settings.get('NOCOLOR') \
			not in ('yes', 'true')

		rval = 1
		allow_missing = self._get_manifest().allow_missing
		try:
			if fetch(self._uri_map, self._settings, fetchonly=self.fetchonly,
				digests=copy.deepcopy(self._get_digests()),
				allow_missing_digests=allow_missing):
				rval = os.EX_OK
		except SystemExit:
			raise
		except:
			traceback.print_exc()
		finally:
			# Call os._exit() from finally block, in order to suppress any
			# finally blocks from earlier in the call stack. See bug #345289.
			os._exit(rval)
Example #11
0
def digestgen(myarchives=None, mysettings=None, myportdb=None):
    """
    Generates a digest file if missing. Fetches files if necessary.
    NOTE: myarchives and mysettings used to be positional arguments,
        so their order must be preserved for backward compatibility.
    @param mysettings: the ebuild config (mysettings["O"] must correspond
        to the ebuild's parent directory)
    @type mysettings: config
    @param myportdb: a portdbapi instance
    @type myportdb: portdbapi
    @rtype: int
    @return: 1 on success and 0 on failure
    """
    if mysettings is None or myportdb is None:
        raise TypeError("portage.digestgen(): 'mysettings' and 'myportdb' parameters are required.")

    try:
        portage._doebuild_manifest_exempt_depend += 1
        distfiles_map = {}
        fetchlist_dict = FetchlistDict(mysettings["O"], mysettings, myportdb)
        for cpv in fetchlist_dict:
            try:
                for myfile in fetchlist_dict[cpv]:
                    distfiles_map.setdefault(myfile, []).append(cpv)
            except InvalidDependString as e:
                writemsg("!!! %s\n" % str(e), noiselevel=-1)
                del e
                return 0
        mytree = os.path.dirname(os.path.dirname(mysettings["O"]))
        try:
            mf = mysettings.repositories.get_repo_for_location(mytree)
        except KeyError:
            # backward compatibility
            mytree = os.path.realpath(mytree)
            mf = mysettings.repositories.get_repo_for_location(mytree)

        mf = mf.load_manifest(mysettings["O"], mysettings["DISTDIR"], fetchlist_dict=fetchlist_dict)

        if not mf.allow_create:
            writemsg_stdout(
                _(">>> Skipping creating Manifest for %s; " "repository is configured to not use them\n")
                % mysettings["O"]
            )
            return 1

        # Don't require all hashes since that can trigger excessive
        # fetches when sufficient digests already exist.  To ease transition
        # while Manifest 1 is being removed, only require hashes that will
        # exist before and after the transition.
        required_hash_types = set()
        required_hash_types.add("size")
        required_hash_types.add(MANIFEST2_REQUIRED_HASH)
        dist_hashes = mf.fhashdict.get("DIST", {})

        # To avoid accidental regeneration of digests with the incorrect
        # files (such as partially downloaded files), trigger the fetch
        # code if the file exists and its size doesn't match the current
        # manifest entry. If there really is a legitimate reason for the
        # digest to change, `ebuild --force digest` can be used to avoid
        # triggering this code (or else the old digests can be manually
        # removed from the Manifest).
        missing_files = []
        for myfile in distfiles_map:
            myhashes = dist_hashes.get(myfile)
            if not myhashes:
                try:
                    st = os.stat(os.path.join(mysettings["DISTDIR"], myfile))
                except OSError:
                    st = None
                if st is None or st.st_size == 0:
                    missing_files.append(myfile)
                continue
            size = myhashes.get("size")

            try:
                st = os.stat(os.path.join(mysettings["DISTDIR"], myfile))
            except OSError as e:
                if e.errno != errno.ENOENT:
                    raise
                del e
                if size == 0:
                    missing_files.append(myfile)
                    continue
                if required_hash_types.difference(myhashes):
                    missing_files.append(myfile)
                    continue
            else:
                if st.st_size == 0 or size is not None and size != st.st_size:
                    missing_files.append(myfile)
                    continue

        if missing_files:
            for myfile in missing_files:
                uris = set()
                all_restrict = set()
                for cpv in distfiles_map[myfile]:
                    uris.update(myportdb.getFetchMap(cpv, mytree=mytree)[myfile])
                    restrict = myportdb.aux_get(cpv, ["RESTRICT"], mytree=mytree)[0]
                    # Here we ignore conditional parts of RESTRICT since
                    # they don't apply unconditionally. Assume such
                    # conditionals only apply on the client side where
                    # digestgen() does not need to be called.
                    all_restrict.update(use_reduce(restrict, flat=True, matchnone=True))

                    # fetch() uses CATEGORY and PF to display a message
                    # when fetch restriction is triggered.
                    cat, pf = catsplit(cpv)
                    mysettings["CATEGORY"] = cat
                    mysettings["PF"] = pf

                # fetch() uses PORTAGE_RESTRICT to control fetch
                # restriction, which is only applied to files that
                # are not fetchable via a mirror:// URI.
                mysettings["PORTAGE_RESTRICT"] = " ".join(all_restrict)

                try:
                    st = os.stat(os.path.join(mysettings["DISTDIR"], myfile))
                except OSError:
                    st = None

                if not fetch({myfile: uris}, mysettings):
                    myebuild = os.path.join(mysettings["O"], catsplit(cpv)[1] + ".ebuild")
                    spawn_nofetch(myportdb, myebuild)
                    writemsg(_("!!! Fetch failed for %s, can't update " "Manifest\n") % myfile, noiselevel=-1)
                    if myfile in dist_hashes and st is not None and st.st_size > 0:
                        # stat result is obtained before calling fetch(),
                        # since fetch may rename the existing file if the
                        # digest does not match.
                        writemsg(
                            _(
                                "!!! If you would like to "
                                "forcefully replace the existing "
                                "Manifest entry\n!!! for %s, use "
                                "the following command:\n"
                            )
                            % myfile
                            + "!!!    "
                            + colorize("INFORM", "ebuild --force %s manifest" % os.path.basename(myebuild))
                            + "\n",
                            noiselevel=-1,
                        )
                    return 0
        writemsg_stdout(_(">>> Creating Manifest for %s\n") % mysettings["O"])
        try:
            mf.create(assumeDistHashesSometimes=True, assumeDistHashesAlways=("assume-digests" in mysettings.features))
        except FileNotFound as e:
            writemsg(_("!!! File %s doesn't exist, can't update " "Manifest\n") % e, noiselevel=-1)
            return 0
        except PortagePackageException as e:
            writemsg(("!!! %s\n") % (e,), noiselevel=-1)
            return 0
        try:
            mf.write(sign=False)
        except PermissionDenied as e:
            writemsg(_("!!! Permission Denied: %s\n") % (e,), noiselevel=-1)
            return 0
        if "assume-digests" not in mysettings.features:
            distlist = list(mf.fhashdict.get("DIST", {}))
            distlist.sort()
            auto_assumed = []
            for filename in distlist:
                if not os.path.exists(os.path.join(mysettings["DISTDIR"], filename)):
                    auto_assumed.append(filename)
            if auto_assumed:
                cp = os.path.sep.join(mysettings["O"].split(os.path.sep)[-2:])
                pkgs = myportdb.cp_list(cp, mytree=mytree)
                pkgs.sort()
                writemsg_stdout("  digest.assumed" + colorize("WARN", str(len(auto_assumed)).rjust(18)) + "\n")
                for pkg_key in pkgs:
                    fetchlist = myportdb.getFetchMap(pkg_key, mytree=mytree)
                    pv = pkg_key.split("/")[1]
                    for filename in auto_assumed:
                        if filename in fetchlist:
                            writemsg_stdout("   %s::%s\n" % (pv, filename))
        return 1
    finally:
        portage._doebuild_manifest_exempt_depend -= 1
Example #12
0
def digestgen(myarchives=None, mysettings=None,
	overwrite=None, manifestonly=None, myportdb=None):
	"""
	Generates a digest file if missing. Fetches files if necessary.
	NOTE: myarchives and mysettings used to be positional arguments,
		so their order must be preserved for backward compatibility.
	@param mysettings: the ebuild config (mysettings["O"] must correspond
		to the ebuild's parent directory)
	@type mysettings: config
	@param myportdb: a portdbapi instance
	@type myportdb: portdbapi
	@rtype: int
	@return: 1 on success and 0 on failure
	"""
	if mysettings is None:
		raise TypeError("portage.digestgen(): missing" + \
			" required 'mysettings' parameter")
	if myportdb is None:
		warnings.warn("portage.digestgen() called without 'myportdb' parameter",
			DeprecationWarning, stacklevel=2)
		myportdb = portage.portdb
	if overwrite is not None:
		warnings.warn("portage.digestgen() called with " + \
			"deprecated 'overwrite' parameter",
			DeprecationWarning, stacklevel=2)
	if manifestonly is not None:
		warnings.warn("portage.digestgen() called with " + \
			"deprecated 'manifestonly' parameter",
			DeprecationWarning, stacklevel=2)

	try:
		portage._doebuild_manifest_exempt_depend += 1
		distfiles_map = {}
		fetchlist_dict = FetchlistDict(mysettings["O"], mysettings, myportdb)
		for cpv in fetchlist_dict:
			try:
				for myfile in fetchlist_dict[cpv]:
					distfiles_map.setdefault(myfile, []).append(cpv)
			except InvalidDependString as e:
				writemsg("!!! %s\n" % str(e), noiselevel=-1)
				del e
				return 0
		mytree = os.path.dirname(os.path.dirname(mysettings["O"]))
		manifest1_compat = False
		mf = Manifest(mysettings["O"], mysettings["DISTDIR"],
			fetchlist_dict=fetchlist_dict, manifest1_compat=manifest1_compat)
		# Don't require all hashes since that can trigger excessive
		# fetches when sufficient digests already exist.  To ease transition
		# while Manifest 1 is being removed, only require hashes that will
		# exist before and after the transition.
		required_hash_types = set()
		required_hash_types.add("size")
		required_hash_types.add(MANIFEST2_REQUIRED_HASH)
		dist_hashes = mf.fhashdict.get("DIST", {})

		# To avoid accidental regeneration of digests with the incorrect
		# files (such as partially downloaded files), trigger the fetch
		# code if the file exists and its size doesn't match the current
		# manifest entry. If there really is a legitimate reason for the
		# digest to change, `ebuild --force digest` can be used to avoid
		# triggering this code (or else the old digests can be manually
		# removed from the Manifest).
		missing_files = []
		for myfile in distfiles_map:
			myhashes = dist_hashes.get(myfile)
			if not myhashes:
				try:
					st = os.stat(os.path.join(mysettings["DISTDIR"], myfile))
				except OSError:
					st = None
				if st is None or st.st_size == 0:
					missing_files.append(myfile)
				continue
			size = myhashes.get("size")

			try:
				st = os.stat(os.path.join(mysettings["DISTDIR"], myfile))
			except OSError as e:
				if e.errno != errno.ENOENT:
					raise
				del e
				if size == 0:
					missing_files.append(myfile)
					continue
				if required_hash_types.difference(myhashes):
					missing_files.append(myfile)
					continue
			else:
				if st.st_size == 0 or size is not None and size != st.st_size:
					missing_files.append(myfile)
					continue

		if missing_files:
			mytree = os.path.realpath(os.path.dirname(
				os.path.dirname(mysettings["O"])))
			fetch_settings = config(clone=mysettings)
			debug = mysettings.get("PORTAGE_DEBUG") == "1"
			for myfile in missing_files:
				uris = set()
				for cpv in distfiles_map[myfile]:
					myebuild = os.path.join(mysettings["O"],
						catsplit(cpv)[1] + ".ebuild")
					# for RESTRICT=fetch, mirror, etc...
					doebuild_environment(myebuild, "fetch",
						mysettings["ROOT"], fetch_settings,
						debug, 1, myportdb)
					uris.update(myportdb.getFetchMap(
						cpv, mytree=mytree)[myfile])

				fetch_settings["A"] = myfile # for use by pkg_nofetch()

				try:
					st = os.stat(os.path.join(
						mysettings["DISTDIR"], myfile))
				except OSError:
					st = None

				if not fetch({myfile : uris}, fetch_settings):
					writemsg(_("!!! Fetch failed for %s, can't update "
						"Manifest\n") % myfile, noiselevel=-1)
					if myfile in dist_hashes and \
						st is not None and st.st_size > 0:
						# stat result is obtained before calling fetch(),
						# since fetch may rename the existing file if the
						# digest does not match.
						writemsg(_("!!! If you would like to "
							"forcefully replace the existing "
							"Manifest entry\n!!! for %s, use "
							"the following command:\n") % myfile + \
							"!!!    " + colorize("INFORM",
							"ebuild --force %s manifest" % \
							os.path.basename(myebuild)) + "\n",
							noiselevel=-1)
					return 0
		writemsg_stdout(_(">>> Creating Manifest for %s\n") % mysettings["O"])
		try:
			mf.create(assumeDistHashesSometimes=True,
				assumeDistHashesAlways=(
				"assume-digests" in mysettings.features))
		except FileNotFound as e:
			writemsg(_("!!! File %s doesn't exist, can't update "
				"Manifest\n") % e, noiselevel=-1)
			return 0
		except PortagePackageException as e:
			writemsg(("!!! %s\n") % (e,), noiselevel=-1)
			return 0
		try:
			mf.write(sign=False)
		except PermissionDenied as e:
			writemsg(_("!!! Permission Denied: %s\n") % (e,), noiselevel=-1)
			return 0
		if "assume-digests" not in mysettings.features:
			distlist = list(mf.fhashdict.get("DIST", {}))
			distlist.sort()
			auto_assumed = []
			for filename in distlist:
				if not os.path.exists(
					os.path.join(mysettings["DISTDIR"], filename)):
					auto_assumed.append(filename)
			if auto_assumed:
				mytree = os.path.realpath(
					os.path.dirname(os.path.dirname(mysettings["O"])))
				cp = os.path.sep.join(mysettings["O"].split(os.path.sep)[-2:])
				pkgs = myportdb.cp_list(cp, mytree=mytree)
				pkgs.sort()
				writemsg_stdout("  digest.assumed" + colorize("WARN",
					str(len(auto_assumed)).rjust(18)) + "\n")
				for pkg_key in pkgs:
					fetchlist = myportdb.getFetchMap(pkg_key, mytree=mytree)
					pv = pkg_key.split("/")[1]
					for filename in auto_assumed:
						if filename in fetchlist:
							writemsg_stdout(
								"   %s::%s\n" % (pv, filename))
		return 1
	finally:
		portage._doebuild_manifest_exempt_depend -= 1
Example #13
0
def digestgen(myarchives=None, mysettings=None, myportdb=None):
    """
    Generates a digest file if missing. Fetches files if necessary.
    NOTE: myarchives and mysettings used to be positional arguments,
        so their order must be preserved for backward compatibility.
    @param mysettings: the ebuild config (mysettings["O"] must correspond
        to the ebuild's parent directory)
    @type mysettings: config
    @param myportdb: a portdbapi instance
    @type myportdb: portdbapi
    @rtype: int
    @return: 1 on success and 0 on failure
    """
    if mysettings is None or myportdb is None:
        raise TypeError(
            "portage.digestgen(): 'mysettings' and 'myportdb' parameters are required."
        )

    try:
        portage._doebuild_manifest_exempt_depend += 1
        distfiles_map = {}
        fetchlist_dict = FetchlistDict(mysettings["O"], mysettings, myportdb)
        for cpv in fetchlist_dict:
            try:
                for myfile in fetchlist_dict[cpv]:
                    distfiles_map.setdefault(myfile, []).append(cpv)
            except InvalidDependString as e:
                writemsg("!!! %s\n" % str(e), noiselevel=-1)
                del e
                return 0
        mytree = os.path.dirname(os.path.dirname(mysettings["O"]))
        try:
            mf = mysettings.repositories.get_repo_for_location(mytree)
        except KeyError:
            # backward compatibility
            mytree = os.path.realpath(mytree)
            mf = mysettings.repositories.get_repo_for_location(mytree)

        repo_required_hashes = mf.manifest_required_hashes
        if repo_required_hashes is None:
            repo_required_hashes = MANIFEST2_HASH_DEFAULTS
        mf = mf.load_manifest(mysettings["O"],
                              mysettings["DISTDIR"],
                              fetchlist_dict=fetchlist_dict)

        if not mf.allow_create:
            writemsg_stdout(
                _(">>> Skipping creating Manifest for %s; "
                  "repository is configured to not use them\n") %
                mysettings["O"])
            return 1

        # Don't require all hashes since that can trigger excessive
        # fetches when sufficient digests already exist.  To ease transition
        # while Manifest 1 is being removed, only require hashes that will
        # exist before and after the transition.
        required_hash_types = set()
        required_hash_types.add("size")
        required_hash_types.update(repo_required_hashes)
        dist_hashes = mf.fhashdict.get("DIST", {})

        # To avoid accidental regeneration of digests with the incorrect
        # files (such as partially downloaded files), trigger the fetch
        # code if the file exists and its size doesn't match the current
        # manifest entry. If there really is a legitimate reason for the
        # digest to change, `ebuild --force digest` can be used to avoid
        # triggering this code (or else the old digests can be manually
        # removed from the Manifest).
        missing_files = []
        for myfile in distfiles_map:
            myhashes = dist_hashes.get(myfile)
            if not myhashes:
                try:
                    st = os.stat(os.path.join(mysettings["DISTDIR"], myfile))
                except OSError:
                    st = None
                if st is None or st.st_size == 0:
                    missing_files.append(myfile)
                continue
            size = myhashes.get("size")

            try:
                st = os.stat(os.path.join(mysettings["DISTDIR"], myfile))
            except OSError as e:
                if e.errno != errno.ENOENT:
                    raise
                del e
                if size == 0:
                    missing_files.append(myfile)
                    continue
                if required_hash_types.difference(myhashes):
                    missing_files.append(myfile)
                    continue
            else:
                if st.st_size == 0 or size is not None and size != st.st_size:
                    missing_files.append(myfile)
                    continue

        for myfile in missing_files:
            uris = set()
            all_restrict = set()
            for cpv in distfiles_map[myfile]:
                uris.update(myportdb.getFetchMap(cpv, mytree=mytree)[myfile])
                restrict = myportdb.aux_get(cpv, ['RESTRICT'],
                                            mytree=mytree)[0]
                # Here we ignore conditional parts of RESTRICT since
                # they don't apply unconditionally. Assume such
                # conditionals only apply on the client side where
                # digestgen() does not need to be called.
                all_restrict.update(
                    use_reduce(restrict, flat=True, matchnone=True))

                # fetch() uses CATEGORY and PF to display a message
                # when fetch restriction is triggered.
                cat, pf = catsplit(cpv)
                mysettings["CATEGORY"] = cat
                mysettings["PF"] = pf

            # fetch() uses PORTAGE_RESTRICT to control fetch
            # restriction, which is only applied to files that
            # are not fetchable via a mirror:// URI.
            mysettings["PORTAGE_RESTRICT"] = " ".join(all_restrict)

            try:
                st = os.stat(os.path.join(mysettings["DISTDIR"], myfile))
            except OSError:
                st = None

            if not fetch({myfile: uris}, mysettings):
                myebuild = os.path.join(mysettings["O"],
                                        catsplit(cpv)[1] + ".ebuild")
                spawn_nofetch(myportdb, myebuild)
                writemsg(
                    _("!!! Fetch failed for %s, can't update Manifest\n") %
                    myfile,
                    noiselevel=-1)
                if myfile in dist_hashes and \
                 st is not None and st.st_size > 0:
                    # stat result is obtained before calling fetch(),
                    # since fetch may rename the existing file if the
                    # digest does not match.
                    cmd = colorize(
                        "INFORM", "ebuild --force %s manifest" %
                        os.path.basename(myebuild))
                    writemsg((_(
                        "!!! If you would like to forcefully replace the existing Manifest entry\n"
                        "!!! for %s, use the following command:\n") % myfile) +
                             "!!!    %s\n" % cmd,
                             noiselevel=-1)
                return 0

        writemsg_stdout(_(">>> Creating Manifest for %s\n") % mysettings["O"])
        try:
            mf.create(assumeDistHashesSometimes=True,
                      assumeDistHashesAlways=("assume-digests"
                                              in mysettings.features))
        except FileNotFound as e:
            writemsg(_("!!! File %s doesn't exist, can't update Manifest\n") %
                     e,
                     noiselevel=-1)
            return 0
        except PortagePackageException as e:
            writemsg(("!!! %s\n") % (e, ), noiselevel=-1)
            return 0
        try:
            mf.write(sign=False)
        except PermissionDenied as e:
            writemsg(_("!!! Permission Denied: %s\n") % (e, ), noiselevel=-1)
            return 0
        if "assume-digests" not in mysettings.features:
            distlist = list(mf.fhashdict.get("DIST", {}))
            distlist.sort()
            auto_assumed = []
            for filename in distlist:
                if not os.path.exists(
                        os.path.join(mysettings["DISTDIR"], filename)):
                    auto_assumed.append(filename)
            if auto_assumed:
                cp = os.path.sep.join(mysettings["O"].split(os.path.sep)[-2:])
                pkgs = myportdb.cp_list(cp, mytree=mytree)
                pkgs.sort()
                writemsg_stdout("  digest.assumed" +
                                colorize("WARN",
                                         str(len(auto_assumed)).rjust(18)) +
                                "\n")
                for pkg_key in pkgs:
                    fetchlist = myportdb.getFetchMap(pkg_key, mytree=mytree)
                    pv = pkg_key.split("/")[1]
                    for filename in auto_assumed:
                        if filename in fetchlist:
                            writemsg_stdout("   %s::%s\n" % (pv, filename))
        return 1
    finally:
        portage._doebuild_manifest_exempt_depend -= 1
Example #14
0
def digestgen(myarchives=None,
              mysettings=None,
              overwrite=None,
              manifestonly=None,
              myportdb=None):
    """
    Generates a digest file if missing. Fetches files if necessary.
    NOTE: myarchives and mysettings used to be positional arguments,
        so their order must be preserved for backward compatibility.
    @param mysettings: the ebuild config (mysettings["O"] must correspond
        to the ebuild's parent directory)
    @type mysettings: config
    @param myportdb: a portdbapi instance
    @type myportdb: portdbapi
    @rtype: int
    @return: 1 on success and 0 on failure
    """
    if mysettings is None:
        raise TypeError("portage.digestgen(): missing" + \
         " required 'mysettings' parameter")
    if myportdb is None:
        warnings.warn(
            "portage.digestgen() called without 'myportdb' parameter",
            DeprecationWarning,
            stacklevel=2)
        myportdb = portage.portdb
    if overwrite is not None:
        warnings.warn("portage.digestgen() called with " + \
         "deprecated 'overwrite' parameter",
         DeprecationWarning, stacklevel=2)
    if manifestonly is not None:
        warnings.warn("portage.digestgen() called with " + \
         "deprecated 'manifestonly' parameter",
         DeprecationWarning, stacklevel=2)

    try:
        portage._doebuild_manifest_exempt_depend += 1
        distfiles_map = {}
        fetchlist_dict = FetchlistDict(mysettings["O"], mysettings, myportdb)
        for cpv in fetchlist_dict:
            try:
                for myfile in fetchlist_dict[cpv]:
                    distfiles_map.setdefault(myfile, []).append(cpv)
            except InvalidDependString as e:
                writemsg("!!! %s\n" % str(e), noiselevel=-1)
                del e
                return 0
        mytree = os.path.dirname(os.path.dirname(mysettings["O"]))
        manifest1_compat = False
        mf = Manifest(mysettings["O"],
                      mysettings["DISTDIR"],
                      fetchlist_dict=fetchlist_dict,
                      manifest1_compat=manifest1_compat)
        # Don't require all hashes since that can trigger excessive
        # fetches when sufficient digests already exist.  To ease transition
        # while Manifest 1 is being removed, only require hashes that will
        # exist before and after the transition.
        required_hash_types = set()
        required_hash_types.add("size")
        required_hash_types.add(MANIFEST2_REQUIRED_HASH)
        dist_hashes = mf.fhashdict.get("DIST", {})

        # To avoid accidental regeneration of digests with the incorrect
        # files (such as partially downloaded files), trigger the fetch
        # code if the file exists and its size doesn't match the current
        # manifest entry. If there really is a legitimate reason for the
        # digest to change, `ebuild --force digest` can be used to avoid
        # triggering this code (or else the old digests can be manually
        # removed from the Manifest).
        missing_files = []
        for myfile in distfiles_map:
            myhashes = dist_hashes.get(myfile)
            if not myhashes:
                try:
                    st = os.stat(os.path.join(mysettings["DISTDIR"], myfile))
                except OSError:
                    st = None
                if st is None or st.st_size == 0:
                    missing_files.append(myfile)
                continue
            size = myhashes.get("size")

            try:
                st = os.stat(os.path.join(mysettings["DISTDIR"], myfile))
            except OSError as e:
                if e.errno != errno.ENOENT:
                    raise
                del e
                if size == 0:
                    missing_files.append(myfile)
                    continue
                if required_hash_types.difference(myhashes):
                    missing_files.append(myfile)
                    continue
            else:
                if st.st_size == 0 or size is not None and size != st.st_size:
                    missing_files.append(myfile)
                    continue

        if missing_files:
            mytree = os.path.realpath(
                os.path.dirname(os.path.dirname(mysettings["O"])))
            fetch_settings = config(clone=mysettings)
            debug = mysettings.get("PORTAGE_DEBUG") == "1"
            for myfile in missing_files:
                uris = set()
                for cpv in distfiles_map[myfile]:
                    myebuild = os.path.join(mysettings["O"],
                                            catsplit(cpv)[1] + ".ebuild")
                    # for RESTRICT=fetch, mirror, etc...
                    doebuild_environment(myebuild, "fetch", mysettings["ROOT"],
                                         fetch_settings, debug, 1, myportdb)
                    uris.update(
                        myportdb.getFetchMap(cpv, mytree=mytree)[myfile])

                fetch_settings["A"] = myfile  # for use by pkg_nofetch()

                try:
                    st = os.stat(os.path.join(mysettings["DISTDIR"], myfile))
                except OSError:
                    st = None

                if not fetch({myfile: uris}, fetch_settings):
                    writemsg(_("!!! Fetch failed for %s, can't update "
                               "Manifest\n") % myfile,
                             noiselevel=-1)
                    if myfile in dist_hashes and \
                     st is not None and st.st_size > 0:
                        # stat result is obtained before calling fetch(),
                        # since fetch may rename the existing file if the
                        # digest does not match.
                        writemsg(_("!!! If you would like to "
                         "forcefully replace the existing "
                         "Manifest entry\n!!! for %s, use "
                         "the following command:\n") % myfile + \
                         "!!!    " + colorize("INFORM",
                         "ebuild --force %s manifest" % \
                         os.path.basename(myebuild)) + "\n",
                         noiselevel=-1)
                    return 0
        writemsg_stdout(_(">>> Creating Manifest for %s\n") % mysettings["O"])
        try:
            mf.create(assumeDistHashesSometimes=True,
                      assumeDistHashesAlways=("assume-digests"
                                              in mysettings.features))
        except FileNotFound as e:
            writemsg(_("!!! File %s doesn't exist, can't update "
                       "Manifest\n") % e,
                     noiselevel=-1)
            return 0
        except PortagePackageException as e:
            writemsg(("!!! %s\n") % (e, ), noiselevel=-1)
            return 0
        try:
            mf.write(sign=False)
        except PermissionDenied as e:
            writemsg(_("!!! Permission Denied: %s\n") % (e, ), noiselevel=-1)
            return 0
        if "assume-digests" not in mysettings.features:
            distlist = list(mf.fhashdict.get("DIST", {}))
            distlist.sort()
            auto_assumed = []
            for filename in distlist:
                if not os.path.exists(
                        os.path.join(mysettings["DISTDIR"], filename)):
                    auto_assumed.append(filename)
            if auto_assumed:
                mytree = os.path.realpath(
                    os.path.dirname(os.path.dirname(mysettings["O"])))
                cp = os.path.sep.join(mysettings["O"].split(os.path.sep)[-2:])
                pkgs = myportdb.cp_list(cp, mytree=mytree)
                pkgs.sort()
                writemsg_stdout("  digest.assumed" +
                                colorize("WARN",
                                         str(len(auto_assumed)).rjust(18)) +
                                "\n")
                for pkg_key in pkgs:
                    fetchlist = myportdb.getFetchMap(pkg_key, mytree=mytree)
                    pv = pkg_key.split("/")[1]
                    for filename in auto_assumed:
                        if filename in fetchlist:
                            writemsg_stdout("   %s::%s\n" % (pv, filename))
        return 1
    finally:
        portage._doebuild_manifest_exempt_depend -= 1
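Note: per the docstrings above, the current digestgen() only requires mysettings (with mysettings["O"] pointing at the ebuild's directory) and a portdbapi instance. A hedged usage sketch follows; the repository path is a placeholder, and top-level access via portage.digestgen/portage.portdb is assumed.

import portage

settings = portage.config(clone=portage.settings)
settings["O"] = "/var/db/repos/gentoo/app-misc/hello"  # placeholder ebuild dir
if not portage.digestgen(mysettings=settings, myportdb=portage.portdb):
    raise SystemExit("Manifest/digest generation failed")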