Code Example #1
File: output.py Project: antarus12345/portage
    def print_blockers(self):
        """Performs the actual output printing of the pre-formatted
        blocker messages
        """
        for pkg in self.blockers:
            writemsg_stdout("%s\n" % (pkg, ), noiselevel=-1)
        return
Code Example #2
    def unlock(self):
        """
        Set GPG_TTY and run GPG unlock command.
        If gpg-keepalive is set, start keepalive thread.
        """
        if self.GPG_unlock_command and (
            self.settings.get("BINPKG_FORMAT", SUPPORTED_GENTOO_BINPKG_FORMATS[0])
            == "gpkg"
        ):
            try:
                os.environ["GPG_TTY"] = os.ttyname(sys.stdout.fileno())
            except OSError as e:
                # When run with no input/output tty, this will fail.
                # However, if the password is given by command,
                # GPG does not need to ask password, so can be ignored.
                writemsg(colorize("WARN", str(e)) + "\n")

            cmd = shlex_split(varexpand(self.GPG_unlock_command, mydict=self.settings))
            return_code = subprocess.Popen(cmd).wait()

            if return_code == os.EX_OK:
                writemsg_stdout(colorize("GOOD", "unlocked") + "\n")
                sys.stdout.flush()
            else:
                raise GPGException("GPG unlock failed")

            if self.keepalive:
                self.GPG_unlock_command = shlex_split(
                    varexpand(self.GPG_unlock_command, mydict=self.settings)
                )
                self.thread = threading.Thread(target=self.gpg_keepalive, daemon=True)
                self.thread.start()
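A note on the keepalive step above: it simply starts a daemon thread so the interpreter can exit without waiting for it. A minimal, generic sketch of that pattern follows; the command and interval are illustrative placeholders, not portage's actual keepalive logic.

# Generic daemon-keepalive sketch (not portage code); cmd and interval are placeholders.
import subprocess
import threading
import time

def keepalive_loop(cmd, interval=60):
    # Re-run the command forever; failures are ignored for brevity.
    while True:
        subprocess.run(cmd, check=False)
        time.sleep(interval)

thread = threading.Thread(target=keepalive_loop, args=(["true"], 60), daemon=True)
thread.start()  # daemon=True: the program may exit without joining this thread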
Code Example #3
File: output.py Project: pombredanne/portage-3
	def print_blockers(self):
		"""Performs the actual output printing of the pre-formatted
		blocker messages
		"""
		for pkg in self.blockers:
			writemsg_stdout("%s\n" % (pkg,), noiselevel=-1)
		return
Code Example #4
    def print_verbose(self, show_repos):
        """Prints the verbose output to std_out

        @param show_repos: bool.
        """
        writemsg_stdout("\n%s\n" % (self.counters, ), noiselevel=-1)
        if show_repos:
            writemsg_stdout("%s" % (self.conf.repo_display, ), noiselevel=-1)
Code Example #5
File: output.py Project: zy-sunshine/easymgc
    def print_changelog(self):
        """Prints the changelog text to std_out
		"""
        if not self.changelogs:
            return
        writemsg_stdout('\n', noiselevel=-1)
        for revision, text in self.changelogs:
            writemsg_stdout(bold('*' + revision) + '\n' + text, noiselevel=-1)
        return
Code Example #6
File: output.py Project: BoleynSu/PR-portage
    def print_verbose(self, show_repos):
        """Prints the verbose output to std_out

        @param show_repos: bool.
        """
        writemsg_stdout('\n%s\n' % (self.counters, ), noiselevel=-1)
        if show_repos:
            # Use unicode_literals to force unicode format string so
            # that RepoDisplay.__unicode__() is called in python2.
            writemsg_stdout("%s" % (self.conf.repo_display, ), noiselevel=-1)
Code Example #7
File: output.py Project: zy-sunshine/easymgc
	def print_changelog(self):
		"""Prints the changelog text to std_out
		"""
		if not self.changelogs:
			return
		writemsg_stdout('\n', noiselevel=-1)
		for revision, text in self.changelogs:
			writemsg_stdout(bold('*'+revision) + '\n' + text,
				noiselevel=-1)
		return
Code Example #8
	def _ebuild_exit(self, ebuild_process):

		if self.phase == "install":
			out = portage.StringIO()
			log_path = self.settings.get("PORTAGE_LOG_FILE")
			log_file = None
			if log_path is not None:
				log_file = codecs.open(_unicode_encode(log_path,
					encoding=_encodings['fs'], errors='strict'),
					mode='a', encoding=_encodings['content'], errors='replace')
			try:
				_check_build_log(self.settings, out=out)
				msg = _unicode_decode(out.getvalue(),
					encoding=_encodings['content'], errors='replace')
				if msg:
					if not self.background:
						writemsg_stdout(msg, noiselevel=-1)
					if log_file is not None:
						log_file.write(msg)
			finally:
				if log_file is not None:
					log_file.close()

		if self._default_exit(ebuild_process) != os.EX_OK:
			self._die_hooks()
			return

		settings = self.settings

		if self.phase == "install":
			out = None
			log_path = self.settings.get("PORTAGE_LOG_FILE")
			log_file = None
			if self.background and log_path is not None:
				log_file = codecs.open(_unicode_encode(log_path,
					encoding=_encodings['fs'], errors='strict'),
					mode='a', encoding=_encodings['content'], errors='replace')
				out = log_file
			_post_src_install_chost_fix(settings)
			_post_src_install_uid_fix(settings, out=out)
			if log_file is not None:
				log_file.close()

		post_phase_cmds = _post_phase_cmds.get(self.phase)
		if post_phase_cmds is not None:
			post_phase = MiscFunctionsProcess(background=self.background,
				commands=post_phase_cmds, phase=self.phase, pkg=self.pkg,
				scheduler=self.scheduler, settings=settings)
			self._start_task(post_phase, self._post_phase_exit)
			return

		self.returncode = ebuild_process.returncode
		self._current_task = None
		self.wait()
Code Example #9
File: EbuildPhase.py Project: wosigh/widk_portage
	def _ebuild_exit(self, ebuild_process):

		if self.phase == "install":
			out = portage.StringIO()
			log_path = self.settings.get("PORTAGE_LOG_FILE")
			log_file = None
			if log_path is not None:
				log_file = codecs.open(_unicode_encode(log_path,
					encoding=_encodings['fs'], errors='strict'),
					mode='a', encoding=_encodings['content'], errors='replace')
			try:
				_check_build_log(self.settings, out=out)
				msg = _unicode_decode(out.getvalue(),
					encoding=_encodings['content'], errors='replace')
				if msg:
					if not self.background:
						writemsg_stdout(msg, noiselevel=-1)
					if log_file is not None:
						log_file.write(msg)
			finally:
				if log_file is not None:
					log_file.close()

		if self._default_exit(ebuild_process) != os.EX_OK:
			self._die_hooks()
			return

		settings = self.settings

		if self.phase == "install":
			out = None
			log_path = self.settings.get("PORTAGE_LOG_FILE")
			log_file = None
			if self.background and log_path is not None:
				log_file = codecs.open(_unicode_encode(log_path,
					encoding=_encodings['fs'], errors='strict'),
					mode='a', encoding=_encodings['content'], errors='replace')
				out = log_file
			_post_src_install_chost_fix(settings)
			_post_src_install_uid_fix(settings, out=out)
			if log_file is not None:
				log_file.close()

		post_phase_cmds = _post_phase_cmds.get(self.phase)
		if post_phase_cmds is not None:
			post_phase = MiscFunctionsProcess(background=self.background,
				commands=post_phase_cmds, phase=self.phase, pkg=self.pkg,
				scheduler=self.scheduler, settings=settings)
			self._start_task(post_phase, self._post_phase_exit)
			return

		self.returncode = ebuild_process.returncode
		self._current_task = None
		self.wait()
Code Example #10
File: output.py Project: pombredanne/portage-3
	def print_verbose(self, show_repos):
		"""Prints the verbose output to std_out

		@param show_repos: bool.
		"""
		writemsg_stdout('\n%s\n' % (self.counters,), noiselevel=-1)
		if show_repos:
			# Use unicode_literals to force unicode format string so
			# that RepoDisplay.__unicode__() is called in python2.
			writemsg_stdout("%s" % (self.conf.repo_display,),
				noiselevel=-1)
		return
Code Example #11
    def print_messages(self, show_repos):
        """Performs the actual output printing of the pre-formatted
        messages

        @param show_repos: bool.
        """
        for msg in self.print_msg:
            if isinstance(msg, str):
                writemsg_stdout("%s\n" % (msg, ), noiselevel=-1)
                continue
            myprint, self.verboseadd, repoadd = msg
            if self.verboseadd:
                myprint += " " + self.verboseadd
            if show_repos and repoadd:
                myprint += " " + teal("[%s]" % repoadd)
            writemsg_stdout("%s\n" % (myprint, ), noiselevel=-1)
Code Example #12
File: output.py Project: pombredanne/portage-3
	def print_messages(self, show_repos):
		"""Performs the actual output printing of the pre-formatted
		messages

		@param show_repos: bool.
		"""
		for msg in self.print_msg:
			if isinstance(msg, basestring):
				writemsg_stdout("%s\n" % (msg,), noiselevel=-1)
				continue
			myprint, self.verboseadd, repoadd = msg
			if self.verboseadd:
				myprint += " " + self.verboseadd
			if show_repos and repoadd:
				myprint += " " + teal("[%s]" % repoadd)
			writemsg_stdout("%s\n" % (myprint,), noiselevel=-1)
		return
Code Example #13
def old_tree_timestamp_warn(portdir, settings):
	unixtime = time.time()
	default_warnsync = 30

	timestamp_file = os.path.join(portdir, "metadata/timestamp.x")
	try:
		lastsync = grabfile(timestamp_file)
	except PortageException:
		return False

	if not lastsync:
		return False

	lastsync = lastsync[0].split()
	if not lastsync:
		return False

	try:
		lastsync = int(lastsync[0])
	except ValueError:
		return False

	var_name = 'PORTAGE_SYNC_STALE'
	try:
		warnsync = float(settings.get(var_name, default_warnsync))
	except ValueError:
		writemsg_level("!!! %s contains non-numeric value: %s\n" % \
			(var_name, settings[var_name]),
			level=logging.ERROR, noiselevel=-1)
		return False

	if warnsync <= 0:
		return False

	if (unixtime - 86400 * warnsync) > lastsync:
		if have_english_locale():
			writemsg_stdout(">>> Last emerge --sync was %s ago\n" % \
				whenago(unixtime - lastsync), noiselevel=-1)
		else:
			writemsg_stdout(">>> %s\n" % \
				_("Last emerge --sync was %s") % \
				time.strftime('%c', time.localtime(lastsync)),
				noiselevel=-1)
		return True
	return False
Code Example #14
File: utilities.py Project: pombredanne/portage-3
def UpdateChangeLog(pkgdir, user, msg, skel_path, category, package,
	new=(), removed=(), changed=(), pretend=False, quiet=False):
	"""
	Write an entry to an existing ChangeLog, or create a new one.
	Updates copyright year on changed files, and updates the header of
	ChangeLog with the contents of skel.ChangeLog.
	"""

	if '<root@' in user:
		if not quiet:
			logging.critical('Please set ECHANGELOG_USER or run as non-root')
		return None

	# ChangeLog times are in UTC
	gmtime = time.gmtime()
	year = time.strftime('%Y', gmtime)
	date = time.strftime('%d %b %Y', gmtime)

	# check modified files and the ChangeLog for copyright updates
	# patches and diffs (identified by .patch and .diff) are excluded
	for fn in chain(new, changed):
		if fn.endswith('.diff') or fn.endswith('.patch'):
			continue
		update_copyright(os.path.join(pkgdir, fn), year, pretend=pretend)

	cl_path = os.path.join(pkgdir, 'ChangeLog')
	clold_lines = []
	clnew_lines = []
	old_header_lines = []
	header_lines = []

	clold_file = None
	try:
		clold_file = io.open(_unicode_encode(cl_path,
			encoding=_encodings['fs'], errors='strict'),
			mode='r', encoding=_encodings['repo.content'], errors='replace')
	except EnvironmentError:
		pass

	f, clnew_path = mkstemp()

	# construct correct header first
	try:
		if clold_file is not None:
			# retain header from old ChangeLog
			first_line = True
			for line in clold_file:
				line_strip = line.strip()
				if line_strip and line[:1] != "#":
					clold_lines.append(line)
					break
				# always make sure cat/pkg is up-to-date in case we are
				# moving packages around, or copied from another pkg, or ...
				if first_line:
					if line.startswith('# ChangeLog for'):
						line = '# ChangeLog for %s/%s\n' % (category, package)
					first_line = False
				old_header_lines.append(line)
				header_lines.append(_update_copyright_year(year, line))
				if not line_strip:
					break

		clskel_file = None
		if not header_lines:
			# delay opening this until we find we need a header
			try:
				clskel_file = io.open(_unicode_encode(skel_path,
					encoding=_encodings['fs'], errors='strict'),
					mode='r', encoding=_encodings['repo.content'],
					errors='replace')
			except EnvironmentError:
				pass

		if clskel_file is not None:
			# read skel.ChangeLog up to first empty line
			for line in clskel_file:
				line_strip = line.strip()
				if not line_strip:
					break
				line = line.replace('<CATEGORY>', category)
				line = line.replace('<PACKAGE_NAME>', package)
				line = _update_copyright_year(year, line)
				header_lines.append(line)
			header_lines.append('\n')
			clskel_file.close()

		# write new ChangeLog entry
		clnew_lines.extend(header_lines)
		newebuild = False
		for fn in new:
			if not fn.endswith('.ebuild'):
				continue
			ebuild = fn.split(os.sep)[-1][0:-7] 
			clnew_lines.append('*%s (%s)\n' % (ebuild, date))
			newebuild = True
		if newebuild:
			clnew_lines.append('\n')
		trivial_files = ('ChangeLog', 'Manifest')
		display_new = ['+' + elem for elem in new
			if elem not in trivial_files]
		display_removed = ['-' + elem for elem in removed]
		display_changed = [elem for elem in changed
			if elem not in trivial_files]
		if not (display_new or display_removed or display_changed):
			# If there's nothing else to display, show one of the
			# trivial files.
			for fn in trivial_files:
				if fn in new:
					display_new = ['+' + fn]
					break
				elif fn in changed:
					display_changed = [fn]
					break

		display_new.sort()
		display_removed.sort()
		display_changed.sort()

		mesg = '%s; %s %s:' % (date, user, ', '.join(chain(
			display_new, display_removed, display_changed)))
		for line in textwrap.wrap(mesg, 80, \
				initial_indent='  ', subsequent_indent='  ', \
				break_on_hyphens=False):
			clnew_lines.append('%s\n' % line)
		for line in textwrap.wrap(msg, 80, \
				initial_indent='  ', subsequent_indent='  '):
			clnew_lines.append('%s\n' % line)
		# Don't append a trailing newline if the file is new.
		if clold_file is not None:
			clnew_lines.append('\n')

		f = io.open(f, mode='w', encoding=_encodings['repo.content'],
			errors='backslashreplace')

		for line in clnew_lines:
			f.write(line)

		# append stuff from old ChangeLog
		if clold_file is not None:

			if clold_lines:
				# clold_lines may contain a saved non-header line
				# that we want to write first.
				# Also, append this line to clnew_lines so that the
				# unified_diff call doesn't show it as removed.
				for line in clold_lines:
					f.write(line)
					clnew_lines.append(line)

			else:
				# ensure that there is no more than one blank
				# line after our new entry
				for line in clold_file:
					if line.strip():
						f.write(line)
						break

			# Now prepend old_header_lines to clold_lines, for use
			# in the unified_diff call below.
			clold_lines = old_header_lines + clold_lines

			# Trim any trailing newlines.
			lines = clold_file.readlines()
			clold_file.close()
			while lines and lines[-1] == '\n':
				del lines[-1]
			f.writelines(lines)
		f.close()

		# show diff
		if not quiet:
			for line in difflib.unified_diff(clold_lines, clnew_lines,
					fromfile=cl_path, tofile=cl_path, n=0):
				util.writemsg_stdout(line, noiselevel=-1)
			util.writemsg_stdout("\n", noiselevel=-1)

		if pretend:
			# remove what we've done
			os.remove(clnew_path)
		else:
			# rename to ChangeLog, and set permissions
			try:
				clold_stat = os.stat(cl_path)
			except OSError:
				clold_stat = None

			shutil.move(clnew_path, cl_path)

			if clold_stat is None:
				util.apply_permissions(cl_path, mode=0o644)
			else:
				util.apply_stat_permissions(cl_path, clold_stat)

		if clold_file is None:
			return True
		else:
			return False
	except IOError as e:
		err = 'Repoman is unable to create/write to Changelog.new file: %s' % (e,)
		logging.critical(err)
		# try to remove if possible
		try:
			os.remove(clnew_path)
		except OSError:
			pass
		return None
Code Example #15
File: rsync.py Project: Zlogene/portage-1
    def update(self):
        '''Internal update function which performs the transfer'''
        opts = self.options.get('emerge_config').opts
        self.usersync_uid = self.options.get('usersync_uid', None)
        enter_invalid = '--ask-enter-invalid' in opts
        quiet = '--quiet' in opts
        out = portage.output.EOutput(quiet=quiet)
        syncuri = self.repo.sync_uri
        if self.repo.module_specific_options.get('sync-rsync-vcs-ignore',
                                                 'false').lower() == 'true':
            vcs_dirs = ()
        else:
            vcs_dirs = frozenset(VCS_DIRS)
            vcs_dirs = vcs_dirs.intersection(os.listdir(self.repo.location))

        for vcs_dir in vcs_dirs:
            writemsg_level(("!!! %s appears to be under revision " + \
             "control (contains %s).\n!!! Aborting rsync sync "
             "(override with \"sync-rsync-vcs-ignore = true\" in repos.conf).\n") % \
             (self.repo.location, vcs_dir), level=logging.ERROR, noiselevel=-1)
            return (1, False)
        self.timeout = 180

        rsync_opts = []
        if self.settings["PORTAGE_RSYNC_OPTS"] == "":
            rsync_opts = self._set_rsync_defaults()
        else:
            rsync_opts = self._validate_rsync_opts(rsync_opts, syncuri)
        self.rsync_opts = self._rsync_opts_extend(opts, rsync_opts)

        self.extra_rsync_opts = list()
        if self.repo.module_specific_options.get('sync-rsync-extra-opts'):
            self.extra_rsync_opts.extend(
                portage.util.shlex_split(
                    self.repo.module_specific_options['sync-rsync-extra-opts'])
            )

        exitcode = 0
        verify_failure = False

        # Process GLEP74 verification options.
        # Default verification to 'no'; it's enabled for ::gentoo
        # via default repos.conf though.
        self.verify_metamanifest = (self.repo.module_specific_options.get(
            'sync-rsync-verify-metamanifest', 'no') in ('yes', 'true'))
        # Support overriding job count.
        self.verify_jobs = self.repo.module_specific_options.get(
            'sync-rsync-verify-jobs', None)
        if self.verify_jobs is not None:
            try:
                self.verify_jobs = int(self.verify_jobs)
                if self.verify_jobs < 0:
                    raise ValueError(self.verify_jobs)
            except ValueError:
                writemsg_level(
                    "!!! sync-rsync-verify-jobs not a positive integer: %s\n" %
                    (self.verify_jobs, ),
                    level=logging.WARNING,
                    noiselevel=-1)
                self.verify_jobs = None
            else:
                if self.verify_jobs == 0:
                    # Use the apparent number of processors if gemato
                    # supports it.
                    self.verify_jobs = None
        # Support overriding max age.
        self.max_age = self.repo.module_specific_options.get(
            'sync-rsync-verify-max-age', '')
        if self.max_age:
            try:
                self.max_age = int(self.max_age)
                if self.max_age < 0:
                    raise ValueError(self.max_age)
            except ValueError:
                writemsg_level(
                    "!!! sync-rsync-max-age must be a non-negative integer: %s\n"
                    % (self.max_age, ),
                    level=logging.WARNING,
                    noiselevel=-1)
                self.max_age = 0
        else:
            self.max_age = 0

        openpgp_env = None
        if self.verify_metamanifest and gemato is not None:
            # Use isolated environment if key is specified,
            # system environment otherwise
            if self.repo.sync_openpgp_key_path is not None:
                openpgp_env = gemato.openpgp.OpenPGPEnvironment()
            else:
                openpgp_env = gemato.openpgp.OpenPGPSystemEnvironment()

        try:
            # Load and update the keyring early. If it fails, then verification
            # will not be performed and the user will have to fix it and try again,
            # so we may as well bail out before actual rsync happens.
            if openpgp_env is not None and self.repo.sync_openpgp_key_path is not None:
                try:
                    out.einfo('Using keys from %s' %
                              (self.repo.sync_openpgp_key_path, ))
                    with io.open(self.repo.sync_openpgp_key_path, 'rb') as f:
                        openpgp_env.import_key(f)
                    self._refresh_keys(openpgp_env)
                except (GematoException, asyncio.TimeoutError) as e:
                    writemsg_level(
                        "!!! Manifest verification impossible due to keyring problem:\n%s\n"
                        % (e, ),
                        level=logging.ERROR,
                        noiselevel=-1)
                    return (1, False)

            # Real local timestamp file.
            self.servertimestampfile = os.path.join(self.repo.location,
                                                    "metadata",
                                                    "timestamp.chk")

            content = portage.util.grabfile(self.servertimestampfile)
            timestamp = 0
            if content:
                try:
                    timestamp = time.mktime(
                        time.strptime(content[0], TIMESTAMP_FORMAT))
                except (OverflowError, ValueError):
                    pass
            del content

            try:
                self.rsync_initial_timeout = \
                 int(self.settings.get("PORTAGE_RSYNC_INITIAL_TIMEOUT", "15"))
            except ValueError:
                self.rsync_initial_timeout = 15

            try:
                maxretries = int(self.settings["PORTAGE_RSYNC_RETRIES"])
            except SystemExit as e:
                raise  # Needed else can't exit
            except:
                maxretries = -1  #default number of retries

            if syncuri.startswith("file://"):
                self.proto = "file"
                dosyncuri = syncuri[7:]
                unchanged, is_synced, exitcode, updatecache_flg = self._do_rsync(
                    dosyncuri, timestamp, opts)
                self._process_exitcode(exitcode, dosyncuri, out, 1)
                if exitcode == 0:
                    if unchanged:
                        self.repo_storage.abort_update()
                    else:
                        self.repo_storage.commit_update()
                        self.repo_storage.garbage_collection()
                return (exitcode, updatecache_flg)

            retries = 0
            try:
                self.proto, user_name, hostname, port = re.split(
                    r"(rsync|ssh)://([^:/]+@)?(\[[:\da-fA-F]*\]|[^:/]*)(:[0-9]+)?",
                    syncuri,
                    maxsplit=4)[1:5]
            except ValueError:
                writemsg_level("!!! sync-uri is invalid: %s\n" % syncuri,
                               noiselevel=-1,
                               level=logging.ERROR)
                return (1, False)

            self.ssh_opts = self.settings.get("PORTAGE_SSH_OPTS")

            if port is None:
                port = ""
            if user_name is None:
                user_name = ""
            if re.match(r"^\[[:\da-fA-F]*\]$", hostname) is None:
                getaddrinfo_host = hostname
            else:
                # getaddrinfo needs the brackets stripped
                getaddrinfo_host = hostname[1:-1]
            updatecache_flg = False
            all_rsync_opts = set(self.rsync_opts)
            all_rsync_opts.update(self.extra_rsync_opts)

            family = socket.AF_UNSPEC
            if "-4" in all_rsync_opts or "--ipv4" in all_rsync_opts:
                family = socket.AF_INET
            elif socket.has_ipv6 and \
             ("-6" in all_rsync_opts or "--ipv6" in all_rsync_opts):
                family = socket.AF_INET6

            addrinfos = None
            uris = []

            try:
                addrinfos = getaddrinfo_validate(
                    socket.getaddrinfo(getaddrinfo_host, None, family,
                                       socket.SOCK_STREAM))
            except socket.error as e:
                writemsg_level("!!! getaddrinfo failed for '%s': %s\n" %
                               (_unicode_decode(hostname), _unicode(e)),
                               noiselevel=-1,
                               level=logging.ERROR)

            if addrinfos:

                AF_INET = socket.AF_INET
                AF_INET6 = None
                if socket.has_ipv6:
                    AF_INET6 = socket.AF_INET6

                ips_v4 = []
                ips_v6 = []

                for addrinfo in addrinfos:
                    if addrinfo[0] == AF_INET:
                        ips_v4.append("%s" % addrinfo[4][0])
                    elif AF_INET6 is not None and addrinfo[0] == AF_INET6:
                        # IPv6 addresses need to be enclosed in square brackets
                        ips_v6.append("[%s]" % addrinfo[4][0])

                random.shuffle(ips_v4)
                random.shuffle(ips_v6)

                # Give priority to the address family that
                # getaddrinfo() returned first.
                if AF_INET6 is not None and addrinfos and \
                 addrinfos[0][0] == AF_INET6:
                    ips = ips_v6 + ips_v4
                else:
                    ips = ips_v4 + ips_v6

                for ip in ips:
                    uris.append(
                        syncuri.replace(
                            "//" + user_name + hostname + port + "/",
                            "//" + user_name + ip + port + "/", 1))

            if not uris:
                # With some configurations we need to use the plain hostname
                # rather than try to resolve the ip addresses (bug #340817).
                uris.append(syncuri)

            # reverse, for use with pop()
            uris.reverse()
            uris_orig = uris[:]

            effective_maxretries = maxretries
            if effective_maxretries < 0:
                effective_maxretries = len(uris) - 1

            local_state_unchanged = True
            while (1):
                if uris:
                    dosyncuri = uris.pop()
                elif maxretries < 0 or retries > maxretries:
                    writemsg("!!! Exhausted addresses for %s\n" %
                             _unicode_decode(hostname),
                             noiselevel=-1)
                    return (1, False)
                else:
                    uris.extend(uris_orig)
                    dosyncuri = uris.pop()

                if (retries == 0):
                    if "--ask" in opts:
                        uq = UserQuery(opts)
                        if uq.query("Do you want to sync your ebuild repository " + \
                         "with the mirror at\n" + blue(dosyncuri) + bold("?"),
                         enter_invalid) == "No":
                            print()
                            print("Quitting.")
                            print()
                            sys.exit(128 + signal.SIGINT)
                    self.logger(self.xterm_titles,
                                ">>> Starting rsync with " + dosyncuri)
                    if "--quiet" not in opts:
                        print(">>> Starting rsync with " + dosyncuri + "...")
                else:
                    self.logger(self.xterm_titles,
                     ">>> Starting retry %d of %d with %s" % \
                      (retries, effective_maxretries, dosyncuri))
                    writemsg_stdout(
                     "\n\n>>> Starting retry %d of %d with %s\n" % \
                     (retries, effective_maxretries, dosyncuri), noiselevel=-1)

                if dosyncuri.startswith('ssh://'):
                    dosyncuri = dosyncuri[6:].replace('/', ':/', 1)

                unchanged, is_synced, exitcode, updatecache_flg = self._do_rsync(
                    dosyncuri, timestamp, opts)
                if not unchanged:
                    local_state_unchanged = False
                if is_synced:
                    break

                retries = retries + 1

                if maxretries < 0 or retries <= maxretries:
                    print(">>> Retrying...")
                else:
                    # over retries
                    # exit loop
                    exitcode = EXCEEDED_MAX_RETRIES
                    break

            self._process_exitcode(exitcode, dosyncuri, out, maxretries)

            if local_state_unchanged:
                # The quarantine download_dir is not intended to exist
                # in this case, so refer gemato to the normal repository
                # location.
                download_dir = self.repo.location
            else:
                download_dir = self.download_dir

            # if synced successfully, verify now
            if exitcode == 0 and self.verify_metamanifest:
                if gemato is None:
                    writemsg_level(
                        "!!! Unable to verify: gemato-11.0+ is required\n",
                        level=logging.ERROR,
                        noiselevel=-1)
                    exitcode = 127
                else:
                    try:
                        # we always verify the Manifest signature, in case
                        # we had to deal with key revocation case
                        m = gemato.recursiveloader.ManifestRecursiveLoader(
                            os.path.join(download_dir, 'Manifest'),
                            verify_openpgp=True,
                            openpgp_env=openpgp_env,
                            max_jobs=self.verify_jobs)
                        if not m.openpgp_signed:
                            raise RuntimeError(
                                'OpenPGP signature not found on Manifest')

                        ts = m.find_timestamp()
                        if ts is None:
                            raise RuntimeError(
                                'Timestamp not found in Manifest')
                        if (self.max_age != 0
                                and (datetime.datetime.utcnow() - ts.ts).days >
                                self.max_age):
                            out.quiet = False
                            out.ewarn(
                                'Manifest is over %d days old, this is suspicious!'
                                % (self.max_age, ))
                            out.ewarn(
                                'You may want to try using another mirror and/or reporting this one:'
                            )
                            out.ewarn('  %s' % (dosyncuri, ))
                            out.ewarn('')
                            out.quiet = quiet

                        out.einfo('Manifest timestamp: %s UTC' % (ts.ts, ))
                        out.einfo('Valid OpenPGP signature found:')
                        out.einfo(
                            '- primary key: %s' %
                            (m.openpgp_signature.primary_key_fingerprint))
                        out.einfo('- subkey: %s' %
                                  (m.openpgp_signature.fingerprint))
                        out.einfo('- timestamp: %s UTC' %
                                  (m.openpgp_signature.timestamp))

                        # if nothing has changed, skip the actual Manifest
                        # verification
                        if not local_state_unchanged:
                            out.ebegin('Verifying %s' % (download_dir, ))
                            m.assert_directory_verifies()
                            out.eend(0)
                    except GematoException as e:
                        writemsg_level(
                            "!!! Manifest verification failed:\n%s\n" % (e, ),
                            level=logging.ERROR,
                            noiselevel=-1)
                        exitcode = 1
                        verify_failure = True

            if exitcode == 0 and not local_state_unchanged:
                self.repo_storage.commit_update()
                self.repo_storage.garbage_collection()

            return (exitcode, updatecache_flg)
        finally:
            # Don't delete the update if verification failed, in case
            # the cause needs to be investigated.
            if not verify_failure:
                self.repo_storage.abort_update()
            if openpgp_env is not None:
                openpgp_env.close()
Code Example #16
def append(msg):
    writemsg_stdout(msg, noiselevel=-1)
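For quick experimentation outside of emerge, a minimal standalone sketch that calls writemsg_stdout directly is shown below. It assumes the portage Python package is importable; the message text is purely illustrative.

# Standalone sketch: print through portage's stdout helper (assumes portage is installed).
from portage.util import writemsg_stdout

def report(lines):
    # noiselevel=-1 follows the convention used in the examples above,
    # keeping the message visible under the default noise limit.
    for line in lines:
        writemsg_stdout("%s\n" % (line,), noiselevel=-1)

report(["hello from writemsg_stdout"])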
Code Example #17
def digestgen(myarchives=None, mysettings=None, myportdb=None):
    """
    Generates a digest file if missing. Fetches files if necessary.
    NOTE: myarchives and mysettings used to be positional arguments,
        so their order must be preserved for backward compatibility.
    @param mysettings: the ebuild config (mysettings["O"] must correspond
        to the ebuild's parent directory)
    @type mysettings: config
    @param myportdb: a portdbapi instance
    @type myportdb: portdbapi
    @rtype: int
    @return: 1 on success and 0 on failure
    """
    if mysettings is None or myportdb is None:
        raise TypeError(
            "portage.digestgen(): 'mysettings' and 'myportdb' parameter are required."
        )

    try:
        portage._doebuild_manifest_exempt_depend += 1
        distfiles_map = {}
        fetchlist_dict = FetchlistDict(mysettings["O"], mysettings, myportdb)
        for cpv in fetchlist_dict:
            try:
                for myfile in fetchlist_dict[cpv]:
                    distfiles_map.setdefault(myfile, []).append(cpv)
            except InvalidDependString as e:
                writemsg("!!! %s\n" % str(e), noiselevel=-1)
                del e
                return 0
        mytree = os.path.dirname(os.path.dirname(mysettings["O"]))
        try:
            mf = mysettings.repositories.get_repo_for_location(mytree)
        except KeyError:
            # backward compatibility
            mytree = os.path.realpath(mytree)
            mf = mysettings.repositories.get_repo_for_location(mytree)

        repo_required_hashes = mf.manifest_required_hashes
        if repo_required_hashes is None:
            repo_required_hashes = MANIFEST2_HASH_DEFAULTS
        mf = mf.load_manifest(mysettings["O"],
                              mysettings["DISTDIR"],
                              fetchlist_dict=fetchlist_dict)

        if not mf.allow_create:
            writemsg_stdout(
                _(">>> Skipping creating Manifest for %s; "
                  "repository is configured to not use them\n") %
                mysettings["O"])
            return 1

        # Don't require all hashes since that can trigger excessive
        # fetches when sufficient digests already exist.  To ease transition
        # while Manifest 1 is being removed, only require hashes that will
        # exist before and after the transition.
        required_hash_types = set()
        required_hash_types.add("size")
        required_hash_types.update(repo_required_hashes)
        dist_hashes = mf.fhashdict.get("DIST", {})

        # To avoid accidental regeneration of digests with the incorrect
        # files (such as partially downloaded files), trigger the fetch
        # code if the file exists and its size doesn't match the current
        # manifest entry. If there really is a legitimate reason for the
        # digest to change, `ebuild --force digest` can be used to avoid
        # triggering this code (or else the old digests can be manually
        # removed from the Manifest).
        missing_files = []
        for myfile in distfiles_map:
            myhashes = dist_hashes.get(myfile)
            if not myhashes:
                try:
                    st = os.stat(os.path.join(mysettings["DISTDIR"], myfile))
                except OSError:
                    st = None
                if st is None or st.st_size == 0:
                    missing_files.append(myfile)
                continue
            size = myhashes.get("size")

            try:
                st = os.stat(os.path.join(mysettings["DISTDIR"], myfile))
            except OSError as e:
                if e.errno != errno.ENOENT:
                    raise
                del e
                if size == 0:
                    missing_files.append(myfile)
                    continue
                if required_hash_types.difference(myhashes):
                    missing_files.append(myfile)
                    continue
            else:
                if st.st_size == 0 or size is not None and size != st.st_size:
                    missing_files.append(myfile)
                    continue

        for myfile in missing_files:
            uris = set()
            all_restrict = set()
            for cpv in distfiles_map[myfile]:
                uris.update(myportdb.getFetchMap(cpv, mytree=mytree)[myfile])
                restrict = myportdb.aux_get(cpv, ['RESTRICT'],
                                            mytree=mytree)[0]
                # Here we ignore conditional parts of RESTRICT since
                # they don't apply unconditionally. Assume such
                # conditionals only apply on the client side where
                # digestgen() does not need to be called.
                all_restrict.update(
                    use_reduce(restrict, flat=True, matchnone=True))

                # fetch() uses CATEGORY and PF to display a message
                # when fetch restriction is triggered.
                cat, pf = catsplit(cpv)
                mysettings["CATEGORY"] = cat
                mysettings["PF"] = pf

            # fetch() uses PORTAGE_RESTRICT to control fetch
            # restriction, which is only applied to files that
            # are not fetchable via a mirror:// URI.
            mysettings["PORTAGE_RESTRICT"] = " ".join(all_restrict)

            try:
                st = os.stat(os.path.join(mysettings["DISTDIR"], myfile))
            except OSError:
                st = None

            if not fetch({myfile: uris}, mysettings):
                myebuild = os.path.join(mysettings["O"],
                                        catsplit(cpv)[1] + ".ebuild")
                spawn_nofetch(myportdb, myebuild)
                writemsg(
                    _("!!! Fetch failed for %s, can't update Manifest\n") %
                    myfile,
                    noiselevel=-1)
                if myfile in dist_hashes and \
                 st is not None and st.st_size > 0:
                    # stat result is obtained before calling fetch(),
                    # since fetch may rename the existing file if the
                    # digest does not match.
                    cmd = colorize(
                        "INFORM", "ebuild --force %s manifest" %
                        os.path.basename(myebuild))
                    writemsg((_(
                        "!!! If you would like to forcefully replace the existing Manifest entry\n"
                        "!!! for %s, use the following command:\n") % myfile) +
                             "!!!    %s\n" % cmd,
                             noiselevel=-1)
                return 0

        writemsg_stdout(_(">>> Creating Manifest for %s\n") % mysettings["O"])
        try:
            mf.create(assumeDistHashesSometimes=True,
                      assumeDistHashesAlways=("assume-digests"
                                              in mysettings.features))
        except FileNotFound as e:
            writemsg(_("!!! File %s doesn't exist, can't update Manifest\n") %
                     e,
                     noiselevel=-1)
            return 0
        except PortagePackageException as e:
            writemsg(("!!! %s\n") % (e, ), noiselevel=-1)
            return 0
        try:
            mf.write(sign=False)
        except PermissionDenied as e:
            writemsg(_("!!! Permission Denied: %s\n") % (e, ), noiselevel=-1)
            return 0
        if "assume-digests" not in mysettings.features:
            distlist = list(mf.fhashdict.get("DIST", {}))
            distlist.sort()
            auto_assumed = []
            for filename in distlist:
                if not os.path.exists(
                        os.path.join(mysettings["DISTDIR"], filename)):
                    auto_assumed.append(filename)
            if auto_assumed:
                cp = os.path.sep.join(mysettings["O"].split(os.path.sep)[-2:])
                pkgs = myportdb.cp_list(cp, mytree=mytree)
                pkgs.sort()
                writemsg_stdout("  digest.assumed" +
                                colorize("WARN",
                                         str(len(auto_assumed)).rjust(18)) +
                                "\n")
                for pkg_key in pkgs:
                    fetchlist = myportdb.getFetchMap(pkg_key, mytree=mytree)
                    pv = pkg_key.split("/")[1]
                    for filename in auto_assumed:
                        if filename in fetchlist:
                            writemsg_stdout("   %s::%s\n" % (pv, filename))
        return 1
    finally:
        portage._doebuild_manifest_exempt_depend -= 1
Code Example #18
					def onUpdate(maxval, curval):
						if curval > 0:
							writemsg_stdout("*")
Code Example #19
File: utilities.py Project: rrozestw/portage
def UpdateChangeLog(
	pkgdir, user, msg, skel_path, category, package,
	new=(), removed=(), changed=(), pretend=False, quiet=False):
	"""
	Write an entry to an existing ChangeLog, or create a new one.
	Updates copyright year on changed files, and updates the header of
	ChangeLog with the contents of skel.ChangeLog.
	"""

	if '<root@' in user:
		if not quiet:
			logging.critical('Please set ECHANGELOG_USER or run as non-root')
		return None

	# ChangeLog times are in UTC
	gmtime = time.gmtime()
	year = time.strftime('%Y', gmtime)
	date = time.strftime('%d %b %Y', gmtime)

	# check modified files and the ChangeLog for copyright updates
	# patches and diffs (identified by .patch and .diff) are excluded
	for fn in chain(new, changed):
		if fn.endswith('.diff') or fn.endswith('.patch'):
			continue
		update_copyright(os.path.join(pkgdir, fn), year, pretend=pretend)

	cl_path = os.path.join(pkgdir, 'ChangeLog')
	clold_lines = []
	clnew_lines = []
	old_header_lines = []
	header_lines = []

	clold_file = None
	try:
		clold_file = io.open(_unicode_encode(
			cl_path, encoding=_encodings['fs'], errors='strict'),
			mode='r', encoding=_encodings['repo.content'], errors='replace')
	except EnvironmentError:
		pass

	f, clnew_path = mkstemp()

	# construct correct header first
	try:
		if clold_file is not None:
			# retain header from old ChangeLog
			first_line = True
			for line in clold_file:
				line_strip = line.strip()
				if line_strip and line[:1] != "#":
					clold_lines.append(line)
					break
				# always make sure cat/pkg is up-to-date in case we are
				# moving packages around, or copied from another pkg, or ...
				if first_line:
					if line.startswith('# ChangeLog for'):
						line = '# ChangeLog for %s/%s\n' % (category, package)
					first_line = False
				old_header_lines.append(line)
				header_lines.append(update_copyright_year(year, line))
				if not line_strip:
					break

		clskel_file = None
		if not header_lines:
			# delay opening this until we find we need a header
			try:
				clskel_file = io.open(_unicode_encode(
					skel_path, encoding=_encodings['fs'], errors='strict'),
					mode='r', encoding=_encodings['repo.content'],
					errors='replace')
			except EnvironmentError:
				pass

		if clskel_file is not None:
			# read skel.ChangeLog up to first empty line
			for line in clskel_file:
				line_strip = line.strip()
				if not line_strip:
					break
				line = line.replace('<CATEGORY>', category)
				line = line.replace('<PACKAGE_NAME>', package)
				line = update_copyright_year(year, line)
				header_lines.append(line)
			header_lines.append('\n')
			clskel_file.close()

		# write new ChangeLog entry
		clnew_lines.extend(header_lines)
		newebuild = False
		for fn in new:
			if not fn.endswith('.ebuild'):
				continue
			ebuild = fn.split(os.sep)[-1][0:-7]
			clnew_lines.append('*%s (%s)\n' % (ebuild, date))
			newebuild = True
		if newebuild:
			clnew_lines.append('\n')
		trivial_files = ('ChangeLog', 'Manifest')
		display_new = [
			'+' + elem
			for elem in new
			if elem not in trivial_files]
		display_removed = [
			'-' + elem
			for elem in removed]
		display_changed = [
			elem for elem in changed
			if elem not in trivial_files]
		if not (display_new or display_removed or display_changed):
			# If there's nothing else to display, show one of the
			# trivial files.
			for fn in trivial_files:
				if fn in new:
					display_new = ['+' + fn]
					break
				elif fn in changed:
					display_changed = [fn]
					break

		display_new.sort()
		display_removed.sort()
		display_changed.sort()

		mesg = '%s; %s %s:' % (date, user, ', '.join(chain(
			display_new, display_removed, display_changed)))
		for line in textwrap.wrap(
			mesg, 80, initial_indent='  ', subsequent_indent='  ',
			break_on_hyphens=False):
			clnew_lines.append('%s\n' % line)
		for line in textwrap.wrap(
			msg, 80, initial_indent='  ', subsequent_indent='  '):
			clnew_lines.append('%s\n' % line)
		# Don't append a trailing newline if the file is new.
		if clold_file is not None:
			clnew_lines.append('\n')

		f = io.open(
			f, mode='w', encoding=_encodings['repo.content'],
			errors='backslashreplace')

		for line in clnew_lines:
			f.write(line)

		# append stuff from old ChangeLog
		if clold_file is not None:

			if clold_lines:
				# clold_lines may contain a saved non-header line
				# that we want to write first.
				# Also, append this line to clnew_lines so that the
				# unified_diff call doesn't show it as removed.
				for line in clold_lines:
					f.write(line)
					clnew_lines.append(line)

			else:
				# ensure that there is no more than one blank
				# line after our new entry
				for line in clold_file:
					if line.strip():
						f.write(line)
						break

			# Now prepend old_header_lines to clold_lines, for use
			# in the unified_diff call below.
			clold_lines = old_header_lines + clold_lines

			# Trim any trailing newlines.
			lines = clold_file.readlines()
			clold_file.close()
			while lines and lines[-1] == '\n':
				del lines[-1]
			f.writelines(lines)
		f.close()

		# show diff
		if not quiet:
			for line in difflib.unified_diff(
				clold_lines, clnew_lines,
				fromfile=cl_path, tofile=cl_path, n=0):
				util.writemsg_stdout(line, noiselevel=-1)
			util.writemsg_stdout("\n", noiselevel=-1)

		if pretend:
			# remove what we've done
			os.remove(clnew_path)
		else:
			# rename to ChangeLog, and set permissions
			try:
				clold_stat = os.stat(cl_path)
			except OSError:
				clold_stat = None

			shutil.move(clnew_path, cl_path)

			if clold_stat is None:
				util.apply_permissions(cl_path, mode=0o644)
			else:
				util.apply_stat_permissions(cl_path, clold_stat)

		if clold_file is None:
			return True
		else:
			return False
	except IOError as e:
		err = 'Repoman is unable to create/write to Changelog.new file: %s' % (e,)
		logging.critical(err)
		# try to remove if possible
		try:
			os.remove(clnew_path)
		except OSError:
			pass
		return None
Code Example #20
File: utilities.py Project: clickbeetle/portage-cb
def update_copyright(fn_path, year, pretend=False):
    """
    Check file for a Copyright statement, and update its year.  The
    patterns used for replacing copyrights are taken from echangelog.
    Only the first lines of each file that start with a hash ('#') are
    considered, until a line is found that doesn't start with a hash.
    Files are read and written in binary mode, so that this function
    will work correctly with files encoded in any character set, as
    long as the copyright statements consist of plain ASCII.
    """

    try:
        fn_hdl = io.open(_unicode_encode(fn_path,
                                         encoding=_encodings['fs'],
                                         errors='strict'),
                         mode='rb')
    except EnvironmentError:
        return

    orig_header = []
    new_header = []

    for line in fn_hdl:
        line_strip = line.strip()
        orig_header.append(line)
        if not line_strip or line_strip[:1] != b'#':
            new_header.append(line)
            break

        line = _update_copyright_year(year, line)
        new_header.append(line)

    difflines = 0
    for line in difflib.unified_diff(
        [_unicode_decode(line) for line in orig_header],
        [_unicode_decode(line) for line in new_header],
            fromfile=fn_path,
            tofile=fn_path,
            n=0):
        util.writemsg_stdout(line, noiselevel=-1)
        difflines += 1
    util.writemsg_stdout("\n", noiselevel=-1)

    # unified diff has three lines to start with
    if difflines > 3 and not pretend:
        # write new file with changed header
        f, fnnew_path = mkstemp()
        f = io.open(f, mode='wb')
        for line in new_header:
            f.write(line)
        for line in fn_hdl:
            f.write(line)
        f.close()
        try:
            fn_stat = os.stat(fn_path)
        except OSError:
            fn_stat = None

        shutil.move(fnnew_path, fn_path)

        if fn_stat is None:
            util.apply_permissions(fn_path, mode=0o644)
        else:
            util.apply_stat_permissions(fn_path, fn_stat)
    fn_hdl.close()
Code Example #21
File: rsync.py Project: gentoo/portage
	def update(self):
		'''Internal update function which performs the transfer'''
		opts = self.options.get('emerge_config').opts
		self.usersync_uid = self.options.get('usersync_uid', None)
		enter_invalid = '--ask-enter-invalid' in opts
		quiet = '--quiet' in opts
		out = portage.output.EOutput(quiet=quiet)
		syncuri = self.repo.sync_uri
		if self.repo.module_specific_options.get(
			'sync-rsync-vcs-ignore', 'false').lower() == 'true':
			vcs_dirs = ()
		else:
			vcs_dirs = frozenset(VCS_DIRS)
			vcs_dirs = vcs_dirs.intersection(os.listdir(self.repo.location))

		for vcs_dir in vcs_dirs:
			writemsg_level(("!!! %s appears to be under revision " + \
				"control (contains %s).\n!!! Aborting rsync sync "
				"(override with \"sync-rsync-vcs-ignore = true\" in repos.conf).\n") % \
				(self.repo.location, vcs_dir), level=logging.ERROR, noiselevel=-1)
			return (1, False)
		self.timeout=180

		rsync_opts = []
		if self.settings["PORTAGE_RSYNC_OPTS"] == "":
			rsync_opts = self._set_rsync_defaults()
		else:
			rsync_opts = self._validate_rsync_opts(rsync_opts, syncuri)
		self.rsync_opts = self._rsync_opts_extend(opts, rsync_opts)

		self.extra_rsync_opts = list()
		if self.repo.module_specific_options.get('sync-rsync-extra-opts'):
			self.extra_rsync_opts.extend(portage.util.shlex_split(
				self.repo.module_specific_options['sync-rsync-extra-opts']))

		exitcode = 0
		verify_failure = False

		# Process GLEP74 verification options.
		# Default verification to 'no'; it's enabled for ::gentoo
		# via default repos.conf though.
		self.verify_metamanifest = (
				self.repo.module_specific_options.get(
					'sync-rsync-verify-metamanifest', 'no') in ('yes', 'true'))
		# Support overriding job count.
		self.verify_jobs = self.repo.module_specific_options.get(
				'sync-rsync-verify-jobs', None)
		if self.verify_jobs is not None:
			try:
				self.verify_jobs = int(self.verify_jobs)
				if self.verify_jobs < 0:
					raise ValueError(self.verify_jobs)
			except ValueError:
				writemsg_level("!!! sync-rsync-verify-jobs not a positive integer: %s\n" % (self.verify_jobs,),
					level=logging.WARNING, noiselevel=-1)
				self.verify_jobs = None
			else:
				if self.verify_jobs == 0:
					# Use the apparent number of processors if gemato
					# supports it.
					self.verify_jobs = None
		# Support overriding max age.
		self.max_age = self.repo.module_specific_options.get(
				'sync-rsync-verify-max-age', '')
		if self.max_age:
			try:
				self.max_age = int(self.max_age)
				if self.max_age < 0:
					raise ValueError(self.max_age)
			except ValueError:
				writemsg_level("!!! sync-rsync-max-age must be a non-negative integer: %s\n" % (self.max_age,),
					level=logging.WARNING, noiselevel=-1)
				self.max_age = 0
		else:
			self.max_age = 0

		openpgp_env = None
		if self.verify_metamanifest and gemato is not None:
			# Use isolated environment if key is specified,
			# system environment otherwise
			if self.repo.sync_openpgp_key_path is not None:
				openpgp_env = gemato.openpgp.OpenPGPEnvironment()
			else:
				openpgp_env = gemato.openpgp.OpenPGPSystemEnvironment()

		try:
			# Load and update the keyring early. If it fails, then verification
			# will not be performed and the user will have to fix it and try again,
			# so we may as well bail out before actual rsync happens.
			if openpgp_env is not None and self.repo.sync_openpgp_key_path is not None:
				try:
					out.einfo('Using keys from %s' % (self.repo.sync_openpgp_key_path,))
					with io.open(self.repo.sync_openpgp_key_path, 'rb') as f:
						openpgp_env.import_key(f)
					self._refresh_keys(openpgp_env)
				except (GematoException, asyncio.TimeoutError) as e:
					writemsg_level("!!! Manifest verification impossible due to keyring problem:\n%s\n"
							% (e,),
							level=logging.ERROR, noiselevel=-1)
					return (1, False)

			# Real local timestamp file.
			self.servertimestampfile = os.path.join(
				self.repo.location, "metadata", "timestamp.chk")

			content = portage.util.grabfile(self.servertimestampfile)
			timestamp = 0
			if content:
				try:
					timestamp = time.mktime(time.strptime(content[0],
						TIMESTAMP_FORMAT))
				except (OverflowError, ValueError):
					pass
			del content

			try:
				self.rsync_initial_timeout = \
					int(self.settings.get("PORTAGE_RSYNC_INITIAL_TIMEOUT", "15"))
			except ValueError:
				self.rsync_initial_timeout = 15

			try:
				maxretries=int(self.settings["PORTAGE_RSYNC_RETRIES"])
			except SystemExit as e:
				raise # Needed else can't exit
			except:
				maxretries = -1 #default number of retries

			if syncuri.startswith("file://"):
				self.proto = "file"
				dosyncuri = syncuri[7:]
				unchanged, is_synced, exitcode, updatecache_flg = self._do_rsync(
					dosyncuri, timestamp, opts)
				self._process_exitcode(exitcode, dosyncuri, out, 1)
				if exitcode == 0:
					if unchanged:
						self.repo_storage.abort_update()
					else:
						self.repo_storage.commit_update()
						self.repo_storage.garbage_collection()
				return (exitcode, updatecache_flg)

			retries=0
			try:
				self.proto, user_name, hostname, port = re.split(
					r"(rsync|ssh)://([^:/]+@)?(\[[:\da-fA-F]*\]|[^:/]*)(:[0-9]+)?",
					syncuri, maxsplit=4)[1:5]
			except ValueError:
				writemsg_level("!!! sync-uri is invalid: %s\n" % syncuri,
					noiselevel=-1, level=logging.ERROR)
				return (1, False)

			self.ssh_opts = self.settings.get("PORTAGE_SSH_OPTS")

			if port is None:
				port=""
			if user_name is None:
				user_name=""
			if re.match(r"^\[[:\da-fA-F]*\]$", hostname) is None:
				getaddrinfo_host = hostname
			else:
				# getaddrinfo needs the brackets stripped
				getaddrinfo_host = hostname[1:-1]
			updatecache_flg = False
			all_rsync_opts = set(self.rsync_opts)
			all_rsync_opts.update(self.extra_rsync_opts)

			family = socket.AF_UNSPEC
			if "-4" in all_rsync_opts or "--ipv4" in all_rsync_opts:
				family = socket.AF_INET
			elif socket.has_ipv6 and \
				("-6" in all_rsync_opts or "--ipv6" in all_rsync_opts):
				family = socket.AF_INET6

			addrinfos = None
			uris = []

			try:
				addrinfos = getaddrinfo_validate(
					socket.getaddrinfo(getaddrinfo_host, None,
					family, socket.SOCK_STREAM))
			except socket.error as e:
				writemsg_level(
					"!!! getaddrinfo failed for '%s': %s\n"
					% (_unicode_decode(hostname), _unicode(e)),
					noiselevel=-1, level=logging.ERROR)

			if addrinfos:

				AF_INET = socket.AF_INET
				AF_INET6 = None
				if socket.has_ipv6:
					AF_INET6 = socket.AF_INET6

				ips_v4 = []
				ips_v6 = []

				for addrinfo in addrinfos:
					if addrinfo[0] == AF_INET:
						ips_v4.append("%s" % addrinfo[4][0])
					elif AF_INET6 is not None and addrinfo[0] == AF_INET6:
						# IPv6 addresses need to be enclosed in square brackets
						ips_v6.append("[%s]" % addrinfo[4][0])

				random.shuffle(ips_v4)
				random.shuffle(ips_v6)

				# Give priority to the address family that
				# getaddrinfo() returned first.
				if AF_INET6 is not None and addrinfos and \
					addrinfos[0][0] == AF_INET6:
					ips = ips_v6 + ips_v4
				else:
					ips = ips_v4 + ips_v6

				for ip in ips:
					uris.append(syncuri.replace(
						"//" + user_name + hostname + port + "/",
						"//" + user_name + ip + port + "/", 1))

			if not uris:
				# With some configurations we need to use the plain hostname
				# rather than try to resolve the ip addresses (bug #340817).
				uris.append(syncuri)

			# reverse, for use with pop()
			uris.reverse()
			uris_orig = uris[:]

			effective_maxretries = maxretries
			if effective_maxretries < 0:
				effective_maxretries = len(uris) - 1

			local_state_unchanged = True
			while (1):
				if uris:
					dosyncuri = uris.pop()
				elif maxretries < 0 or retries > maxretries:
					writemsg("!!! Exhausted addresses for %s\n"
						% _unicode_decode(hostname), noiselevel=-1)
					return (1, False)
				else:
					uris.extend(uris_orig)
					dosyncuri = uris.pop()

				if (retries==0):
					if "--ask" in opts:
						uq = UserQuery(opts)
						if uq.query("Do you want to sync your ebuild repository " + \
							"with the mirror at\n" + blue(dosyncuri) + bold("?"),
							enter_invalid) == "No":
							print()
							print("Quitting.")
							print()
							sys.exit(128 + signal.SIGINT)
					self.logger(self.xterm_titles,
						">>> Starting rsync with " + dosyncuri)
					if "--quiet" not in opts:
						print(">>> Starting rsync with "+dosyncuri+"...")
				else:
					self.logger(self.xterm_titles,
						">>> Starting retry %d of %d with %s" % \
							(retries, effective_maxretries, dosyncuri))
					writemsg_stdout(
						"\n\n>>> Starting retry %d of %d with %s\n" % \
						(retries, effective_maxretries, dosyncuri), noiselevel=-1)

				if dosyncuri.startswith('ssh://'):
					dosyncuri = dosyncuri[6:].replace('/', ':/', 1)

				unchanged, is_synced, exitcode, updatecache_flg = self._do_rsync(
					dosyncuri, timestamp, opts)
				if not unchanged:
					local_state_unchanged = False
				if is_synced:
					break

				retries=retries+1

				if maxretries < 0 or retries <= maxretries:
					print(">>> Retrying...")
				else:
					# over retries
					# exit loop
					exitcode = EXCEEDED_MAX_RETRIES
					break

			self._process_exitcode(exitcode, dosyncuri, out, maxretries)

			if local_state_unchanged:
				# The quarantine download_dir is not intended to exist
				# in this case, so refer gemato to the normal repository
				# location.
				download_dir = self.repo.location
			else:
				download_dir = self.download_dir

			# if synced successfully, verify now
			if exitcode == 0 and self.verify_metamanifest:
				if gemato is None:
					writemsg_level("!!! Unable to verify: gemato-11.0+ is required\n",
						level=logging.ERROR, noiselevel=-1)
					exitcode = 127
				else:
					try:
						# we always verify the Manifest signature, in case
						# we had to deal with a key revocation
						m = gemato.recursiveloader.ManifestRecursiveLoader(
								os.path.join(download_dir, 'Manifest'),
								verify_openpgp=True,
								openpgp_env=openpgp_env,
								max_jobs=self.verify_jobs)
						if not m.openpgp_signed:
							raise RuntimeError('OpenPGP signature not found on Manifest')

						ts = m.find_timestamp()
						if ts is None:
							raise RuntimeError('Timestamp not found in Manifest')
						if (self.max_age != 0 and
								(datetime.datetime.utcnow() - ts.ts).days > self.max_age):
							out.quiet = False
							out.ewarn('Manifest is over %d days old, this is suspicious!' % (self.max_age,))
							out.ewarn('You may want to try using another mirror and/or reporting this one:')
							out.ewarn('  %s' % (dosyncuri,))
							out.ewarn('')
							out.quiet = quiet

						out.einfo('Manifest timestamp: %s UTC' % (ts.ts,))
						out.einfo('Valid OpenPGP signature found:')
						out.einfo('- primary key: %s' % (
							m.openpgp_signature.primary_key_fingerprint))
						out.einfo('- subkey: %s' % (
							m.openpgp_signature.fingerprint))
						out.einfo('- timestamp: %s UTC' % (
							m.openpgp_signature.timestamp))

						# if nothing has changed, skip the actual Manifest
						# verification
						if not local_state_unchanged:
							out.ebegin('Verifying %s' % (download_dir,))
							m.assert_directory_verifies()
							out.eend(0)
					except GematoException as e:
						writemsg_level("!!! Manifest verification failed:\n%s\n"
								% (e,),
								level=logging.ERROR, noiselevel=-1)
						exitcode = 1
						verify_failure = True

			if exitcode == 0 and not local_state_unchanged:
				self.repo_storage.commit_update()
				self.repo_storage.garbage_collection()

			return (exitcode, updatecache_flg)
		finally:
			# Don't delete the update if verification failed, in case
			# the cause needs to be investigated.
			if not verify_failure:
				self.repo_storage.abort_update()
			if openpgp_env is not None:
				openpgp_env.close()
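
The tail end of this sync routine resolves the mirror hostname into individual addresses, shuffles them within each address family, prefers the family that getaddrinfo() returned first, and then rotates through the resulting URIs while counting retries. Below is a minimal standalone sketch of that pattern using only the standard library; the do_rsync callable, the default retry limit, and the user/port defaults are illustrative assumptions, not values taken from the code above.

import random
import socket


def candidate_uris(syncuri, hostname, user_name="", port=""):
    """Expand a sync URI into one URI per resolved address, preferring the
    address family that getaddrinfo() returned first (sketch)."""
    try:
        addrinfos = socket.getaddrinfo(
            hostname, None, socket.AF_UNSPEC, socket.SOCK_STREAM)
    except socket.error:
        # Fall back to the plain hostname if resolution fails.
        return [syncuri]

    ips_v4 = [ai[4][0] for ai in addrinfos if ai[0] == socket.AF_INET]
    # IPv6 literals need square brackets inside a URI.
    ips_v6 = ["[%s]" % ai[4][0] for ai in addrinfos
              if socket.has_ipv6 and ai[0] == socket.AF_INET6]
    random.shuffle(ips_v4)
    random.shuffle(ips_v6)
    if ips_v6 and addrinfos[0][0] == socket.AF_INET6:
        ips = ips_v6 + ips_v4
    else:
        ips = ips_v4 + ips_v6
    uris = [syncuri.replace("//" + user_name + hostname + port + "/",
                            "//" + user_name + ip + port + "/", 1)
            for ip in ips]
    return uris or [syncuri]


def sync_with_retries(syncuri, hostname, do_rsync, maxretries=3):
    """Rotate through candidate URIs, recycling the full list until a sync
    succeeds or the retry budget is exhausted (sketch of the loop above;
    do_rsync is a caller-supplied callable returning True on success)."""
    uris = candidate_uris(syncuri, hostname)
    uris.reverse()  # reversed for use with pop()
    uris_orig = uris[:]
    retries = 0
    while True:
        if uris:
            dosyncuri = uris.pop()
        elif maxretries < 0 or retries > maxretries:
            return False  # exhausted addresses
        else:
            uris.extend(uris_orig)  # start another pass over all addresses
            dosyncuri = uris.pop()
        if do_rsync(dosyncuri):
            return True
        retries += 1
        if not (maxretries < 0 or retries <= maxretries):
            return False  # over the retry limit
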
Code example #22
0
def pkgmerge(mytbz2, myroot, mysettings, mydbapi=None, vartree=None, prev_mtimes=None, blockers=None):
    """will merge a .tbz2 file, returning a list of runtime dependencies
		that must be satisfied, or None if there was a merge error.	This
		code assumes the package exists."""

    warnings.warn("portage.pkgmerge() is deprecated", DeprecationWarning, stacklevel=2)

    if mydbapi is None:
        mydbapi = portage.db[myroot]["bintree"].dbapi
    if vartree is None:
        vartree = portage.db[myroot]["vartree"]
    if mytbz2[-5:] != ".tbz2":
        print(_("!!! Not a .tbz2 file"))
        return 1

    tbz2_lock = None
    mycat = None
    mypkg = None
    did_merge_phase = False
    success = False
    try:
        """ Don't lock the tbz2 file because the filesytem could be readonly or
		shared by a cluster."""
        # tbz2_lock = portage.locks.lockfile(mytbz2, wantnewlockfile=1)

        mypkg = os.path.basename(mytbz2)[:-5]
        xptbz2 = portage.xpak.tbz2(mytbz2)
        mycat = xptbz2.getfile(_unicode_encode("CATEGORY", encoding=_encodings["repo.content"]))
        if not mycat:
            writemsg(_("!!! CATEGORY info missing from info chunk, aborting...\n"), noiselevel=-1)
            return 1
        mycat = _unicode_decode(mycat, encoding=_encodings["repo.content"], errors="replace")
        mycat = mycat.strip()

        # These are the same directories that would be used at build time.
        builddir = os.path.join(mysettings["PORTAGE_TMPDIR"], "portage", mycat, mypkg)
        catdir = os.path.dirname(builddir)
        pkgloc = os.path.join(builddir, "image")
        infloc = os.path.join(builddir, "build-info")
        myebuild = os.path.join(infloc, os.path.basename(mytbz2)[:-4] + "ebuild")
        portage.util.ensure_dirs(os.path.dirname(catdir), uid=portage_uid, gid=portage_gid, mode=0o70, mask=0)
        portage.util.ensure_dirs(catdir, uid=portage_uid, gid=portage_gid, mode=0o70, mask=0)
        try:
            shutil.rmtree(builddir)
        except (IOError, OSError) as e:
            if e.errno != errno.ENOENT:
                raise
            del e
        for mydir in (builddir, pkgloc, infloc):
            portage.util.ensure_dirs(mydir, uid=portage_uid, gid=portage_gid, mode=0o755)
        writemsg_stdout(_(">>> Extracting info\n"))
        xptbz2.unpackinfo(infloc)
        mysettings.setcpv(mycat + "/" + mypkg, mydb=mydbapi)
        # Store the md5sum in the vdb.
        fp = open(_unicode_encode(os.path.join(infloc, "BINPKGMD5")), "w")
        fp.write(str(portage.checksum.perform_md5(mytbz2)) + "\n")
        fp.close()

        # This gives bashrc users an opportunity to do various things
        # such as remove binary packages after they're installed.
        mysettings["PORTAGE_BINPKG_FILE"] = mytbz2
        mysettings.backup_changes("PORTAGE_BINPKG_FILE")
        debug = mysettings.get("PORTAGE_DEBUG", "") == "1"

        # Eventually we'd like to pass in the saved ebuild env here.
        retval = portage.doebuild(
            myebuild, "setup", myroot, mysettings, debug=debug, tree="bintree", mydbapi=mydbapi, vartree=vartree
        )
        if retval != os.EX_OK:
            writemsg(_("!!! Setup failed: %s\n") % retval, noiselevel=-1)
            return retval

        writemsg_stdout(_(">>> Extracting %s\n") % mypkg)
        retval = portage.process.spawn_bash(
            "bzip2 -dqc -- '%s' | tar -xp -C '%s' -f -" % (mytbz2, pkgloc), env=mysettings.environ()
        )
        if retval != os.EX_OK:
            writemsg(_("!!! Error Extracting '%s'\n") % mytbz2, noiselevel=-1)
            return retval
            # portage.locks.unlockfile(tbz2_lock)
            # tbz2_lock = None

        mylink = portage.dblink(
            mycat, mypkg, myroot, mysettings, vartree=vartree, treetype="bintree", blockers=blockers
        )
        retval = mylink.merge(pkgloc, infloc, myroot, myebuild, cleanup=0, mydbapi=mydbapi, prev_mtimes=prev_mtimes)
        did_merge_phase = True
        success = retval == os.EX_OK
        return retval
    finally:
        mysettings.pop("PORTAGE_BINPKG_FILE", None)
        if tbz2_lock:
            portage.locks.unlockfile(tbz2_lock)
        if not did_merge_phase:
            # The merge phase handles this already.  Callers don't know how
            # far this function got, so we have to call elog_process() here
            # so that it's only called once.
            from portage.elog import elog_process

            elog_process(mycat + "/" + mypkg, mysettings)
        try:
            if success:
                shutil.rmtree(builddir)
        except (IOError, OSError) as e:
            if e.errno != errno.ENOENT:
                raise
            del e
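
The BINPKGMD5 step above simply records the MD5 digest of the binary package inside the build-info directory so it ends up in the vdb. A rough standard-library equivalent is sketched below; the function name and chunk size are illustrative, and portage itself uses portage.checksum.perform_md5() as shown in the code above.

import hashlib
import os


def write_binpkg_md5(tbz2_path, infloc):
    """Hash the .tbz2 in chunks and store the hex digest as BINPKGMD5
    (sketch of the step above, not the portage implementation)."""
    md5 = hashlib.md5()
    with open(tbz2_path, "rb") as f:
        for chunk in iter(lambda: f.read(8192), b""):
            md5.update(chunk)
    with open(os.path.join(infloc, "BINPKGMD5"), "w") as f:
        f.write(md5.hexdigest() + "\n")
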
Code example #23
0
    def output(self):
        """Outputs the results of the search."""
        msg = []
        msg.append("\b\b  \n[ Results for search key : " + \
         bold(self.searchkey) + " ]\n")
        msg.append("[ Applications found : " + \
         bold(str(self.mlen)) + " ]\n\n")
        vardb = self.vartree.dbapi
        for mtype in self.matches:
            for match, masked in self.matches[mtype]:
                full_package = None
                if mtype == "pkg":
                    catpack = match
                    full_package = self.portdb.xmatch("bestmatch-visible",
                                                      match)
                    if not full_package:
                        #no match found; we don't want to query description
                        masked = 1
                        full_package = portage.best(
                            self.portdb.xmatch("match-all", match))
                elif mtype == "desc":
                    full_package = match
                    match = portage.cpv_getkey(match)
                elif mtype == "set":
                    msg.append(green("*") + "  " + bold(match) + "\n")
                    if self.verbose:
                        msg.append("      " + darkgreen("Description:") + \
                         "   " + \
                         self.sdict[match].getMetadata("DESCRIPTION") \
                         + "\n\n")
                    writemsg_stdout(''.join(msg), noiselevel=-1)
                if full_package:
                    try:
                        desc, homepage, license = self.portdb.aux_get(
                            full_package,
                            ["DESCRIPTION", "HOMEPAGE", "LICENSE"])
                    except KeyError:
                        msg.append(
                            "emerge: search: aux_get() failed, skipping\n")
                        continue
                    if masked:
                        msg.append(green("*") + "  " + \
                         white(match) + " " + red("[ Masked ]") + "\n")
                    else:
                        msg.append(green("*") + "  " + bold(match) + "\n")
                    myversion = self.getVersion(full_package,
                                                search.VERSION_RELEASE)

                    mysum = [0, 0]
                    file_size_str = None
                    mycat = match.split("/")[0]
                    mypkg = match.split("/")[1]
                    mycpv = match + "-" + myversion
                    myebuild = self.portdb.findname(mycpv)
                    if myebuild:
                        pkgdir = os.path.dirname(myebuild)
                        from portage import manifest
                        mf = manifest.Manifest(pkgdir,
                                               self.settings["DISTDIR"])
                        try:
                            uri_map = self.portdb.getFetchMap(mycpv)
                        except portage.exception.InvalidDependString as e:
                            file_size_str = "Unknown (%s)" % (e, )
                            del e
                        else:
                            try:
                                mysum[0] = mf.getDistfilesSize(uri_map)
                            except KeyError as e:
                                file_size_str = "Unknown (missing " + \
                                 "digest for %s)" % (e,)
                                del e

                    available = False
                    for db in self._dbs:
                        if db is not vardb and \
                         db.cpv_exists(mycpv):
                            available = True
                            if not myebuild and hasattr(db, "bintree"):
                                myebuild = db.bintree.getname(mycpv)
                                try:
                                    mysum[0] = os.stat(myebuild).st_size
                                except OSError:
                                    myebuild = None
                            break

                    if myebuild and file_size_str is None:
                        mystr = str(mysum[0] // 1024)
                        mycount = len(mystr)
                        while (mycount > 3):
                            mycount -= 3
                            mystr = mystr[:mycount] + "," + mystr[mycount:]
                        file_size_str = mystr + " kB"

                    if self.verbose:
                        if available:
                            msg.append("      %s %s\n" % \
                             (darkgreen("Latest version available:"),
                             myversion))
                        msg.append("      %s\n" % \
                         self.getInstallationStatus(mycat+'/'+mypkg))
                        if myebuild:
                            msg.append("      %s %s\n" % \
                             (darkgreen("Size of files:"), file_size_str))
                        msg.append("      " + darkgreen("Homepage:") + \
                         "      " + homepage + "\n")
                        msg.append("      " + darkgreen("Description:") \
                         + "   " + desc + "\n")
                        msg.append("      " + darkgreen("License:") + \
                         "       " + license + "\n\n")
        writemsg_stdout(''.join(msg), noiselevel=-1)
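
The size display above builds the "NN,NNN kB" string by manually inserting thousands separators into the kilobyte count. A format specification produces the same result in one line; newer versions of this method call localized_size() instead, as the next example shows.

def format_kib(size_bytes):
    """Render a byte count as a comma-grouped kB string, matching the
    manual digit-grouping loop above (sketch)."""
    return "{:,} kB".format(size_bytes // 1024)


# For example, format_kib(12345678) returns '12,056 kB'.
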
Code example #24
0
File: search.py Project: Spencerx/portage
	def output(self):
		"""Outputs the results of the search."""
		msg = []
		msg.append("\b\b  \n[ Results for search key : " + \
			bold(self.searchkey) + " ]\n")
		msg.append("[ Applications found : " + \
			bold(str(self.mlen)) + " ]\n\n")
		vardb = self.vartree.dbapi
		metadata_keys = set(Package.metadata_keys)
		metadata_keys.update(["DESCRIPTION", "HOMEPAGE", "LICENSE", "SRC_URI"])
		metadata_keys = tuple(metadata_keys)
		for mtype in self.matches:
			for match,masked in self.matches[mtype]:
				full_package = None
				if mtype == "pkg":
					full_package = self._xmatch(
						"bestmatch-visible", match)
					if not full_package:
						#no match found; we don't want to query description
						masked=1
						full_package = portage.best(
							self._xmatch("match-all",match))
				elif mtype == "desc":
					full_package = match
					match        = portage.cpv_getkey(match)
				elif mtype == "set":
					msg.append(green("*") + "  " + bold(match) + "\n")
					if self.verbose:
						msg.append("      " + darkgreen("Description:") + \
							"   " + \
							self.sdict[match].getMetadata("DESCRIPTION") \
							+ "\n\n")
				if full_package:
					try:
						metadata = dict(zip(metadata_keys,
							self._aux_get(full_package, metadata_keys)))
					except KeyError:
						msg.append("emerge: search: aux_get() failed, skipping\n")
						continue

					desc = metadata["DESCRIPTION"]
					homepage = metadata["HOMEPAGE"]
					license = metadata["LICENSE"]

					if masked:
						msg.append(green("*") + "  " + \
							white(match) + " " + red("[ Masked ]") + "\n")
					else:
						msg.append(green("*") + "  " + bold(match) + "\n")
					myversion = self.getVersion(full_package, search.VERSION_RELEASE)

					mysum = [0,0]
					file_size_str = None
					mycat = match.split("/")[0]
					mypkg = match.split("/")[1]
					mycpv = match + "-" + myversion
					myebuild = self._findname(mycpv)
					if myebuild:
						pkg = Package(built=False, cpv=mycpv,
							installed=False, metadata=metadata,
							root_config=self.root_config, type_name="ebuild")
						pkgdir = os.path.dirname(myebuild)
						mf = self.settings.repositories.get_repo_for_location(
							os.path.dirname(os.path.dirname(pkgdir)))
						mf = mf.load_manifest(
							pkgdir, self.settings["DISTDIR"])
						try:
							uri_map = _parse_uri_map(mycpv, metadata,
								use=pkg.use.enabled)
						except portage.exception.InvalidDependString as e:
							file_size_str = "Unknown (%s)" % (e,)
							del e
						else:
							try:
								mysum[0] = mf.getDistfilesSize(uri_map)
							except KeyError as e:
								file_size_str = "Unknown (missing " + \
									"digest for %s)" % (e,)
								del e

					available = False
					for db in self._dbs:
						if db is not vardb and \
							db.cpv_exists(mycpv):
							available = True
							if not myebuild and hasattr(db, "bintree"):
								myebuild = db.bintree.getname(mycpv)
								try:
									mysum[0] = os.stat(myebuild).st_size
								except OSError:
									myebuild = None
							break

					if myebuild and file_size_str is None:
						file_size_str = localized_size(mysum[0])

					if self.verbose:
						if available:
							msg.append("      %s %s\n" % \
								(darkgreen("Latest version available:"),
								myversion))
						msg.append("      %s\n" % \
							self.getInstallationStatus(mycat+'/'+mypkg))
						if myebuild:
							msg.append("      %s %s\n" % \
								(darkgreen("Size of files:"), file_size_str))
						msg.append("      " + darkgreen("Homepage:") + \
							"      " + homepage + "\n")
						msg.append("      " + darkgreen("Description:") \
							+ "   " + desc + "\n")
						msg.append("      " + darkgreen("License:") + \
							"       " + license + "\n\n")
		writemsg_stdout(''.join(msg), noiselevel=-1)
Code example #25
0
File: search.py Project: gentoo/portage
			def append(msg):
				writemsg_stdout(msg, noiselevel=-1)
Code example #26
0
File: search.py Project: gentoo/portage
	def _iter_search(self):

		match_category = 0
		self.packagematches = []
		if self.searchdesc:
			self.searchdesc=1
			self.matches = {"pkg":[], "desc":[], "set":[]}
		else:
			self.searchdesc=0
			self.matches = {"pkg":[], "set":[]}
		writemsg_stdout("Searching...\n\n", noiselevel=-1)

		regexsearch = False
		if self.searchkey.startswith('%'):
			regexsearch = True
			self.searchkey = self.searchkey[1:]
		if self.searchkey.startswith('@'):
			match_category = 1
			self.searchkey = self.searchkey[1:]
		# Auto-detect category match mode (@ symbol can be deprecated
		# after this is available in a stable version of portage).
		if '/' in self.searchkey:
			match_category = 1
		fuzzy = False
		if regexsearch:
			self.searchre=re.compile(self.searchkey,re.I)
		else:
			self.searchre=re.compile(re.escape(self.searchkey), re.I)

			# Fuzzy search does not support regular expressions, therefore
			# it is disabled for regular expression searches.
			if self.fuzzy:
				fuzzy = True
				cutoff = float(self.search_similarity) / 100
				if match_category:
					# Weigh the similarity of category and package
					# names independently, in order to avoid matching
					# lots of irrelevant packages in the same category
					# when the package name is much shorter than the
					# category name.
					part_split = portage.catsplit
				else:
					part_split = lambda match_string: (match_string,)

				part_matchers = []
				for part in part_split(self.searchkey):
					seq_match = difflib.SequenceMatcher()
					seq_match.set_seq2(part.lower())
					part_matchers.append(seq_match)

				def fuzzy_search_part(seq_match, match_string):
					seq_match.set_seq1(match_string.lower())
					return (seq_match.real_quick_ratio() >= cutoff and
						seq_match.quick_ratio() >= cutoff and
						seq_match.ratio() >= cutoff)

				def fuzzy_search(match_string):
					return all(fuzzy_search_part(seq_match, part)
						for seq_match, part in zip(
						part_matchers, part_split(match_string)))

		for package in self._cp_all():
			self._spinner_update()

			if match_category:
				match_string  = package[:]
			else:
				match_string  = package.split("/")[-1]

			if self.searchre.search(match_string):
				yield ("pkg", package)
			elif fuzzy and fuzzy_search(match_string):
				yield ("pkg", package)
			elif self.searchdesc: # DESCRIPTION searching
				# Use _first_cp to avoid an expensive visibility check,
				# since the visibility check can be avoided entirely
				# when the DESCRIPTION does not match.
				full_package = self._first_cp(package)
				if not full_package:
					continue
				try:
					full_desc = self._aux_get(
						full_package, ["DESCRIPTION"])[0]
				except KeyError:
					self._aux_get_error(full_package)
					continue
				if not self.searchre.search(full_desc):
					continue

				yield ("desc", package)

		self.sdict = self.setconfig.getSets()
		for setname in self.sdict:
			self._spinner_update()
			if match_category:
				match_string = setname
			else:
				match_string = setname.split("/")[-1]
			
			if self.searchre.search(match_string):
				yield ("set", setname)
			elif self.searchdesc:
				if self.searchre.search(
					self.sdict[setname].getMetadata("DESCRIPTION")):
					yield ("set", setname)
Code example #27
0
File: search.py Project: Whissi/portage
	def _iter_search(self):

		match_category = 0
		self.packagematches = []
		if self.searchdesc:
			self.searchdesc=1
			self.matches = {"pkg":[], "desc":[], "set":[]}
		else:
			self.searchdesc=0
			self.matches = {"pkg":[], "set":[]}
		writemsg_stdout("Searching...\n\n", noiselevel=-1)

		regexsearch = False
		if self.searchkey.startswith('%'):
			regexsearch = True
			self.searchkey = self.searchkey[1:]
		if self.searchkey.startswith('@'):
			match_category = 1
			self.searchkey = self.searchkey[1:]
		if regexsearch:
			self.searchre=re.compile(self.searchkey,re.I)
		else:
			self.searchre=re.compile(re.escape(self.searchkey), re.I)

		for package in self._cp_all():
			self._spinner_update()

			if match_category:
				match_string  = package[:]
			else:
				match_string  = package.split("/")[-1]

			if self.searchre.search(match_string):
				yield ("pkg", package)
			elif self.searchdesc: # DESCRIPTION searching
				# Use _first_cp to avoid an expensive visibility check,
				# since the visibility check can be avoided entirely
				# when the DESCRIPTION does not match.
				full_package = self._first_cp(package)
				if not full_package:
					continue
				try:
					full_desc = self._aux_get(
						full_package, ["DESCRIPTION"])[0]
				except KeyError:
					self._aux_get_error(full_package)
					continue
				if not self.searchre.search(full_desc):
					continue

				yield ("desc", package)

		self.sdict = self.setconfig.getSets()
		for setname in self.sdict:
			self._spinner_update()
			if match_category:
				match_string = setname
			else:
				match_string = setname.split("/")[-1]
			
			if self.searchre.search(match_string):
				yield ("set", setname)
			elif self.searchdesc:
				if self.searchre.search(
					self.sdict[setname].getMetadata("DESCRIPTION")):
					yield ("set", setname)
Code example #28
0
File: output.py Project: antarus12345/portage
    def __call__(self, depgraph, mylist, favorites=None, verbosity=None):
        """The main operation to format and display the resolver output.

		@param depgraph: dependency graph
		@param mylist: list of packages being processed
		@param favorites: list, defaults to []
		@param verbosity: verbose level, defaults to None
		Modifies self.conf, self.myfetchlist, self.portdb, self.vardb,
			self.pkgsettings, self.verboseadd, self.oldlp, self.newlp,
			self.print_msg,
		"""
        if favorites is None:
            favorites = []
        self.conf = _DisplayConfig(depgraph, mylist, favorites, verbosity)
        mylist = self.get_display_list(self.conf.mylist)
        # files to fetch list - avoids counting a same file twice
        # in size display (verbose mode)
        self.myfetchlist = set()

        self.quiet_repo_display = "--quiet-repo-display" in depgraph._frozen_config.myopts
        if self.quiet_repo_display:
            # Use this set to detect when all the "repoadd" strings are "[0]"
            # and disable the entire repo display in this case.
            repoadd_set = set()

        self.restrict_fetch_list = {}

        for mylist_index in range(len(mylist)):
            pkg, depth, ordered = mylist[mylist_index]
            self.portdb = self.conf.trees[pkg.root]["porttree"].dbapi
            self.vardb = self.conf.trees[pkg.root]["vartree"].dbapi
            self.pkgsettings = self.conf.pkgsettings[pkg.root]
            self.indent = " " * depth

            if isinstance(pkg, Blocker):
                self._blockers(pkg)
            else:
                pkg_info = self.set_pkg_info(pkg, ordered)
                pkg_info.oldbest_list, myinslotlist = \
                 self._get_installed_best(pkg, pkg_info)
                if ordered and pkg_info.merge and \
                 not pkg_info.attr_display.new:
                    for arg, atom in depgraph._iter_atoms_for_pkg(pkg):
                        if arg.force_reinstall:
                            pkg_info.attr_display.force_reinstall = True
                            break

                self.verboseadd = ""
                if self.quiet_repo_display:
                    self.repoadd = None
                self._display_use(pkg, pkg_info)
                if self.conf.verbosity == 3:
                    if self.quiet_repo_display:
                        self.verbose_size(pkg, repoadd_set, pkg_info)
                    else:
                        self.verbose_size(pkg, None, pkg_info)

                self.oldlp = self.conf.columnwidth - 30
                self.newlp = self.oldlp - 30
                pkg_info.oldbest = self.convert_myoldbest(pkg, pkg_info)
                pkg_info.system, pkg_info.world = \
                 self.check_system_world(pkg)
                if 'interactive' in pkg.properties and \
                 pkg.operation == 'merge':
                    pkg_info.attr_display.interactive = True
                    if ordered:
                        self.counters.interactive += 1

                if self.include_mask_str():
                    pkg_info.attr_display.mask = self.gen_mask_str(pkg)

                if pkg.root_config.settings["ROOT"] != "/":
                    if pkg_info.oldbest:
                        pkg_info.oldbest += " "
                    if self.conf.columns:
                        myprint = self._set_non_root_columns(pkg, pkg_info)
                    else:
                        pkg_str = self._append_build_id(pkg.cpv, pkg, pkg_info)
                        if self.conf.verbosity == 3:
                            pkg_str = self._append_slot(pkg_str, pkg, pkg_info)
                            pkg_str = self._append_repository(
                                pkg_str, pkg, pkg_info)
                        if not pkg_info.merge:
                            addl = self.empty_space_in_brackets()
                            myprint = "[%s%s] " % (
                                self.pkgprint(pkg_info.operation.ljust(13),
                                              pkg_info),
                                addl,
                            )
                        else:
                            myprint = "[%s %s] " % (self.pkgprint(
                                pkg.type_name,
                                pkg_info), pkg_info.attr_display)
                        myprint += self.indent + \
                         self.pkgprint(pkg_str, pkg_info) + " " + \
                         pkg_info.oldbest + darkgreen("to " + pkg.root)
                else:
                    if self.conf.columns:
                        myprint = self._set_root_columns(pkg, pkg_info)
                    else:
                        myprint = self._set_no_columns(pkg, pkg_info)

                if self.conf.columns and pkg.operation == "uninstall":
                    continue
                if self.quiet_repo_display:
                    self.print_msg.append(
                        (myprint, self.verboseadd, self.repoadd))
                else:
                    self.print_msg.append((myprint, self.verboseadd, None))

        show_repos = self.quiet_repo_display and repoadd_set and repoadd_set != set(
            ["0"])

        # now finally print out the messages
        self.print_messages(show_repos)
        self.print_blockers()
        if self.conf.verbosity == 3:
            self.print_verbose(show_repos)
        for pkg, pkg_info in self.restrict_fetch_list.items():
            writemsg_stdout("\nFetch instructions for %s:\n" % (pkg.cpv, ),
                            noiselevel=-1)
            spawn_nofetch(self.conf.trees[pkg.root]["porttree"].dbapi,
                          pkg_info.ebuild_path)
        if self.conf.changelog:
            self.print_changelog()

        return os.EX_OK
Code example #29
0
File: search.py Project: amadio/portage
	def _iter_search(self):

		match_category = 0
		self.packagematches = []
		if self.searchdesc:
			self.searchdesc=1
			self.matches = {"pkg":[], "desc":[], "set":[]}
		else:
			self.searchdesc=0
			self.matches = {"pkg":[], "set":[]}
		writemsg_stdout("Searching...\n\n", noiselevel=-1)

		regexsearch = False
		if self.searchkey.startswith('%'):
			regexsearch = True
			self.searchkey = self.searchkey[1:]
		if self.searchkey.startswith('@'):
			match_category = 1
			self.searchkey = self.searchkey[1:]
		fuzzy = False
		if regexsearch:
			self.searchre=re.compile(self.searchkey,re.I)
		else:
			self.searchre=re.compile(re.escape(self.searchkey), re.I)

			# Fuzzy search does not support regular expressions, therefore
			# it is disabled for regular expression searches.
			if self.fuzzy:
				fuzzy = True
				cutoff = float(self.search_similarity) / 100
				seq_match = difflib.SequenceMatcher()
				seq_match.set_seq2(self.searchkey.lower())

				def fuzzy_search(match_string):
					seq_match.set_seq1(match_string.lower())
					return (seq_match.real_quick_ratio() >= cutoff and
						seq_match.quick_ratio() >= cutoff and
						seq_match.ratio() >= cutoff)

		for package in self._cp_all():
			self._spinner_update()

			if match_category:
				match_string  = package[:]
			else:
				match_string  = package.split("/")[-1]

			if self.searchre.search(match_string):
				yield ("pkg", package)
			elif fuzzy and fuzzy_search(match_string):
				yield ("pkg", package)
			elif self.searchdesc: # DESCRIPTION searching
				# Use _first_cp to avoid an expensive visibility check,
				# since the visibility check can be avoided entirely
				# when the DESCRIPTION does not match.
				full_package = self._first_cp(package)
				if not full_package:
					continue
				try:
					full_desc = self._aux_get(
						full_package, ["DESCRIPTION"])[0]
				except KeyError:
					self._aux_get_error(full_package)
					continue
				if not self.searchre.search(full_desc):
					continue

				yield ("desc", package)

		self.sdict = self.setconfig.getSets()
		for setname in self.sdict:
			self._spinner_update()
			if match_category:
				match_string = setname
			else:
				match_string = setname.split("/")[-1]
			
			if self.searchre.search(match_string):
				yield ("set", setname)
			elif self.searchdesc:
				if self.searchre.search(
					self.sdict[setname].getMetadata("DESCRIPTION")):
					yield ("set", setname)
Code example #30
0
File: rsync.py Project: jonasstein/portage
	def _sync(self):
		'''Internal sync function which performs only the sync'''
		opts = self.options.get('emerge_config').opts
		self.usersync_uid = self.options.get('usersync_uid', None)
		enter_invalid = '--ask-enter-invalid' in opts
		out = portage.output.EOutput()
		syncuri = self.repo.sync_uri
		vcs_dirs = frozenset(VCS_DIRS)
		vcs_dirs = vcs_dirs.intersection(os.listdir(self.repo.location))

		for vcs_dir in vcs_dirs:
			writemsg_level(("!!! %s appears to be under revision " + \
				"control (contains %s).\n!!! Aborting rsync sync.\n") % \
				(self.repo.location, vcs_dir), level=logging.ERROR, noiselevel=-1)
			return (1, False)
		self.timeout=180

		rsync_opts = []
		if self.settings["PORTAGE_RSYNC_OPTS"] == "":
			rsync_opts = self._set_rsync_defaults()
		else:
			rsync_opts = self._validate_rsync_opts(rsync_opts, syncuri)
		self.rsync_opts = self._rsync_opts_extend(opts, rsync_opts)

		self.extra_rsync_opts = portage.util.shlex_split(
			self.settings.get("PORTAGE_RSYNC_EXTRA_OPTS",""))

		# Real local timestamp file.
		self.servertimestampfile = os.path.join(
			self.repo.location, "metadata", "timestamp.chk")

		content = portage.util.grabfile(self.servertimestampfile)
		timestamp = 0
		if content:
			try:
				timestamp = time.mktime(time.strptime(content[0],
					TIMESTAMP_FORMAT))
			except (OverflowError, ValueError):
				pass
		del content

		try:
			self.rsync_initial_timeout = \
				int(self.settings.get("PORTAGE_RSYNC_INITIAL_TIMEOUT", "15"))
		except ValueError:
			self.rsync_initial_timeout = 15

		try:
			maxretries=int(self.settings["PORTAGE_RSYNC_RETRIES"])
		except SystemExit as e:
			raise # Needed else can't exit
		except:
			maxretries = -1 #default number of retries

		if syncuri.startswith("file://"):
			self.proto = "file"
			dosyncuri = syncuri[6:]
			is_synced, exitcode = self._do_rsync(
				dosyncuri, timestamp, opts)
			self._process_exitcode(exitcode, dosyncuri, out, 1)
			return (exitcode, exitcode == os.EX_OK)

		retries=0
		try:
			self.proto, user_name, hostname, port = re.split(
				r"(rsync|ssh)://([^:/]+@)?(\[[:\da-fA-F]*\]|[^:/]*)(:[0-9]+)?",
				syncuri, maxsplit=4)[1:5]
		except ValueError:
			writemsg_level("!!! sync-uri is invalid: %s\n" % syncuri,
				noiselevel=-1, level=logging.ERROR)
			return (1, False)

		self.ssh_opts = self.settings.get("PORTAGE_SSH_OPTS")

		if port is None:
			port=""
		if user_name is None:
			user_name=""
		if re.match(r"^\[[:\da-fA-F]*\]$", hostname) is None:
			getaddrinfo_host = hostname
		else:
			# getaddrinfo needs the brackets stripped
			getaddrinfo_host = hostname[1:-1]
		updatecache_flg=True
		all_rsync_opts = set(self.rsync_opts)
		all_rsync_opts.update(self.extra_rsync_opts)

		family = socket.AF_UNSPEC
		if "-4" in all_rsync_opts or "--ipv4" in all_rsync_opts:
			family = socket.AF_INET
		elif socket.has_ipv6 and \
			("-6" in all_rsync_opts or "--ipv6" in all_rsync_opts):
			family = socket.AF_INET6

		addrinfos = None
		uris = []

		try:
			addrinfos = getaddrinfo_validate(
				socket.getaddrinfo(getaddrinfo_host, None,
				family, socket.SOCK_STREAM))
		except socket.error as e:
			writemsg_level(
				"!!! getaddrinfo failed for '%s': %s\n" % (hostname, e),
				noiselevel=-1, level=logging.ERROR)

		if addrinfos:

			AF_INET = socket.AF_INET
			AF_INET6 = None
			if socket.has_ipv6:
				AF_INET6 = socket.AF_INET6

			ips_v4 = []
			ips_v6 = []

			for addrinfo in addrinfos:
				if addrinfo[0] == AF_INET:
					ips_v4.append("%s" % addrinfo[4][0])
				elif AF_INET6 is not None and addrinfo[0] == AF_INET6:
					# IPv6 addresses need to be enclosed in square brackets
					ips_v6.append("[%s]" % addrinfo[4][0])

			random.shuffle(ips_v4)
			random.shuffle(ips_v6)

			# Give priority to the address family that
			# getaddrinfo() returned first.
			if AF_INET6 is not None and addrinfos and \
				addrinfos[0][0] == AF_INET6:
				ips = ips_v6 + ips_v4
			else:
				ips = ips_v4 + ips_v6

			for ip in ips:
				uris.append(syncuri.replace(
					"//" + user_name + hostname + port + "/",
					"//" + user_name + ip + port + "/", 1))

		if not uris:
			# With some configurations we need to use the plain hostname
			# rather than try to resolve the ip addresses (bug #340817).
			uris.append(syncuri)

		# reverse, for use with pop()
		uris.reverse()

		effective_maxretries = maxretries
		if effective_maxretries < 0:
			effective_maxretries = len(uris) - 1

		while (1):
			if uris:
				dosyncuri = uris.pop()
			else:
				writemsg("!!! Exhausted addresses for %s\n" % \
					hostname, noiselevel=-1)
				return (1, False)

			if (retries==0):
				if "--ask" in opts:
					uq = UserQuery(opts)
					if uq.query("Do you want to sync your Portage tree " + \
						"with the mirror at\n" + blue(dosyncuri) + bold("?"),
						enter_invalid) == "No":
						print()
						print("Quitting.")
						print()
						sys.exit(128 + signal.SIGINT)
				self.logger(self.xterm_titles,
					">>> Starting rsync with " + dosyncuri)
				if "--quiet" not in opts:
					print(">>> Starting rsync with "+dosyncuri+"...")
			else:
				self.logger(self.xterm_titles,
					">>> Starting retry %d of %d with %s" % \
						(retries, effective_maxretries, dosyncuri))
				writemsg_stdout(
					"\n\n>>> Starting retry %d of %d with %s\n" % \
					(retries, effective_maxretries, dosyncuri), noiselevel=-1)

			if dosyncuri.startswith('ssh://'):
				dosyncuri = dosyncuri[6:].replace('/', ':/', 1)

			is_synced, exitcode = self._do_rsync(dosyncuri, timestamp, opts)
			if is_synced:
				break

			retries=retries+1

			if maxretries < 0 or retries <= maxretries:
				print(">>> Retrying...")
			else:
				# over retries
				# exit loop
				updatecache_flg=False
				exitcode = EXCEEDED_MAX_RETRIES
				break
		self._process_exitcode(exitcode, dosyncuri, out, maxretries)
		return (exitcode, updatecache_flg)
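
Both versions of _sync() pull the protocol, optional user name, host, and optional port out of the sync URI with the same re.split() pattern. Run on its own it looks like the sketch below; the example URI is illustrative.

import re


def parse_sync_uri(syncuri):
    """Split an rsync:// or ssh:// sync URI into its components using the
    pattern from the code above; raises ValueError on a malformed URI."""
    proto, user_name, hostname, port = re.split(
        r"(rsync|ssh)://([^:/]+@)?(\[[:\da-fA-F]*\]|[^:/]*)(:[0-9]+)?",
        syncuri, maxsplit=4)[1:5]
    return proto, user_name or "", hostname, port or ""


# For example, parse_sync_uri("rsync://user@rsync.example.org:873/gentoo-portage")
# returns ('rsync', 'user@', 'rsync.example.org', ':873').
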
Code example #31
0
def _global_updates(trees, prev_mtimes):
	"""
	Perform new global updates if they exist in $PORTDIR/profiles/updates/.
	This simply returns if ROOT != "/" (when len(trees) != 1). If ROOT != "/"
	then the user should instead use emaint --fix movebin and/or moveinst.

	@param trees: A dictionary containing portage trees.
	@type trees: dict
	@param prev_mtimes: A dictionary containing mtimes of files located in
		$PORTDIR/profiles/updates/.
	@type prev_mtimes: dict
	@rtype: None or List
	@return: None if there were no updates, otherwise a list of update commands
		that have been performed.
	"""
	# only do this if we're root and not running repoman/ebuild digest

	if secpass < 2 or \
		"SANDBOX_ACTIVE" in os.environ or \
		len(trees) != 1:
		return 0
	root = "/"
	mysettings = trees["/"]["vartree"].settings
	updpath = os.path.join(mysettings["PORTDIR"], "profiles", "updates")

	try:
		if mysettings["PORTAGE_CALLER"] == "fixpackages":
			update_data = grab_updates(updpath)
		else:
			update_data = grab_updates(updpath, prev_mtimes)
	except DirectoryNotFound:
		writemsg(_("--- 'profiles/updates' is empty or "
			"not available. Empty portage tree?\n"), noiselevel=1)
		return 0
	myupd = None
	if len(update_data) > 0:
		do_upgrade_packagesmessage = 0
		myupd = []
		timestamps = {}
		for mykey, mystat, mycontent in update_data:
			writemsg_stdout("\n\n")
			writemsg_stdout(colorize("GOOD",
				_("Performing Global Updates: "))+bold(mykey)+"\n")
			writemsg_stdout(_("(Could take a couple of minutes if you have a lot of binary packages.)\n"))
			writemsg_stdout(_("  %s='update pass'  %s='binary update'  "
				"%s='/var/db update'  %s='/var/db move'\n"
				"  %s='/var/db SLOT move'  %s='binary move'  "
				"%s='binary SLOT move'\n  %s='update /etc/portage/package.*'\n") % \
				(bold("."), bold("*"), bold("#"), bold("@"), bold("s"), bold("%"), bold("S"), bold("p")))
			valid_updates, errors = parse_updates(mycontent)
			myupd.extend(valid_updates)
			writemsg_stdout(len(valid_updates) * "." + "\n")
			if len(errors) == 0:
				# Update our internal mtime since we
				# processed all of our directives.
				timestamps[mykey] = mystat[stat.ST_MTIME]
			else:
				for msg in errors:
					writemsg("%s\n" % msg, noiselevel=-1)

		world_file = os.path.join(root, WORLD_FILE)
		world_list = grabfile(world_file)
		world_modified = False
		for update_cmd in myupd:
			for pos, atom in enumerate(world_list):
				new_atom = update_dbentry(update_cmd, atom)
				if atom != new_atom:
					world_list[pos] = new_atom
					world_modified = True
		if world_modified:
			world_list.sort()
			write_atomic(world_file,
				"".join("%s\n" % (x,) for x in world_list))

		update_config_files("/",
			mysettings.get("CONFIG_PROTECT","").split(),
			mysettings.get("CONFIG_PROTECT_MASK","").split(),
			myupd)

		vardb = trees["/"]["vartree"].dbapi
		bindb = trees["/"]["bintree"].dbapi
		if not os.access(bindb.bintree.pkgdir, os.W_OK):
			bindb = None
		else:
			# Call binarytree.populate(), since we want to make sure it's
			# only populated with local packages here (getbinpkgs=0).
			bindb.bintree.populate()
		for update_cmd in myupd:
			if update_cmd[0] == "move":
				moves = vardb.move_ent(update_cmd)
				if moves:
					writemsg_stdout(moves * "@")
				if bindb:
					moves = bindb.move_ent(update_cmd)
					if moves:
						writemsg_stdout(moves * "%")
			elif update_cmd[0] == "slotmove":
				moves = vardb.move_slot_ent(update_cmd)
				if moves:
					writemsg_stdout(moves * "s")
				if bindb:
					moves = bindb.move_slot_ent(update_cmd)
					if moves:
						writemsg_stdout(moves * "S")

		# The above global updates proceed quickly, so they
		# are considered a single mtimedb transaction.
		if len(timestamps) > 0:
			# We do not update the mtime in the mtimedb
			# until after _all_ of the above updates have
			# been processed because the mtimedb will
			# automatically commit when killed by ctrl C.
			for mykey, mtime in timestamps.items():
				prev_mtimes[mykey] = mtime

		# We gotta do the brute force updates for these now.
		if mysettings["PORTAGE_CALLER"] == "fixpackages" or \
		"fixpackages" in mysettings.features:
			def onUpdate(maxval, curval):
				if curval > 0:
					writemsg_stdout("#")
			vardb.update_ents(myupd, onUpdate=onUpdate)
			if bindb:
				def onUpdate(maxval, curval):
					if curval > 0:
						writemsg_stdout("*")
				bindb.update_ents(myupd, onUpdate=onUpdate)
		else:
			do_upgrade_packagesmessage = 1

		# Update progress above is indicated by characters written to stdout so
		# we print a couple new lines here to separate the progress output from
		# what follows.
		print()
		print()

		if do_upgrade_packagesmessage and bindb and \
			bindb.cpv_all():
			writemsg_stdout(_(" ** Skipping packages. Run 'fixpackages' or set it in FEATURES to fix the tbz2's in the packages directory.\n"))
			writemsg_stdout(bold(_("Note: This can take a very long time.")))
			writemsg_stdout("\n")
	if myupd:
		return myupd
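
_global_updates() above applies each parsed "move" command to the atoms in the world file through update_dbentry(). The simplified, self-contained illustration below only handles bare category/package atoms; real world atoms may carry operators, versions, and slots, which update_dbentry() takes care of.

def apply_moves_to_world(world_list, moves):
    """Rewrite category/package names in a list of world atoms according to
    ("move", old_cp, new_cp) commands (simplified sketch)."""
    modified = False
    for _cmd, old_cp, new_cp in moves:
        for pos, atom in enumerate(world_list):
            # Only exact matches are renamed in this simplified version.
            if atom == old_cp:
                world_list[pos] = new_cp
                modified = True
    return modified


world = ["app-editors/gvim", "dev-util/git"]
if apply_moves_to_world(world, [("move", "dev-util/git", "dev-vcs/git")]):
    world.sort()  # the real code sorts before writing the file atomically
# world is now ['app-editors/gvim', 'dev-vcs/git']
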
Code example #32
0
def fetch(myuris, mysettings, listonly=0, fetchonly=0,
	locks_in_subdir=".locks", use_locks=1, try_mirrors=1, digests=None,
	allow_missing_digests=True, force=False):
	"""
	Fetch files to DISTDIR and also verify digests if they are available.

	@param myuris: Maps each file name to a tuple of available fetch URIs.
	@type myuris: dict
	@param mysettings: Portage config instance.
	@type mysettings: portage.config
	@param listonly: Only print URIs and do not actually fetch them.
	@type listonly: bool
	@param fetchonly: Do not block for files that are locked by a
		concurrent fetcher process. This means that the function can
		return successfully *before* all files have been successfully
		fetched!
	@type fetchonly: bool
	@param use_locks: Enable locks. This parameter is ineffective if
		FEATURES=distlocks is disabled in the portage config!
	@type use_locks: bool
	@param digests: Maps each file name to a dict of digest types and values.
	@type digests: dict
	@param allow_missing_digests: Enable fetch even if there are no digests
		available for verification.
	@type allow_missing_digests: bool
	@param force: Force download, even when a file already exists in
		DISTDIR. This is most useful when there are no digests available,
		since otherwise download will be automatically forced if the
		existing file does not match the available digests. Also, this
		avoids the need to remove the existing file in advance, which
		makes it possible to atomically replace the file and avoid
		interference with concurrent processes.
	@type force: bool
	@rtype: int
	@return: 1 if successful, 0 otherwise.
	"""

	if force and digests:
		# Since the force parameter can trigger unnecessary fetch when the
		# digests match, do not allow force=True when digests are provided.
		raise PortageException(_('fetch: force=True is not allowed when digests are provided'))

	if not myuris:
		return 1

	features = mysettings.features
	restrict = mysettings.get("PORTAGE_RESTRICT","").split()
	userfetch = portage.data.secpass >= 2 and "userfetch" in features

	# 'nomirror' is bad/negative logic. You Restrict mirroring, not no-mirroring.
	restrict_mirror = "mirror" in restrict or "nomirror" in restrict
	if restrict_mirror:
		if ("mirror" in features) and ("lmirror" not in features):
			# lmirror should allow you to bypass mirror restrictions.
			# XXX: This is not a good thing, and is temporary at best.
			print(_(">>> \"mirror\" mode desired and \"mirror\" restriction found; skipping fetch."))
			return 1

	# Generally, downloading the same file repeatedly from
	# every single available mirror is a waste of bandwidth
	# and time, so there needs to be a cap.
	checksum_failure_max_tries = 5
	v = checksum_failure_max_tries
	try:
		v = int(mysettings.get("PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS",
			checksum_failure_max_tries))
	except (ValueError, OverflowError):
		writemsg(_("!!! Variable PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS"
			" contains non-integer value: '%s'\n") % \
			mysettings["PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS"], noiselevel=-1)
		writemsg(_("!!! Using PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS "
			"default value: %s\n") % checksum_failure_max_tries,
			noiselevel=-1)
		v = checksum_failure_max_tries
	if v < 1:
		writemsg(_("!!! Variable PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS"
			" contains value less than 1: '%s'\n") % v, noiselevel=-1)
		writemsg(_("!!! Using PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS "
			"default value: %s\n") % checksum_failure_max_tries,
			noiselevel=-1)
		v = checksum_failure_max_tries
	checksum_failure_max_tries = v
	del v

	fetch_resume_size_default = "350K"
	fetch_resume_size = mysettings.get("PORTAGE_FETCH_RESUME_MIN_SIZE")
	if fetch_resume_size is not None:
		fetch_resume_size = "".join(fetch_resume_size.split())
		if not fetch_resume_size:
			# If it's undefined or empty, silently use the default.
			fetch_resume_size = fetch_resume_size_default
		match = _fetch_resume_size_re.match(fetch_resume_size)
		if match is None or \
			(match.group(2).upper() not in _size_suffix_map):
			writemsg(_("!!! Variable PORTAGE_FETCH_RESUME_MIN_SIZE"
				" contains an unrecognized format: '%s'\n") % \
				mysettings["PORTAGE_FETCH_RESUME_MIN_SIZE"], noiselevel=-1)
			writemsg(_("!!! Using PORTAGE_FETCH_RESUME_MIN_SIZE "
				"default value: %s\n") % fetch_resume_size_default,
				noiselevel=-1)
			fetch_resume_size = None
	if fetch_resume_size is None:
		fetch_resume_size = fetch_resume_size_default
		match = _fetch_resume_size_re.match(fetch_resume_size)
	fetch_resume_size = int(match.group(1)) * \
		2 ** _size_suffix_map[match.group(2).upper()]
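	# Worked example (assuming _size_suffix_map maps "K" to the exponent 10):
	# the default "350K" parses to 350 * 2**10 = 358400 bytes.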

	# Behave like the package has RESTRICT="primaryuri" after a
	# couple of checksum failures, to increase the probability
	# of success before checksum_failure_max_tries is reached.
	checksum_failure_primaryuri = 2
	thirdpartymirrors = mysettings.thirdpartymirrors()

	# In the background parallel-fetch process, it's safe to skip checksum
	# verification of pre-existing files in $DISTDIR that have the correct
	# file size. The parent process will verify their checksums prior to
	# the unpack phase.

	parallel_fetchonly = "PORTAGE_PARALLEL_FETCHONLY" in mysettings
	if parallel_fetchonly:
		fetchonly = 1

	check_config_instance(mysettings)

	custommirrors = grabdict(os.path.join(mysettings["PORTAGE_CONFIGROOT"],
		CUSTOM_MIRRORS_FILE), recursive=1)

	mymirrors=[]

	if listonly or ("distlocks" not in features):
		use_locks = 0

	distdir_writable = os.access(mysettings["DISTDIR"], os.W_OK)
	fetch_to_ro = 0
	if "skiprocheck" in features:
		fetch_to_ro = 1

	if not distdir_writable and fetch_to_ro:
		if use_locks:
			writemsg(colorize("BAD",
				_("!!! For fetching to a read-only filesystem, "
				"locking should be turned off.\n")), noiselevel=-1)
			writemsg(_("!!! This can be done by adding -distlocks to "
				"FEATURES in /etc/portage/make.conf\n"), noiselevel=-1)
#			use_locks = 0

	# local mirrors are always added
	if try_mirrors and "local" in custommirrors:
		mymirrors += custommirrors["local"]

	if restrict_mirror:
		# We don't add any mirrors.
		pass
	else:
		if try_mirrors:
			mymirrors += [x.rstrip("/") for x in mysettings["GENTOO_MIRRORS"].split() if x]

	hash_filter = _hash_filter(mysettings.get("PORTAGE_CHECKSUM_FILTER", ""))
	if hash_filter.transparent:
		hash_filter = None
	skip_manifest = mysettings.get("EBUILD_SKIP_MANIFEST") == "1"
	if skip_manifest:
		allow_missing_digests = True
	pkgdir = mysettings.get("O")
	if digests is None and not (pkgdir is None or skip_manifest):
		mydigests = mysettings.repositories.get_repo_for_location(
			os.path.dirname(os.path.dirname(pkgdir))).load_manifest(
			pkgdir, mysettings["DISTDIR"]).getTypeDigests("DIST")
	elif digests is None or skip_manifest:
		# no digests because fetch was not called for a specific package
		mydigests = {}
	else:
		mydigests = digests

	ro_distdirs = [x for x in \
		shlex_split(mysettings.get("PORTAGE_RO_DISTDIRS", "")) \
		if os.path.isdir(x)]

	fsmirrors = []
	for x in range(len(mymirrors)-1,-1,-1):
		if mymirrors[x] and mymirrors[x][0]=='/':
			fsmirrors += [mymirrors[x]]
			del mymirrors[x]

	restrict_fetch = "fetch" in restrict
	force_mirror = "force-mirror" in features and not restrict_mirror
	custom_local_mirrors = custommirrors.get("local", [])
	if restrict_fetch:
		# With fetch restriction, a normal uri may only be fetched from
		# custom local mirrors (if available).  A mirror:// uri may also
		# be fetched from specific mirrors (effectively overriding fetch
		# restriction, but only for specific mirrors).
		locations = custom_local_mirrors
	else:
		locations = mymirrors

	file_uri_tuples = []
	# Check for 'items' attribute since OrderedDict is not a dict.
	if hasattr(myuris, 'items'):
		for myfile, uri_set in myuris.items():
			for myuri in uri_set:
				file_uri_tuples.append((myfile, myuri))
			if not uri_set:
				file_uri_tuples.append((myfile, None))
	else:
		for myuri in myuris:
			if urlparse(myuri).scheme:
				file_uri_tuples.append((os.path.basename(myuri), myuri))
			else:
				file_uri_tuples.append((os.path.basename(myuri), None))

	filedict = OrderedDict()
	primaryuri_dict = {}
	thirdpartymirror_uris = {}
	for myfile, myuri in file_uri_tuples:
		if myfile not in filedict:
			filedict[myfile]=[]
			if distdir_writable:
				mirror_cache = os.path.join(mysettings["DISTDIR"],
						".mirror-cache.json")
			else:
				mirror_cache = None
			for l in locations:
				filedict[myfile].append(functools.partial(
					get_mirror_url, l, myfile, mysettings, mirror_cache))
		if myuri is None:
			continue
		if myuri[:9]=="mirror://":
			eidx = myuri.find("/", 9)
			if eidx != -1:
				mirrorname = myuri[9:eidx]
				path = myuri[eidx+1:]

				# Try user-defined mirrors first
				if mirrorname in custommirrors:
					for cmirr in custommirrors[mirrorname]:
						filedict[myfile].append(
							cmirr.rstrip("/") + "/" + path)

				# now try the official mirrors
				if mirrorname in thirdpartymirrors:
					uris = [locmirr.rstrip("/") + "/" + path \
						for locmirr in thirdpartymirrors[mirrorname]]
					random.shuffle(uris)
					filedict[myfile].extend(uris)
					thirdpartymirror_uris.setdefault(myfile, []).extend(uris)

				if mirrorname not in custommirrors and \
					mirrorname not in thirdpartymirrors:
					writemsg(_("!!! No known mirror by the name: %s\n") % (mirrorname))
			else:
				writemsg(_("Invalid mirror definition in SRC_URI:\n"), noiselevel=-1)
				writemsg("  %s\n" % (myuri), noiselevel=-1)
		else:
			if restrict_fetch or force_mirror:
				# Only fetching from specific mirrors is allowed.
				continue
			primaryuris = primaryuri_dict.get(myfile)
			if primaryuris is None:
				primaryuris = []
				primaryuri_dict[myfile] = primaryuris
			primaryuris.append(myuri)

	# Order primaryuri_dict values to match that in SRC_URI.
	for uris in primaryuri_dict.values():
		uris.reverse()

	# Prefer thirdpartymirrors over normal mirrors in cases when
	# the file does not yet exist on the normal mirrors.
	for myfile, uris in thirdpartymirror_uris.items():
		primaryuri_dict.setdefault(myfile, []).extend(uris)

	# Now merge primaryuri values into filedict (includes mirrors
	# explicitly referenced in SRC_URI).
	if "primaryuri" in restrict:
		for myfile, uris in filedict.items():
			filedict[myfile] = primaryuri_dict.get(myfile, []) + uris
	else:
		for myfile in filedict:
			filedict[myfile] += primaryuri_dict.get(myfile, [])

	can_fetch=True

	if listonly:
		can_fetch = False

	if can_fetch and not fetch_to_ro:
		try:
			_ensure_distdir(mysettings, mysettings["DISTDIR"])
		except PortageException as e:
			if not os.path.isdir(mysettings["DISTDIR"]):
				writemsg("!!! %s\n" % str(e), noiselevel=-1)
				writemsg(_("!!! Directory Not Found: DISTDIR='%s'\n") % mysettings["DISTDIR"], noiselevel=-1)
				writemsg(_("!!! Fetching will fail!\n"), noiselevel=-1)

	if can_fetch and \
		not fetch_to_ro and \
		not os.access(mysettings["DISTDIR"], os.W_OK):
		writemsg(_("!!! No write access to '%s'\n") % mysettings["DISTDIR"],
			noiselevel=-1)
		can_fetch = False

	distdir_writable = can_fetch and not fetch_to_ro
	failed_files = set()
	restrict_fetch_msg = False
	valid_hashes = set(get_valid_checksum_keys())
	valid_hashes.discard("size")

	for myfile in filedict:
		"""
		fetched  status
		0        nonexistent
		1        partially downloaded
		2        completely downloaded
		"""
		fetched = 0

		orig_digests = mydigests.get(myfile, {})

		if not (allow_missing_digests or listonly):
			verifiable_hash_types = set(orig_digests).intersection(valid_hashes)
			if not verifiable_hash_types:
				expected = " ".join(sorted(valid_hashes))
				got = set(orig_digests)
				got.discard("size")
				got = " ".join(sorted(got))
				reason = (_("Insufficient data for checksum verification"),
					got, expected)
				writemsg(_("!!! Fetched file: %s VERIFY FAILED!\n") % myfile,
					noiselevel=-1)
				writemsg(_("!!! Reason: %s\n") % reason[0],
					noiselevel=-1)
				writemsg(_("!!! Got:      %s\n!!! Expected: %s\n") % \
					(reason[1], reason[2]), noiselevel=-1)

				if fetchonly:
					failed_files.add(myfile)
					continue
				else:
					return 0

		size = orig_digests.get("size")
		if size == 0:
			# Zero-byte distfiles are always invalid, so discard their digests.
			del mydigests[myfile]
			orig_digests.clear()
			size = None
		pruned_digests = orig_digests
		if parallel_fetchonly:
			pruned_digests = {}
			if size is not None:
				pruned_digests["size"] = size

		myfile_path = os.path.join(mysettings["DISTDIR"], myfile)
		download_path = myfile_path if fetch_to_ro else myfile_path + _download_suffix
		has_space = True
		has_space_superuser = True
		file_lock = None
		if listonly:
			writemsg_stdout("\n", noiselevel=-1)
		else:
			# check if there is enough space in DISTDIR to completely store myfile
			# overestimate the filesize so we aren't bitten by FS overhead
			vfs_stat = None
			if size is not None and hasattr(os, "statvfs"):
				try:
					vfs_stat = os.statvfs(mysettings["DISTDIR"])
				except OSError as e:
					writemsg_level("!!! statvfs('%s'): %s\n" %
						(mysettings["DISTDIR"], e),
						noiselevel=-1, level=logging.ERROR)
					del e

			if vfs_stat is not None:
				try:
					mysize = os.stat(myfile_path).st_size
				except OSError as e:
					if e.errno not in (errno.ENOENT, errno.ESTALE):
						raise
					del e
					mysize = 0
				if (size - mysize + vfs_stat.f_bsize) >= \
					(vfs_stat.f_bsize * vfs_stat.f_bavail):

					if (size - mysize + vfs_stat.f_bsize) >= \
						(vfs_stat.f_bsize * vfs_stat.f_bfree):
						has_space_superuser = False

					if not has_space_superuser:
						has_space = False
					elif portage.data.secpass < 2:
						has_space = False
					elif userfetch:
						has_space = False

			if distdir_writable and use_locks:

				lock_kwargs = {}
				if fetchonly:
					lock_kwargs["flags"] = os.O_NONBLOCK

				try:
					file_lock = lockfile(myfile_path,
						wantnewlockfile=1, **lock_kwargs)
				except TryAgain:
					writemsg(_(">>> File '%s' is already locked by "
						"another fetcher. Continuing...\n") % myfile,
						noiselevel=-1)
					continue
		try:
			if not listonly:

				eout = EOutput()
				eout.quiet = mysettings.get("PORTAGE_QUIET") == "1"
				match, mystat = _check_distfile(
					myfile_path, pruned_digests, eout, hash_filter=hash_filter)
				if match and not force:
					# Skip permission adjustment for symlinks, since we don't
					# want to modify anything outside of the primary DISTDIR,
					# and symlinks typically point to PORTAGE_RO_DISTDIRS.
					if distdir_writable and not os.path.islink(myfile_path):
						try:
							apply_secpass_permissions(myfile_path,
								gid=portage_gid, mode=0o664, mask=0o2,
								stat_cached=mystat)
						except PortageException as e:
							if not os.access(myfile_path, os.R_OK):
								writemsg(_("!!! Failed to adjust permissions:"
									" %s\n") % str(e), noiselevel=-1)
							del e
					continue

				# Remove broken symlinks or symlinks to files which
				# _check_distfile did not match above.
				if distdir_writable and mystat is None or os.path.islink(myfile_path):
					try:
						os.unlink(myfile_path)
					except OSError as e:
						if e.errno not in (errno.ENOENT, errno.ESTALE):
							raise
					mystat = None

				if mystat is not None:
					if stat.S_ISDIR(mystat.st_mode):
						writemsg_level(
							_("!!! Unable to fetch file since "
							"a directory is in the way: \n"
							"!!!   %s\n") % myfile_path,
							level=logging.ERROR, noiselevel=-1)
						return 0

					if distdir_writable and not force:
						# Since _check_distfile did not match above, the file
						# is either corrupt or its identity has changed since
						# the last time it was fetched, so rename it.
						temp_filename = _checksum_failure_temp_file(
							mysettings, mysettings["DISTDIR"], myfile)
						writemsg_stdout(_("Refetching... "
							"File renamed to '%s'\n\n") % \
							temp_filename, noiselevel=-1)

				# Stat the temporary download file for comparison with
				# fetch_resume_size.
				try:
					mystat = os.stat(download_path)
				except OSError as e:
					if e.errno not in (errno.ENOENT, errno.ESTALE):
						raise
					mystat = None

				if mystat is not None:
					if mystat.st_size == 0:
						if distdir_writable:
							try:
								os.unlink(download_path)
							except OSError:
								pass
					elif distdir_writable and size is not None:
						if mystat.st_size < fetch_resume_size and \
							mystat.st_size < size:
							# If the file already exists and the size does not
							# match the existing digests, it may be that the
							# user is attempting to update the digest. In this
							# case, the digestgen() function will advise the
							# user to use `ebuild --force foo.ebuild manifest`
							# in order to force the old digests to be replaced.
							# Since the user may want to keep this file, rename
							# it instead of deleting it.
							writemsg(_(">>> Renaming distfile with size "
								"%d (smaller than " "PORTAGE_FETCH_RESU"
								"ME_MIN_SIZE)\n") % mystat.st_size)
							temp_filename = \
								_checksum_failure_temp_file(
									mysettings, mysettings["DISTDIR"],
									os.path.basename(download_path))
							writemsg_stdout(_("Refetching... "
								"File renamed to '%s'\n\n") % \
								temp_filename, noiselevel=-1)
						elif mystat.st_size >= size:
							temp_filename = \
								_checksum_failure_temp_file(
									mysettings, mysettings["DISTDIR"],
									os.path.basename(download_path))
							writemsg_stdout(_("Refetching... "
								"File renamed to '%s'\n\n") % \
								temp_filename, noiselevel=-1)

				if distdir_writable and ro_distdirs:
					readonly_file = None
					for x in ro_distdirs:
						filename = os.path.join(x, myfile)
						match, mystat = _check_distfile(
							filename, pruned_digests, eout, hash_filter=hash_filter)
						if match:
							readonly_file = filename
							break
					if readonly_file is not None:
						try:
							os.unlink(myfile_path)
						except OSError as e:
							if e.errno not in (errno.ENOENT, errno.ESTALE):
								raise
							del e
						os.symlink(readonly_file, myfile_path)
						continue

				# this message is shown only after we know that
				# the file is not already fetched
				if not has_space:
					writemsg(_("!!! Insufficient space to store %s in %s\n") % \
						(myfile, mysettings["DISTDIR"]), noiselevel=-1)

					if has_space_superuser:
						writemsg(_("!!! Insufficient privileges to use "
							"remaining space.\n"), noiselevel=-1)
						if userfetch:
							writemsg(_("!!! You may set FEATURES=\"-userfetch\""
								" in /etc/portage/make.conf in order to fetch with\n"
								"!!! superuser privileges.\n"), noiselevel=-1)

				if fsmirrors and not os.path.exists(myfile_path) and has_space:
					for mydir in fsmirrors:
						mirror_file = os.path.join(mydir, myfile)
						try:
							shutil.copyfile(mirror_file, download_path)
							writemsg(_("Local mirror has file: %s\n") % myfile)
							break
						except (IOError, OSError) as e:
							if e.errno not in (errno.ENOENT, errno.ESTALE):
								raise
							del e

				try:
					mystat = os.stat(download_path)
				except OSError as e:
					if e.errno not in (errno.ENOENT, errno.ESTALE):
						raise
					del e
				else:
					# Skip permission adjustment for symlinks, since we don't
					# want to modify anything outside of the primary DISTDIR,
					# and symlinks typically point to PORTAGE_RO_DISTDIRS.
					if not os.path.islink(download_path):
						try:
							apply_secpass_permissions(download_path,
								gid=portage_gid, mode=0o664, mask=0o2,
								stat_cached=mystat)
						except PortageException as e:
							if not os.access(download_path, os.R_OK):
								writemsg(_("!!! Failed to adjust permissions:"
									" %s\n") % (e,), noiselevel=-1)

					# If the file is empty then it's obviously invalid. Remove
					# the empty file and try to download if possible.
					if mystat.st_size == 0:
						if distdir_writable:
							try:
								os.unlink(download_path)
							except EnvironmentError:
								pass
					elif not orig_digests:
						# We don't have a digest, but the file exists.  We must
						# assume that it is fully downloaded.
						if not force:
							continue
					else:
						if (mydigests[myfile].get("size") is not None
								and mystat.st_size < mydigests[myfile]["size"]
								and not restrict_fetch):
							fetched = 1 # Try to resume this download.
						elif parallel_fetchonly and \
							mystat.st_size == mydigests[myfile]["size"]:
							eout = EOutput()
							eout.quiet = \
								mysettings.get("PORTAGE_QUIET") == "1"
							eout.ebegin(
								"%s size ;-)" % (myfile, ))
							eout.eend(0)
							continue
						else:
							digests = _filter_unaccelarated_hashes(mydigests[myfile])
							if hash_filter is not None:
								digests = _apply_hash_filter(digests, hash_filter)
							verified_ok, reason = verify_all(download_path, digests)
							if not verified_ok:
								writemsg(_("!!! Previously fetched"
									" file: '%s'\n") % myfile, noiselevel=-1)
								writemsg(_("!!! Reason: %s\n") % reason[0],
									noiselevel=-1)
								writemsg(_("!!! Got:      %s\n"
									"!!! Expected: %s\n") % \
									(reason[1], reason[2]), noiselevel=-1)
								if reason[0] == _("Insufficient data for checksum verification"):
									return 0
								if distdir_writable:
									temp_filename = \
										_checksum_failure_temp_file(
											mysettings, mysettings["DISTDIR"],
											os.path.basename(download_path))
									writemsg_stdout(_("Refetching... "
										"File renamed to '%s'\n\n") % \
										temp_filename, noiselevel=-1)
							else:
								if not fetch_to_ro:
									_movefile(download_path, myfile_path, mysettings=mysettings)
								eout = EOutput()
								eout.quiet = \
									mysettings.get("PORTAGE_QUIET", None) == "1"
								if digests:
									digests = list(digests)
									digests.sort()
									eout.ebegin(
										"%s %s ;-)" % (myfile, " ".join(digests)))
									eout.eend(0)
								continue # fetch any remaining files

			# Create a reversed list since that is optimal for list.pop().
			uri_list = filedict[myfile][:]
			uri_list.reverse()
			checksum_failure_count = 0
			tried_locations = set()
			while uri_list:
				loc = uri_list.pop()
				if isinstance(loc, functools.partial):
					loc = loc()
				# Eliminate duplicates here in case we've switched to
				# "primaryuri" mode on the fly due to a checksum failure.
				if loc in tried_locations:
					continue
				tried_locations.add(loc)
				if listonly:
					writemsg_stdout(loc+" ", noiselevel=-1)
					continue
				# allow different fetchcommands per protocol
				protocol = loc[0:loc.find("://")]

				global_config_path = GLOBAL_CONFIG_PATH
				if portage.const.EPREFIX:
					global_config_path = os.path.join(portage.const.EPREFIX,
							GLOBAL_CONFIG_PATH.lstrip(os.sep))

				missing_file_param = False
				fetchcommand_var = "FETCHCOMMAND_" + protocol.upper()
				fetchcommand = mysettings.get(fetchcommand_var)
				if fetchcommand is None:
					fetchcommand_var = "FETCHCOMMAND"
					fetchcommand = mysettings.get(fetchcommand_var)
					if fetchcommand is None:
						writemsg_level(
							_("!!! %s is unset. It should "
							"have been defined in\n!!! %s/make.globals.\n") \
							% (fetchcommand_var, global_config_path),
							level=logging.ERROR, noiselevel=-1)
						return 0
				if "${FILE}" not in fetchcommand:
					writemsg_level(
						_("!!! %s does not contain the required ${FILE}"
						" parameter.\n") % fetchcommand_var,
						level=logging.ERROR, noiselevel=-1)
					missing_file_param = True

				resumecommand_var = "RESUMECOMMAND_" + protocol.upper()
				resumecommand = mysettings.get(resumecommand_var)
				if resumecommand is None:
					resumecommand_var = "RESUMECOMMAND"
					resumecommand = mysettings.get(resumecommand_var)
					if resumecommand is None:
						writemsg_level(
							_("!!! %s is unset. It should "
							"have been defined in\n!!! %s/make.globals.\n") \
							% (resumecommand_var, global_config_path),
							level=logging.ERROR, noiselevel=-1)
						return 0
				if "${FILE}" not in resumecommand:
					writemsg_level(
						_("!!! %s does not contain the required ${FILE}"
						" parameter.\n") % resumecommand_var,
						level=logging.ERROR, noiselevel=-1)
					missing_file_param = True

				if missing_file_param:
					writemsg_level(
						_("!!! Refer to the make.conf(5) man page for "
						"information about how to\n!!! correctly specify "
						"FETCHCOMMAND and RESUMECOMMAND.\n"),
						level=logging.ERROR, noiselevel=-1)
					if myfile != os.path.basename(loc):
						return 0

				if not can_fetch:
					if fetched != 2:
						try:
							mysize = os.stat(download_path).st_size
						except OSError as e:
							if e.errno not in (errno.ENOENT, errno.ESTALE):
								raise
							del e
							mysize = 0

						if mysize == 0:
							writemsg(_("!!! File %s isn't fetched but unable to get it.\n") % myfile,
								noiselevel=-1)
						elif size is None or size > mysize:
							writemsg(_("!!! File %s isn't fully fetched, but unable to complete it\n") % myfile,
								noiselevel=-1)
						else:
							writemsg(_("!!! File %s is incorrect size, "
								"but unable to retry.\n") % myfile, noiselevel=-1)
						return 0
					else:
						continue

				if fetched != 2 and has_space:
					#we either need to resume or start the download
					if fetched == 1:
						try:
							mystat = os.stat(download_path)
						except OSError as e:
							if e.errno not in (errno.ENOENT, errno.ESTALE):
								raise
							del e
							fetched = 0
						else:
							if distdir_writable and mystat.st_size < fetch_resume_size:
								writemsg(_(">>> Deleting distfile with size "
									"%d (smaller than " "PORTAGE_FETCH_RESU"
									"ME_MIN_SIZE)\n") % mystat.st_size)
								try:
									os.unlink(download_path)
								except OSError as e:
									if e.errno not in \
										(errno.ENOENT, errno.ESTALE):
										raise
									del e
								fetched = 0
					if fetched == 1:
						#resume mode:
						writemsg(_(">>> Resuming download...\n"))
						locfetch=resumecommand
						command_var = resumecommand_var
					else:
						#normal mode:
						locfetch=fetchcommand
						command_var = fetchcommand_var
					writemsg_stdout(_(">>> Downloading '%s'\n") % \
						_hide_url_passwd(loc))
					variables = {
						"URI":     loc,
						"FILE":    os.path.basename(download_path)
					}

					for k in ("DISTDIR", "PORTAGE_SSH_OPTS"):
						v = mysettings.get(k)
						if v is not None:
							variables[k] = v

					myfetch = shlex_split(locfetch)
					myfetch = [varexpand(x, mydict=variables) for x in myfetch]
					myret = -1
					try:

						myret = _spawn_fetch(mysettings, myfetch)

					finally:
						try:
							apply_secpass_permissions(download_path,
								gid=portage_gid, mode=0o664, mask=0o2)
						except FileNotFound:
							pass
						except PortageException as e:
							if not os.access(download_path, os.R_OK):
								writemsg(_("!!! Failed to adjust permissions:"
									" %s\n") % str(e), noiselevel=-1)
							del e

					# If the file is empty then it's obviously invalid.  Don't
					# trust the return value from the fetcher.  Remove the
					# empty file and try to download again.
					try:
						mystat = os.lstat(download_path)
						if mystat.st_size == 0 or (stat.S_ISLNK(mystat.st_mode) and not os.path.exists(download_path)):
							os.unlink(download_path)
							fetched = 0
							continue
					except EnvironmentError:
						pass

					if mydigests is not None and myfile in mydigests:
						try:
							mystat = os.stat(download_path)
						except OSError as e:
							if e.errno not in (errno.ENOENT, errno.ESTALE):
								raise
							del e
							fetched = 0
						else:

							if stat.S_ISDIR(mystat.st_mode):
								# This can happen if FETCHCOMMAND erroneously
								# contains wget's -P option where it should
								# instead have -O.
								writemsg_level(
									_("!!! The command specified in the "
									"%s variable appears to have\n!!! "
									"created a directory instead of a "
									"normal file.\n") % command_var,
									level=logging.ERROR, noiselevel=-1)
								writemsg_level(
									_("!!! Refer to the make.conf(5) "
									"man page for information about how "
									"to\n!!! correctly specify "
									"FETCHCOMMAND and RESUMECOMMAND.\n"),
									level=logging.ERROR, noiselevel=-1)
								return 0

							# no exception?  file exists. let digestcheck() report
							# appropriately for size or checksum errors

							# If the fetcher reported success and the file is
							# too small, it's probably because the digest is
							# bad (upstream changed the distfile).  In this
							# case we don't want to attempt to resume. Show a
							# digest verification failure so that the user gets
							# a clue about what just happened.
							if myret != os.EX_OK and \
								mystat.st_size < mydigests[myfile]["size"]:
								# Fetch failed... Try the next one... Kill 404 files though.
								if (mystat[stat.ST_SIZE]<100000) and (len(myfile)>4) and not ((myfile[-5:]==".html") or (myfile[-4:]==".htm")):
									html404=re.compile("<title>.*(not found|404).*</title>",re.I|re.M)
									with io.open(
										_unicode_encode(download_path,
										encoding=_encodings['fs'], errors='strict'),
										mode='r', encoding=_encodings['content'], errors='replace'
										) as f:
										if html404.search(f.read()):
											try:
												os.unlink(download_path)
												writemsg(_(">>> Deleting invalid distfile. (Improper 404 redirect from server.)\n"))
												fetched = 0
												continue
											except (IOError, OSError):
												pass
								fetched = 1
								continue
							if True:
								# File is the correct size--check the checksums for the fetched
								# file NOW, for those users who don't have a stable/continuous
								# net connection. This way we have a chance to try to download
								# from another mirror...
								digests = _filter_unaccelarated_hashes(mydigests[myfile])
								if hash_filter is not None:
									digests = _apply_hash_filter(digests, hash_filter)
								verified_ok, reason = verify_all(download_path, digests)
								if not verified_ok:
									writemsg(_("!!! Fetched file: %s VERIFY FAILED!\n") % myfile,
										noiselevel=-1)
									writemsg(_("!!! Reason: %s\n") % reason[0],
										noiselevel=-1)
									writemsg(_("!!! Got:      %s\n!!! Expected: %s\n") % \
										(reason[1], reason[2]), noiselevel=-1)
									if reason[0] == _("Insufficient data for checksum verification"):
										return 0
									if distdir_writable:
										temp_filename = \
											_checksum_failure_temp_file(
												mysettings, mysettings["DISTDIR"],
												os.path.basename(download_path))
										writemsg_stdout(_("Refetching... "
											"File renamed to '%s'\n\n") % \
											temp_filename, noiselevel=-1)
									fetched=0
									checksum_failure_count += 1
									if checksum_failure_count == \
										checksum_failure_primaryuri:
										# Switch to "primaryuri" mode in order
										# to increase the probability
										# of success.
										primaryuris = \
											primaryuri_dict.get(myfile)
										if primaryuris:
											uri_list.extend(
												reversed(primaryuris))
									if checksum_failure_count >= \
										checksum_failure_max_tries:
										break
								else:
									if not fetch_to_ro:
										_movefile(download_path, myfile_path, mysettings=mysettings)
									eout = EOutput()
									eout.quiet = mysettings.get("PORTAGE_QUIET", None) == "1"
									if digests:
										eout.ebegin("%s %s ;-)" % \
											(myfile, " ".join(sorted(digests))))
										eout.eend(0)
									fetched=2
									break
					else: # no digests available
						if not myret:
							if not fetch_to_ro:
								_movefile(download_path, myfile_path, mysettings=mysettings)
							fetched=2
							break
						elif mydigests!=None:
							writemsg(_("No digest file available and download failed.\n\n"),
								noiselevel=-1)
		finally:
			if use_locks and file_lock:
				unlockfile(file_lock)
				file_lock = None

		if listonly:
			writemsg_stdout("\n", noiselevel=-1)
		if fetched != 2:
			if restrict_fetch and not restrict_fetch_msg:
				restrict_fetch_msg = True
				msg = _("\n!!! %s/%s"
					" has fetch restriction turned on.\n"
					"!!! This probably means that this "
					"ebuild's files must be downloaded\n"
					"!!! manually.  See the comments in"
					" the ebuild for more information.\n\n") % \
					(mysettings["CATEGORY"], mysettings["PF"])
				writemsg_level(msg,
					level=logging.ERROR, noiselevel=-1)
			elif restrict_fetch:
				pass
			elif listonly:
				pass
			elif not filedict[myfile]:
				writemsg(_("Warning: No mirrors available for file"
					" '%s'\n") % (myfile), noiselevel=-1)
			else:
				writemsg(_("!!! Couldn't download '%s'. Aborting.\n") % myfile,
					noiselevel=-1)

			if listonly:
				failed_files.add(myfile)
				continue
			elif fetchonly:
				failed_files.add(myfile)
				continue
			return 0
	if failed_files:
		return 0
	return 1
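
A minimal usage sketch for the fetch() entry point above, assuming a working Portage install where portage.settings and portage.package.ebuild.fetch are importable as in current Portage; the distfile name and URI are made up for illustration.

# Hedged sketch, not part of the project source above: fetch one distfile.
import portage
from portage.package.ebuild.fetch import fetch

settings = portage.config(clone=portage.settings)
uris = {"foo-1.0.tar.gz": ["https://example.org/src/foo-1.0.tar.gz"]}
if fetch(uris, settings, fetchonly=1):
    print("all files fetched or already present in DISTDIR")
else:
    print("fetch failed for at least one file")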
コード例 #33
0
ファイル: output.py プロジェクト: pombredanne/portage-3
	def print_changelog(self):
		"""Prints the changelog text to std_out
		"""
		for chunk in self.changelogs:
			writemsg_stdout(chunk,
				noiselevel=-1)
コード例 #34
0
def _do_global_updates(trees, prev_mtimes, quiet=False, if_mtime_changed=True):
	root = trees._running_eroot
	mysettings = trees[root]["vartree"].settings
	portdb = trees[root]["porttree"].dbapi
	vardb = trees[root]["vartree"].dbapi
	bindb = trees[root]["bintree"].dbapi

	world_file = os.path.join(mysettings['EROOT'], WORLD_FILE)
	world_list = grabfile(world_file)
	world_modified = False
	world_warnings = set()
	updpath_map = {}
	# Maps repo_name to list of updates. If a given repo has no updates
	# directory, it will be omitted. If a repo has an updates directory
	# but none need to be applied (according to timestamp logic), the
	# value in the dict will be an empty list.
	repo_map = {}
	timestamps = {}

	retupd = False
	update_notice_printed = False
	for repo_name in portdb.getRepositories():
		repo = portdb.getRepositoryPath(repo_name)
		updpath = os.path.join(repo, "profiles", "updates")
		if not os.path.isdir(updpath):
			continue

		if updpath in updpath_map:
			repo_map[repo_name] = updpath_map[updpath]
			continue

		try:
			if if_mtime_changed:
				update_data = grab_updates(updpath, prev_mtimes=prev_mtimes)
			else:
				update_data = grab_updates(updpath)
		except DirectoryNotFound:
			continue
		myupd = []
		updpath_map[updpath] = myupd
		repo_map[repo_name] = myupd
		if len(update_data) > 0:
			for mykey, mystat, mycontent in update_data:
				if not update_notice_printed:
					update_notice_printed = True
					writemsg_stdout("\n")
					if quiet:
						writemsg_stdout(colorize("GOOD",
							_("Performing Global Updates\n")))
						writemsg_stdout(_("(Could take a couple of minutes if you have a lot of binary packages.)\n"))
					else:
						writemsg_stdout(colorize("GOOD",
							_("Performing Global Updates:\n")))
						writemsg_stdout(_("(Could take a couple of minutes if you have a lot of binary packages.)\n"))
						writemsg_stdout(_("  %s='update pass'  %s='binary update'  "
							"%s='/var/db update'  %s='/var/db move'\n"
							"  %s='/var/db SLOT move'  %s='binary move'  "
							"%s='binary SLOT move'\n  %s='update /etc/portage/package.*'\n") % \
							(bold("."), bold("*"), bold("#"), bold("@"), bold("s"), bold("%"), bold("S"), bold("p")))
				valid_updates, errors = parse_updates(mycontent)
				myupd.extend(valid_updates)
				if not quiet:
					writemsg_stdout(bold(mykey))
					writemsg_stdout(len(valid_updates) * "." + "\n")
				if len(errors) == 0:
					# Update our internal mtime since we
					# processed all of our directives.
					timestamps[mykey] = mystat[stat.ST_MTIME]
				else:
					for msg in errors:
						writemsg("%s\n" % msg, noiselevel=-1)
			if myupd:
				retupd = True

	if retupd:
		if os.access(bindb.bintree.pkgdir, os.W_OK):
			# Call binarytree.populate(), since we want to make sure it's
			# only populated with local packages here (getbinpkgs=0).
			bindb.bintree.populate()
		else:
			bindb = None

	master_repo = portdb.getRepositoryName(portdb.porttree_root)
	if master_repo in repo_map:
		repo_map['DEFAULT'] = repo_map[master_repo]

	for repo_name, myupd in repo_map.items():
			if repo_name == 'DEFAULT':
				continue
			if not myupd:
				continue

			def repo_match(repository):
				return repository == repo_name or \
					(repo_name == master_repo and repository not in repo_map)

			def _world_repo_match(atoma, atomb):
				"""
				Check whether to perform a world change from atoma to atomb.
				If best vardb match for atoma comes from the same repository
				as the update file, allow that. Additionally, if portdb still
				can find a match for old atom name, warn about that.
				"""
				matches = vardb.match(atoma)
				if not matches:
					matches = vardb.match(atomb)
				if matches and \
					repo_match(vardb.aux_get(best(matches), ['repository'])[0]):
					if portdb.match(atoma):
						world_warnings.add((atoma, atomb))
					return True
				else:
					return False

			for update_cmd in myupd:
				for pos, atom in enumerate(world_list):
					new_atom = update_dbentry(update_cmd, atom)
					if atom != new_atom:
						if _world_repo_match(atom, new_atom):
							world_list[pos] = new_atom
							world_modified = True

			for update_cmd in myupd:
				if update_cmd[0] == "move":
					moves = vardb.move_ent(update_cmd, repo_match=repo_match)
					if moves:
						writemsg_stdout(moves * "@")
					if bindb:
						moves = bindb.move_ent(update_cmd, repo_match=repo_match)
						if moves:
							writemsg_stdout(moves * "%")
				elif update_cmd[0] == "slotmove":
					moves = vardb.move_slot_ent(update_cmd, repo_match=repo_match)
					if moves:
						writemsg_stdout(moves * "s")
					if bindb:
						moves = bindb.move_slot_ent(update_cmd, repo_match=repo_match)
						if moves:
							writemsg_stdout(moves * "S")

	if world_modified:
		world_list.sort()
		write_atomic(world_file,
			"".join("%s\n" % (x,) for x in world_list))
		if world_warnings:
			# XXX: print warning that we've updated world entries
			# and the old name still matches something (from an overlay)?
			pass

	if retupd:

			def _config_repo_match(repo_name, atoma, atomb):
				"""
				Check whether to perform a world change from atoma to atomb.
				If best vardb match for atoma comes from the same repository
				as the update file, allow that. Additionally, if portdb still
				can find a match for old atom name, warn about that.
				"""
				matches = vardb.match(atoma)
				if not matches:
					matches = vardb.match(atomb)
					if not matches:
						return False
				repository = vardb.aux_get(best(matches), ['repository'])[0]
				return repository == repo_name or \
					(repo_name == master_repo and repository not in repo_map)

			update_config_files(root,
				shlex_split(mysettings.get("CONFIG_PROTECT", "")),
				shlex_split(mysettings.get("CONFIG_PROTECT_MASK", "")),
				repo_map, match_callback=_config_repo_match)

			# The above global updates proceed quickly, so they
			# are considered a single mtimedb transaction.
			if timestamps:
				# We do not update the mtime in the mtimedb
				# until after _all_ of the above updates have
				# been processed because the mtimedb will
				# automatically commit when killed by ctrl C.
				for mykey, mtime in timestamps.items():
					prev_mtimes[mykey] = mtime

			do_upgrade_packagesmessage = False
			# We gotta do the brute force updates for these now.
			if True:
				def onUpdate(maxval, curval):
					if curval > 0:
						writemsg_stdout("#")
				if quiet:
					onUpdate = None
				vardb.update_ents(repo_map, onUpdate=onUpdate)
				if bindb:
					def onUpdate(maxval, curval):
						if curval > 0:
							writemsg_stdout("*")
					if quiet:
						onUpdate = None
					bindb.update_ents(repo_map, onUpdate=onUpdate)
			else:
				do_upgrade_packagesmessage = 1

			# Update progress above is indicated by characters written to stdout so
			# we print a couple new lines here to separate the progress output from
			# what follows.
			writemsg_stdout("\n\n")

			if do_upgrade_packagesmessage and bindb and \
				bindb.cpv_all():
				writemsg_stdout(_(" ** Skipping packages. Run 'fixpackages' or set it in FEATURES to fix the tbz2's in the packages directory.\n"))
				writemsg_stdout(bold(_("Note: This can take a very long time.")))
				writemsg_stdout("\n")

	return retupd
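
The profiles/updates files consumed above hold one directive per line, for example "move old-cat/pkg new-cat/pkg" or "slotmove <atom> <old-slot> <new-slot>". Below is a small sketch of feeding one such line to parse_updates(); the package names are invented and the shown result is approximate.

# Hedged sketch: parse a single "move" directive of the kind found in
# profiles/updates files.
from portage.update import parse_updates

content = "move dev-util/oldname dev-util/newname\n"
valid_updates, errors = parse_updates(content)
# valid_updates should look roughly like
# [["move", Atom("dev-util/oldname"), Atom("dev-util/newname")]], errors like []
print(valid_updates, errors)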
コード例 #35
0
def digestgen(myarchives=None, mysettings=None, myportdb=None):
    """
	Generates a digest file if missing. Fetches files if necessary.
	NOTE: myarchives and mysettings used to be positional arguments,
		so their order must be preserved for backward compatibility.
	@param mysettings: the ebuild config (mysettings["O"] must correspond
		to the ebuild's parent directory)
	@type mysettings: config
	@param myportdb: a portdbapi instance
	@type myportdb: portdbapi
	@rtype: int
	@return: 1 on success and 0 on failure
	"""
    if mysettings is None or myportdb is None:
        raise TypeError("portage.digestgen(): 'mysettings' and 'myportdb' parameter are required.")

    try:
        portage._doebuild_manifest_exempt_depend += 1
        distfiles_map = {}
        fetchlist_dict = FetchlistDict(mysettings["O"], mysettings, myportdb)
        for cpv in fetchlist_dict:
            try:
                for myfile in fetchlist_dict[cpv]:
                    distfiles_map.setdefault(myfile, []).append(cpv)
            except InvalidDependString as e:
                writemsg("!!! %s\n" % str(e), noiselevel=-1)
                del e
                return 0
        mytree = os.path.dirname(os.path.dirname(mysettings["O"]))
        try:
            mf = mysettings.repositories.get_repo_for_location(mytree)
        except KeyError:
            # backward compatibility
            mytree = os.path.realpath(mytree)
            mf = mysettings.repositories.get_repo_for_location(mytree)

        mf = mf.load_manifest(mysettings["O"], mysettings["DISTDIR"], fetchlist_dict=fetchlist_dict)

        if not mf.allow_create:
            writemsg_stdout(
                _(">>> Skipping creating Manifest for %s; " "repository is configured to not use them\n")
                % mysettings["O"]
            )
            return 1

        # Don't require all hashes since that can trigger excessive
        # fetches when sufficient digests already exist.  To ease transition
        # while Manifest 1 is being removed, only require hashes that will
        # exist before and after the transition.
        required_hash_types = set()
        required_hash_types.add("size")
        required_hash_types.add(MANIFEST2_REQUIRED_HASH)
        dist_hashes = mf.fhashdict.get("DIST", {})

        # To avoid accidental regeneration of digests with the incorrect
        # files (such as partially downloaded files), trigger the fetch
        # code if the file exists and its size doesn't match the current
        # manifest entry. If there really is a legitimate reason for the
        # digest to change, `ebuild --force digest` can be used to avoid
        # triggering this code (or else the old digests can be manually
        # removed from the Manifest).
        missing_files = []
        for myfile in distfiles_map:
            myhashes = dist_hashes.get(myfile)
            if not myhashes:
                try:
                    st = os.stat(os.path.join(mysettings["DISTDIR"], myfile))
                except OSError:
                    st = None
                if st is None or st.st_size == 0:
                    missing_files.append(myfile)
                continue
            size = myhashes.get("size")

            try:
                st = os.stat(os.path.join(mysettings["DISTDIR"], myfile))
            except OSError as e:
                if e.errno != errno.ENOENT:
                    raise
                del e
                if size == 0:
                    missing_files.append(myfile)
                    continue
                if required_hash_types.difference(myhashes):
                    missing_files.append(myfile)
                    continue
            else:
                if st.st_size == 0 or size is not None and size != st.st_size:
                    missing_files.append(myfile)
                    continue

        if missing_files:
            for myfile in missing_files:
                uris = set()
                all_restrict = set()
                for cpv in distfiles_map[myfile]:
                    uris.update(myportdb.getFetchMap(cpv, mytree=mytree)[myfile])
                    restrict = myportdb.aux_get(cpv, ["RESTRICT"], mytree=mytree)[0]
                    # Here we ignore conditional parts of RESTRICT since
                    # they don't apply unconditionally. Assume such
                    # conditionals only apply on the client side where
                    # digestgen() does not need to be called.
                    all_restrict.update(use_reduce(restrict, flat=True, matchnone=True))

                    # fetch() uses CATEGORY and PF to display a message
                    # when fetch restriction is triggered.
                    cat, pf = catsplit(cpv)
                    mysettings["CATEGORY"] = cat
                    mysettings["PF"] = pf

                    # fetch() uses PORTAGE_RESTRICT to control fetch
                    # restriction, which is only applied to files that
                    # are not fetchable via a mirror:// URI.
                mysettings["PORTAGE_RESTRICT"] = " ".join(all_restrict)

                try:
                    st = os.stat(os.path.join(mysettings["DISTDIR"], myfile))
                except OSError:
                    st = None

                if not fetch({myfile: uris}, mysettings):
                    myebuild = os.path.join(mysettings["O"], catsplit(cpv)[1] + ".ebuild")
                    spawn_nofetch(myportdb, myebuild)
                    writemsg(_("!!! Fetch failed for %s, can't update " "Manifest\n") % myfile, noiselevel=-1)
                    if myfile in dist_hashes and st is not None and st.st_size > 0:
                        # stat result is obtained before calling fetch(),
                        # since fetch may rename the existing file if the
                        # digest does not match.
                        writemsg(
                            _(
                                "!!! If you would like to "
                                "forcefully replace the existing "
                                "Manifest entry\n!!! for %s, use "
                                "the following command:\n"
                            )
                            % myfile
                            + "!!!    "
                            + colorize("INFORM", "ebuild --force %s manifest" % os.path.basename(myebuild))
                            + "\n",
                            noiselevel=-1,
                        )
                    return 0
        writemsg_stdout(_(">>> Creating Manifest for %s\n") % mysettings["O"])
        try:
            mf.create(assumeDistHashesSometimes=True, assumeDistHashesAlways=("assume-digests" in mysettings.features))
        except FileNotFound as e:
            writemsg(_("!!! File %s doesn't exist, can't update " "Manifest\n") % e, noiselevel=-1)
            return 0
        except PortagePackageException as e:
            writemsg(("!!! %s\n") % (e,), noiselevel=-1)
            return 0
        try:
            mf.write(sign=False)
        except PermissionDenied as e:
            writemsg(_("!!! Permission Denied: %s\n") % (e,), noiselevel=-1)
            return 0
        if "assume-digests" not in mysettings.features:
            distlist = list(mf.fhashdict.get("DIST", {}))
            distlist.sort()
            auto_assumed = []
            for filename in distlist:
                if not os.path.exists(os.path.join(mysettings["DISTDIR"], filename)):
                    auto_assumed.append(filename)
            if auto_assumed:
                cp = os.path.sep.join(mysettings["O"].split(os.path.sep)[-2:])
                pkgs = myportdb.cp_list(cp, mytree=mytree)
                pkgs.sort()
                writemsg_stdout("  digest.assumed" + colorize("WARN", str(len(auto_assumed)).rjust(18)) + "\n")
                for pkg_key in pkgs:
                    fetchlist = myportdb.getFetchMap(pkg_key, mytree=mytree)
                    pv = pkg_key.split("/")[1]
                    for filename in auto_assumed:
                        if filename in fetchlist:
                            writemsg_stdout("   %s::%s\n" % (pv, filename))
        return 1
    finally:
        portage._doebuild_manifest_exempt_depend -= 1
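
A hypothetical direct invocation of digestgen(); in practice it is driven by `ebuild <pkg>.ebuild manifest`. The repository path below is illustrative, and the sketch assumes the usual top-level proxies (portage.config, portage.portdb, portage.digestgen) are available.

# Hedged sketch: regenerate the Manifest for one ebuild directory.
import portage

settings = portage.config(clone=portage.settings)
settings["O"] = "/var/db/repos/gentoo/app-misc/hello"  # illustrative ebuild directory
if portage.digestgen(mysettings=settings, myportdb=portage.portdb):
    print("Manifest created or updated")
else:
    print("digestgen() failed")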
コード例 #36
0
    def _iter_search(self):

        match_category = 0
        self.packagematches = []
        if self.searchdesc:
            self.searchdesc = 1
            self.matches = {"pkg": [], "desc": [], "set": []}
        else:
            self.searchdesc = 0
            self.matches = {"pkg": [], "set": []}
        writemsg_stdout("Searching...\n\n", noiselevel=-1)

        regexsearch = False
        if self.searchkey.startswith('%'):
            regexsearch = True
            self.searchkey = self.searchkey[1:]
        if self.searchkey.startswith('@'):
            match_category = 1
            self.searchkey = self.searchkey[1:]
        fuzzy = False
        if regexsearch:
            self.searchre = re.compile(self.searchkey, re.I)
        else:
            self.searchre = re.compile(re.escape(self.searchkey), re.I)

            # Fuzzy search does not support regular expressions, therefore
            # it is disabled for regular expression searches.
            if self.fuzzy:
                fuzzy = True
                cutoff = float(self.search_similarity) / 100
                if match_category:
                    # Weigh the similarity of category and package
                    # names independently, in order to avoid matching
                    # lots of irrelevant packages in the same category
                    # when the package name is much shorter than the
                    # category name.
                    part_split = portage.catsplit
                else:
                    part_split = lambda match_string: (match_string, )

                part_matchers = []
                for part in part_split(self.searchkey):
                    seq_match = difflib.SequenceMatcher()
                    seq_match.set_seq2(part.lower())
                    part_matchers.append(seq_match)

                def fuzzy_search_part(seq_match, match_string):
                    seq_match.set_seq1(match_string.lower())
                    return (seq_match.real_quick_ratio() >= cutoff
                            and seq_match.quick_ratio() >= cutoff
                            and seq_match.ratio() >= cutoff)

                def fuzzy_search(match_string):
                    return all(
                        fuzzy_search_part(seq_match, part)
                        for seq_match, part in zip(part_matchers,
                                                   part_split(match_string)))

        for package in self._cp_all():
            self._spinner_update()

            if match_category:
                match_string = package[:]
            else:
                match_string = package.split("/")[-1]

            if self.searchre.search(match_string):
                yield ("pkg", package)
            elif fuzzy and fuzzy_search(match_string):
                yield ("pkg", package)
            elif self.searchdesc:  # DESCRIPTION searching
                # Use _first_cp to avoid an expensive visibility check,
                # since the visibility check can be avoided entirely
                # when the DESCRIPTION does not match.
                full_package = self._first_cp(package)
                if not full_package:
                    continue
                try:
                    full_desc = self._aux_get(full_package, ["DESCRIPTION"])[0]
                except KeyError:
                    self._aux_get_error(full_package)
                    continue
                if not self.searchre.search(full_desc):
                    continue

                yield ("desc", package)

        self.sdict = self.setconfig.getSets()
        for setname in self.sdict:
            self._spinner_update()
            if match_category:
                match_string = setname
            else:
                match_string = setname.split("/")[-1]

            if self.searchre.search(match_string):
                yield ("set", setname)
            elif self.searchdesc:
                if self.searchre.search(
                        self.sdict[setname].getMetadata("DESCRIPTION")):
                    yield ("set", setname)
コード例 #37
0
 def onUpdate(_maxval, curval):
     if curval > 0:
         writemsg_stdout("*")
コード例 #38
0
def digestgen(myarchives=None, mysettings=None,
	overwrite=None, manifestonly=None, myportdb=None):
	"""
	Generates a digest file if missing. Fetches files if necessary.
	NOTE: myarchives and mysettings used to be positional arguments,
		so their order must be preserved for backward compatibility.
	@param mysettings: the ebuild config (mysettings["O"] must correspond
		to the ebuild's parent directory)
	@type mysettings: config
	@param myportdb: a portdbapi instance
	@type myportdb: portdbapi
	@rtype: int
	@returns: 1 on success and 0 on failure
	"""
	if mysettings is None:
		raise TypeError("portage.digestgen(): missing" + \
			" required 'mysettings' parameter")
	if myportdb is None:
		warnings.warn("portage.digestgen() called without 'myportdb' parameter",
			DeprecationWarning, stacklevel=2)
		myportdb = portage.portdb
	if overwrite is not None:
		warnings.warn("portage.digestgen() called with " + \
			"deprecated 'overwrite' parameter",
			DeprecationWarning, stacklevel=2)
	if manifestonly is not None:
		warnings.warn("portage.digestgen() called with " + \
			"deprecated 'manifestonly' parameter",
			DeprecationWarning, stacklevel=2)

	try:
		portage._doebuild_manifest_exempt_depend += 1
		distfiles_map = {}
		fetchlist_dict = FetchlistDict(mysettings["O"], mysettings, myportdb)
		for cpv in fetchlist_dict:
			try:
				for myfile in fetchlist_dict[cpv]:
					distfiles_map.setdefault(myfile, []).append(cpv)
			except InvalidDependString as e:
				writemsg("!!! %s\n" % str(e), noiselevel=-1)
				del e
				return 0
		mytree = os.path.dirname(os.path.dirname(mysettings["O"]))
		manifest1_compat = False
		mf = Manifest(mysettings["O"], mysettings["DISTDIR"],
			fetchlist_dict=fetchlist_dict, manifest1_compat=manifest1_compat)
		# Don't require all hashes since that can trigger excessive
		# fetches when sufficient digests already exist.  To ease transition
		# while Manifest 1 is being removed, only require hashes that will
		# exist before and after the transition.
		required_hash_types = set()
		required_hash_types.add("size")
		required_hash_types.add(MANIFEST2_REQUIRED_HASH)
		dist_hashes = mf.fhashdict.get("DIST", {})

		# To avoid accidental regeneration of digests with the incorrect
		# files (such as partially downloaded files), trigger the fetch
		# code if the file exists and its size doesn't match the current
		# manifest entry. If there really is a legitimate reason for the
		# digest to change, `ebuild --force digest` can be used to avoid
		# triggering this code (or else the old digests can be manually
		# removed from the Manifest).
		missing_files = []
		for myfile in distfiles_map:
			myhashes = dist_hashes.get(myfile)
			if not myhashes:
				try:
					st = os.stat(os.path.join(mysettings["DISTDIR"], myfile))
				except OSError:
					st = None
				if st is None or st.st_size == 0:
					missing_files.append(myfile)
				continue
			size = myhashes.get("size")

			try:
				st = os.stat(os.path.join(mysettings["DISTDIR"], myfile))
			except OSError as e:
				if e.errno != errno.ENOENT:
					raise
				del e
				if size == 0:
					missing_files.append(myfile)
					continue
				if required_hash_types.difference(myhashes):
					missing_files.append(myfile)
					continue
			else:
				if st.st_size == 0 or size is not None and size != st.st_size:
					missing_files.append(myfile)
					continue

		if missing_files:
				mytree = os.path.realpath(os.path.dirname(
					os.path.dirname(mysettings["O"])))
				fetch_settings = config(clone=mysettings)
				debug = mysettings.get("PORTAGE_DEBUG") == "1"
				for myfile in missing_files:
					uris = set()
					for cpv in distfiles_map[myfile]:
						myebuild = os.path.join(mysettings["O"],
							catsplit(cpv)[1] + ".ebuild")
						# for RESTRICT=fetch, mirror, etc...
						doebuild_environment(myebuild, "fetch",
							mysettings["ROOT"], fetch_settings,
							debug, 1, myportdb)
						uris.update(myportdb.getFetchMap(
							cpv, mytree=mytree)[myfile])

					fetch_settings["A"] = myfile # for use by pkg_nofetch()

					try:
						st = os.stat(os.path.join(
							mysettings["DISTDIR"],myfile))
					except OSError:
						st = None

					if not fetch({myfile : uris}, fetch_settings):
						writemsg(_("!!! Fetch failed for %s, can't update "
							"Manifest\n") % myfile, noiselevel=-1)
						if myfile in dist_hashes and \
							st is not None and st.st_size > 0:
							# stat result is obtained before calling fetch(),
							# since fetch may rename the existing file if the
							# digest does not match.
							writemsg(_("!!! If you would like to "
								"forcefully replace the existing "
								"Manifest entry\n!!! for %s, use "
								"the following command:\n") % myfile + \
								"!!!    " + colorize("INFORM",
								"ebuild --force %s manifest" % \
								os.path.basename(myebuild)) + "\n",
								noiselevel=-1)
						return 0
		writemsg_stdout(_(">>> Creating Manifest for %s\n") % mysettings["O"])
		try:
			mf.create(assumeDistHashesSometimes=True,
				assumeDistHashesAlways=(
				"assume-digests" in mysettings.features))
		except FileNotFound as e:
			writemsg(_("!!! File %s doesn't exist, can't update "
				"Manifest\n") % e, noiselevel=-1)
			return 0
		except PortagePackageException as e:
			writemsg(("!!! %s\n") % (e,), noiselevel=-1)
			return 0
		try:
			mf.write(sign=False)
		except PermissionDenied as e:
			writemsg(_("!!! Permission Denied: %s\n") % (e,), noiselevel=-1)
			return 0
		if "assume-digests" not in mysettings.features:
			distlist = list(mf.fhashdict.get("DIST", {}))
			distlist.sort()
			auto_assumed = []
			for filename in distlist:
				if not os.path.exists(
					os.path.join(mysettings["DISTDIR"], filename)):
					auto_assumed.append(filename)
			if auto_assumed:
				mytree = os.path.realpath(
					os.path.dirname(os.path.dirname(mysettings["O"])))
				cp = os.path.sep.join(mysettings["O"].split(os.path.sep)[-2:])
				pkgs = myportdb.cp_list(cp, mytree=mytree)
				pkgs.sort()
				writemsg_stdout("  digest.assumed" + colorize("WARN",
					str(len(auto_assumed)).rjust(18)) + "\n")
				for pkg_key in pkgs:
					fetchlist = myportdb.getFetchMap(pkg_key, mytree=mytree)
					pv = pkg_key.split("/")[1]
					for filename in auto_assumed:
						if filename in fetchlist:
							writemsg_stdout(
								"   %s::%s\n" % (pv, filename))
		return 1
	finally:
		portage._doebuild_manifest_exempt_depend -= 1
コード例 #39
0
    def _iter_search(self):

        match_category = 0
        self.packagematches = []
        if self.searchdesc:
            self.searchdesc = 1
            self.matches = {"pkg": [], "desc": [], "set": []}
        else:
            self.searchdesc = 0
            self.matches = {"pkg": [], "set": []}
        writemsg_stdout("Searching...\n\n", noiselevel=-1)

        regexsearch = False
        if self.searchkey.startswith('%'):
            regexsearch = True
            self.searchkey = self.searchkey[1:]
        if self.searchkey.startswith('@'):
            match_category = 1
            self.searchkey = self.searchkey[1:]
        if regexsearch:
            self.searchre = re.compile(self.searchkey, re.I)
        else:
            self.searchre = re.compile(re.escape(self.searchkey), re.I)

        for package in self._cp_all():
            self._spinner_update()

            if match_category:
                match_string = package[:]
            else:
                match_string = package.split("/")[-1]

            if self.searchre.search(match_string):
                yield ("pkg", package)
            elif self.searchdesc:  # DESCRIPTION searching
                # Use _first_cp to avoid an expensive visibility check,
                # since the visibility check can be avoided entirely
                # when the DESCRIPTION does not match.
                full_package = self._first_cp(package)
                if not full_package:
                    continue
                try:
                    full_desc = self._aux_get(full_package, ["DESCRIPTION"])[0]
                except KeyError:
                    self._aux_get_error(full_package)
                    continue
                if not self.searchre.search(full_desc):
                    continue

                yield ("desc", package)

        self.sdict = self.setconfig.getSets()
        for setname in self.sdict:
            self._spinner_update()
            if match_category:
                match_string = setname
            else:
                match_string = setname.split("/")[-1]

            if self.searchre.search(match_string):
                yield ("set", setname)
            elif self.searchdesc:
                if self.searchre.search(
                        self.sdict[setname].getMetadata("DESCRIPTION")):
                    yield ("set", setname)
コード例 #40
0
ファイル: fetch.py プロジェクト: clickbeetle/portage-cb
def fetch(myuris, mysettings, listonly=0, fetchonly=0,
	locks_in_subdir=".locks", use_locks=1, try_mirrors=1, digests=None,
	allow_missing_digests=True):
	"fetch files.  Will use digest file if available."

	if not myuris:
		return 1

	features = mysettings.features
	restrict = mysettings.get("PORTAGE_RESTRICT","").split()

	userfetch = secpass >= 2 and "userfetch" in features
	userpriv = secpass >= 2 and "userpriv" in features

	# 'nomirror' is bad/negative logic. You Restrict mirroring, not no-mirroring.
	restrict_mirror = "mirror" in restrict or "nomirror" in restrict
	if restrict_mirror:
		if ("mirror" in features) and ("lmirror" not in features):
			# lmirror should allow you to bypass mirror restrictions.
			# XXX: This is not a good thing, and is temporary at best.
			print(_(">>> \"mirror\" mode desired and \"mirror\" restriction found; skipping fetch."))
			return 1

	# Generally, downloading the same file repeatedly from
	# every single available mirror is a waste of bandwidth
	# and time, so there needs to be a cap.
	checksum_failure_max_tries = 5
	v = checksum_failure_max_tries
	try:
		v = int(mysettings.get("PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS",
			checksum_failure_max_tries))
	except (ValueError, OverflowError):
		writemsg(_("!!! Variable PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS"
			" contains non-integer value: '%s'\n") % \
			mysettings["PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS"], noiselevel=-1)
		writemsg(_("!!! Using PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS "
			"default value: %s\n") % checksum_failure_max_tries,
			noiselevel=-1)
		v = checksum_failure_max_tries
	if v < 1:
		writemsg(_("!!! Variable PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS"
			" contains value less than 1: '%s'\n") % v, noiselevel=-1)
		writemsg(_("!!! Using PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS "
			"default value: %s\n") % checksum_failure_max_tries,
			noiselevel=-1)
		v = checksum_failure_max_tries
	checksum_failure_max_tries = v
	del v

	fetch_resume_size_default = "350K"
	fetch_resume_size = mysettings.get("PORTAGE_FETCH_RESUME_MIN_SIZE")
	if fetch_resume_size is not None:
		fetch_resume_size = "".join(fetch_resume_size.split())
		if not fetch_resume_size:
			# If it's undefined or empty, silently use the default.
			fetch_resume_size = fetch_resume_size_default
		match = _fetch_resume_size_re.match(fetch_resume_size)
		if match is None or \
			(match.group(2).upper() not in _size_suffix_map):
			writemsg(_("!!! Variable PORTAGE_FETCH_RESUME_MIN_SIZE"
				" contains an unrecognized format: '%s'\n") % \
				mysettings["PORTAGE_FETCH_RESUME_MIN_SIZE"], noiselevel=-1)
			writemsg(_("!!! Using PORTAGE_FETCH_RESUME_MIN_SIZE "
				"default value: %s\n") % fetch_resume_size_default,
				noiselevel=-1)
			fetch_resume_size = None
	if fetch_resume_size is None:
		fetch_resume_size = fetch_resume_size_default
		match = _fetch_resume_size_re.match(fetch_resume_size)
	fetch_resume_size = int(match.group(1)) * \
		2 ** _size_suffix_map[match.group(2).upper()]
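	# e.g. the default "350K" works out to 350 * 2**10 = 358400 bytes, assuming
	# _size_suffix_map maps each suffix to a power-of-two exponent (K=10, M=20, ...).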

	# Behave like the package has RESTRICT="primaryuri" after a
	# couple of checksum failures, to increase the probability
	# of success before checksum_failure_max_tries is reached.
	checksum_failure_primaryuri = 2
	thirdpartymirrors = mysettings.thirdpartymirrors()

	# In the background parallel-fetch process, it's safe to skip checksum
	# verification of pre-existing files in $DISTDIR that have the correct
	# file size. The parent process will verify their checksums prior to
	# the unpack phase.

	parallel_fetchonly = "PORTAGE_PARALLEL_FETCHONLY" in mysettings
	if parallel_fetchonly:
		fetchonly = 1

	check_config_instance(mysettings)

	custommirrors = grabdict(os.path.join(mysettings["PORTAGE_CONFIGROOT"],
		CUSTOM_MIRRORS_FILE), recursive=1)

	mymirrors=[]

	if listonly or ("distlocks" not in features):
		use_locks = 0

	fetch_to_ro = 0
	if "skiprocheck" in features:
		fetch_to_ro = 1

	if not os.access(mysettings["DISTDIR"],os.W_OK) and fetch_to_ro:
		if use_locks:
			writemsg(colorize("BAD",
				_("!!! For fetching to a read-only filesystem, "
				"locking should be turned off.\n")), noiselevel=-1)
			writemsg(_("!!! This can be done by adding -distlocks to "
				"FEATURES in /etc/make.conf\n"), noiselevel=-1)
#			use_locks = 0

	# local mirrors are always added
	if "local" in custommirrors:
		mymirrors += custommirrors["local"]

	if restrict_mirror:
		# We don't add any mirrors.
		pass
	else:
		if try_mirrors:
			mymirrors += [x.rstrip("/") for x in mysettings["GENTOO_MIRRORS"].split() if x]

	hash_filter = _hash_filter(mysettings.get("PORTAGE_CHECKSUM_FILTER", ""))
	if hash_filter.transparent:
		hash_filter = None
	skip_manifest = mysettings.get("EBUILD_SKIP_MANIFEST") == "1"
	if skip_manifest:
		allow_missing_digests = True
	pkgdir = mysettings.get("O")
	if digests is None and not (pkgdir is None or skip_manifest):
		mydigests = mysettings.repositories.get_repo_for_location(
			os.path.dirname(os.path.dirname(pkgdir))).load_manifest(
			pkgdir, mysettings["DISTDIR"]).getTypeDigests("DIST")
	elif digests is None or skip_manifest:
		# no digests because fetch was not called for a specific package
		mydigests = {}
	else:
		mydigests = digests

	ro_distdirs = [x for x in \
		shlex_split(mysettings.get("PORTAGE_RO_DISTDIRS", "")) \
		if os.path.isdir(x)]

	fsmirrors = []
	for x in range(len(mymirrors)-1,-1,-1):
		if mymirrors[x] and mymirrors[x][0]=='/':
			fsmirrors += [mymirrors[x]]
			del mymirrors[x]

	restrict_fetch = "fetch" in restrict
	force_mirror = "force-mirror" in features and not restrict_mirror
	custom_local_mirrors = custommirrors.get("local", [])
	if restrict_fetch:
		# With fetch restriction, a normal uri may only be fetched from
		# custom local mirrors (if available).  A mirror:// uri may also
		# be fetched from specific mirrors (effectively overriding fetch
		# restriction, but only for specific mirrors).
		locations = custom_local_mirrors
	else:
		locations = mymirrors

	file_uri_tuples = []
	# Check for 'items' attribute since OrderedDict is not a dict.
	if hasattr(myuris, 'items'):
		for myfile, uri_set in myuris.items():
			for myuri in uri_set:
				file_uri_tuples.append((myfile, myuri))
	else:
		for myuri in myuris:
			file_uri_tuples.append((os.path.basename(myuri), myuri))

	filedict = OrderedDict()
	primaryuri_dict = {}
	thirdpartymirror_uris = {}
	for myfile, myuri in file_uri_tuples:
		if myfile not in filedict:
			filedict[myfile]=[]
			for y in range(0,len(locations)):
				filedict[myfile].append(locations[y]+"/distfiles/"+myfile)
		if myuri[:9]=="mirror://":
			eidx = myuri.find("/", 9)
			if eidx != -1:
				mirrorname = myuri[9:eidx]
				path = myuri[eidx+1:]
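				# e.g. mirror://gentoo/foo-1.0.tar.gz -> mirrorname "gentoo",
				# path "foo-1.0.tar.gz", expanded below against each mirror base.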

				# Try user-defined mirrors first
				if mirrorname in custommirrors:
					for cmirr in custommirrors[mirrorname]:
						filedict[myfile].append(
							cmirr.rstrip("/") + "/" + path)

				# now try the official mirrors
				if mirrorname in thirdpartymirrors:
					uris = [locmirr.rstrip("/") + "/" + path \
						for locmirr in thirdpartymirrors[mirrorname]]
					random.shuffle(uris)
					filedict[myfile].extend(uris)
					thirdpartymirror_uris.setdefault(myfile, []).extend(uris)

				if not filedict[myfile]:
					writemsg(_("No known mirror by the name: %s\n") % (mirrorname))
			else:
				writemsg(_("Invalid mirror definition in SRC_URI:\n"), noiselevel=-1)
				writemsg("  %s\n" % (myuri), noiselevel=-1)
		else:
			if restrict_fetch or force_mirror:
				# Only fetching from specific mirrors is allowed.
				continue
			primaryuris = primaryuri_dict.get(myfile)
			if primaryuris is None:
				primaryuris = []
				primaryuri_dict[myfile] = primaryuris
			primaryuris.append(myuri)

	# Order primaryuri_dict values to match that in SRC_URI.
	for uris in primaryuri_dict.values():
		uris.reverse()

	# Prefer thirdpartymirrors over normal mirrors in cases when
	# the file does not yet exist on the normal mirrors.
	for myfile, uris in thirdpartymirror_uris.items():
		primaryuri_dict.setdefault(myfile, []).extend(uris)

	# Now merge primaryuri values into filedict (includes mirrors
	# explicitly referenced in SRC_URI).
	if "primaryuri" in restrict:
		for myfile, uris in filedict.items():
			filedict[myfile] = primaryuri_dict.get(myfile, []) + uris
	else:
		for myfile in filedict:
			filedict[myfile] += primaryuri_dict.get(myfile, [])

	can_fetch=True

	if listonly:
		can_fetch = False

	if can_fetch and not fetch_to_ro:
		global _userpriv_test_write_file_cache
		dirmode  = 0o070
		filemode =   0o60
		modemask =    0o2
		dir_gid = portage_gid
		if "FAKED_MODE" in mysettings:
			# When inside fakeroot, directories with portage's gid appear
			# to have root's gid. Therefore, use root's gid instead of
			# portage's gid to avoid spurious permissions adjustments
			# when inside fakeroot.
			dir_gid = 0
		distdir_dirs = [""]
		try:
			for x in distdir_dirs:
				mydir = os.path.join(mysettings["DISTDIR"], x)
				write_test_file = os.path.join(
					mydir, ".__portage_test_write__")

				try:
					st = os.stat(mydir)
				except OSError:
					st = None

				if st is not None and stat.S_ISDIR(st.st_mode):
					if not (userfetch or userpriv):
						continue
					if _userpriv_test_write_file(mysettings, write_test_file):
						continue

				_userpriv_test_write_file_cache.pop(write_test_file, None)
				if ensure_dirs(mydir, gid=dir_gid, mode=dirmode, mask=modemask):
					if st is None:
						# The directory has just been created
						# and therefore it must be empty.
						continue
					writemsg(_("Adjusting permissions recursively: '%s'\n") % mydir,
						noiselevel=-1)
					def onerror(e):
						raise # bail out on the first error that occurs during recursion
					if not apply_recursive_permissions(mydir,
						gid=dir_gid, dirmode=dirmode, dirmask=modemask,
						filemode=filemode, filemask=modemask, onerror=onerror):
						raise OperationNotPermitted(
							_("Failed to apply recursive permissions for the portage group."))
		except PortageException as e:
			if not os.path.isdir(mysettings["DISTDIR"]):
				writemsg("!!! %s\n" % str(e), noiselevel=-1)
				writemsg(_("!!! Directory Not Found: DISTDIR='%s'\n") % mysettings["DISTDIR"], noiselevel=-1)
				writemsg(_("!!! Fetching will fail!\n"), noiselevel=-1)

	if can_fetch and \
		not fetch_to_ro and \
		not os.access(mysettings["DISTDIR"], os.W_OK):
		writemsg(_("!!! No write access to '%s'\n") % mysettings["DISTDIR"],
			noiselevel=-1)
		can_fetch = False

	distdir_writable = can_fetch and not fetch_to_ro
	failed_files = set()
	restrict_fetch_msg = False

	for myfile in filedict:
		"""
		fetched  status
		0        nonexistent
		1        partially downloaded
		2        completely downloaded
		"""
		fetched = 0

		orig_digests = mydigests.get(myfile, {})

		if not (allow_missing_digests or listonly):
			verifiable_hash_types = set(orig_digests).intersection(hashfunc_map)
			verifiable_hash_types.discard("size")
			if not verifiable_hash_types:
				expected = set(hashfunc_map)
				expected.discard("size")
				expected = " ".join(sorted(expected))
				got = set(orig_digests)
				got.discard("size")
				got = " ".join(sorted(got))
				reason = (_("Insufficient data for checksum verification"),
					got, expected)
				writemsg(_("!!! Fetched file: %s VERIFY FAILED!\n") % myfile,
					noiselevel=-1)
				writemsg(_("!!! Reason: %s\n") % reason[0],
					noiselevel=-1)
				writemsg(_("!!! Got:      %s\n!!! Expected: %s\n") % \
					(reason[1], reason[2]), noiselevel=-1)

				if fetchonly:
					failed_files.add(myfile)
					continue
				else:
					return 0

		size = orig_digests.get("size")
		if size == 0:
			# Zero-byte distfiles are always invalid, so discard their digests.
			del mydigests[myfile]
			orig_digests.clear()
			size = None
		pruned_digests = orig_digests
		if parallel_fetchonly:
			pruned_digests = {}
			if size is not None:
				pruned_digests["size"] = size

		myfile_path = os.path.join(mysettings["DISTDIR"], myfile)
		has_space = True
		has_space_superuser = True
		file_lock = None
		if listonly:
			writemsg_stdout("\n", noiselevel=-1)
		else:
			# check if there is enough space in DISTDIR to completely store myfile
			# overestimate the filesize so we aren't bitten by FS overhead
			vfs_stat = None
			if size is not None and hasattr(os, "statvfs"):
				try:
					vfs_stat = os.statvfs(mysettings["DISTDIR"])
				except OSError as e:
					writemsg_level("!!! statvfs('%s'): %s\n" %
						(mysettings["DISTDIR"], e),
						noiselevel=-1, level=logging.ERROR)
					del e

			if vfs_stat is not None:
				try:
					mysize = os.stat(myfile_path).st_size
				except OSError as e:
					if e.errno not in (errno.ENOENT, errno.ESTALE):
						raise
					del e
					mysize = 0
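				# f_bavail counts blocks available to unprivileged users, while
				# f_bfree also includes the superuser-reserved blocks, so root
				# may still have room when ordinary users do not.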
				if (size - mysize + vfs_stat.f_bsize) >= \
					(vfs_stat.f_bsize * vfs_stat.f_bavail):

					if (size - mysize + vfs_stat.f_bsize) >= \
						(vfs_stat.f_bsize * vfs_stat.f_bfree):
						has_space_superuser = False

					if not has_space_superuser:
						has_space = False
					elif secpass < 2:
						has_space = False
					elif userfetch:
						has_space = False

			if distdir_writable and use_locks:

				lock_kwargs = {}
				if fetchonly:
					lock_kwargs["flags"] = os.O_NONBLOCK

				try:
					file_lock = lockfile(myfile_path,
						wantnewlockfile=1, **lock_kwargs)
				except TryAgain:
					writemsg(_(">>> File '%s' is already locked by "
						"another fetcher. Continuing...\n") % myfile,
						noiselevel=-1)
					continue
		try:
			if not listonly:

				eout = EOutput()
				eout.quiet = mysettings.get("PORTAGE_QUIET") == "1"
				match, mystat = _check_distfile(
					myfile_path, pruned_digests, eout, hash_filter=hash_filter)
				if match:
					# Skip permission adjustment for symlinks, since we don't
					# want to modify anything outside of the primary DISTDIR,
					# and symlinks typically point to PORTAGE_RO_DISTDIRS.
					if distdir_writable and not os.path.islink(myfile_path):
						try:
							apply_secpass_permissions(myfile_path,
								gid=portage_gid, mode=0o664, mask=0o2,
								stat_cached=mystat)
						except PortageException as e:
							if not os.access(myfile_path, os.R_OK):
								writemsg(_("!!! Failed to adjust permissions:"
									" %s\n") % str(e), noiselevel=-1)
							del e
					continue

				if distdir_writable and mystat is None:
					# Remove broken symlinks if necessary.
					try:
						os.unlink(myfile_path)
					except OSError:
						pass

				if mystat is not None:
					if stat.S_ISDIR(mystat.st_mode):
						writemsg_level(
							_("!!! Unable to fetch file since "
							"a directory is in the way: \n"
							"!!!   %s\n") % myfile_path,
							level=logging.ERROR, noiselevel=-1)
						return 0

					if mystat.st_size == 0:
						if distdir_writable:
							try:
								os.unlink(myfile_path)
							except OSError:
								pass
					elif distdir_writable:
						if mystat.st_size < fetch_resume_size and \
							mystat.st_size < size:
							# If the file already exists and the size does not
							# match the existing digests, it may be that the
							# user is attempting to update the digest. In this
							# case, the digestgen() function will advise the
							# user to use `ebuild --force foo.ebuild manifest`
							# in order to force the old digests to be replaced.
							# Since the user may want to keep this file, rename
							# it instead of deleting it.
							writemsg(_(">>> Renaming distfile with size "
								"%d (smaller than " "PORTAGE_FETCH_RESU"
								"ME_MIN_SIZE)\n") % mystat.st_size)
							temp_filename = \
								_checksum_failure_temp_file(
								mysettings["DISTDIR"], myfile)
							writemsg_stdout(_("Refetching... "
								"File renamed to '%s'\n\n") % \
								temp_filename, noiselevel=-1)
						elif mystat.st_size >= size:
							temp_filename = \
								_checksum_failure_temp_file(
								mysettings["DISTDIR"], myfile)
							writemsg_stdout(_("Refetching... "
								"File renamed to '%s'\n\n") % \
								temp_filename, noiselevel=-1)

				if distdir_writable and ro_distdirs:
					readonly_file = None
					for x in ro_distdirs:
						filename = os.path.join(x, myfile)
						match, mystat = _check_distfile(
							filename, pruned_digests, eout, hash_filter=hash_filter)
						if match:
							readonly_file = filename
							break
					if readonly_file is not None:
						try:
							os.unlink(myfile_path)
						except OSError as e:
							if e.errno not in (errno.ENOENT, errno.ESTALE):
								raise
							del e
						os.symlink(readonly_file, myfile_path)
						continue

				# this message is shown only after we know that
				# the file is not already fetched
				if not has_space:
					writemsg(_("!!! Insufficient space to store %s in %s\n") % \
						(myfile, mysettings["DISTDIR"]), noiselevel=-1)

					if has_space_superuser:
						writemsg(_("!!! Insufficient privileges to use "
							"remaining space.\n"), noiselevel=-1)
						if userfetch:
							writemsg(_("!!! You may set FEATURES=\"-userfetch\""
								" in /etc/make.conf in order to fetch with\n"
								"!!! superuser privileges.\n"), noiselevel=-1)

				if fsmirrors and not os.path.exists(myfile_path) and has_space:
					for mydir in fsmirrors:
						mirror_file = os.path.join(mydir, myfile)
						try:
							shutil.copyfile(mirror_file, myfile_path)
							writemsg(_("Local mirror has file: %s\n") % myfile)
							break
						except (IOError, OSError) as e:
							if e.errno not in (errno.ENOENT, errno.ESTALE):
								raise
							del e

				try:
					mystat = os.stat(myfile_path)
				except OSError as e:
					if e.errno not in (errno.ENOENT, errno.ESTALE):
						raise
					del e
				else:
					# Skip permission adjustment for symlinks, since we don't
					# want to modify anything outside of the primary DISTDIR,
					# and symlinks typically point to PORTAGE_RO_DISTDIRS.
					if not os.path.islink(myfile_path):
						try:
							apply_secpass_permissions(myfile_path,
								gid=portage_gid, mode=0o664, mask=0o2,
								stat_cached=mystat)
						except PortageException as e:
							if not os.access(myfile_path, os.R_OK):
								writemsg(_("!!! Failed to adjust permissions:"
									" %s\n") % (e,), noiselevel=-1)

					# If the file is empty then it's obviously invalid. Remove
					# the empty file and try to download if possible.
					if mystat.st_size == 0:
						if distdir_writable:
							try:
								os.unlink(myfile_path)
							except EnvironmentError:
								pass
					elif myfile not in mydigests:
						# We don't have a digest, but the file exists.  We must
						# assume that it is fully downloaded.
						continue
					else:
						if mystat.st_size < mydigests[myfile]["size"] and \
							not restrict_fetch:
							fetched = 1 # Try to resume this download.
						elif parallel_fetchonly and \
							mystat.st_size == mydigests[myfile]["size"]:
							eout = EOutput()
							eout.quiet = \
								mysettings.get("PORTAGE_QUIET") == "1"
							eout.ebegin(
								"%s size ;-)" % (myfile, ))
							eout.eend(0)
							continue
						else:
							digests = _filter_unaccelarated_hashes(mydigests[myfile])
							if hash_filter is not None:
								digests = _apply_hash_filter(digests, hash_filter)
							verified_ok, reason = verify_all(myfile_path, digests)
							if not verified_ok:
								writemsg(_("!!! Previously fetched"
									" file: '%s'\n") % myfile, noiselevel=-1)
								writemsg(_("!!! Reason: %s\n") % reason[0],
									noiselevel=-1)
								writemsg(_("!!! Got:      %s\n"
									"!!! Expected: %s\n") % \
									(reason[1], reason[2]), noiselevel=-1)
								if reason[0] == _("Insufficient data for checksum verification"):
									return 0
								if distdir_writable:
									temp_filename = \
										_checksum_failure_temp_file(
										mysettings["DISTDIR"], myfile)
									writemsg_stdout(_("Refetching... "
										"File renamed to '%s'\n\n") % \
										temp_filename, noiselevel=-1)
							else:
								eout = EOutput()
								eout.quiet = \
									mysettings.get("PORTAGE_QUIET", None) == "1"
								if digests:
									digests = list(digests)
									digests.sort()
									eout.ebegin(
										"%s %s ;-)" % (myfile, " ".join(digests)))
									eout.eend(0)
								continue # fetch any remaining files

			# Create a reversed list since that is optimal for list.pop().
			uri_list = filedict[myfile][:]
			uri_list.reverse()
			checksum_failure_count = 0
			tried_locations = set()
			while uri_list:
				loc = uri_list.pop()
				# Eliminate duplicates here in case we've switched to
				# "primaryuri" mode on the fly due to a checksum failure.
				if loc in tried_locations:
					continue
				tried_locations.add(loc)
				if listonly:
					writemsg_stdout(loc+" ", noiselevel=-1)
					continue
				# allow different fetchcommands per protocol
				protocol = loc[0:loc.find("://")]

				global_config_path = GLOBAL_CONFIG_PATH
				if mysettings['EPREFIX']:
					global_config_path = os.path.join(mysettings['EPREFIX'],
							GLOBAL_CONFIG_PATH.lstrip(os.sep))

				missing_file_param = False
				fetchcommand_var = "FETCHCOMMAND_" + protocol.upper()
				fetchcommand = mysettings.get(fetchcommand_var)
				if fetchcommand is None:
					fetchcommand_var = "FETCHCOMMAND"
					fetchcommand = mysettings.get(fetchcommand_var)
					if fetchcommand is None:
						writemsg_level(
							_("!!! %s is unset. It should "
							"have been defined in\n!!! %s/make.globals.\n") \
							% (fetchcommand_var, global_config_path),
							level=logging.ERROR, noiselevel=-1)
						return 0
				if "${FILE}" not in fetchcommand:
					writemsg_level(
						_("!!! %s does not contain the required ${FILE}"
						" parameter.\n") % fetchcommand_var,
						level=logging.ERROR, noiselevel=-1)
					missing_file_param = True

				resumecommand_var = "RESUMECOMMAND_" + protocol.upper()
				resumecommand = mysettings.get(resumecommand_var)
				if resumecommand is None:
					resumecommand_var = "RESUMECOMMAND"
					resumecommand = mysettings.get(resumecommand_var)
					if resumecommand is None:
						writemsg_level(
							_("!!! %s is unset. It should "
							"have been defined in\n!!! %s/make.globals.\n") \
							% (resumecommand_var, global_config_path),
							level=logging.ERROR, noiselevel=-1)
						return 0
				if "${FILE}" not in resumecommand:
					writemsg_level(
						_("!!! %s does not contain the required ${FILE}"
						" parameter.\n") % resumecommand_var,
						level=logging.ERROR, noiselevel=-1)
					missing_file_param = True

				if missing_file_param:
					writemsg_level(
						_("!!! Refer to the make.conf(5) man page for "
						"information about how to\n!!! correctly specify "
						"FETCHCOMMAND and RESUMECOMMAND.\n"),
						level=logging.ERROR, noiselevel=-1)
					if myfile != os.path.basename(loc):
						return 0

				if not can_fetch:
					if fetched != 2:
						try:
							mysize = os.stat(myfile_path).st_size
						except OSError as e:
							if e.errno not in (errno.ENOENT, errno.ESTALE):
								raise
							del e
							mysize = 0

						if mysize == 0:
							writemsg(_("!!! File %s isn't fetched but unable to get it.\n") % myfile,
								noiselevel=-1)
						elif size is None or size > mysize:
							writemsg(_("!!! File %s isn't fully fetched, but unable to complete it\n") % myfile,
								noiselevel=-1)
						else:
							writemsg(_("!!! File %s is incorrect size, "
								"but unable to retry.\n") % myfile, noiselevel=-1)
						return 0
					else:
						continue

				if fetched != 2 and has_space:
					#we either need to resume or start the download
					if fetched == 1:
						try:
							mystat = os.stat(myfile_path)
						except OSError as e:
							if e.errno not in (errno.ENOENT, errno.ESTALE):
								raise
							del e
							fetched = 0
						else:
							if mystat.st_size < fetch_resume_size:
								writemsg(_(">>> Deleting distfile with size "
									"%d (smaller than " "PORTAGE_FETCH_RESU"
									"ME_MIN_SIZE)\n") % mystat.st_size)
								try:
									os.unlink(myfile_path)
								except OSError as e:
									if e.errno not in \
										(errno.ENOENT, errno.ESTALE):
										raise
									del e
								fetched = 0
					if fetched == 1:
						#resume mode:
						writemsg(_(">>> Resuming download...\n"))
						locfetch=resumecommand
						command_var = resumecommand_var
					else:
						#normal mode:
						locfetch=fetchcommand
						command_var = fetchcommand_var
					writemsg_stdout(_(">>> Downloading '%s'\n") % \
						_hide_url_passwd(loc))
					variables = {
						"DISTDIR": mysettings["DISTDIR"],
						"URI":     loc,
						"FILE":    myfile
					}

					myfetch = shlex_split(locfetch)
					myfetch = [varexpand(x, mydict=variables) for x in myfetch]
					myret = -1
					try:

						myret = _spawn_fetch(mysettings, myfetch)

					finally:
						try:
							apply_secpass_permissions(myfile_path,
								gid=portage_gid, mode=0o664, mask=0o2)
						except FileNotFound:
							pass
						except PortageException as e:
							if not os.access(myfile_path, os.R_OK):
								writemsg(_("!!! Failed to adjust permissions:"
									" %s\n") % str(e), noiselevel=-1)
							del e

					# If the file is empty then it's obviously invalid.  Don't
					# trust the return value from the fetcher.  Remove the
					# empty file and try to download again.
					try:
						if os.stat(myfile_path).st_size == 0:
							os.unlink(myfile_path)
							fetched = 0
							continue
					except EnvironmentError:
						pass

					if mydigests is not None and myfile in mydigests:
						try:
							mystat = os.stat(myfile_path)
						except OSError as e:
							if e.errno not in (errno.ENOENT, errno.ESTALE):
								raise
							del e
							fetched = 0
						else:

							if stat.S_ISDIR(mystat.st_mode):
								# This can happen if FETCHCOMMAND erroneously
								# contains wget's -P option where it should
								# instead have -O.
								writemsg_level(
									_("!!! The command specified in the "
									"%s variable appears to have\n!!! "
									"created a directory instead of a "
									"normal file.\n") % command_var,
									level=logging.ERROR, noiselevel=-1)
								writemsg_level(
									_("!!! Refer to the make.conf(5) "
									"man page for information about how "
									"to\n!!! correctly specify "
									"FETCHCOMMAND and RESUMECOMMAND.\n"),
									level=logging.ERROR, noiselevel=-1)
								return 0

							# no exception?  file exists. let digestcheck() report
							# an appropriate error for size or checksum problems

							# If the fetcher reported success and the file is
							# too small, it's probably because the digest is
							# bad (upstream changed the distfile).  In this
							# case we don't want to attempt to resume. Show a
							# digest verification failure so that the user gets
							# a clue about what just happened.
							if myret != os.EX_OK and \
								mystat.st_size < mydigests[myfile]["size"]:
								# Fetch failed... Try the next one... Kill 404 files though.
								if (mystat[stat.ST_SIZE]<100000) and (len(myfile)>4) and not ((myfile[-5:]==".html") or (myfile[-4:]==".htm")):
									html404=re.compile("<title>.*(not found|404).*</title>",re.I|re.M)
									with io.open(
										_unicode_encode(myfile_path,
										encoding=_encodings['fs'], errors='strict'),
										mode='r', encoding=_encodings['content'], errors='replace'
										) as f:
										if html404.search(f.read()):
											try:
												os.unlink(mysettings["DISTDIR"]+"/"+myfile)
												writemsg(_(">>> Deleting invalid distfile. (Improper 404 redirect from server.)\n"))
												fetched = 0
												continue
											except (IOError, OSError):
												pass
								fetched = 1
								continue
							if True:
								# File is the correct size--check the checksums for the fetched
								# file NOW, for those users who don't have a stable/continuous
								# net connection. This way we have a chance to try to download
								# from another mirror...
								digests = _filter_unaccelarated_hashes(mydigests[myfile])
								if hash_filter is not None:
									digests = _apply_hash_filter(digests, hash_filter)
								verified_ok, reason = verify_all(myfile_path, digests)
								if not verified_ok:
									writemsg(_("!!! Fetched file: %s VERIFY FAILED!\n") % myfile,
										noiselevel=-1)
									writemsg(_("!!! Reason: %s\n") % reason[0],
										noiselevel=-1)
									writemsg(_("!!! Got:      %s\n!!! Expected: %s\n") % \
										(reason[1], reason[2]), noiselevel=-1)
									if reason[0] == _("Insufficient data for checksum verification"):
										return 0
									temp_filename = \
										_checksum_failure_temp_file(
										mysettings["DISTDIR"], myfile)
									writemsg_stdout(_("Refetching... "
										"File renamed to '%s'\n\n") % \
										temp_filename, noiselevel=-1)
									fetched=0
									checksum_failure_count += 1
									if checksum_failure_count == \
										checksum_failure_primaryuri:
										# Switch to "primaryuri" mode in order
										# to increase the probability of
										# success.
										primaryuris = \
											primaryuri_dict.get(myfile)
										if primaryuris:
											uri_list.extend(
												reversed(primaryuris))
									if checksum_failure_count >= \
										checksum_failure_max_tries:
										break
								else:
									eout = EOutput()
									eout.quiet = mysettings.get("PORTAGE_QUIET", None) == "1"
									if digests:
										eout.ebegin("%s %s ;-)" % \
											(myfile, " ".join(sorted(digests))))
										eout.eend(0)
									fetched=2
									break
					else:
						if not myret:
							fetched=2
							break
						elif mydigests!=None:
							writemsg(_("No digest file available and download failed.\n\n"),
								noiselevel=-1)
		finally:
			if use_locks and file_lock:
				unlockfile(file_lock)
				file_lock = None

		if listonly:
			writemsg_stdout("\n", noiselevel=-1)
		if fetched != 2:
			if restrict_fetch and not restrict_fetch_msg:
				restrict_fetch_msg = True
				msg = _("\n!!! %s/%s"
					" has fetch restriction turned on.\n"
					"!!! This probably means that this "
					"ebuild's files must be downloaded\n"
					"!!! manually.  See the comments in"
					" the ebuild for more information.\n\n") % \
					(mysettings["CATEGORY"], mysettings["PF"])
				writemsg_level(msg,
					level=logging.ERROR, noiselevel=-1)
			elif restrict_fetch:
				pass
			elif listonly:
				pass
			elif not filedict[myfile]:
				writemsg(_("Warning: No mirrors available for file"
					" '%s'\n") % (myfile), noiselevel=-1)
			else:
				writemsg(_("!!! Couldn't download '%s'. Aborting.\n") % myfile,
					noiselevel=-1)

			if listonly:
				failed_files.add(myfile)
				continue
			elif fetchonly:
				failed_files.add(myfile)
				continue
			return 0
	if failed_files:
		return 0
	return 1
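
A minimal usage sketch for the fetch() function above (hedged: the import path assumes the function lives in portage.package.ebuild.fetch as in current Portage trees, and the URI map, distfile name, and settings are illustrative, not taken from the listing):

import portage
from portage.package.ebuild.fetch import fetch

# Hypothetical distfile -> candidate-URI map, as a caller would build it
# from SRC_URI.
myuris = {"foo-1.0.tar.gz": ["https://dev.example.org/dist/foo-1.0.tar.gz"]}
mysettings = portage.config(clone=portage.settings)

# fetch() returns 1 on success and 0 on failure, as in the code above.
if not fetch(myuris, mysettings, fetchonly=1):
    print("fetch failed")
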
コード例 #41
0
ファイル: copyrights.py プロジェクト: dol-sen/portage-1
def update_copyright(fn_path,
                     year,
                     pretend=False,
                     owner=None,
                     update_owner=False,
                     add_copyright=False):
    """
	Check file for a Copyright statement, and update its year.  The
	patterns used for replacing copyrights are taken from echangelog.
	Only the first lines of each file that start with a hash ('#') are
	considered, until a line is found that doesn't start with a hash.
	Files are read and written in binary mode, so that this function
	will work correctly with files encoded in any character set, as
	long as the copyright statements consist of plain ASCII.

	@param fn_path: file path
	@type str
	@param year: current year
	@type str
	@param pretend: pretend mode
	@type bool
	@rtype: bool
	@return: True if copyright update was needed, False otherwise
	"""

    try:
        fn_hdl = io.open(_unicode_encode(fn_path,
                                         encoding=_encodings['fs'],
                                         errors='strict'),
                         mode='rb')
    except EnvironmentError:
        return

    owner = _unicode_encode(owner) or b'Gentoo Authors'

    orig_header = []
    new_header = []
    has_copyright = False

    for line in fn_hdl:
        line_strip = line.strip()
        orig_header.append(line)
        if not line_strip or line_strip[:1] != b'#':
            new_header.append(line)
            break
        has_copyright = max(has_copyright, line.startswith(b'# Copyright '))
        # update date range
        line = update_copyright_year(year, line)
        # now check for and add COPYRIGHT_OWNER
        if update_owner and owner not in line:
            line = add_owner(owner, line)
        new_header.append(line)
    if not has_copyright and add_copyright:
        new_copyright = b' '.join(
            [b'# Copyright', _unicode_encode(year), owner]) + b'\n'
        new_header.insert(0, new_copyright)

    difflines = 0
    for diffline in difflib.unified_diff(
        [_unicode_decode(diffline) for diffline in orig_header],
        [_unicode_decode(diffline) for diffline in new_header],
            fromfile=fn_path,
            tofile=fn_path,
            n=0):
        util.writemsg_stdout(diffline, noiselevel=-1)
        difflines += 1
    util.writemsg_stdout("\n", noiselevel=-1)

    # unified diff has three lines to start with
    if difflines > 3 and not pretend:
        # write new file with changed header
        f, fnnew_path = mkstemp()
        f = io.open(f, mode='wb')
        for line in new_header:
            f.write(line)
        for line in fn_hdl:
            f.write(line)
        f.close()
        try:
            fn_stat = os.stat(fn_path)
        except OSError:
            fn_stat = None

        shutil.move(fnnew_path, fn_path)

        if fn_stat is None:
            util.apply_permissions(fn_path, mode=0o644)
        else:
            util.apply_stat_permissions(fn_path, fn_stat)
    fn_hdl.close()
    return difflines > 3
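
A hedged usage sketch for update_copyright(); the file name and year are made up, and the call assumes the function defined above is available in the current namespace:

# Print the proposed header diff without writing anything (pretend=True),
# then report whether an update would have been needed.
needs_update = update_copyright("foo-1.0.ebuild", "2024",
                                pretend=True,
                                owner="Gentoo Authors",
                                update_owner=True,
                                add_copyright=True)
print("copyright header outdated:", bool(needs_update))
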
コード例 #42
0
ファイル: search.py プロジェクト: Neuvoo/legacy-portage
	def output(self):
		"""Outputs the results of the search."""
		msg = []
		msg.append("\b\b  \n[ Results for search key : " + \
			bold(self.searchkey) + " ]\n")
		msg.append("[ Applications found : " + \
			bold(str(self.mlen)) + " ]\n\n")
		vardb = self.vartree.dbapi
		for mtype in self.matches:
			for match,masked in self.matches[mtype]:
				full_package = None
				if mtype == "pkg":
					catpack = match
					full_package = self.portdb.xmatch(
						"bestmatch-visible", match)
					if not full_package:
						#no match found; we don't want to query description
						masked=1
						full_package = portage.best(
							self.portdb.xmatch("match-all",match))
				elif mtype == "desc":
					full_package = match
					match        = portage.cpv_getkey(match)
				elif mtype == "set":
					msg.append(green("*") + "  " + bold(match) + "\n")
					if self.verbose:
						msg.append("      " + darkgreen("Description:") + \
							"   " + \
							self.sdict[match].getMetadata("DESCRIPTION") \
							+ "\n\n")
					writemsg_stdout(''.join(msg), noiselevel=-1)
				if full_package:
					try:
						desc, homepage, license = self.portdb.aux_get(
							full_package, ["DESCRIPTION","HOMEPAGE","LICENSE"])
					except KeyError:
						msg.append("emerge: search: aux_get() failed, skipping\n")
						continue
					if masked:
						msg.append(green("*") + "  " + \
							white(match) + " " + red("[ Masked ]") + "\n")
					else:
						msg.append(green("*") + "  " + bold(match) + "\n")
					myversion = self.getVersion(full_package, search.VERSION_RELEASE)

					mysum = [0,0]
					file_size_str = None
					mycat = match.split("/")[0]
					mypkg = match.split("/")[1]
					mycpv = match + "-" + myversion
					myebuild = self.portdb.findname(mycpv)
					if myebuild:
						pkgdir = os.path.dirname(myebuild)
						from portage import manifest
						mf = manifest.Manifest(
							pkgdir, self.settings["DISTDIR"])
						try:
							uri_map = self.portdb.getFetchMap(mycpv)
						except portage.exception.InvalidDependString as e:
							file_size_str = "Unknown (%s)" % (e,)
							del e
						else:
							try:
								mysum[0] = mf.getDistfilesSize(uri_map)
							except KeyError as e:
								file_size_str = "Unknown (missing " + \
									"digest for %s)" % (e,)
								del e

					available = False
					for db in self._dbs:
						if db is not vardb and \
							db.cpv_exists(mycpv):
							available = True
							if not myebuild and hasattr(db, "bintree"):
								myebuild = db.bintree.getname(mycpv)
								try:
									mysum[0] = os.stat(myebuild).st_size
								except OSError:
									myebuild = None
							break

					if myebuild and file_size_str is None:
						mystr = str(mysum[0] // 1024)
						mycount = len(mystr)
						while (mycount > 3):
							mycount -= 3
							mystr = mystr[:mycount] + "," + mystr[mycount:]
						file_size_str = mystr + " kB"

					if self.verbose:
						if available:
							msg.append("      %s %s\n" % \
								(darkgreen("Latest version available:"),
								myversion))
						msg.append("      %s\n" % \
							self.getInstallationStatus(mycat+'/'+mypkg))
						if myebuild:
							msg.append("      %s %s\n" % \
								(darkgreen("Size of files:"), file_size_str))
						msg.append("      " + darkgreen("Homepage:") + \
							"      " + homepage + "\n")
						msg.append("      " + darkgreen("Description:") \
							+ "   " + desc + "\n")
						msg.append("      " + darkgreen("License:") + \
							"       " + license + "\n\n")
		writemsg_stdout(''.join(msg), noiselevel=-1)
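
A sketch of how this output() method is typically driven. The search class name, constructor signature, and execute() call are assumptions based on the _emerge.search module and may differ in this legacy tree:

# Hypothetical driver code; root_config and spinner come from emerge's
# normal startup (load_emerge_config() and stdout_spinner()).
s = search(root_config, spinner, searchdesc=False, verbose=True,
           usepkg=False, usepkgonly=False)
s.execute("vim")   # populate self.matches and self.mlen
s.output()         # print the formatted results shown above
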
コード例 #43
0
ファイル: output.py プロジェクト: antarus12345/portage
    def print_changelog(self):
        """Prints the changelog text to std_out
		"""
        for chunk in self.changelogs:
            writemsg_stdout(chunk, noiselevel=-1)
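
This variant expects self.changelogs to hold pre-formatted text chunks that are written out verbatim. A small illustrative sketch (the display object and chunk contents are hypothetical):

display.changelogs = [
    "*portage-2.2.0 (01 Jan 2013)\n",
    "  Example changelog entry.\n",
]
display.print_changelog()
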
コード例 #44
0
def digestgen(myarchives=None,
              mysettings=None,
              overwrite=None,
              manifestonly=None,
              myportdb=None):
    """
	Generates a digest file if missing. Fetches files if necessary.
	NOTE: myarchives and mysettings used to be positional arguments,
		so their order must be preserved for backward compatibility.
	@param mysettings: the ebuild config (mysettings["O"] must correspond
		to the ebuild's parent directory)
	@type mysettings: config
	@param myportdb: a portdbapi instance
	@type myportdb: portdbapi
	@rtype: int
	@returns: 1 on success and 0 on failure
	"""
    if mysettings is None:
        raise TypeError("portage.digestgen(): missing" + \
         " required 'mysettings' parameter")
    if myportdb is None:
        warnings.warn(
            "portage.digestgen() called without 'myportdb' parameter",
            DeprecationWarning,
            stacklevel=2)
        myportdb = portage.portdb
    if overwrite is not None:
        warnings.warn("portage.digestgen() called with " + \
         "deprecated 'overwrite' parameter",
         DeprecationWarning, stacklevel=2)
    if manifestonly is not None:
        warnings.warn("portage.digestgen() called with " + \
         "deprecated 'manifestonly' parameter",
         DeprecationWarning, stacklevel=2)

    try:
        portage._doebuild_manifest_exempt_depend += 1
        distfiles_map = {}
        fetchlist_dict = FetchlistDict(mysettings["O"], mysettings, myportdb)
        for cpv in fetchlist_dict:
            try:
                for myfile in fetchlist_dict[cpv]:
                    distfiles_map.setdefault(myfile, []).append(cpv)
            except InvalidDependString as e:
                writemsg("!!! %s\n" % str(e), noiselevel=-1)
                del e
                return 0
        mytree = os.path.dirname(os.path.dirname(mysettings["O"]))
        manifest1_compat = False
        mf = Manifest(mysettings["O"],
                      mysettings["DISTDIR"],
                      fetchlist_dict=fetchlist_dict,
                      manifest1_compat=manifest1_compat)
        # Don't require all hashes since that can trigger excessive
        # fetches when sufficient digests already exist.  To ease transition
        # while Manifest 1 is being removed, only require hashes that will
        # exist before and after the transition.
        required_hash_types = set()
        required_hash_types.add("size")
        required_hash_types.add(MANIFEST2_REQUIRED_HASH)
        dist_hashes = mf.fhashdict.get("DIST", {})

        # To avoid accidental regeneration of digests with the incorrect
        # files (such as partially downloaded files), trigger the fetch
        # code if the file exists and its size doesn't match the current
        # manifest entry. If there really is a legitimate reason for the
        # digest to change, `ebuild --force digest` can be used to avoid
        # triggering this code (or else the old digests can be manually
        # removed from the Manifest).
        missing_files = []
        for myfile in distfiles_map:
            myhashes = dist_hashes.get(myfile)
            if not myhashes:
                try:
                    st = os.stat(os.path.join(mysettings["DISTDIR"], myfile))
                except OSError:
                    st = None
                if st is None or st.st_size == 0:
                    missing_files.append(myfile)
                continue
            size = myhashes.get("size")

            try:
                st = os.stat(os.path.join(mysettings["DISTDIR"], myfile))
            except OSError as e:
                if e.errno != errno.ENOENT:
                    raise
                del e
                if size == 0:
                    missing_files.append(myfile)
                    continue
                if required_hash_types.difference(myhashes):
                    missing_files.append(myfile)
                    continue
            else:
                if st.st_size == 0 or size is not None and size != st.st_size:
                    missing_files.append(myfile)
                    continue

        if missing_files:
            mytree = os.path.realpath(
                os.path.dirname(os.path.dirname(mysettings["O"])))
            fetch_settings = config(clone=mysettings)
            debug = mysettings.get("PORTAGE_DEBUG") == "1"
            for myfile in missing_files:
                uris = set()
                for cpv in distfiles_map[myfile]:
                    myebuild = os.path.join(mysettings["O"],
                                            catsplit(cpv)[1] + ".ebuild")
                    # for RESTRICT=fetch, mirror, etc...
                    doebuild_environment(myebuild, "fetch", mysettings["ROOT"],
                                         fetch_settings, debug, 1, myportdb)
                    uris.update(
                        myportdb.getFetchMap(cpv, mytree=mytree)[myfile])

                fetch_settings["A"] = myfile  # for use by pkg_nofetch()

                try:
                    st = os.stat(os.path.join(mysettings["DISTDIR"], myfile))
                except OSError:
                    st = None

                if not fetch({myfile: uris}, fetch_settings):
                    writemsg(_("!!! Fetch failed for %s, can't update "
                               "Manifest\n") % myfile,
                             noiselevel=-1)
                    if myfile in dist_hashes and \
                     st is not None and st.st_size > 0:
                        # stat result is obtained before calling fetch(),
                        # since fetch may rename the existing file if the
                        # digest does not match.
                        writemsg(_("!!! If you would like to "
                         "forcefully replace the existing "
                         "Manifest entry\n!!! for %s, use "
                         "the following command:\n") % myfile + \
                         "!!!    " + colorize("INFORM",
                         "ebuild --force %s manifest" % \
                         os.path.basename(myebuild)) + "\n",
                         noiselevel=-1)
                    return 0
        writemsg_stdout(_(">>> Creating Manifest for %s\n") % mysettings["O"])
        try:
            mf.create(assumeDistHashesSometimes=True,
                      assumeDistHashesAlways=("assume-digests"
                                              in mysettings.features))
        except FileNotFound as e:
            writemsg(_("!!! File %s doesn't exist, can't update "
                       "Manifest\n") % e,
                     noiselevel=-1)
            return 0
        except PortagePackageException as e:
            writemsg(("!!! %s\n") % (e, ), noiselevel=-1)
            return 0
        try:
            mf.write(sign=False)
        except PermissionDenied as e:
            writemsg(_("!!! Permission Denied: %s\n") % (e, ), noiselevel=-1)
            return 0
        if "assume-digests" not in mysettings.features:
            distlist = list(mf.fhashdict.get("DIST", {}))
            distlist.sort()
            auto_assumed = []
            for filename in distlist:
                if not os.path.exists(
                        os.path.join(mysettings["DISTDIR"], filename)):
                    auto_assumed.append(filename)
            if auto_assumed:
                mytree = os.path.realpath(
                    os.path.dirname(os.path.dirname(mysettings["O"])))
                cp = os.path.sep.join(mysettings["O"].split(os.path.sep)[-2:])
                pkgs = myportdb.cp_list(cp, mytree=mytree)
                pkgs.sort()
                writemsg_stdout("  digest.assumed" +
                                colorize("WARN",
                                         str(len(auto_assumed)).rjust(18)) +
                                "\n")
                for pkg_key in pkgs:
                    fetchlist = myportdb.getFetchMap(pkg_key, mytree=mytree)
                    pv = pkg_key.split("/")[1]
                    for filename in auto_assumed:
                        if filename in fetchlist:
                            writemsg_stdout("   %s::%s\n" % (pv, filename))
        return 1
    finally:
        portage._doebuild_manifest_exempt_depend -= 1
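
A minimal sketch of a digestgen() call, using the function above. The ebuild directory path is illustrative; as the docstring notes, mysettings["O"] must point at the package directory whose Manifest should be generated:

import portage

mysettings = portage.config(clone=portage.settings)
mysettings["O"] = "/var/db/repos/gentoo/app-misc/hello"  # hypothetical package dir

if digestgen(mysettings=mysettings, myportdb=portage.portdb):
    print("Manifest created/updated")
else:
    print("digest generation failed")
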
コード例 #45
0
def emerge_main():
	global portage	# NFC why this is necessary now - genone
	portage._disable_legacy_globals()
	# Disable color until we're sure that it should be enabled (after
	# EMERGE_DEFAULT_OPTS has been parsed).
	portage.output.havecolor = 0
	# This first pass is just for options that need to be known as early as
	# possible, such as --config-root.  They will be parsed again later,
	# together with EMERGE_DEFAULT_OPTS (which may vary depending on the
	# value of --config-root).
	myaction, myopts, myfiles = parse_opts(sys.argv[1:], silent=True)
	if "--debug" in myopts:
		os.environ["PORTAGE_DEBUG"] = "1"
	if "--config-root" in myopts:
		os.environ["PORTAGE_CONFIGROOT"] = myopts["--config-root"]
	if "--root" in myopts:
		os.environ["ROOT"] = myopts["--root"]
	if "--accept-properties" in myopts:
		os.environ["ACCEPT_PROPERTIES"] = myopts["--accept-properties"]

	# Portage needs to ensure a sane umask for the files it creates.
	os.umask(0o22)
	settings, trees, mtimedb = load_emerge_config()
	portdb = trees[settings["ROOT"]]["porttree"].dbapi
	rval = profile_check(trees, myaction)
	if rval != os.EX_OK:
		return rval

	if myaction not in ('help', 'info', 'version') and \
		_global_updates(trees, mtimedb["updates"]):
		mtimedb.commit()
		# Reload the whole config from scratch.
		settings, trees, mtimedb = load_emerge_config(trees=trees)
		portdb = trees[settings["ROOT"]]["porttree"].dbapi

	xterm_titles = "notitles" not in settings.features
	if xterm_titles:
		xtermTitle("emerge")

	tmpcmdline = []
	if "--ignore-default-opts" not in myopts:
		tmpcmdline.extend(settings["EMERGE_DEFAULT_OPTS"].split())
	tmpcmdline.extend(sys.argv[1:])
	myaction, myopts, myfiles = parse_opts(tmpcmdline)

	if "--digest" in myopts:
		os.environ["FEATURES"] = os.environ.get("FEATURES","") + " digest"
		# Reload the whole config from scratch so that the portdbapi internal
		# config is updated with new FEATURES.
		settings, trees, mtimedb = load_emerge_config(trees=trees)
		portdb = trees[settings["ROOT"]]["porttree"].dbapi

	adjust_configs(myopts, trees)
	apply_priorities(settings)

	if myaction == 'version':
		writemsg_stdout(getportageversion(
			settings["PORTDIR"], settings["ROOT"],
			settings.profile_path, settings["CHOST"],
			trees[settings["ROOT"]]["vartree"].dbapi) + '\n', noiselevel=-1)
		return 0
	elif myaction == 'help':
		_emerge.help.help(myopts, portage.output.havecolor)
		return 0

	spinner = stdout_spinner()
	if "candy" in settings.features:
		spinner.update = spinner.update_scroll

	if "--quiet" not in myopts:
		portage.deprecated_profile_check(settings=settings)
		repo_name_check(trees)
		repo_name_duplicate_check(trees)
		config_protect_check(trees)
	check_procfs()

	if "getbinpkg" in settings.features:
		myopts["--getbinpkg"] = True

	if "--getbinpkgonly" in myopts:
		myopts["--getbinpkg"] = True

	if "--getbinpkgonly" in myopts:
		myopts["--usepkgonly"] = True

	if "--getbinpkg" in myopts:
		myopts["--usepkg"] = True

	if "--usepkgonly" in myopts:
		myopts["--usepkg"] = True

	if "buildpkg" in settings.features or "--buildpkgonly" in myopts:
		myopts["--buildpkg"] = True

	if "--buildpkgonly" in myopts:
		# --buildpkgonly will not merge anything, so
		# it cancels all binary package options.
		for opt in ("--getbinpkg", "--getbinpkgonly",
			"--usepkg", "--usepkgonly"):
			myopts.pop(opt, None)

	for mytrees in trees.values():
		mydb = mytrees["porttree"].dbapi
		# Freeze the portdbapi for performance (memoize all xmatch results).
		mydb.freeze()

		if "--usepkg" in myopts:
			# Populate the bintree with current --getbinpkg setting.
			# This needs to happen before expand_set_arguments(), in case
			# any sets use the bintree.
			mytrees["bintree"].populate(
				getbinpkgs="--getbinpkg" in myopts)

	del mytrees, mydb

	if "moo" in myfiles:
		print("""

  Larry loves Gentoo (""" + platform.system() + """)

 _______________________
< Have you mooed today? >
 -----------------------
        \   ^__^
         \  (oo)\_______
            (__)\       )\/\ 
                ||----w |
                ||     ||

""")

	for x in myfiles:
		ext = os.path.splitext(x)[1]
		if (ext == ".ebuild" or ext == ".tbz2") and os.path.exists(os.path.abspath(x)):
			print(colorize("BAD", "\n*** emerging by path is broken and may not always work!!!\n"))
			break

	root_config = trees[settings["ROOT"]]["root_config"]
	if myaction == "list-sets":
		writemsg_stdout("".join("%s\n" % s for s in sorted(root_config.sets)))
		return os.EX_OK

	ensure_required_sets(trees)

	# only expand sets for actions taking package arguments
	oldargs = myfiles[:]
	if myaction in ("clean", "config", "depclean", "info", "prune", "unmerge", None):
		myfiles, retval = expand_set_arguments(myfiles, myaction, root_config)
		if retval != os.EX_OK:
			return retval

		# Need to handle empty sets specially, otherwise emerge will react 
		# with the help message for empty argument lists
		if oldargs and not myfiles:
			print("emerge: no targets left after set expansion")
			return 0

	if ("--tree" in myopts) and ("--columns" in myopts):
		print("emerge: can't specify both of \"--tree\" and \"--columns\".")
		return 1

	if '--emptytree' in myopts and '--noreplace' in myopts:
		writemsg_level("emerge: can't specify both of " + \
			"\"--emptytree\" and \"--noreplace\".\n",
			level=logging.ERROR, noiselevel=-1)
		return 1

	if ("--quiet" in myopts):
		spinner.update = spinner.update_quiet
		portage.util.noiselimit = -1

	if "--fetch-all-uri" in myopts:
		myopts["--fetchonly"] = True

	if "--skipfirst" in myopts and "--resume" not in myopts:
		myopts["--resume"] = True

	# Allow -p to remove --ask
	if "--pretend" in myopts:
		myopts.pop("--ask", None)

	# forbid --ask when not in a terminal
	# note: this breaks `emerge --ask | tee logfile`, but that doesn't work anyway.
	if ("--ask" in myopts) and (not sys.stdin.isatty()):
		portage.writemsg("!!! \"--ask\" should only be used in a terminal. Exiting.\n",
			noiselevel=-1)
		return 1

	if settings.get("PORTAGE_DEBUG", "") == "1":
		spinner.update = spinner.update_quiet
		portage.debug=1
		if "python-trace" in settings.features:
			import portage.debug
			portage.debug.set_trace(True)

	if not ("--quiet" in myopts):
		if '--nospinner' in myopts or \
			settings.get('TERM') == 'dumb' or \
			not sys.stdout.isatty():
			spinner.update = spinner.update_basic

	if "--debug" in myopts:
		print("myaction", myaction)
		print("myopts", myopts)

	if not myaction and not myfiles and "--resume" not in myopts:
		_emerge.help.help(myopts, portage.output.havecolor)
		return 1

	pretend = "--pretend" in myopts
	fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
	buildpkgonly = "--buildpkgonly" in myopts

	# check if root user is the current user for the actions where emerge needs this
	if portage.secpass < 2:
		# We've already allowed "--version" and "--help" above.
		if "--pretend" not in myopts and myaction not in ("search","info"):
			need_superuser = myaction in ('clean', 'depclean', 'deselect',
				'prune', 'unmerge') or not \
				(fetchonly or \
				(buildpkgonly and secpass >= 1) or \
				myaction in ("metadata", "regen") or \
				(myaction == "sync" and os.access(settings["PORTDIR"], os.W_OK)))
			if portage.secpass < 1 or \
				need_superuser:
				if need_superuser:
					access_desc = "superuser"
				else:
					access_desc = "portage group"
				# Always show portage_group_warning() when only portage group
				# access is required but the user is not in the portage group.
				from portage.data import portage_group_warning
				if "--ask" in myopts:
					myopts["--pretend"] = True
					del myopts["--ask"]
					print(("%s access is required... " + \
						"adding --pretend to options\n") % access_desc)
					if portage.secpass < 1 and not need_superuser:
						portage_group_warning()
				else:
					sys.stderr.write(("emerge: %s access is required\n") \
						% access_desc)
					if portage.secpass < 1 and not need_superuser:
						portage_group_warning()
					return 1

	disable_emergelog = False
	for x in ("--pretend", "--fetchonly", "--fetch-all-uri"):
		if x in myopts:
			disable_emergelog = True
			break
	if myaction in ("search", "info"):
		disable_emergelog = True
	if disable_emergelog:
		""" Disable emergelog for everything except build or unmerge
		operations.  This helps minimize parallel emerge.log entries that can
		confuse log parsers.  We especially want it disabled during
		parallel-fetch, which uses --resume --fetchonly."""
		_emerge.emergelog._disable = True

	else:
		if 'EMERGE_LOG_DIR' in settings:
			try:
				# At least the parent needs to exist for the lock file.
				portage.util.ensure_dirs(settings['EMERGE_LOG_DIR'])
			except portage.exception.PortageException as e:
				writemsg_level("!!! Error creating directory for " + \
					"EMERGE_LOG_DIR='%s':\n!!! %s\n" % \
					(settings['EMERGE_LOG_DIR'], e),
					noiselevel=-1, level=logging.ERROR)
			else:
				global _emerge_log_dir
				_emerge_log_dir = settings['EMERGE_LOG_DIR']

	if not "--pretend" in myopts:
		emergelog(xterm_titles, "Started emerge on: "+\
			_unicode_decode(
				time.strftime("%b %d, %Y %H:%M:%S", time.localtime()),
				encoding=_encodings['content'], errors='replace'))
		myelogstr=""
		if myopts:
			myelogstr=" ".join(myopts)
		if myaction:
			myelogstr+=" "+myaction
		if myfiles:
			myelogstr += " " + " ".join(oldargs)
		emergelog(xterm_titles, " *** emerge " + myelogstr)
	del oldargs

	def emergeexitsig(signum, frame):
		signal.signal(signal.SIGINT, signal.SIG_IGN)
		signal.signal(signal.SIGTERM, signal.SIG_IGN)
		portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % {"signal":signum})
		sys.exit(100+signum)
	signal.signal(signal.SIGINT, emergeexitsig)
	signal.signal(signal.SIGTERM, emergeexitsig)

	def emergeexit():
		"""This gets out final log message in before we quit."""
		if "--pretend" not in myopts:
			emergelog(xterm_titles, " *** terminating.")
		if xterm_titles:
			xtermTitleReset()
	portage.atexit_register(emergeexit)

	if myaction in ("config", "metadata", "regen", "sync"):
		if "--pretend" in myopts:
			sys.stderr.write(("emerge: The '%s' action does " + \
				"not support '--pretend'.\n") % myaction)
			return 1

	if "sync" == myaction:
		return action_sync(settings, trees, mtimedb, myopts, myaction)
	elif "metadata" == myaction:
		action_metadata(settings, portdb, myopts)
	elif myaction=="regen":
		validate_ebuild_environment(trees)
		return action_regen(settings, portdb, myopts.get("--jobs"),
			myopts.get("--load-average"))
	# HELP action
	elif "config"==myaction:
		validate_ebuild_environment(trees)
		action_config(settings, trees, myopts, myfiles)

	# SEARCH action
	elif "search"==myaction:
		validate_ebuild_environment(trees)
		action_search(trees[settings["ROOT"]]["root_config"],
			myopts, myfiles, spinner)

	elif myaction in ('clean', 'depclean', 'deselect', 'prune', 'unmerge'):
		validate_ebuild_environment(trees)
		rval = action_uninstall(settings, trees, mtimedb["ldpath"],
			myopts, myaction, myfiles, spinner)
		if not (myaction == 'deselect' or buildpkgonly or fetchonly or pretend):
			post_emerge(root_config, myopts, mtimedb, rval)
		return rval

	elif myaction == 'info':

		# Ensure atoms are valid before calling unmerge().
		vardb = trees[settings["ROOT"]]["vartree"].dbapi
		portdb = trees[settings["ROOT"]]["porttree"].dbapi
		bindb = trees[settings["ROOT"]]["bintree"].dbapi
		valid_atoms = []
		for x in myfiles:
			if is_valid_package_atom(x):
				try:
					#look at the installed files first, if there is no match
					#look at the ebuilds, since EAPI 4 allows running pkg_info
					#on non-installed packages
					valid_atom = dep_expand(x, mydb=vardb, settings=settings)
					if valid_atom.cp.split("/")[0] == "null":
						valid_atom = dep_expand(x, mydb=portdb, settings=settings)
					if valid_atom.cp.split("/")[0] == "null" and "--usepkg" in myopts:
						valid_atom = dep_expand(x, mydb=bindb, settings=settings)
					valid_atoms.append(valid_atom)
				except portage.exception.AmbiguousPackageName as e:
					msg = "The short ebuild name \"" + x + \
						"\" is ambiguous.  Please specify " + \
						"one of the following " + \
						"fully-qualified ebuild names instead:"
					for line in textwrap.wrap(msg, 70):
						writemsg_level("!!! %s\n" % (line,),
							level=logging.ERROR, noiselevel=-1)
					for i in e[0]:
						writemsg_level("    %s\n" % colorize("INFORM", i),
							level=logging.ERROR, noiselevel=-1)
					writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
					return 1
				continue
			msg = []
			msg.append("'%s' is not a valid package atom." % (x,))
			msg.append("Please check ebuild(5) for full details.")
			writemsg_level("".join("!!! %s\n" % line for line in msg),
				level=logging.ERROR, noiselevel=-1)
			return 1

		return action_info(settings, trees, myopts, valid_atoms)

	# "update", "system", or just process files:
	else:
		validate_ebuild_environment(trees)

		for x in myfiles:
			if x.startswith(SETPREFIX) or \
				is_valid_package_atom(x):
				continue
			if x[:1] == os.sep:
				continue
			try:
				os.lstat(x)
				continue
			except OSError:
				pass
			msg = []
			msg.append("'%s' is not a valid package atom." % (x,))
			msg.append("Please check ebuild(5) for full details.")
			writemsg_level("".join("!!! %s\n" % line for line in msg),
				level=logging.ERROR, noiselevel=-1)
			return 1

		if "--pretend" not in myopts:
			display_news_notification(root_config, myopts)
		retval = action_build(settings, trees, mtimedb,
			myopts, myaction, myfiles, spinner)
		root_config = trees[settings["ROOT"]]["root_config"]
		post_emerge(root_config, myopts, mtimedb, retval)

		return retval
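
In this legacy layout emerge_main() takes no arguments and parses sys.argv itself, so the console entry point can be a thin wrapper; a hedged sketch:

import sys

if __name__ == "__main__":
    sys.exit(emerge_main())
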
コード例 #46
0
ファイル: output.py プロジェクト: pombredanne/portage-3
	def __call__(self, depgraph, mylist, favorites=None, verbosity=None):
		"""The main operation to format and display the resolver output.

		@param depgraph: dependency graph
		@param mylist: list of packages being processed
		@param favorites: list, defaults to []
		@param verbosity: verbose level, defaults to None
		Modifies self.conf, self.myfetchlist, self.portdb, self.vardb,
			self.pkgsettings, self.verboseadd, self.oldlp, self.newlp,
			self.print_msg,
		"""
		if favorites is None:
			favorites = []
		self.conf = _DisplayConfig(depgraph, mylist, favorites, verbosity)
		mylist = self.get_display_list(self.conf.mylist)
		# files to fetch list - avoids counting the same file twice
		# in size display (verbose mode)
		self.myfetchlist = set()

		self.quiet_repo_display = "--quiet-repo-display" in depgraph._frozen_config.myopts
		if self.quiet_repo_display:
			# Use this set to detect when all the "repoadd" strings are "[0]"
			# and disable the entire repo display in this case.
			repoadd_set = set()

		self.verbose_main_repo_display = "--verbose-main-repo-display" in depgraph._frozen_config.myopts
		self.restrict_fetch_list = {}

		for mylist_index in range(len(mylist)):
			pkg, depth, ordered = mylist[mylist_index]
			self.portdb = self.conf.trees[pkg.root]["porttree"].dbapi
			self.vardb = self.conf.trees[pkg.root]["vartree"].dbapi
			self.pkgsettings = self.conf.pkgsettings[pkg.root]
			self.indent = " " * depth

			if isinstance(pkg, Blocker):
				self._blockers(pkg)
			else:
				pkg_info = self.set_pkg_info(pkg, ordered)
				pkg_info.oldbest_list, myinslotlist = \
					self._get_installed_best(pkg, pkg_info)
				if ordered and pkg_info.merge and \
					not pkg_info.attr_display.new:
					for arg, atom in depgraph._iter_atoms_for_pkg(pkg):
						if arg.force_reinstall:
							pkg_info.attr_display.force_reinstall = True
							break

				self.verboseadd = ""
				if self.quiet_repo_display:
					self.repoadd = None
				self._display_use(pkg, pkg_info)
				if self.conf.verbosity == 3:
					if self.quiet_repo_display:
						self.verbose_size(pkg, repoadd_set, pkg_info)
					else:
						self.verbose_size(pkg, None, pkg_info)

				self.oldlp = self.conf.columnwidth - 30
				self.newlp = self.oldlp - 30
				pkg_info.oldbest = self.convert_myoldbest(pkg, pkg_info)
				pkg_info.system, pkg_info.world = \
					self.check_system_world(pkg)
				if 'interactive' in pkg.properties and \
					pkg.operation == 'merge':
					pkg_info.attr_display.interactive = True
					if ordered:
						self.counters.interactive += 1

				if self.include_mask_str():
					pkg_info.attr_display.mask = self.gen_mask_str(pkg)

				if pkg.root_config.settings["ROOT"] != "/":
					if pkg_info.oldbest:
						pkg_info.oldbest += " "
					if self.conf.columns:
						myprint = self._set_non_root_columns(pkg, pkg_info)
					else:
						pkg_str = pkg.cpv
						if self.conf.verbosity == 3:
							pkg_str = self._append_slot(pkg_str, pkg, pkg_info)
							pkg_str = self._append_repository(pkg_str, pkg, pkg_info)
						if not pkg_info.merge:
							addl = self.empty_space_in_brackets()
							myprint = "[%s%s] " % (
								self.pkgprint(pkg_info.operation.ljust(13),
								pkg_info), addl,
								)
						else:
							myprint = "[%s %s] " % (
								self.pkgprint(pkg.type_name, pkg_info),
								pkg_info.attr_display)
						myprint += self.indent + \
							self.pkgprint(pkg_str, pkg_info) + " " + \
							pkg_info.oldbest + darkgreen("to " + pkg.root)
				else:
					if self.conf.columns:
						myprint = self._set_root_columns(pkg, pkg_info)
					else:
						myprint = self._set_no_columns(pkg, pkg_info)

				if self.conf.columns and pkg.operation == "uninstall":
					continue
				if self.quiet_repo_display:
					self.print_msg.append((myprint, self.verboseadd, self.repoadd))
				else:
					self.print_msg.append((myprint, self.verboseadd, None))

		show_repos = self.quiet_repo_display and repoadd_set and repoadd_set != set(["0"])

		# now finally print out the messages
		self.print_messages(show_repos)
		self.print_blockers()
		if self.conf.verbosity == 3:
			self.print_verbose(show_repos)
		for pkg, pkg_info in self.restrict_fetch_list.items():
			writemsg_stdout("\nFetch instructions for %s:\n" % (pkg.cpv,),
							noiselevel=-1)
			spawn_nofetch(self.conf.trees[pkg.root]["porttree"].dbapi,
				pkg_info.ebuild_path)
		if self.conf.changelog:
			self.print_changelog()

		return os.EX_OK
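
A sketch of how the resolver side typically invokes this __call__ method; the Display class name is assumed from _emerge.resolver.output, and depgraph, mylist and favorites are objects produced by the dependency resolver:

display = Display()
retval = display(depgraph, mylist, favorites=favorites, verbosity=3)
# retval is os.EX_OK on success, as returned above.
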
コード例 #47
0
    def output(self):
        """Outputs the results of the search."""
        msg = []
        msg.append("\b\b  \n[ Results for search key : " + \
         bold(self.searchkey) + " ]\n")
        msg.append("[ Applications found : " + \
         bold(str(self.mlen)) + " ]\n\n")
        vardb = self.vartree.dbapi
        metadata_keys = set(Package.metadata_keys)
        metadata_keys.update(["DESCRIPTION", "HOMEPAGE", "LICENSE", "SRC_URI"])
        metadata_keys = tuple(metadata_keys)
        for mtype in self.matches:
            for match, masked in self.matches[mtype]:
                full_package = None
                if mtype == "pkg":
                    full_package = self._xmatch("bestmatch-visible", match)
                    if not full_package:
                        #no match found; we don't want to query description
                        masked = 1
                        full_package = portage.best(
                            self._xmatch("match-all", match))
                elif mtype == "desc":
                    full_package = match
                    match = portage.cpv_getkey(match)
                elif mtype == "set":
                    msg.append(green("*") + "  " + bold(match) + "\n")
                    if self.verbose:
                        msg.append("      " + darkgreen("Description:") + \
                         "   " + \
                         self.sdict[match].getMetadata("DESCRIPTION") \
                         + "\n\n")
                if full_package:
                    try:
                        metadata = dict(
                            zip(metadata_keys,
                                self._aux_get(full_package, metadata_keys)))
                    except KeyError:
                        msg.append(
                            "emerge: search: aux_get() failed, skipping\n")
                        continue

                    desc = metadata["DESCRIPTION"]
                    homepage = metadata["HOMEPAGE"]
                    license = metadata["LICENSE"]

                    if masked:
                        msg.append(green("*") + "  " + \
                         white(match) + " " + red("[ Masked ]") + "\n")
                    else:
                        msg.append(green("*") + "  " + bold(match) + "\n")
                    myversion = self.getVersion(full_package,
                                                search.VERSION_RELEASE)

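                    # Work out the total download size: from the Manifest's
                    # distfile digests for an ebuild, or from the size of the
                    # binary package file further below.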
                    mysum = [0, 0]
                    file_size_str = None
                    mycat = match.split("/")[0]
                    mypkg = match.split("/")[1]
                    mycpv = match + "-" + myversion
                    myebuild = self._findname(mycpv)
                    if myebuild:
                        pkg = Package(built=False,
                                      cpv=mycpv,
                                      installed=False,
                                      metadata=metadata,
                                      root_config=self.root_config,
                                      type_name="ebuild")
                        pkgdir = os.path.dirname(myebuild)
                        mf = self.settings.repositories.get_repo_for_location(
                            os.path.dirname(os.path.dirname(pkgdir)))
                        mf = mf.load_manifest(pkgdir, self.settings["DISTDIR"])
                        try:
                            uri_map = _parse_uri_map(mycpv,
                                                     metadata,
                                                     use=pkg.use.enabled)
                        except portage.exception.InvalidDependString as e:
                            file_size_str = "Unknown (%s)" % (e, )
                            del e
                        else:
                            try:
                                mysum[0] = mf.getDistfilesSize(uri_map)
                            except KeyError as e:
                                file_size_str = "Unknown (missing " + \
                                 "digest for %s)" % (e,)
                                del e

                    available = False
                    for db in self._dbs:
                        if db is not vardb and \
                         db.cpv_exists(mycpv):
                            available = True
                            if not myebuild and hasattr(db, "bintree"):
                                myebuild = db.bintree.getname(mycpv)
                                try:
                                    mysum[0] = os.stat(myebuild).st_size
                                except OSError:
                                    myebuild = None
                            break

                    if myebuild and file_size_str is None:
                        file_size_str = localized_size(mysum[0])

                    if self.verbose:
                        if available:
                            msg.append("      %s %s\n" % \
                             (darkgreen("Latest version available:"),
                             myversion))
                        msg.append("      %s\n" % \
                         self.getInstallationStatus(mycat+'/'+mypkg))
                        if myebuild:
                            msg.append("      %s %s\n" % \
                             (darkgreen("Size of files:"), file_size_str))
                        msg.append("      " + darkgreen("Homepage:") + \
                         "      " + homepage + "\n")
                        msg.append("      " + darkgreen("Description:") \
                         + "   " + desc + "\n")
                        msg.append("      " + darkgreen("License:") + \
                         "       " + license + "\n\n")
        writemsg_stdout(''.join(msg), noiselevel=-1)
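
The verbose branch above prints one indented block per match: mask status, latest available version, installation status, file size, homepage, description, and license. A stand-alone sketch of that block, using a hypothetical package and a plain KiB formatter in place of Portage's localized_size():

# Stand-alone sketch of the verbose result block; metadata values are made up
# and the KiB formatter replaces Portage's localized_size().
def format_result(match, metadata, version, size_bytes=None, masked=False):
    lines = ["*  %s%s" % (match, " [ Masked ]" if masked else "")]
    lines.append("      Latest version available: %s" % version)
    if size_bytes is not None:
        lines.append("      Size of files: %.1f KiB" % (size_bytes / 1024.0))
    lines.append("      Homepage:      %s" % metadata["HOMEPAGE"])
    lines.append("      Description:   %s" % metadata["DESCRIPTION"])
    lines.append("      License:       %s" % metadata["LICENSE"])
    return "\n".join(lines) + "\n"

print(format_result(
    "app-editors/example",
    {"HOMEPAGE": "https://example.org",
     "DESCRIPTION": "An illustrative package",
     "LICENSE": "GPL-2"},
    "1.2.3",
    size_bytes=345678,
))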
Code example #48
File: copyrights.py Project: zorry/zobsc
def update_copyright(fn_path, year, pretend=False):
	"""
	Check file for a Copyright statement, and update its year.  The
	patterns used for replacing copyrights are taken from echangelog.
	Only the first lines of each file that start with a hash ('#') are
	considered, until a line is found that doesn't start with a hash.
	Files are read and written in binary mode, so that this function
	will work correctly with files encoded in any character set, as
	long as the copyright statements consist of plain ASCII.
	"""

	try:
		fn_hdl = io.open(_unicode_encode(
			fn_path, encoding=_encodings['fs'], errors='strict'),
			mode='rb')
	except EnvironmentError:
		return

	orig_header = []
	new_header = []

	for line in fn_hdl:
		line_strip = line.strip()
		orig_header.append(line)
		if not line_strip or line_strip[:1] != b'#':
			new_header.append(line)
			break

		line = update_copyright_year(year, line)
		new_header.append(line)

	difflines = 0
	for diffline in difflib.unified_diff(
		[_unicode_decode(diffline) for diffline in orig_header],
		[_unicode_decode(diffline) for diffline in new_header],
		fromfile=fn_path, tofile=fn_path, n=0):
		util.writemsg_stdout(diffline, noiselevel=-1)
		difflines += 1
	util.writemsg_stdout("\n", noiselevel=-1)

	# a unified diff begins with three header lines, so anything beyond
	# three means the copyright header actually changed
	if difflines > 3 and not pretend:
		# write new file with changed header
		f, fnnew_path = mkstemp()
		f = io.open(f, mode='wb')
		for line in new_header:
			f.write(line)
		for line in fn_hdl:
			f.write(line)
		f.close()
		try:
			fn_stat = os.stat(fn_path)
		except OSError:
			fn_stat = None

		shutil.move(fnnew_path, fn_path)

		if fn_stat is None:
			util.apply_permissions(fn_path, mode=0o644)
		else:
			util.apply_stat_permissions(fn_path, fn_stat)
	fn_hdl.close()
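
update_copyright() reads the leading '#' comment block, rewrites the year via update_copyright_year(), and only rewrites the file when the unified diff contains more than its three header lines. A simplified, self-contained sketch of the same idea, with an illustrative regex standing in for the echangelog-derived patterns:

# Simplified sketch: bump the final year of a "Copyright YYYY[-YYYY]" line,
# but only inside the leading '#' comment block; the regex is an illustrative
# stand-in for the echangelog-derived patterns used by Portage.
import difflib
import re

def bump_copyright_header(text, year):
    out = []
    in_header = True
    for line in text.splitlines(keepends=True):
        if in_header and line.lstrip().startswith("#"):
            line = re.sub(r"(Copyright \d{4})(-\d{4})?", r"\g<1>-%s" % year, line)
        else:
            in_header = False
        out.append(line)
    return "".join(out)

original = ("# Copyright 1999-2020 Gentoo Authors\n"
            "# Distributed under the terms of the GNU GPL v2\n"
            "\n"
            "print('hello')\n")
updated = bump_copyright_header(original, 2024)
for diff_line in difflib.unified_diff(original.splitlines(True),
                                      updated.splitlines(True),
                                      fromfile="example.py",
                                      tofile="example.py", n=0):
    print(diff_line, end="")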
Code example #49
File: rsync.py Project: simonvanderveldt/portage
    def update(self):
        '''Internal update function which performs the transfer'''
        opts = self.options.get('emerge_config').opts
        self.usersync_uid = self.options.get('usersync_uid', None)
        enter_invalid = '--ask-enter-invalid' in opts
        out = portage.output.EOutput()
        syncuri = self.repo.sync_uri
        if self.repo.module_specific_options.get('sync-rsync-vcs-ignore',
                                                 'false').lower() == 'true':
            vcs_dirs = ()
        else:
            vcs_dirs = frozenset(VCS_DIRS)
            vcs_dirs = vcs_dirs.intersection(os.listdir(self.repo.location))

        for vcs_dir in vcs_dirs:
            writemsg_level(("!!! %s appears to be under revision " + \
             "control (contains %s).\n!!! Aborting rsync sync "
             "(override with \"sync-rsync-vcs-ignore = true\" in repos.conf).\n") % \
             (self.repo.location, vcs_dir), level=logging.ERROR, noiselevel=-1)
            return (1, False)
        self.timeout = 180

        rsync_opts = []
        if self.settings["PORTAGE_RSYNC_OPTS"] == "":
            rsync_opts = self._set_rsync_defaults()
        else:
            rsync_opts = self._validate_rsync_opts(rsync_opts, syncuri)
        self.rsync_opts = self._rsync_opts_extend(opts, rsync_opts)

        self.extra_rsync_opts = list()
        if self.repo.module_specific_options.get('sync-rsync-extra-opts'):
            self.extra_rsync_opts.extend(
                portage.util.shlex_split(
                    self.repo.module_specific_options['sync-rsync-extra-opts'])
            )

        # Real local timestamp file.
        self.servertimestampfile = os.path.join(self.repo.location, "metadata",
                                                "timestamp.chk")

        content = portage.util.grabfile(self.servertimestampfile)
        timestamp = 0
        if content:
            try:
                timestamp = time.mktime(
                    time.strptime(content[0], TIMESTAMP_FORMAT))
            except (OverflowError, ValueError):
                pass
        del content

        try:
            self.rsync_initial_timeout = \
             int(self.settings.get("PORTAGE_RSYNC_INITIAL_TIMEOUT", "15"))
        except ValueError:
            self.rsync_initial_timeout = 15

        try:
            maxretries = int(self.settings["PORTAGE_RSYNC_RETRIES"])
        except SystemExit as e:
            raise  # Needed else can't exit
        except:
            maxretries = -1  #default number of retries

        if syncuri.startswith("file://"):
            self.proto = "file"
            dosyncuri = syncuri[7:]
            is_synced, exitcode, updatecache_flg = self._do_rsync(
                dosyncuri, timestamp, opts)
            self._process_exitcode(exitcode, dosyncuri, out, 1)
            return (exitcode, updatecache_flg)

        retries = 0
        try:
            self.proto, user_name, hostname, port = re.split(
                r"(rsync|ssh)://([^:/]+@)?(\[[:\da-fA-F]*\]|[^:/]*)(:[0-9]+)?",
                syncuri,
                maxsplit=4)[1:5]
        except ValueError:
            writemsg_level("!!! sync-uri is invalid: %s\n" % syncuri,
                           noiselevel=-1,
                           level=logging.ERROR)
            return (1, False)

        self.ssh_opts = self.settings.get("PORTAGE_SSH_OPTS")

        if port is None:
            port = ""
        if user_name is None:
            user_name = ""
        if re.match(r"^\[[:\da-fA-F]*\]$", hostname) is None:
            getaddrinfo_host = hostname
        else:
            # getaddrinfo needs the brackets stripped
            getaddrinfo_host = hostname[1:-1]
        updatecache_flg = False
        all_rsync_opts = set(self.rsync_opts)
        all_rsync_opts.update(self.extra_rsync_opts)

        family = socket.AF_UNSPEC
        if "-4" in all_rsync_opts or "--ipv4" in all_rsync_opts:
            family = socket.AF_INET
        elif socket.has_ipv6 and \
         ("-6" in all_rsync_opts or "--ipv6" in all_rsync_opts):
            family = socket.AF_INET6

        addrinfos = None
        uris = []

        try:
            addrinfos = getaddrinfo_validate(
                socket.getaddrinfo(getaddrinfo_host, None, family,
                                   socket.SOCK_STREAM))
        except socket.error as e:
            writemsg_level("!!! getaddrinfo failed for '%s': %s\n" %
                           (_unicode_decode(hostname), _unicode(e)),
                           noiselevel=-1,
                           level=logging.ERROR)

        if addrinfos:

            AF_INET = socket.AF_INET
            AF_INET6 = None
            if socket.has_ipv6:
                AF_INET6 = socket.AF_INET6

            ips_v4 = []
            ips_v6 = []

            for addrinfo in addrinfos:
                if addrinfo[0] == AF_INET:
                    ips_v4.append("%s" % addrinfo[4][0])
                elif AF_INET6 is not None and addrinfo[0] == AF_INET6:
                    # IPv6 addresses need to be enclosed in square brackets
                    ips_v6.append("[%s]" % addrinfo[4][0])

            random.shuffle(ips_v4)
            random.shuffle(ips_v6)

            # Give priority to the address family that
            # getaddrinfo() returned first.
            if AF_INET6 is not None and addrinfos and \
             addrinfos[0][0] == AF_INET6:
                ips = ips_v6 + ips_v4
            else:
                ips = ips_v4 + ips_v6

            for ip in ips:
                uris.append(
                    syncuri.replace("//" + user_name + hostname + port + "/",
                                    "//" + user_name + ip + port + "/", 1))

        if not uris:
            # With some configurations we need to use the plain hostname
            # rather than try to resolve the ip addresses (bug #340817).
            uris.append(syncuri)

        # reverse, for use with pop()
        uris.reverse()
        uris_orig = uris[:]

        effective_maxretries = maxretries
        if effective_maxretries < 0:
            effective_maxretries = len(uris) - 1

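        # Cycle through the resolved URIs, retrying until one rsync pass
        # succeeds or the retry budget is exhausted.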
        while True:
            if uris:
                dosyncuri = uris.pop()
            elif maxretries < 0 or retries > maxretries:
                writemsg("!!! Exhausted addresses for %s\n" %
                         _unicode_decode(hostname),
                         noiselevel=-1)
                return (1, False)
            else:
                uris.extend(uris_orig)
                dosyncuri = uris.pop()

            if (retries == 0):
                if "--ask" in opts:
                    uq = UserQuery(opts)
                    if uq.query("Do you want to sync your Portage tree " + \
                     "with the mirror at\n" + blue(dosyncuri) + bold("?"),
                     enter_invalid) == "No":
                        print()
                        print("Quitting.")
                        print()
                        sys.exit(128 + signal.SIGINT)
                self.logger(self.xterm_titles,
                            ">>> Starting rsync with " + dosyncuri)
                if "--quiet" not in opts:
                    print(">>> Starting rsync with " + dosyncuri + "...")
            else:
                self.logger(self.xterm_titles,
                 ">>> Starting retry %d of %d with %s" % \
                  (retries, effective_maxretries, dosyncuri))
                writemsg_stdout(
                 "\n\n>>> Starting retry %d of %d with %s\n" % \
                 (retries, effective_maxretries, dosyncuri), noiselevel=-1)

            if dosyncuri.startswith('ssh://'):
                dosyncuri = dosyncuri[6:].replace('/', ':/', 1)

            is_synced, exitcode, updatecache_flg = self._do_rsync(
                dosyncuri, timestamp, opts)
            if is_synced:
                break

            retries = retries + 1

            if maxretries < 0 or retries <= maxretries:
                print(">>> Retrying...")
            else:
                # over retries
                # exit loop
                exitcode = EXCEEDED_MAX_RETRIES
                break
        self._process_exitcode(exitcode, dosyncuri, out, maxretries)
        return (exitcode, updatecache_flg)
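
The address handling above resolves the sync host, groups the results by address family, shuffles within each group, and prefers the family that getaddrinfo() returned first before substituting each address back into the sync URI. A self-contained sketch of that ordering (localhost and the rsync URI are placeholders; bracketed IPv6 hosts and the PORTAGE_RSYNC_* options are ignored):

# Self-contained sketch of the mirror-ordering step: group the resolved
# addresses by family, shuffle within each group, and keep the family that
# getaddrinfo() returned first at the front. The URI and host are placeholders.
import random
import socket

def ordered_sync_uris(syncuri, host):
    addrinfos = socket.getaddrinfo(host, None, socket.AF_UNSPEC,
                                   socket.SOCK_STREAM)
    ips_v4 = [addr[4][0] for addr in addrinfos if addr[0] == socket.AF_INET]
    ips_v6 = ["[%s]" % addr[4][0] for addr in addrinfos
              if addr[0] == socket.AF_INET6]
    random.shuffle(ips_v4)
    random.shuffle(ips_v6)
    if addrinfos and addrinfos[0][0] == socket.AF_INET6:
        ips = ips_v6 + ips_v4
    else:
        ips = ips_v4 + ips_v6
    uris = [syncuri.replace("//" + host + "/", "//" + ip + "/", 1)
            for ip in ips]
    # Fall back to the plain hostname if nothing resolved (bug #340817).
    return uris or [syncuri]

print(ordered_sync_uris("rsync://localhost/gentoo-portage", "localhost"))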
Code example #50
def _do_global_updates(trees, prev_mtimes, quiet=False, if_mtime_changed=True):
    root = trees._running_eroot
    mysettings = trees[root]["vartree"].settings
    portdb = trees[root]["porttree"].dbapi
    vardb = trees[root]["vartree"].dbapi
    bindb = trees[root]["bintree"].dbapi

    world_file = os.path.join(mysettings['EROOT'], WORLD_FILE)
    world_list = grabfile(world_file)
    world_modified = False
    world_warnings = set()
    updpath_map = {}
    # Maps repo_name to list of updates. If a given repo has no updates
    # directory, it will be omitted. If a repo has an updates directory
    # but none need to be applied (according to timestamp logic), the
    # value in the dict will be an empty list.
    repo_map = {}
    timestamps = {}

    retupd = False
    update_notice_printed = False
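    # Collect pending update commands from each repository's profiles/updates/
    # directory, reusing results for repositories that share an updates path.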
    for repo_name in portdb.getRepositories():
        repo = portdb.getRepositoryPath(repo_name)
        updpath = os.path.join(repo, "profiles", "updates")
        if not os.path.isdir(updpath):
            continue

        if updpath in updpath_map:
            repo_map[repo_name] = updpath_map[updpath]
            continue

        try:
            if if_mtime_changed:
                update_data = grab_updates(updpath, prev_mtimes=prev_mtimes)
            else:
                update_data = grab_updates(updpath)
        except DirectoryNotFound:
            continue
        myupd = []
        updpath_map[updpath] = myupd
        repo_map[repo_name] = myupd
        if len(update_data) > 0:
            for mykey, mystat, mycontent in update_data:
                if not update_notice_printed:
                    update_notice_printed = True
                    writemsg_stdout("\n")
                    writemsg_stdout(
                        colorize("GOOD", _("Performing Global Updates\n")))
                    writemsg_stdout(
                        _("(Could take a couple of minutes if you have a lot of binary packages.)\n"
                          ))
                    if not quiet:
                        writemsg_stdout(_("  %s='update pass'  %s='binary update'  "
                         "%s='/var/db update'  %s='/var/db move'\n"
                         "  %s='/var/db SLOT move'  %s='binary move'  "
                         "%s='binary SLOT move'\n  %s='update /etc/portage/package.*'\n") % \
                         (bold("."), bold("*"), bold("#"), bold("@"), bold("s"), bold("%"), bold("S"), bold("p")))
                valid_updates, errors = parse_updates(mycontent)
                myupd.extend(valid_updates)
                if not quiet:
                    writemsg_stdout(bold(mykey))
                    writemsg_stdout(len(valid_updates) * "." + "\n")
                if len(errors) == 0:
                    # Update our internal mtime since we
                    # processed all of our directives.
                    timestamps[mykey] = mystat[stat.ST_MTIME]
                else:
                    for msg in errors:
                        writemsg("%s\n" % msg, noiselevel=-1)
            if myupd:
                retupd = True

    if retupd:
        if os.access(bindb.bintree.pkgdir, os.W_OK):
            # Call binarytree.populate(), since we want to make sure it's
            # only populated with local packages here (getbinpkgs=0).
            bindb.bintree.populate()
        else:
            bindb = None

    master_repo = portdb.repositories.mainRepo()
    if master_repo is not None:
        master_repo = master_repo.name
    if master_repo in repo_map:
        repo_map['DEFAULT'] = repo_map[master_repo]

    for repo_name, myupd in repo_map.items():
        if repo_name == 'DEFAULT':
            continue
        if not myupd:
            continue

        def repo_match(repository):
            return repository == repo_name or \
             (repo_name == master_repo and repository not in repo_map)

        def _world_repo_match(atoma, atomb):
            """
			Check whether to perform a world change from atoma to atomb.
			If best vardb match for atoma comes from the same repository
			as the update file, allow that. Additionally, if portdb still
			can find a match for old atom name, warn about that.
			"""
            matches = vardb.match(atoma)
            if not matches:
                matches = vardb.match(atomb)
            if matches and \
             repo_match(vardb.aux_get(best(matches), ['repository'])[0]):
                if portdb.match(atoma):
                    world_warnings.add((atoma, atomb))
                return True
            else:
                return False

        for update_cmd in myupd:
            for pos, atom in enumerate(world_list):
                new_atom = update_dbentry(update_cmd, atom)
                if atom != new_atom:
                    if _world_repo_match(atom, new_atom):
                        world_list[pos] = new_atom
                        world_modified = True

        for update_cmd in myupd:
            if update_cmd[0] == "move":
                moves = vardb.move_ent(update_cmd, repo_match=repo_match)
                if moves:
                    writemsg_stdout(moves * "@")
                if bindb:
                    moves = bindb.move_ent(update_cmd, repo_match=repo_match)
                    if moves:
                        writemsg_stdout(moves * "%")
            elif update_cmd[0] == "slotmove":
                moves = vardb.move_slot_ent(update_cmd, repo_match=repo_match)
                if moves:
                    writemsg_stdout(moves * "s")
                if bindb:
                    moves = bindb.move_slot_ent(update_cmd,
                                                repo_match=repo_match)
                    if moves:
                        writemsg_stdout(moves * "S")

    if world_modified:
        world_list.sort()
        write_atomic(world_file, "".join("%s\n" % (x, ) for x in world_list))
        if world_warnings:
            # XXX: print warning that we've updated world entries
            # and the old name still matches something (from an overlay)?
            pass

    if retupd:

        def _config_repo_match(repo_name, atoma, atomb):
            """
			Check whether to perform a world change from atoma to atomb.
			If best vardb match for atoma comes from the same repository
			as the update file, allow that. Additionally, if portdb still
			can find a match for old atom name, warn about that.
			"""
            matches = vardb.match(atoma)
            if not matches:
                matches = vardb.match(atomb)
                if not matches:
                    return False
            repository = vardb.aux_get(best(matches), ['repository'])[0]
            return repository == repo_name or \
             (repo_name == master_repo and repository not in repo_map)

        update_config_files(root,
                            shlex_split(mysettings.get("CONFIG_PROTECT", "")),
                            shlex_split(
                                mysettings.get("CONFIG_PROTECT_MASK", "")),
                            repo_map,
                            match_callback=_config_repo_match,
                            case_insensitive="case-insensitive-fs"
                            in mysettings.features)

        # The above global updates proceed quickly, so they
        # are considered a single mtimedb transaction.
        if timestamps:
            # We do not update the mtime in the mtimedb
            # until after _all_ of the above updates have
            # been processed because the mtimedb will
            # automatically commit when killed by ctrl C.
            for mykey, mtime in timestamps.items():
                prev_mtimes[mykey] = mtime

        do_upgrade_packagesmessage = False
        # We gotta do the brute force updates for these now.
        if True:

            def onUpdate(_maxval, curval):
                if curval > 0:
                    writemsg_stdout("#")

            if quiet:
                onUpdate = None
            vardb.update_ents(repo_map, onUpdate=onUpdate)
            if bindb:

                def onUpdate(_maxval, curval):
                    if curval > 0:
                        writemsg_stdout("*")

                if quiet:
                    onUpdate = None
                bindb.update_ents(repo_map, onUpdate=onUpdate)
        else:
            do_upgrade_packagesmessage = 1

        # Update progress above is indicated by characters written to stdout so
        # we print a couple new lines here to separate the progress output from
        # what follows.
        writemsg_stdout("\n\n")

        if do_upgrade_packagesmessage and bindb and \
         bindb.cpv_all():
            writemsg_stdout(
                _(" ** Skipping packages. Run 'fixpackages' or set it in FEATURES to fix the tbz2's in the packages directory.\n"
                  ))
            writemsg_stdout(bold(_("Note: This can take a very long time.")))
            writemsg_stdout("\n")

    return retupd
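
_do_global_updates() applies the "move" and "slotmove" directives that grab_updates()/parse_updates() collect from each repository's profiles/updates/ directory. A minimal sketch of parsing such directives; the validation here is deliberately shallow compared to the real parse_updates(), and the sample lines are made up:

# Minimal sketch of parsing update directives; real parse_updates() also
# validates atoms and slots. The sample lines are made up.
def parse_update_lines(content):
    commands, errors = [], []
    for line in content.splitlines():
        fields = line.split()
        if not fields:
            continue
        if fields[0] == "move" and len(fields) == 3:
            commands.append(fields)
        elif fields[0] == "slotmove" and len(fields) == 4:
            commands.append(fields)
        else:
            errors.append("invalid update command: %r" % line)
    return commands, errors

sample = ("move dev-util/old-name dev-util/new-name\n"
          "slotmove dev-libs/example 0 1\n")
print(parse_update_lines(sample))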
Code example #51
File: _deprecated.py Project: wosigh/widk_portage
def pkgmerge(mytbz2,
             myroot,
             mysettings,
             mydbapi=None,
             vartree=None,
             prev_mtimes=None,
             blockers=None):
    """will merge a .tbz2 file, returning a list of runtime dependencies
		that must be satisfied, or None if there was a merge error.	This
		code assumes the package exists."""

    warnings.warn("portage.pkgmerge() is deprecated",
                  DeprecationWarning,
                  stacklevel=2)

    if mydbapi is None:
        mydbapi = portage.db[myroot]["bintree"].dbapi
    if vartree is None:
        vartree = portage.db[myroot]["vartree"]
    if mytbz2[-5:] != ".tbz2":
        print(_("!!! Not a .tbz2 file"))
        return 1

    tbz2_lock = None
    mycat = None
    mypkg = None
    did_merge_phase = False
    success = False
    try:
        """ Don't lock the tbz2 file because the filesytem could be readonly or
		shared by a cluster."""
        #tbz2_lock = portage.locks.lockfile(mytbz2, wantnewlockfile=1)

        mypkg = os.path.basename(mytbz2)[:-5]
        xptbz2 = portage.xpak.tbz2(mytbz2)
        mycat = xptbz2.getfile(
            _unicode_encode("CATEGORY", encoding=_encodings['repo.content']))
        if not mycat:
            writemsg(
                _("!!! CATEGORY info missing from info chunk, aborting...\n"),
                noiselevel=-1)
            return 1
        mycat = _unicode_decode(mycat,
                                encoding=_encodings['repo.content'],
                                errors='replace')
        mycat = mycat.strip()

        # These are the same directories that would be used at build time.
        builddir = os.path.join(mysettings["PORTAGE_TMPDIR"], "portage", mycat,
                                mypkg)
        catdir = os.path.dirname(builddir)
        pkgloc = os.path.join(builddir, "image")
        infloc = os.path.join(builddir, "build-info")
        myebuild = os.path.join(infloc,
                                os.path.basename(mytbz2)[:-4] + "ebuild")
        portage.util.ensure_dirs(os.path.dirname(catdir),
                                 uid=portage_uid,
                                 gid=portage_gid,
                                 mode=0o70,
                                 mask=0)
        portage.util.ensure_dirs(catdir,
                                 uid=portage_uid,
                                 gid=portage_gid,
                                 mode=0o70,
                                 mask=0)
        try:
            shutil.rmtree(builddir)
        except (IOError, OSError) as e:
            if e.errno != errno.ENOENT:
                raise
            del e
        for mydir in (builddir, pkgloc, infloc):
            portage.util.ensure_dirs(mydir,
                                     uid=portage_uid,
                                     gid=portage_gid,
                                     mode=0o755)
        writemsg_stdout(_(">>> Extracting info\n"))
        xptbz2.unpackinfo(infloc)
        mysettings.setcpv(mycat + "/" + mypkg, mydb=mydbapi)
        # Store the md5sum in the vdb.
        fp = open(_unicode_encode(os.path.join(infloc, 'BINPKGMD5')), 'w')
        fp.write(str(portage.checksum.perform_md5(mytbz2)) + "\n")
        fp.close()

        # This gives bashrc users an opportunity to do various things
        # such as remove binary packages after they're installed.
        mysettings["PORTAGE_BINPKG_FILE"] = mytbz2
        mysettings.backup_changes("PORTAGE_BINPKG_FILE")
        debug = mysettings.get("PORTAGE_DEBUG", "") == "1"

        # Eventually we'd like to pass in the saved ebuild env here.
        retval = portage.doebuild(myebuild,
                                  "setup",
                                  myroot,
                                  mysettings,
                                  debug=debug,
                                  tree="bintree",
                                  mydbapi=mydbapi,
                                  vartree=vartree)
        if retval != os.EX_OK:
            writemsg(_("!!! Setup failed: %s\n") % retval, noiselevel=-1)
            return retval

        writemsg_stdout(_(">>> Extracting %s\n") % mypkg)
        retval = portage.process.spawn_bash(
            "bzip2 -dqc -- '%s' | tar -xp -C '%s' -f -" % (mytbz2, pkgloc),
            env=mysettings.environ())
        if retval != os.EX_OK:
            writemsg(_("!!! Error Extracting '%s'\n") % mytbz2, noiselevel=-1)
            return retval
        #portage.locks.unlockfile(tbz2_lock)
        #tbz2_lock = None

        mylink = portage.dblink(mycat,
                                mypkg,
                                myroot,
                                mysettings,
                                vartree=vartree,
                                treetype="bintree",
                                blockers=blockers)
        retval = mylink.merge(pkgloc,
                              infloc,
                              myroot,
                              myebuild,
                              cleanup=0,
                              mydbapi=mydbapi,
                              prev_mtimes=prev_mtimes)
        did_merge_phase = True
        success = retval == os.EX_OK
        return retval
    finally:
        mysettings.pop("PORTAGE_BINPKG_FILE", None)
        if tbz2_lock:
            portage.locks.unlockfile(tbz2_lock)
        if True:
            if not did_merge_phase:
                # The merge phase handles this already.  Callers don't know how
                # far this function got, so we have to call elog_process() here
                # so that it's only called once.
                from portage.elog import elog_process
                elog_process(mycat + "/" + mypkg, mysettings)
            try:
                if success:
                    shutil.rmtree(builddir)
            except (IOError, OSError) as e:
                if e.errno != errno.ENOENT:
                    raise
                del e
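
The deprecated helper above unpacks the binary package by shelling out to "bzip2 -dqc | tar -xp". As a rough stand-in, the same extraction can be sketched with the stdlib tarfile module (paths below are hypothetical, and this skips Portage's environment and phase handling):

# Rough stand-in for the "bzip2 -dqc | tar -xp" pipeline: .tbz2 binary
# packages are bzip2-compressed tarballs with xpak metadata appended, and the
# stdlib bz2 reader stops at the end of the compressed stream, so tarfile can
# usually read them directly. Paths are hypothetical.
import os
import tarfile

def extract_tbz2_image(mytbz2, pkgloc):
    os.makedirs(pkgloc, exist_ok=True)
    with tarfile.open(mytbz2, "r:bz2") as tar:
        tar.extractall(pkgloc)

# extract_tbz2_image("/var/cache/binpkgs/app-misc/example-1.0.tbz2",
#                    "/var/tmp/portage/app-misc/example-1.0/image")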