Example #1
	def _display_success(self):
		stdout_orig = sys.stdout
		stderr_orig = sys.stderr
		global_havecolor = portage.output.havecolor
		out = io.StringIO()
		try:
			sys.stdout = out
			sys.stderr = out
			if portage.output.havecolor:
				portage.output.havecolor = not self.background

			path = self._pkg_path
			if path.endswith(".partial"):
				path = path[:-len(".partial")]
			eout = EOutput()
			eout.ebegin("%s %s ;-)" % (os.path.basename(path),
				" ".join(sorted(self._digests))))
			eout.eend(0)

		finally:
			sys.stdout = stdout_orig
			sys.stderr = stderr_orig
			portage.output.havecolor = global_havecolor

		self.scheduler.output(out.getvalue(), log_path=self.logfile,
			background=self.background)
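
A note on the pattern above: EOutput writes to sys.stdout/sys.stderr, which is why Example #1 swaps both streams for an io.StringIO and restores them in a finally block. A minimal stdlib-only sketch of the same capture idea, using contextlib as a stand-in for the manual swap (no portage required):

import contextlib
import io

def capture(fn):
	# Collect everything fn() writes to stdout/stderr; the with-block
	# restores the real streams, playing the role of the finally clause.
	buf = io.StringIO()
	with contextlib.redirect_stdout(buf), contextlib.redirect_stderr(buf):
		fn()
	return buf.getvalue()

print(capture(lambda: print("foo-1.0.tar.bz2 BLAKE2B SHA512 ;-)")), end="")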
Example #2
def _finalize():
	global _items
	printer = EOutput()
	for root, key, logentries in _items:
		print()
		if root == "/":
			printer.einfo(_("Messages for package %s:") %
				colorize("INFORM", key))
		else:
			printer.einfo(_("Messages for package %(pkg)s merged to %(root)s:") %
				{"pkg": colorize("INFORM", key), "root": root})
		print()
		for phase in EBUILD_PHASES:
			if phase not in logentries:
				continue
			for msgtype, msgcontent in logentries[phase]:
				fmap = {"INFO": printer.einfo,
						"WARN": printer.ewarn,
						"ERROR": printer.eerror,
						"LOG": printer.einfo,
						"QA": printer.ewarn}
				if isinstance(msgcontent, str):
					msgcontent = [msgcontent]
				for line in msgcontent:
					fmap[msgtype](line.strip("\n"))
	_items = []
	return
Example #3
def finalize(mysettings=None):
	"""The mysettings parameter is just for backward compatibility since
	an older version of portage will import the module from a newer version
	when it upgrades itself."""
	global _items
	printer = EOutput()
	for root, key, logentries in _items:
		print()
		if root == "/":
			printer.einfo(_("Messages for package %s:") %
				colorize("INFORM", key))
		else:
			printer.einfo(_("Messages for package %(pkg)s merged to %(root)s:") %
				{"pkg": colorize("INFORM", key), "root": root})
		print()
		for phase in EBUILD_PHASES:
			if phase not in logentries:
				continue
			for msgtype, msgcontent in logentries[phase]:
				fmap = {"INFO": printer.einfo,
						"WARN": printer.ewarn,
						"ERROR": printer.eerror,
						"LOG": printer.einfo,
						"QA": printer.ewarn}
				if isinstance(msgcontent, str):
					msgcontent = [msgcontent]
				for line in msgcontent:
					fmap[msgtype](line.strip("\n"))
	_items = []
	return
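
The fmap dictionary in Examples #2 and #3 is a dispatch table from elog message type to printer method. A reduced sketch of the same idea, with plain functions standing in for the EOutput methods (all names here are illustrative):

def einfo(line): print(" * " + line)
def ewarn(line): print(" * warning: " + line)
def eerror(line): print(" * error: " + line)

fmap = {"INFO": einfo, "WARN": ewarn, "ERROR": eerror, "LOG": einfo, "QA": ewarn}

for msgtype, msgcontent in [("INFO", "merged ok\n"), ("QA", ["first\n", "second\n"])]:
	# Normalize a bare string to a one-element list, as the examples do.
	if isinstance(msgcontent, str):
		msgcontent = [msgcontent]
	for line in msgcontent:
		fmap[msgtype](line.strip("\n"))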
Example #4
 def __init__(self,
              phase,
              settings,
              myopts=None,
              myaction=None,
              mytargets=None):
     self.myopts = myopts
     self.myaction = myaction
     self.mytargets = mytargets
     check_config_instance(settings)
     self.settings = settings
     self.path = os.path.join(settings["PORTAGE_CONFIGROOT"], HOOKS_PATH,
                              phase + '.d')
     self.output = EOutput()
Example #5
def _finalize():
    global _items
    printer = EOutput()
    for root, key, logentries, logfile in _items:
        print()
        if root == "/":
            printer.einfo(
                _("Messages for package %s:") % colorize("INFORM", key))
        else:
            printer.einfo(
                _("Messages for package %(pkg)s merged to %(root)s:") % {
                    "pkg": colorize("INFORM", key),
                    "root": root
                })
        if logfile is not None:
            printer.einfo(_("Log file: %s") % colorize("INFORM", logfile))
        print()
        for phase in EBUILD_PHASES:
            if phase not in logentries:
                continue
            for msgtype, msgcontent in logentries[phase]:
                fmap = {
                    "INFO": printer.einfo,
                    "WARN": printer.ewarn,
                    "ERROR": printer.eerror,
                    "LOG": printer.einfo,
                    "QA": printer.ewarn
                }
                if isinstance(msgcontent, str):
                    msgcontent = [msgcontent]
                for line in msgcontent:
                    fmap[msgtype](line.strip("\n"))
    _items = []
    return
Example #6
class HookDirectory(object):

	def __init__ (self, phase, settings, myopts=None, myaction=None, mytargets=None):
		self.myopts = myopts
		self.myaction = myaction
		self.mytargets = mytargets
		check_config_instance(settings)
		self.settings = settings
		self.path = os.path.join(settings["PORTAGE_CONFIGROOT"], HOOKS_PATH, phase + '.d')
		self.output = EOutput()

	def execute (self, path=None):
		if "hooks" not in self.settings['FEATURES']:
			return
		
		if not path:
			path = self.path
		
		path = normalize_path(path)
		
		if not os.path.exists(path):
			if self.myopts and "--debug" in self.myopts:
				# behavior mimicked by hook.sh
				self.output.ewarn('This hook path could not be found; ignored: ' + path)
			return
		
		if os.path.isdir(path):
			command=[HOOKS_SH_BINARY]
			if self.myopts:
				for myopt in self.myopts:
					command.extend(['--opt', myopt])
			if self.myaction:
				command.extend(['--action', self.myaction])
			if self.mytargets:
				for mytarget in self.mytargets:
					command.extend(['--target', mytarget])
			
			command=[BASH_BINARY, '-c', 'cd "'+path+'" && source "' + PORTAGE_BIN_PATH + '/isolated-functions.sh" && source ' + ' '.join(command)]
			if self.myopts and "--verbose" in self.myopts:
				self.output.einfo('Executing hooks directory "' + self.path + '"...')
			code = spawn(mycommand=command, env=self.settings.environ())
			if code: # if failure
				# behavior mimicked by hook.sh
				raise PortageException('!!! Hook directory %s failed with exit code %s' % (self.path, code))
		
		else:
			raise InvalidLocation('This hook path ought to be a directory: ' + path)
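
execute() above assembles the hook invocation as an argv list before wrapping it in a bash -c command line. A standalone sketch of just that argument assembly (the hook.sh path is hypothetical):

def build_hook_args(hooks_sh, myopts=None, myaction=None, mytargets=None):
	# Mirror Example #6: options, the action, and targets become
	# repeated --opt/--action/--target flag pairs.
	command = [hooks_sh]
	for myopt in (myopts or []):
		command.extend(['--opt', myopt])
	if myaction:
		command.extend(['--action', myaction])
	for mytarget in (mytargets or []):
		command.extend(['--target', mytarget])
	return command

print(build_hook_args('/usr/lib/portage/bin/hook.sh',
	myopts=['--verbose'], myaction='sync'))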
Example #7
	def __init__ (self, phase, settings, myopts=None, myaction=None, mytargets=None):
		self.myopts = myopts
		self.myaction = myaction
		self.mytargets = mytargets
		check_config_instance(settings)
		self.settings = settings
		self.path = os.path.join(settings["PORTAGE_CONFIGROOT"], HOOKS_PATH, phase + '.d')
		self.output = EOutput()
Example #8
def old_tree_timestamp_warn(portdir, settings):
	unixtime = time.time()
	default_warnsync = 30

	timestamp_file = os.path.join(portdir, "metadata/timestamp.x")
	try:
		lastsync = grabfile(timestamp_file)
	except PortageException:
		return False

	if not lastsync:
		return False

	lastsync = lastsync[0].split()
	if not lastsync:
		return False

	try:
		lastsync = int(lastsync[0])
	except ValueError:
		return False

	var_name = 'PORTAGE_SYNC_STALE'
	try:
		warnsync = float(settings.get(var_name, default_warnsync))
	except ValueError:
		writemsg_level("!!! %s contains non-numeric value: %s\n" % \
			(var_name, settings[var_name]),
			level=logging.ERROR, noiselevel=-1)
		return False

	if warnsync <= 0:
		return False

	if (unixtime - 86400 * warnsync) > lastsync:
		out = EOutput()
		if have_english_locale():
			out.ewarn("Last emerge --sync was %s ago." % \
				whenago(unixtime - lastsync))
		else:
			out.ewarn(_("Last emerge --sync was %s.") % \
				_unicode_decode(time.strftime(
				'%c', time.localtime(lastsync))))
		return True
	return False
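
The heart of old_tree_timestamp_warn() is one comparison: warn when the tree's timestamp is more than PORTAGE_SYNC_STALE days in the past. That test in isolation (a sketch; the function name is made up):

import time

def tree_is_stale(lastsync, warnsync_days=30, now=None):
	# True when the last sync is older than warnsync_days days.
	now = time.time() if now is None else now
	return (now - 86400 * warnsync_days) > lastsync

print(tree_is_stale(time.time() - 45 * 86400))  # True
print(tree_is_stale(time.time() - 10 * 86400))  # False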
Example #9
def finalize(mysettings=None):
    """The mysettings parameter is just for backward compatibility since
	an older version of portage will import the module from a newer version
	when it upgrades itself."""
    global _items
    printer = EOutput()
    for root, key, logentries in _items:
        print()
        if root == "/":
            printer.einfo(
                _("Messages for package %s:") % colorize("INFORM", key))
        else:
            printer.einfo(
                _("Messages for package %(pkg)s merged to %(root)s:") % {
                    "pkg": colorize("INFORM", key),
                    "root": root
                })
        print()
        for phase in EBUILD_PHASES:
            if phase not in logentries:
                continue
            for msgtype, msgcontent in logentries[phase]:
                fmap = {
                    "INFO": printer.einfo,
                    "WARN": printer.ewarn,
                    "ERROR": printer.eerror,
                    "LOG": printer.einfo,
                    "QA": printer.ewarn
                }
                if isinstance(msgcontent, str):
                    msgcontent = [msgcontent]
                for line in msgcontent:
                    fmap[msgtype](line.strip("\n"))
    _items = []
    return
Example #10
    def _display_success(self):
        stdout_orig = sys.stdout
        stderr_orig = sys.stderr
        global_havecolor = portage.output.havecolor
        out = io.StringIO()
        try:
            sys.stdout = out
            sys.stderr = out
            if portage.output.havecolor:
                portage.output.havecolor = not self.background

            path = self._pkg_path
            if path.endswith(".partial"):
                path = path[:-len(".partial")]
            eout = EOutput()
            eout.ebegin(
                "%s %s ;-)" %
                (os.path.basename(path), " ".join(sorted(self._digests))))
            eout.eend(0)

        finally:
            sys.stdout = stdout_orig
            sys.stderr = stderr_orig
            portage.output.havecolor = global_havecolor

        self.scheduler.output(out.getvalue(),
                              log_path=self.logfile,
                              background=self.background)
Example #11
    def set_query(self, query):
        self.current_query = query
        if query is None:
            return

        if query in self.queries:
            return

        if self.config["format"]:
            output = EOutputMem()
        else:
            output = EOutput()

        self.queries[query] = {
            "output": output,
            "result": [],
            "metadata": {},
        }
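
set_query() lazily creates one output object per query and reuses it on later calls. A trimmed sketch of that cache, with a stub standing in for EOutput/EOutputMem (both of which require portage):

class StubOutput:
    def einfo(self, msg):
        print(" * " + msg)

queries = {}

def set_query(query):
    # Repeat calls for the same query are no-ops.
    if query is None or query in queries:
        return
    queries[query] = {"output": StubOutput(), "result": [], "metadata": {}}

set_query("dev-python/lxml")
set_query("dev-python/lxml")
print(len(queries))  # 1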
Example #12
def old_tree_timestamp_warn(portdir, settings):
    unixtime = time.time()
    default_warnsync = 30

    timestamp_file = os.path.join(portdir, "metadata/timestamp.x")
    try:
        lastsync = grabfile(timestamp_file)
    except PortageException:
        return False

    if not lastsync:
        return False

    lastsync = lastsync[0].split()
    if not lastsync:
        return False

    try:
        lastsync = int(lastsync[0])
    except ValueError:
        return False

    var_name = "PORTAGE_SYNC_STALE"
    try:
        warnsync = float(settings.get(var_name, default_warnsync))
    except ValueError:
        writemsg_level(
            "!!! %s contains non-numeric value: %s\n" %
            (var_name, settings[var_name]),
            level=logging.ERROR,
            noiselevel=-1,
        )
        return False

    if warnsync <= 0:
        return False

    if (unixtime - 86400 * warnsync) > lastsync:
        out = EOutput()
        if have_english_locale():
            out.ewarn("Last emerge --sync was %s ago." %
                      whenago(unixtime - lastsync))
        else:
            out.ewarn(
                _("Last emerge --sync was %s.") %
                _unicode_decode(time.strftime("%c", time.localtime(lastsync))))
        return True
    return False
Example #13
def digestcheck(myfiles, mysettings, strict=0, justmanifest=0):
	"""
	Verifies checksums. Assumes all files have been downloaded.
	@rtype: int
	@returns: 1 on success and 0 on failure
	"""
	if mysettings.get("EBUILD_SKIP_MANIFEST") == "1":
		return 1
	pkgdir = mysettings["O"]
	manifest_path = os.path.join(pkgdir, "Manifest")
	if not os.path.exists(manifest_path):
		writemsg(_("!!! Manifest file not found: '%s'\n") % manifest_path,
			noiselevel=-1)
		if strict:
			return 0
		else:
			return 1
	mf = Manifest(pkgdir, mysettings["DISTDIR"])
	manifest_empty = True
	for d in mf.fhashdict.values():
		if d:
			manifest_empty = False
			break
	if manifest_empty:
		writemsg(_("!!! Manifest is empty: '%s'\n") % manifest_path,
			noiselevel=-1)
		if strict:
			return 0
		else:
			return 1
	eout = EOutput()
	eout.quiet = mysettings.get("PORTAGE_QUIET", None) == "1"
	try:
		if strict and "PORTAGE_PARALLEL_FETCHONLY" not in mysettings:
			eout.ebegin(_("checking ebuild checksums ;-)"))
			mf.checkTypeHashes("EBUILD")
			eout.eend(0)
			eout.ebegin(_("checking auxfile checksums ;-)"))
			mf.checkTypeHashes("AUX")
			eout.eend(0)
			eout.ebegin(_("checking miscfile checksums ;-)"))
			mf.checkTypeHashes("MISC", ignoreMissingFiles=True)
			eout.eend(0)
		for f in myfiles:
			eout.ebegin(_("checking %s ;-)") % f)
			ftype = mf.findFile(f)
			if ftype is None:
				raise KeyError(f)
			mf.checkFileHashes(ftype, f)
			eout.eend(0)
	except KeyError as e:
		eout.eend(1)
		writemsg(_("\n!!! Missing digest for %s\n") % str(e), noiselevel=-1)
		return 0
	except FileNotFound as e:
		eout.eend(1)
		writemsg(_("\n!!! A file listed in the Manifest could not be found: %s\n") % str(e),
			noiselevel=-1)
		return 0
	except DigestException as e:
		eout.eend(1)
		writemsg(_("\n!!! Digest verification failed:\n"), noiselevel=-1)
		writemsg("!!! %s\n" % e.value[0], noiselevel=-1)
		writemsg(_("!!! Reason: %s\n") % e.value[1], noiselevel=-1)
		writemsg(_("!!! Got: %s\n") % e.value[2], noiselevel=-1)
		writemsg(_("!!! Expected: %s\n") % e.value[3], noiselevel=-1)
		return 0
	# Make sure that all of the ebuilds are actually listed in the Manifest.
	glep55 = 'parse-eapi-glep-55' in mysettings.features
	for f in os.listdir(pkgdir):
		pf = None
		if glep55:
			pf, eapi = _split_ebuild_name_glep55(f)
		elif f[-7:] == '.ebuild':
			pf = f[:-7]
		if pf is not None and not mf.hasFile("EBUILD", f):
			writemsg(_("!!! A file is not listed in the Manifest: '%s'\n") % \
				os.path.join(pkgdir, f), noiselevel=-1)
			if strict:
				return 0
	""" epatch will just grab all the patches out of a directory, so we have to
	make sure there aren't any foreign files that it might grab."""
	filesdir = os.path.join(pkgdir, "files")

	for parent, dirs, files in os.walk(filesdir):
		try:
			parent = _unicode_decode(parent,
				encoding=_encodings['fs'], errors='strict')
		except UnicodeDecodeError:
			parent = _unicode_decode(parent,
				encoding=_encodings['fs'], errors='replace')
			writemsg(_("!!! Path contains invalid "
				"character(s) for encoding '%s': '%s'") \
				% (_encodings['fs'], parent), noiselevel=-1)
			if strict:
				return 0
			continue
		for d in dirs:
			d_bytes = d
			try:
				d = _unicode_decode(d,
					encoding=_encodings['fs'], errors='strict')
			except UnicodeDecodeError:
				d = _unicode_decode(d,
					encoding=_encodings['fs'], errors='replace')
				writemsg(_("!!! Path contains invalid "
					"character(s) for encoding '%s': '%s'") \
					% (_encodings['fs'], os.path.join(parent, d)),
					noiselevel=-1)
				if strict:
					return 0
				dirs.remove(d_bytes)
				continue
			if d.startswith(".") or d == "CVS":
				dirs.remove(d_bytes)
		for f in files:
			try:
				f = _unicode_decode(f,
					encoding=_encodings['fs'], errors='strict')
			except UnicodeDecodeError:
				f = _unicode_decode(f,
					encoding=_encodings['fs'], errors='replace')
				if f.startswith("."):
					continue
				f = os.path.join(parent, f)[len(filesdir) + 1:]
				writemsg(_("!!! File name contains invalid "
					"character(s) for encoding '%s': '%s'") \
					% (_encodings['fs'], f), noiselevel=-1)
				if strict:
					return 0
				continue
			if f.startswith("."):
				continue
			f = os.path.join(parent, f)[len(filesdir) + 1:]
			file_type = mf.findFile(f)
			if file_type != "AUX" and not f.startswith("digest-"):
				writemsg(_("!!! A file is not listed in the Manifest: '%s'\n") % \
					os.path.join(filesdir, f), noiselevel=-1)
				if strict:
					return 0
	return 1
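
digestcheck() leans on the ebegin()/eend() pairing: ebegin() opens a status line and eend(0) closes it with success, while any nonzero code marks failure. A toy stand-in showing that protocol (FakeEOutput is illustrative, not the real class):

class FakeEOutput:
	def ebegin(self, msg):
		print(" * %s ..." % msg, end=" ")

	def eend(self, errno, msg=None):
		# 0 means success; anything else is reported as a failure.
		print("[ ok ]" if errno == 0 else "[ !! ] %s" % (msg or ""))

eout = FakeEOutput()
eout.ebegin("checking ebuild checksums ;-)")
eout.eend(0)
eout.ebegin("checking foo-1.0.tar.gz ;-)")
eout.eend(1, "digest mismatch")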
Example #14
def digestcheck(myfiles, mysettings, strict=False, justmanifest=None):
    """
	Verifies checksums. Assumes all files have been downloaded.
	@rtype: int
	@returns: 1 on success and 0 on failure
	"""

    if justmanifest is not None:
        warnings.warn("The justmanifest parameter of the " + \
         "portage.package.ebuild.digestcheck.digestcheck()" + \
         " function is now unused.",
         DeprecationWarning, stacklevel=2)
        justmanifest = None

    if mysettings.get("EBUILD_SKIP_MANIFEST") == "1":
        return 1
    pkgdir = mysettings["O"]
    manifest_path = os.path.join(pkgdir, "Manifest")
    if not os.path.exists(manifest_path):
        writemsg(_("!!! Manifest file not found: '%s'\n") % manifest_path,
                 noiselevel=-1)
        if strict:
            return 0
        else:
            return 1
    mf = Manifest(pkgdir, mysettings["DISTDIR"])
    manifest_empty = True
    for d in mf.fhashdict.values():
        if d:
            manifest_empty = False
            break
    if manifest_empty:
        writemsg(_("!!! Manifest is empty: '%s'\n") % manifest_path,
                 noiselevel=-1)
        if strict:
            return 0
        else:
            return 1
    eout = EOutput()
    eout.quiet = mysettings.get("PORTAGE_QUIET", None) == "1"
    try:
        if strict and "PORTAGE_PARALLEL_FETCHONLY" not in mysettings:
            eout.ebegin(_("checking ebuild checksums ;-)"))
            mf.checkTypeHashes("EBUILD")
            eout.eend(0)
            eout.ebegin(_("checking auxfile checksums ;-)"))
            mf.checkTypeHashes("AUX")
            eout.eend(0)
            eout.ebegin(_("checking miscfile checksums ;-)"))
            mf.checkTypeHashes("MISC", ignoreMissingFiles=True)
            eout.eend(0)
        for f in myfiles:
            eout.ebegin(_("checking %s ;-)") % f)
            ftype = mf.findFile(f)
            if ftype is None:
                raise KeyError(f)
            mf.checkFileHashes(ftype, f)
            eout.eend(0)
    except KeyError as e:
        eout.eend(1)
        writemsg(_("\n!!! Missing digest for %s\n") % str(e), noiselevel=-1)
        return 0
    except FileNotFound as e:
        eout.eend(1)
        writemsg(
            _("\n!!! A file listed in the Manifest could not be found: %s\n") %
            str(e),
            noiselevel=-1)
        return 0
    except DigestException as e:
        eout.eend(1)
        writemsg(_("\n!!! Digest verification failed:\n"), noiselevel=-1)
        writemsg("!!! %s\n" % e.value[0], noiselevel=-1)
        writemsg(_("!!! Reason: %s\n") % e.value[1], noiselevel=-1)
        writemsg(_("!!! Got: %s\n") % e.value[2], noiselevel=-1)
        writemsg(_("!!! Expected: %s\n") % e.value[3], noiselevel=-1)
        return 0
    # Make sure that all of the ebuilds are actually listed in the Manifest.
    for f in os.listdir(pkgdir):
        pf = None
        if f[-7:] == '.ebuild':
            pf = f[:-7]
        if pf is not None and not mf.hasFile("EBUILD", f):
            writemsg(_("!!! A file is not listed in the Manifest: '%s'\n") % \
             os.path.join(pkgdir, f), noiselevel=-1)
            if strict:
                return 0
    """ epatch will just grab all the patches out of a directory, so we have to
	make sure there aren't any foreign files that it might grab."""
    filesdir = os.path.join(pkgdir, "files")

    for parent, dirs, files in os.walk(filesdir):
        try:
            parent = _unicode_decode(parent,
                                     encoding=_encodings['fs'],
                                     errors='strict')
        except UnicodeDecodeError:
            parent = _unicode_decode(parent,
                                     encoding=_encodings['fs'],
                                     errors='replace')
            writemsg(_("!!! Path contains invalid "
             "character(s) for encoding '%s': '%s'") \
             % (_encodings['fs'], parent), noiselevel=-1)
            if strict:
                return 0
            continue
        for d in dirs:
            d_bytes = d
            try:
                d = _unicode_decode(d,
                                    encoding=_encodings['fs'],
                                    errors='strict')
            except UnicodeDecodeError:
                d = _unicode_decode(d,
                                    encoding=_encodings['fs'],
                                    errors='replace')
                writemsg(_("!!! Path contains invalid "
                 "character(s) for encoding '%s': '%s'") \
                 % (_encodings['fs'], os.path.join(parent, d)),
                 noiselevel=-1)
                if strict:
                    return 0
                dirs.remove(d_bytes)
                continue
            if d.startswith(".") or d == "CVS":
                dirs.remove(d_bytes)
        for f in files:
            try:
                f = _unicode_decode(f,
                                    encoding=_encodings['fs'],
                                    errors='strict')
            except UnicodeDecodeError:
                f = _unicode_decode(f,
                                    encoding=_encodings['fs'],
                                    errors='replace')
                if f.startswith("."):
                    continue
                f = os.path.join(parent, f)[len(filesdir) + 1:]
                writemsg(_("!!! File name contains invalid "
                 "character(s) for encoding '%s': '%s'") \
                 % (_encodings['fs'], f), noiselevel=-1)
                if strict:
                    return 0
                continue
            if f.startswith("."):
                continue
            f = os.path.join(parent, f)[len(filesdir) + 1:]
            file_type = mf.findFile(f)
            if file_type != "AUX" and not f.startswith("digest-"):
                writemsg(_("!!! A file is not listed in the Manifest: '%s'\n") % \
                 os.path.join(filesdir, f), noiselevel=-1)
                if strict:
                    return 0
    return 1
Example #15
File: git.py Project: mgorny/portage
	def verify_head(self, revision='-1'):
		if (self.repo.module_specific_options.get(
				'sync-git-verify-commit-signature', 'false') != 'true'):
			return True

		if self.repo.sync_openpgp_key_path is not None:
			if gemato is None:
				writemsg_level("!!! Verifying against specified key requires gemato-11.0+ installed\n",
					level=logging.ERROR, noiselevel=-1)
				return False
			openpgp_env = gemato.openpgp.OpenPGPEnvironment()
		else:
			openpgp_env = None

		try:
			out = EOutput()
			env = None
			if openpgp_env is not None:
				try:
					out.einfo('Using keys from %s' % (self.repo.sync_openpgp_key_path,))
					with io.open(self.repo.sync_openpgp_key_path, 'rb') as f:
						openpgp_env.import_key(f)
					out.ebegin('Refreshing keys from keyserver')
					openpgp_env.refresh_keys()
					out.eend(0)
				except GematoException as e:
					writemsg_level("!!! Verification impossible due to keyring problem:\n%s\n"
							% (e,),
							level=logging.ERROR, noiselevel=-1)
					return False

				env = os.environ.copy()
				env['GNUPGHOME'] = openpgp_env.home

			rev_cmd = [self.bin_command, "log", "-n1", "--pretty=format:%G?", revision]
			try:
				status = (portage._unicode_decode(
					subprocess.check_output(rev_cmd,
						cwd=portage._unicode_encode(self.repo.location),
						env=env))
					.strip())
			except subprocess.CalledProcessError:
				return False

			if status == 'G':  # good signature is good
				out.einfo('Trusted signature found on top commit')
				return True
			elif status == 'U':  # untrusted
				out.ewarn('Top commit signature is valid but not trusted')
				return True
			else:
				if status == 'B':
					expl = 'bad signature'
				elif status == 'X':
					expl = 'expired signature'
				elif status == 'Y':
					expl = 'expired key'
				elif status == 'R':
					expl = 'revoked key'
				elif status == 'E':
					expl = 'unable to verify signature (missing key?)'
				elif status == 'N':
					expl = 'no signature'
				else:
					expl = 'unknown issue'
				out.eerror('No valid signature found: %s' % (expl,))
				return False
		finally:
			if openpgp_env is not None:
				openpgp_env.close()
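
verify_head() interprets the single status letter that git log -n1 --pretty=format:%G? prints for the top commit. The same mapping as a lookup table (a sketch; the example itself uses an if/elif chain):

GIT_SIG_STATUS = {
	'G': 'good, trusted signature',
	'U': 'valid but untrusted signature',
	'B': 'bad signature',
	'X': 'expired signature',
	'Y': 'expired key',
	'R': 'revoked key',
	'E': 'unable to verify signature (missing key?)',
	'N': 'no signature',
}

def explain_status(status):
	return GIT_SIG_STATUS.get(status, 'unknown issue')

print(explain_status('U'))  # valid but untrusted signature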
Example #16
def lockfile(mypath,
             wantnewlockfile=0,
             unlinkfile=0,
             waiting_msg=None,
             flags=0):
    """
	If wantnewlockfile is True then this creates a lockfile in the parent
	directory as the file: '.' + basename + '.portage_lockfile'.
	"""
    import fcntl

    if not mypath:
        raise InvalidData(_("Empty path given"))

    if isinstance(mypath, str) and mypath[-1] == '/':
        mypath = mypath[:-1]

    if hasattr(mypath, 'fileno'):
        mypath = mypath.fileno()
    if isinstance(mypath, int):
        lockfilename = mypath
        wantnewlockfile = 0
        unlinkfile = 0
    elif wantnewlockfile:
        base, tail = os.path.split(mypath)
        lockfilename = os.path.join(base, "." + tail + ".portage_lockfile")
        del base, tail
        unlinkfile = 1
    else:
        lockfilename = mypath

    if isinstance(mypath, str):
        if not os.path.exists(os.path.dirname(mypath)):
            raise DirectoryNotFound(os.path.dirname(mypath))
        preexisting = os.path.exists(lockfilename)
        old_mask = os.umask(000)
        try:
            try:
                myfd = os.open(lockfilename, os.O_CREAT | os.O_RDWR, 0o660)
            except OSError as e:
                func_call = "open('%s')" % lockfilename
                if e.errno == OperationNotPermitted.errno:
                    raise OperationNotPermitted(func_call)
                elif e.errno == PermissionDenied.errno:
                    raise PermissionDenied(func_call)
                else:
                    raise

            if not preexisting:
                try:
                    if os.stat(lockfilename).st_gid != portage_gid:
                        os.chown(lockfilename, -1, portage_gid)
                except OSError as e:
                    if e.errno in (errno.ENOENT, errno.ESTALE):
                        return lockfile(mypath,
                                        wantnewlockfile=wantnewlockfile,
                                        unlinkfile=unlinkfile,
                                        waiting_msg=waiting_msg,
                                        flags=flags)
                    else:
                        writemsg("%s: chown('%s', -1, %d)\n" % \
                         (e, lockfilename, portage_gid), noiselevel=-1)
                        writemsg(_("Cannot chown a lockfile: '%s'\n") % \
                         lockfilename, noiselevel=-1)
                        writemsg(_("Group IDs of current user: %s\n") % \
                         " ".join(str(n) for n in os.getgroups()),
                         noiselevel=-1)
        finally:
            os.umask(old_mask)

    elif isinstance(mypath, int):
        myfd = mypath

    else:
        raise ValueError(_("Unknown type passed in '%s': '%s'") % \
         (type(mypath), mypath))

    # Try for a non-blocking lock first; if it's held, print a message
    # that we're waiting on the lockfile, then make a blocking attempt.
    locking_method = fcntl.lockf
    try:
        fcntl.lockf(myfd, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except IOError as e:
        if "errno" not in dir(e):
            raise
        if e.errno in (errno.EACCES, errno.EAGAIN):
            # resource temp unavailable; eg, someone beat us to the lock.
            if flags & os.O_NONBLOCK:
                raise TryAgain(mypath)

            global _quiet
            out = EOutput()
            out.quiet = _quiet
            if waiting_msg is None:
                if isinstance(mypath, int):
                    waiting_msg = _("waiting for lock on fd %i") % myfd
                else:
                    waiting_msg = _("waiting for lock on %s\n") % lockfilename
            out.ebegin(waiting_msg)
            # try for the exclusive lock now.
            try:
                fcntl.lockf(myfd, fcntl.LOCK_EX)
            except EnvironmentError as e:
                out.eend(1, str(e))
                raise
            out.eend(os.EX_OK)
        elif e.errno == errno.ENOLCK:
            # We're not allowed to lock on this FS.
            os.close(myfd)
            link_success = False
            if lockfilename == str(lockfilename):
                if wantnewlockfile:
                    try:
                        if os.stat(lockfilename)[stat.ST_NLINK] == 1:
                            os.unlink(lockfilename)
                    except OSError:
                        pass
                    link_success = hardlink_lockfile(lockfilename)
            if not link_success:
                raise
            locking_method = None
            myfd = HARDLINK_FD
        else:
            raise


    if isinstance(lockfilename, str) and \
     myfd != HARDLINK_FD and _fstat_nlink(myfd) == 0:
        # The file was deleted on us... Keep trying to make one...
        os.close(myfd)
        writemsg(_("lockfile recurse\n"), 1)
        lockfilename, myfd, unlinkfile, locking_method = lockfile(
            mypath,
            wantnewlockfile=wantnewlockfile,
            unlinkfile=unlinkfile,
            waiting_msg=waiting_msg,
            flags=flags)

    writemsg(str((lockfilename, myfd, unlinkfile)) + "\n", 1)
    return (lockfilename, myfd, unlinkfile, locking_method)
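
The locking core of Example #16: try fcntl.lockf() with LOCK_NB first, and only fall back to a blocking call after telling the user we are waiting. That pattern in isolation (the lock path is illustrative; Unix only):

import errno
import fcntl
import os

def lock_fd(fd, waiting_msg="waiting for lock"):
    # Non-blocking first; EACCES/EAGAIN means someone else holds the lock.
    try:
        fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except OSError as e:
        if e.errno not in (errno.EACCES, errno.EAGAIN):
            raise
        print(waiting_msg)
        fcntl.lockf(fd, fcntl.LOCK_EX)  # now block until the lock is free
    return fd

fd = os.open("/tmp/.demo.portage_lockfile", os.O_CREAT | os.O_RDWR, 0o660)
lock_fd(fd)
os.close(fd)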
Example #17
import sys

from itertools import chain

try:
	from lxml import etree
	from lxml.etree import ParserError
except (SystemExit, KeyboardInterrupt):
	raise
except (ImportError, SystemError, RuntimeError, Exception):
	# broken or missing xml support
	# https://bugs.python.org/issue14988
	msg = ["Please emerge dev-python/lxml in order to use repoman."]
	from portage.output import EOutput
	out = EOutput()
	for line in msg:
		out.eerror(line)
	sys.exit(1)

# import our initialized portage instance
from repoman._portage import portage
from repoman.metadata import metadata_dtd_uri
from repoman.modules.scan.scanbase import ScanBase

from portage.exception import InvalidAtom
from portage import os
from portage.dep import Atom
from portage.xml.metadata import parse_metadata_use

from .use_flags import USEFlagChecks
Example #18
def fetch(myuris, mysettings, listonly=0, fetchonly=0,
	locks_in_subdir=".locks", use_locks=1, try_mirrors=1, digests=None,
	allow_missing_digests=True, force=False):
	"""
	Fetch files to DISTDIR and also verify digests if they are available.

	@param myuris: Maps each file name to a tuple of available fetch URIs.
	@type myuris: dict
	@param mysettings: Portage config instance.
	@type mysettings: portage.config
	@param listonly: Only print URIs and do not actually fetch them.
	@type listonly: bool
	@param fetchonly: Do not block for files that are locked by a
		concurrent fetcher process. This means that the function can
		return successfully *before* all files have been successfully
		fetched!
	@type fetchonly: bool
	@param use_locks: Enable locks. This parameter is ineffective if
		FEATURES=distlocks is disabled in the portage config!
	@type use_locks: bool
	@param digests: Maps each file name to a dict of digest types and values.
	@type digests: dict
	@param allow_missing_digests: Enable fetch even if there are no digests
		available for verification.
	@type allow_missing_digests: bool
	@param force: Force download, even when a file already exists in
		DISTDIR. This is most useful when there are no digests available,
		since otherwise download will be automatically forced if the
		existing file does not match the available digests. Also, this
		avoids the need to remove the existing file in advance, which
		makes it possible to atomically replace the file and avoid
		interference with concurrent processes.
	@type force: bool
	@rtype: int
	@return: 1 if successful, 0 otherwise.
	"""

	if force and digests:
		# Since the force parameter can trigger unnecessary fetch when the
		# digests match, do not allow force=True when digests are provided.
		raise PortageException(_('fetch: force=True is not allowed when digests are provided'))

	if not myuris:
		return 1

	features = mysettings.features
	restrict = mysettings.get("PORTAGE_RESTRICT","").split()
	userfetch = portage.data.secpass >= 2 and "userfetch" in features

	# 'nomirror' is bad/negative logic. You restrict mirroring, not no-mirroring.
	restrict_mirror = "mirror" in restrict or "nomirror" in restrict
	if restrict_mirror:
		if ("mirror" in features) and ("lmirror" not in features):
			# lmirror should allow you to bypass mirror restrictions.
			# XXX: This is not a good thing, and is temporary at best.
			print(_(">>> \"mirror\" mode desired and \"mirror\" restriction found; skipping fetch."))
			return 1

	# Generally, downloading the same file repeatedly from
	# every single available mirror is a waste of bandwidth
	# and time, so there needs to be a cap.
	checksum_failure_max_tries = 5
	v = checksum_failure_max_tries
	try:
		v = int(mysettings.get("PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS",
			checksum_failure_max_tries))
	except (ValueError, OverflowError):
		writemsg(_("!!! Variable PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS"
			" contains non-integer value: '%s'\n") % \
			mysettings["PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS"], noiselevel=-1)
		writemsg(_("!!! Using PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS "
			"default value: %s\n") % checksum_failure_max_tries,
			noiselevel=-1)
		v = checksum_failure_max_tries
	if v < 1:
		writemsg(_("!!! Variable PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS"
			" contains value less than 1: '%s'\n") % v, noiselevel=-1)
		writemsg(_("!!! Using PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS "
			"default value: %s\n") % checksum_failure_max_tries,
			noiselevel=-1)
		v = checksum_failure_max_tries
	checksum_failure_max_tries = v
	del v

	fetch_resume_size_default = "350K"
	fetch_resume_size = mysettings.get("PORTAGE_FETCH_RESUME_MIN_SIZE")
	if fetch_resume_size is not None:
		fetch_resume_size = "".join(fetch_resume_size.split())
		if not fetch_resume_size:
			# If it's undefined or empty, silently use the default.
			fetch_resume_size = fetch_resume_size_default
		match = _fetch_resume_size_re.match(fetch_resume_size)
		if match is None or \
			(match.group(2).upper() not in _size_suffix_map):
			writemsg(_("!!! Variable PORTAGE_FETCH_RESUME_MIN_SIZE"
				" contains an unrecognized format: '%s'\n") % \
				mysettings["PORTAGE_FETCH_RESUME_MIN_SIZE"], noiselevel=-1)
			writemsg(_("!!! Using PORTAGE_FETCH_RESUME_MIN_SIZE "
				"default value: %s\n") % fetch_resume_size_default,
				noiselevel=-1)
			fetch_resume_size = None
	if fetch_resume_size is None:
		fetch_resume_size = fetch_resume_size_default
		match = _fetch_resume_size_re.match(fetch_resume_size)
	fetch_resume_size = int(match.group(1)) * \
		2 ** _size_suffix_map[match.group(2).upper()]

	# Behave like the package has RESTRICT="primaryuri" after a
	# couple of checksum failures, to increase the probability
	# of success before checksum_failure_max_tries is reached.
	checksum_failure_primaryuri = 2
	thirdpartymirrors = mysettings.thirdpartymirrors()

	# In the background parallel-fetch process, it's safe to skip checksum
	# verification of pre-existing files in $DISTDIR that have the correct
	# file size. The parent process will verify their checksums prior to
	# the unpack phase.

	parallel_fetchonly = "PORTAGE_PARALLEL_FETCHONLY" in mysettings
	if parallel_fetchonly:
		fetchonly = 1

	check_config_instance(mysettings)

	custommirrors = grabdict(os.path.join(mysettings["PORTAGE_CONFIGROOT"],
		CUSTOM_MIRRORS_FILE), recursive=1)

	mymirrors=[]

	if listonly or ("distlocks" not in features):
		use_locks = 0

	distdir_writable = os.access(mysettings["DISTDIR"], os.W_OK)
	fetch_to_ro = 0
	if "skiprocheck" in features:
		fetch_to_ro = 1

	if not distdir_writable and fetch_to_ro:
		if use_locks:
			writemsg(colorize("BAD",
				_("!!! For fetching to a read-only filesystem, "
				"locking should be turned off.\n")), noiselevel=-1)
			writemsg(_("!!! This can be done by adding -distlocks to "
				"FEATURES in /etc/portage/make.conf\n"), noiselevel=-1)
#			use_locks = 0

	# local mirrors are always added
	if try_mirrors and "local" in custommirrors:
		mymirrors += custommirrors["local"]

	if restrict_mirror:
		# We don't add any mirrors.
		pass
	else:
		if try_mirrors:
			mymirrors += [x.rstrip("/") for x in mysettings["GENTOO_MIRRORS"].split() if x]

	hash_filter = _hash_filter(mysettings.get("PORTAGE_CHECKSUM_FILTER", ""))
	if hash_filter.transparent:
		hash_filter = None
	skip_manifest = mysettings.get("EBUILD_SKIP_MANIFEST") == "1"
	if skip_manifest:
		allow_missing_digests = True
	pkgdir = mysettings.get("O")
	if digests is None and not (pkgdir is None or skip_manifest):
		mydigests = mysettings.repositories.get_repo_for_location(
			os.path.dirname(os.path.dirname(pkgdir))).load_manifest(
			pkgdir, mysettings["DISTDIR"]).getTypeDigests("DIST")
	elif digests is None or skip_manifest:
		# no digests because fetch was not called for a specific package
		mydigests = {}
	else:
		mydigests = digests

	ro_distdirs = [x for x in \
		shlex_split(mysettings.get("PORTAGE_RO_DISTDIRS", "")) \
		if os.path.isdir(x)]

	fsmirrors = []
	for x in range(len(mymirrors)-1,-1,-1):
		if mymirrors[x] and mymirrors[x][0]=='/':
			fsmirrors += [mymirrors[x]]
			del mymirrors[x]

	restrict_fetch = "fetch" in restrict
	force_mirror = "force-mirror" in features and not restrict_mirror
	custom_local_mirrors = custommirrors.get("local", [])
	if restrict_fetch:
		# With fetch restriction, a normal uri may only be fetched from
		# custom local mirrors (if available).  A mirror:// uri may also
		# be fetched from specific mirrors (effectively overriding fetch
		# restriction, but only for specific mirrors).
		locations = custom_local_mirrors
	else:
		locations = mymirrors

	file_uri_tuples = []
	# Check for 'items' attribute since OrderedDict is not a dict.
	if hasattr(myuris, 'items'):
		for myfile, uri_set in myuris.items():
			for myuri in uri_set:
				file_uri_tuples.append((myfile, myuri))
			if not uri_set:
				file_uri_tuples.append((myfile, None))
	else:
		for myuri in myuris:
			if urlparse(myuri).scheme:
				file_uri_tuples.append((os.path.basename(myuri), myuri))
			else:
				file_uri_tuples.append((os.path.basename(myuri), None))

	filedict = OrderedDict()
	primaryuri_dict = {}
	thirdpartymirror_uris = {}
	for myfile, myuri in file_uri_tuples:
		if myfile not in filedict:
			filedict[myfile]=[]
			if distdir_writable:
				mirror_cache = os.path.join(mysettings["DISTDIR"],
						".mirror-cache.json")
			else:
				mirror_cache = None
			for l in locations:
				filedict[myfile].append(functools.partial(
					get_mirror_url, l, myfile, mysettings, mirror_cache))
		if myuri is None:
			continue
		if myuri[:9]=="mirror://":
			eidx = myuri.find("/", 9)
			if eidx != -1:
				mirrorname = myuri[9:eidx]
				path = myuri[eidx+1:]

				# Try user-defined mirrors first
				if mirrorname in custommirrors:
					for cmirr in custommirrors[mirrorname]:
						filedict[myfile].append(
							cmirr.rstrip("/") + "/" + path)

				# now try the official mirrors
				if mirrorname in thirdpartymirrors:
					uris = [locmirr.rstrip("/") + "/" + path \
						for locmirr in thirdpartymirrors[mirrorname]]
					random.shuffle(uris)
					filedict[myfile].extend(uris)
					thirdpartymirror_uris.setdefault(myfile, []).extend(uris)

				if mirrorname not in custommirrors and \
					mirrorname not in thirdpartymirrors:
					writemsg(_("!!! No known mirror by the name: %s\n") % (mirrorname))
			else:
				writemsg(_("Invalid mirror definition in SRC_URI:\n"), noiselevel=-1)
				writemsg("  %s\n" % (myuri), noiselevel=-1)
		else:
			if restrict_fetch or force_mirror:
				# Only fetching from specific mirrors is allowed.
				continue
			primaryuris = primaryuri_dict.get(myfile)
			if primaryuris is None:
				primaryuris = []
				primaryuri_dict[myfile] = primaryuris
			primaryuris.append(myuri)

	# Order primaryuri_dict values to match that in SRC_URI.
	for uris in primaryuri_dict.values():
		uris.reverse()

	# Prefer thirdpartymirrors over normal mirrors in cases when
	# the file does not yet exist on the normal mirrors.
	for myfile, uris in thirdpartymirror_uris.items():
		primaryuri_dict.setdefault(myfile, []).extend(uris)

	# Now merge primaryuri values into filedict (includes mirrors
	# explicitly referenced in SRC_URI).
	if "primaryuri" in restrict:
		for myfile, uris in filedict.items():
			filedict[myfile] = primaryuri_dict.get(myfile, []) + uris
	else:
		for myfile in filedict:
			filedict[myfile] += primaryuri_dict.get(myfile, [])

	can_fetch=True

	if listonly:
		can_fetch = False

	if can_fetch and not fetch_to_ro:
		try:
			_ensure_distdir(mysettings, mysettings["DISTDIR"])
		except PortageException as e:
			if not os.path.isdir(mysettings["DISTDIR"]):
				writemsg("!!! %s\n" % str(e), noiselevel=-1)
				writemsg(_("!!! Directory Not Found: DISTDIR='%s'\n") % mysettings["DISTDIR"], noiselevel=-1)
				writemsg(_("!!! Fetching will fail!\n"), noiselevel=-1)

	if can_fetch and \
		not fetch_to_ro and \
		not os.access(mysettings["DISTDIR"], os.W_OK):
		writemsg(_("!!! No write access to '%s'\n") % mysettings["DISTDIR"],
			noiselevel=-1)
		can_fetch = False

	distdir_writable = can_fetch and not fetch_to_ro
	failed_files = set()
	restrict_fetch_msg = False
	valid_hashes = set(get_valid_checksum_keys())
	valid_hashes.discard("size")

	for myfile in filedict:
		"""
		fetched  status
		0        nonexistent
		1        partially downloaded
		2        completely downloaded
		"""
		fetched = 0

		orig_digests = mydigests.get(myfile, {})

		if not (allow_missing_digests or listonly):
			verifiable_hash_types = set(orig_digests).intersection(valid_hashes)
			if not verifiable_hash_types:
				expected = " ".join(sorted(valid_hashes))
				got = set(orig_digests)
				got.discard("size")
				got = " ".join(sorted(got))
				reason = (_("Insufficient data for checksum verification"),
					got, expected)
				writemsg(_("!!! Fetched file: %s VERIFY FAILED!\n") % myfile,
					noiselevel=-1)
				writemsg(_("!!! Reason: %s\n") % reason[0],
					noiselevel=-1)
				writemsg(_("!!! Got:      %s\n!!! Expected: %s\n") % \
					(reason[1], reason[2]), noiselevel=-1)

				if fetchonly:
					failed_files.add(myfile)
					continue
				else:
					return 0

		size = orig_digests.get("size")
		if size == 0:
			# Zero-byte distfiles are always invalid, so discard their digests.
			del mydigests[myfile]
			orig_digests.clear()
			size = None
		pruned_digests = orig_digests
		if parallel_fetchonly:
			pruned_digests = {}
			if size is not None:
				pruned_digests["size"] = size

		myfile_path = os.path.join(mysettings["DISTDIR"], myfile)
		download_path = myfile_path if fetch_to_ro else myfile_path + _download_suffix
		has_space = True
		has_space_superuser = True
		file_lock = None
		if listonly:
			writemsg_stdout("\n", noiselevel=-1)
		else:
			# check if there is enough space in DISTDIR to completely store myfile
			# overestimate the filesize so we aren't bitten by FS overhead
			vfs_stat = None
			if size is not None and hasattr(os, "statvfs"):
				try:
					vfs_stat = os.statvfs(mysettings["DISTDIR"])
				except OSError as e:
					writemsg_level("!!! statvfs('%s'): %s\n" %
						(mysettings["DISTDIR"], e),
						noiselevel=-1, level=logging.ERROR)
					del e

			if vfs_stat is not None:
				try:
					mysize = os.stat(myfile_path).st_size
				except OSError as e:
					if e.errno not in (errno.ENOENT, errno.ESTALE):
						raise
					del e
					mysize = 0
				if (size - mysize + vfs_stat.f_bsize) >= \
					(vfs_stat.f_bsize * vfs_stat.f_bavail):

					if (size - mysize + vfs_stat.f_bsize) >= \
						(vfs_stat.f_bsize * vfs_stat.f_bfree):
						has_space_superuser = False

					if not has_space_superuser:
						has_space = False
					elif portage.data.secpass < 2:
						has_space = False
					elif userfetch:
						has_space = False

			if distdir_writable and use_locks:

				lock_kwargs = {}
				if fetchonly:
					lock_kwargs["flags"] = os.O_NONBLOCK

				try:
					file_lock = lockfile(myfile_path,
						wantnewlockfile=1, **lock_kwargs)
				except TryAgain:
					writemsg(_(">>> File '%s' is already locked by "
						"another fetcher. Continuing...\n") % myfile,
						noiselevel=-1)
					continue
		try:
			if not listonly:

				eout = EOutput()
				eout.quiet = mysettings.get("PORTAGE_QUIET") == "1"
				match, mystat = _check_distfile(
					myfile_path, pruned_digests, eout, hash_filter=hash_filter)
				if match and not force:
					# Skip permission adjustment for symlinks, since we don't
					# want to modify anything outside of the primary DISTDIR,
					# and symlinks typically point to PORTAGE_RO_DISTDIRS.
					if distdir_writable and not os.path.islink(myfile_path):
						try:
							apply_secpass_permissions(myfile_path,
								gid=portage_gid, mode=0o664, mask=0o2,
								stat_cached=mystat)
						except PortageException as e:
							if not os.access(myfile_path, os.R_OK):
								writemsg(_("!!! Failed to adjust permissions:"
									" %s\n") % str(e), noiselevel=-1)
							del e
					continue

				# Remove broken symlinks or symlinks to files which
				# _check_distfile did not match above.
				if distdir_writable and mystat is None or os.path.islink(myfile_path):
					try:
						os.unlink(myfile_path)
					except OSError as e:
						if e.errno not in (errno.ENOENT, errno.ESTALE):
							raise
					mystat = None

				if mystat is not None:
					if stat.S_ISDIR(mystat.st_mode):
						writemsg_level(
							_("!!! Unable to fetch file since "
							"a directory is in the way: \n"
							"!!!   %s\n") % myfile_path,
							level=logging.ERROR, noiselevel=-1)
						return 0

					if distdir_writable and not force:
						# Since _check_distfile did not match above, the file
						# is either corrupt or its identity has changed since
						# the last time it was fetched, so rename it.
						temp_filename = _checksum_failure_temp_file(
							mysettings, mysettings["DISTDIR"], myfile)
						writemsg_stdout(_("Refetching... "
							"File renamed to '%s'\n\n") % \
							temp_filename, noiselevel=-1)

				# Stat the temporary download file for comparison with
				# fetch_resume_size.
				try:
					mystat = os.stat(download_path)
				except OSError as e:
					if e.errno not in (errno.ENOENT, errno.ESTALE):
						raise
					mystat = None

				if mystat is not None:
					if mystat.st_size == 0:
						if distdir_writable:
							try:
								os.unlink(download_path)
							except OSError:
								pass
					elif distdir_writable and size is not None:
						if mystat.st_size < fetch_resume_size and \
							mystat.st_size < size:
							# If the file already exists and the size does not
							# match the existing digests, it may be that the
							# user is attempting to update the digest. In this
							# case, the digestgen() function will advise the
							# user to use `ebuild --force foo.ebuild manifest`
							# in order to force the old digests to be replaced.
							# Since the user may want to keep this file, rename
							# it instead of deleting it.
							writemsg(_(">>> Renaming distfile with size "
								"%d (smaller than PORTAGE_FETCH_RESUME_MIN_SIZE)\n")
								% mystat.st_size)
							temp_filename = \
								_checksum_failure_temp_file(
									mysettings, mysettings["DISTDIR"],
									os.path.basename(download_path))
							writemsg_stdout(_("Refetching... "
								"File renamed to '%s'\n\n") % \
								temp_filename, noiselevel=-1)
						elif mystat.st_size >= size:
							temp_filename = \
								_checksum_failure_temp_file(
									mysettings, mysettings["DISTDIR"],
									os.path.basename(download_path))
							writemsg_stdout(_("Refetching... "
								"File renamed to '%s'\n\n") % \
								temp_filename, noiselevel=-1)

				if distdir_writable and ro_distdirs:
					readonly_file = None
					for x in ro_distdirs:
						filename = os.path.join(x, myfile)
						match, mystat = _check_distfile(
							filename, pruned_digests, eout, hash_filter=hash_filter)
						if match:
							readonly_file = filename
							break
					if readonly_file is not None:
						try:
							os.unlink(myfile_path)
						except OSError as e:
							if e.errno not in (errno.ENOENT, errno.ESTALE):
								raise
							del e
						os.symlink(readonly_file, myfile_path)
						continue

				# this message is shown only after we know that
				# the file is not already fetched
				if not has_space:
					writemsg(_("!!! Insufficient space to store %s in %s\n") % \
						(myfile, mysettings["DISTDIR"]), noiselevel=-1)

					if has_space_superuser:
						writemsg(_("!!! Insufficient privileges to use "
							"remaining space.\n"), noiselevel=-1)
						if userfetch:
							writemsg(_("!!! You may set FEATURES=\"-userfetch\""
								" in /etc/portage/make.conf in order to fetch with\n"
								"!!! superuser privileges.\n"), noiselevel=-1)

				if fsmirrors and not os.path.exists(myfile_path) and has_space:
					for mydir in fsmirrors:
						mirror_file = os.path.join(mydir, myfile)
						try:
							shutil.copyfile(mirror_file, download_path)
							writemsg(_("Local mirror has file: %s\n") % myfile)
							break
						except (IOError, OSError) as e:
							if e.errno not in (errno.ENOENT, errno.ESTALE):
								raise
							del e

				try:
					mystat = os.stat(download_path)
				except OSError as e:
					if e.errno not in (errno.ENOENT, errno.ESTALE):
						raise
					del e
				else:
					# Skip permission adjustment for symlinks, since we don't
					# want to modify anything outside of the primary DISTDIR,
					# and symlinks typically point to PORTAGE_RO_DISTDIRS.
					if not os.path.islink(download_path):
						try:
							apply_secpass_permissions(download_path,
								gid=portage_gid, mode=0o664, mask=0o2,
								stat_cached=mystat)
						except PortageException as e:
							if not os.access(download_path, os.R_OK):
								writemsg(_("!!! Failed to adjust permissions:"
									" %s\n") % (e,), noiselevel=-1)

					# If the file is empty then it's obviously invalid. Remove
					# the empty file and try to download if possible.
					if mystat.st_size == 0:
						if distdir_writable:
							try:
								os.unlink(download_path)
							except EnvironmentError:
								pass
					elif not orig_digests:
						# We don't have a digest, but the file exists.  We must
						# assume that it is fully downloaded.
						if not force:
							continue
					else:
						if (mydigests[myfile].get("size") is not None
								and mystat.st_size < mydigests[myfile]["size"]
								and not restrict_fetch):
							fetched = 1 # Try to resume this download.
						elif parallel_fetchonly and \
							mystat.st_size == mydigests[myfile]["size"]:
							eout = EOutput()
							eout.quiet = \
								mysettings.get("PORTAGE_QUIET") == "1"
							eout.ebegin(
								"%s size ;-)" % (myfile, ))
							eout.eend(0)
							continue
						else:
							digests = _filter_unaccelarated_hashes(mydigests[myfile])
							if hash_filter is not None:
								digests = _apply_hash_filter(digests, hash_filter)
							verified_ok, reason = verify_all(download_path, digests)
							if not verified_ok:
								writemsg(_("!!! Previously fetched"
									" file: '%s'\n") % myfile, noiselevel=-1)
								writemsg(_("!!! Reason: %s\n") % reason[0],
									noiselevel=-1)
								writemsg(_("!!! Got:      %s\n"
									"!!! Expected: %s\n") % \
									(reason[1], reason[2]), noiselevel=-1)
								if reason[0] == _("Insufficient data for checksum verification"):
									return 0
								if distdir_writable:
									temp_filename = \
										_checksum_failure_temp_file(
											mysettings, mysettings["DISTDIR"],
											os.path.basename(download_path))
									writemsg_stdout(_("Refetching... "
										"File renamed to '%s'\n\n") % \
										temp_filename, noiselevel=-1)
							else:
								if not fetch_to_ro:
									_movefile(download_path, myfile_path, mysettings=mysettings)
								eout = EOutput()
								eout.quiet = \
									mysettings.get("PORTAGE_QUIET", None) == "1"
								if digests:
									digests = list(digests)
									digests.sort()
									eout.ebegin(
										"%s %s ;-)" % (myfile, " ".join(digests)))
									eout.eend(0)
								continue # fetch any remaining files

			# Create a reversed list since that is optimal for list.pop().
			uri_list = filedict[myfile][:]
			uri_list.reverse()
			checksum_failure_count = 0
			tried_locations = set()
			while uri_list:
				loc = uri_list.pop()
				if isinstance(loc, functools.partial):
					loc = loc()
				# Eliminate duplicates here in case we've switched to
				# "primaryuri" mode on the fly due to a checksum failure.
				if loc in tried_locations:
					continue
				tried_locations.add(loc)
				if listonly:
					writemsg_stdout(loc+" ", noiselevel=-1)
					continue
				# allow different fetchcommands per protocol
				protocol = loc[0:loc.find("://")]

				global_config_path = GLOBAL_CONFIG_PATH
				if portage.const.EPREFIX:
					global_config_path = os.path.join(portage.const.EPREFIX,
							GLOBAL_CONFIG_PATH.lstrip(os.sep))

				missing_file_param = False
				fetchcommand_var = "FETCHCOMMAND_" + protocol.upper()
				fetchcommand = mysettings.get(fetchcommand_var)
				if fetchcommand is None:
					fetchcommand_var = "FETCHCOMMAND"
					fetchcommand = mysettings.get(fetchcommand_var)
					if fetchcommand is None:
						writemsg_level(
							_("!!! %s is unset. It should "
							"have been defined in\n!!! %s/make.globals.\n") \
							% (fetchcommand_var, global_config_path),
							level=logging.ERROR, noiselevel=-1)
						return 0
				if "${FILE}" not in fetchcommand:
					writemsg_level(
						_("!!! %s does not contain the required ${FILE}"
						" parameter.\n") % fetchcommand_var,
						level=logging.ERROR, noiselevel=-1)
					missing_file_param = True

				resumecommand_var = "RESUMECOMMAND_" + protocol.upper()
				resumecommand = mysettings.get(resumecommand_var)
				if resumecommand is None:
					resumecommand_var = "RESUMECOMMAND"
					resumecommand = mysettings.get(resumecommand_var)
					if resumecommand is None:
						writemsg_level(
							_("!!! %s is unset. It should "
							"have been defined in\n!!! %s/make.globals.\n") \
							% (resumecommand_var, global_config_path),
							level=logging.ERROR, noiselevel=-1)
						return 0
				if "${FILE}" not in resumecommand:
					writemsg_level(
						_("!!! %s does not contain the required ${FILE}"
						" parameter.\n") % resumecommand_var,
						level=logging.ERROR, noiselevel=-1)
					missing_file_param = True

				if missing_file_param:
					writemsg_level(
						_("!!! Refer to the make.conf(5) man page for "
						"information about how to\n!!! correctly specify "
						"FETCHCOMMAND and RESUMECOMMAND.\n"),
						level=logging.ERROR, noiselevel=-1)
					if myfile != os.path.basename(loc):
						return 0

				if not can_fetch:
					if fetched != 2:
						try:
							mysize = os.stat(download_path).st_size
						except OSError as e:
							if e.errno not in (errno.ENOENT, errno.ESTALE):
								raise
							del e
							mysize = 0

						if mysize == 0:
							writemsg(_("!!! File %s isn't fetched but unable to get it.\n") % myfile,
								noiselevel=-1)
						elif size is None or size > mysize:
							writemsg(_("!!! File %s isn't fully fetched, but unable to complete it\n") % myfile,
								noiselevel=-1)
						else:
							writemsg(_("!!! File %s is incorrect size, "
								"but unable to retry.\n") % myfile, noiselevel=-1)
						return 0
					else:
						continue

				if fetched != 2 and has_space:
					#we either need to resume or start the download
					if fetched == 1:
						try:
							mystat = os.stat(download_path)
						except OSError as e:
							if e.errno not in (errno.ENOENT, errno.ESTALE):
								raise
							del e
							fetched = 0
						else:
							if distdir_writable and mystat.st_size < fetch_resume_size:
								writemsg(_(">>> Deleting distfile with size "
									"%d (smaller than PORTAGE_FETCH_RESUME_MIN_SIZE)\n")
									% mystat.st_size)
								try:
									os.unlink(download_path)
								except OSError as e:
									if e.errno not in \
										(errno.ENOENT, errno.ESTALE):
										raise
									del e
								fetched = 0
					if fetched == 1:
						#resume mode:
						writemsg(_(">>> Resuming download...\n"))
						locfetch=resumecommand
						command_var = resumecommand_var
					else:
						#normal mode:
						locfetch=fetchcommand
						command_var = fetchcommand_var
					writemsg_stdout(_(">>> Downloading '%s'\n") % \
						_hide_url_passwd(loc))
					variables = {
						"URI":     loc,
						"FILE":    os.path.basename(download_path)
					}

					for k in ("DISTDIR", "PORTAGE_SSH_OPTS"):
						v = mysettings.get(k)
						if v is not None:
							variables[k] = v

					myfetch = shlex_split(locfetch)
					myfetch = [varexpand(x, mydict=variables) for x in myfetch]
					myret = -1
					try:

						myret = _spawn_fetch(mysettings, myfetch)

					finally:
						try:
							apply_secpass_permissions(download_path,
								gid=portage_gid, mode=0o664, mask=0o2)
						except FileNotFound:
							pass
						except PortageException as e:
							if not os.access(download_path, os.R_OK):
								writemsg(_("!!! Failed to adjust permissions:"
									" %s\n") % str(e), noiselevel=-1)
							del e

					# If the file is empty then it's obviously invalid.  Don't
					# trust the return value from the fetcher.  Remove the
					# empty file and try to download again.
					try:
						mystat = os.lstat(download_path)
						if mystat.st_size == 0 or (stat.S_ISLNK(mystat.st_mode) and not os.path.exists(download_path)):
							os.unlink(download_path)
							fetched = 0
							continue
					except EnvironmentError:
						pass

					if mydigests is not None and myfile in mydigests:
						try:
							mystat = os.stat(download_path)
						except OSError as e:
							if e.errno not in (errno.ENOENT, errno.ESTALE):
								raise
							del e
							fetched = 0
						else:

							if stat.S_ISDIR(mystat.st_mode):
								# This can happen if FETCHCOMMAND erroneously
								# contains wget's -P option where it should
								# instead have -O.
								writemsg_level(
									_("!!! The command specified in the "
									"%s variable appears to have\n!!! "
									"created a directory instead of a "
									"normal file.\n") % command_var,
									level=logging.ERROR, noiselevel=-1)
								writemsg_level(
									_("!!! Refer to the make.conf(5) "
									"man page for information about how "
									"to\n!!! correctly specify "
									"FETCHCOMMAND and RESUMECOMMAND.\n"),
									level=logging.ERROR, noiselevel=-1)
								return 0

							# No exception, so the file exists. Let digestcheck()
							# report appropriately for size or checksum errors.

							# If the fetcher reported success and the file is
							# too small, it's probably because the digest is
							# bad (upstream changed the distfile).  In this
							# case we don't want to attempt to resume. Show a
							# digest verification failure so that the user gets
							# a clue about what just happened.
							if myret != os.EX_OK and \
								mystat.st_size < mydigests[myfile]["size"]:
								# Fetch failed... Try the next one... Kill 404 files though.
								if mystat[stat.ST_SIZE] < 100000 and len(myfile) > 4 and \
										not (myfile.endswith(".html") or myfile.endswith(".htm")):
									html404 = re.compile("<title>.*(not found|404).*</title>", re.I | re.M)
									with io.open(
										_unicode_encode(download_path,
										encoding=_encodings['fs'], errors='strict'),
										mode='r', encoding=_encodings['content'], errors='replace'
										) as f:
										if html404.search(f.read()):
											try:
												os.unlink(download_path)
												writemsg(_(">>> Deleting invalid distfile. (Improper 404 redirect from server.)\n"))
												fetched = 0
												continue
											except (IOError, OSError):
												pass
								fetched = 1
								continue
							if True:
								# File is the correct size--check the checksums for the fetched
								# file NOW, for those users who don't have a stable/continuous
								# net connection. This way we have a chance to try to download
								# from another mirror...
								digests = _filter_unaccelarated_hashes(mydigests[myfile])
								if hash_filter is not None:
									digests = _apply_hash_filter(digests, hash_filter)
								verified_ok, reason = verify_all(download_path, digests)
								if not verified_ok:
									writemsg(_("!!! Fetched file: %s VERIFY FAILED!\n") % myfile,
										noiselevel=-1)
									writemsg(_("!!! Reason: %s\n") % reason[0],
										noiselevel=-1)
									writemsg(_("!!! Got:      %s\n!!! Expected: %s\n") % \
										(reason[1], reason[2]), noiselevel=-1)
									if reason[0] == _("Insufficient data for checksum verification"):
										return 0
									if distdir_writable:
										temp_filename = \
											_checksum_failure_temp_file(
												mysettings, mysettings["DISTDIR"],
												os.path.basename(download_path))
										writemsg_stdout(_("Refetching... "
											"File renamed to '%s'\n\n") % \
											temp_filename, noiselevel=-1)
									fetched=0
									checksum_failure_count += 1
									if checksum_failure_count == \
										checksum_failure_primaryuri:
										# Switch to "primaryuri" mode in order
										# to increase the probability of success.
										primaryuris = \
											primaryuri_dict.get(myfile)
										if primaryuris:
											uri_list.extend(
												reversed(primaryuris))
									if checksum_failure_count >= \
										checksum_failure_max_tries:
										break
								else:
									if not fetch_to_ro:
										_movefile(download_path, myfile_path, mysettings=mysettings)
									eout = EOutput()
									eout.quiet = mysettings.get("PORTAGE_QUIET", None) == "1"
									if digests:
										eout.ebegin("%s %s ;-)" % \
											(myfile, " ".join(sorted(digests))))
										eout.eend(0)
									fetched=2
									break
					else: # no digests available
						if not myret:
							if not fetch_to_ro:
								_movefile(download_path, myfile_path, mysettings=mysettings)
							fetched=2
							break
						elif mydigests is not None:
							writemsg(_("No digest file available and download failed.\n\n"),
								noiselevel=-1)
		finally:
			if use_locks and file_lock:
				unlockfile(file_lock)
				file_lock = None

		if listonly:
			writemsg_stdout("\n", noiselevel=-1)
		if fetched != 2:
			if restrict_fetch and not restrict_fetch_msg:
				restrict_fetch_msg = True
				msg = _("\n!!! %s/%s"
					" has fetch restriction turned on.\n"
					"!!! This probably means that this "
					"ebuild's files must be downloaded\n"
					"!!! manually.  See the comments in"
					" the ebuild for more information.\n\n") % \
					(mysettings["CATEGORY"], mysettings["PF"])
				writemsg_level(msg,
					level=logging.ERROR, noiselevel=-1)
			elif restrict_fetch:
				pass
			elif listonly:
				pass
			elif not filedict[myfile]:
				writemsg(_("Warning: No mirrors available for file"
					" '%s'\n") % (myfile), noiselevel=-1)
			else:
				writemsg(_("!!! Couldn't download '%s'. Aborting.\n") % myfile,
					noiselevel=-1)

			if listonly:
				failed_files.add(myfile)
				continue
			elif fetchonly:
				failed_files.add(myfile)
				continue
			return 0
	if failed_files:
		return 0
	return 1
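Note: a minimal, hedged sketch of driving fetch() directly; the URI map below is illustrative, and portage.settings stands in for a properly scoped config instance.

# Hedged usage sketch for fetch(); assumes a working Portage install.
import portage
from portage.package.ebuild.fetch import fetch

# Map each distfile name to the URIs it may be fetched from (illustrative).
uris = {"foo-1.0.tar.gz": ("https://example.org/dist/foo-1.0.tar.gz",)}

# digests={} skips the Manifest lookup; listonly=1 only prints candidate URIs.
ok = fetch(uris, portage.settings, listonly=1, digests={})
print("fetch() returned %d" % ok)  # 1 on success, 0 on failure (per docstring)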
Example #20
def digestcheck(myfiles, mysettings, strict=False, justmanifest=None, mf=None):
	"""
	Verifies checksums. Assumes all files have been downloaded.
	@rtype: int
	@return: 1 on success and 0 on failure
	"""

	if justmanifest is not None:
		warnings.warn("The justmanifest parameter of the " + \
			"portage.package.ebuild.digestcheck.digestcheck()" + \
			" function is now unused.",
			DeprecationWarning, stacklevel=2)
		justmanifest = None

	if mysettings.get("EBUILD_SKIP_MANIFEST") == "1":
		return 1
	pkgdir = mysettings["O"]
	hash_filter = _hash_filter(mysettings.get("PORTAGE_CHECKSUM_FILTER", ""))
	if hash_filter.transparent:
		hash_filter = None
	if mf is None:
		mf = mysettings.repositories.get_repo_for_location(
			os.path.dirname(os.path.dirname(pkgdir)))
		mf = mf.load_manifest(pkgdir, mysettings["DISTDIR"])
	eout = EOutput()
	eout.quiet = mysettings.get("PORTAGE_QUIET", None) == "1"
	try:
		if not mf.thin and strict and "PORTAGE_PARALLEL_FETCHONLY" not in mysettings:
			if mf.fhashdict.get("EBUILD"):
				eout.ebegin(_("checking ebuild checksums ;-)"))
				mf.checkTypeHashes("EBUILD", hash_filter=hash_filter)
				eout.eend(0)
			if mf.fhashdict.get("AUX"):
				eout.ebegin(_("checking auxfile checksums ;-)"))
				mf.checkTypeHashes("AUX", hash_filter=hash_filter)
				eout.eend(0)
			if mf.fhashdict.get("MISC"):
				eout.ebegin(_("checking miscfile checksums ;-)"))
				mf.checkTypeHashes("MISC", ignoreMissingFiles=True,
					hash_filter=hash_filter)
				eout.eend(0)
		for f in myfiles:
			eout.ebegin(_("checking %s ;-)") % f)
			ftype = mf.findFile(f)
			if ftype is None:
				if mf.allow_missing:
					continue
				eout.eend(1)
				writemsg(_("\n!!! Missing digest for '%s'\n") % (f,),
					noiselevel=-1)
				return 0
			mf.checkFileHashes(ftype, f, hash_filter=hash_filter)
			eout.eend(0)
	except FileNotFound as e:
		eout.eend(1)
		writemsg(_("\n!!! A file listed in the Manifest could not be found: %s\n") % str(e),
			noiselevel=-1)
		return 0
	except DigestException as e:
		eout.eend(1)
		writemsg(_("\n!!! Digest verification failed:\n"), noiselevel=-1)
		writemsg("!!! %s\n" % e.value[0], noiselevel=-1)
		writemsg(_("!!! Reason: %s\n") % e.value[1], noiselevel=-1)
		writemsg(_("!!! Got: %s\n") % e.value[2], noiselevel=-1)
		writemsg(_("!!! Expected: %s\n") % e.value[3], noiselevel=-1)
		return 0
	if mf.thin or mf.allow_missing:
		# In this case we ignore any missing digests that
		# would otherwise be detected below.
		return 1
	# Make sure that all of the ebuilds are actually listed in the Manifest.
	for f in os.listdir(pkgdir):
		pf = None
		if f[-7:] == '.ebuild':
			pf = f[:-7]
		if pf is not None and not mf.hasFile("EBUILD", f):
			writemsg(_("!!! A file is not listed in the Manifest: '%s'\n") % \
				os.path.join(pkgdir, f), noiselevel=-1)
			if strict:
				return 0
	# epatch will just grab all the patches out of a directory, so we have to
	# make sure there aren't any foreign files that it might grab.
	filesdir = os.path.join(pkgdir, "files")

	for parent, dirs, files in os.walk(filesdir):
		try:
			parent = _unicode_decode(parent,
				encoding=_encodings['fs'], errors='strict')
		except UnicodeDecodeError:
			parent = _unicode_decode(parent,
				encoding=_encodings['fs'], errors='replace')
			writemsg(_("!!! Path contains invalid "
				"character(s) for encoding '%s': '%s'") \
				% (_encodings['fs'], parent), noiselevel=-1)
			if strict:
				return 0
			continue
		for d in dirs:
			d_bytes = d
			try:
				d = _unicode_decode(d,
					encoding=_encodings['fs'], errors='strict')
			except UnicodeDecodeError:
				d = _unicode_decode(d,
					encoding=_encodings['fs'], errors='replace')
				writemsg(_("!!! Path contains invalid "
					"character(s) for encoding '%s': '%s'") \
					% (_encodings['fs'], os.path.join(parent, d)),
					noiselevel=-1)
				if strict:
					return 0
				dirs.remove(d_bytes)
				continue
			if d.startswith(".") or d == "CVS":
				dirs.remove(d_bytes)
		for f in files:
			try:
				f = _unicode_decode(f,
					encoding=_encodings['fs'], errors='strict')
			except UnicodeDecodeError:
				f = _unicode_decode(f,
					encoding=_encodings['fs'], errors='replace')
				if f.startswith("."):
					continue
				f = os.path.join(parent, f)[len(filesdir) + 1:]
				writemsg(_("!!! File name contains invalid "
					"character(s) for encoding '%s': '%s'") \
					% (_encodings['fs'], f), noiselevel=-1)
				if strict:
					return 0
				continue
			if f.startswith("."):
				continue
			f = os.path.join(parent, f)[len(filesdir) + 1:]
			file_type = mf.findFile(f)
			if file_type != "AUX" and not f.startswith("digest-"):
				writemsg(_("!!! A file is not listed in the Manifest: '%s'\n") % \
					os.path.join(filesdir, f), noiselevel=-1)
				if strict:
					return 0
	return 1
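Note: a hedged sketch of calling digestcheck() on its own; the package directory below is hypothetical and must contain the Manifest being verified.

# Hedged usage sketch for digestcheck(); paths are illustrative.
import portage
from portage.package.ebuild.digestcheck import digestcheck

settings = portage.config(clone=portage.settings)
settings["O"] = "/var/db/repos/gentoo/app-misc/foo"  # hypothetical package dir
if digestcheck(["foo-1.0.tar.gz"], settings, strict=True):
    print("Manifest digests verified")
else:
    print("digest verification failed")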
Example #21
 def _elog(self, elog_funcname, lines):
     elog_func = getattr(EOutput(), elog_funcname)
     for line in lines:
         elog_func(line)
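Note: the helper above only resolves an EOutput method by name; the same dispatch without the wrapper looks like this (sketch, message text is made up).

# Equivalent of self._elog('ewarn', lines) without the wrapper.
from portage.output import EOutput

eout = EOutput()
for line in ["QA notice: example message", "second line"]:
    getattr(eout, "ewarn")(line)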
Example #22
import sys

import portage
from portage.versions import vercmp
from portage.output import EOutput
from _emerge.stdout_spinner import stdout_spinner

###############
## Variables ##
###############
quiet = False
force = False
root = '/'
gir_dirs = ['/usr/share/gir-1.0']
girs = {}
girversion = ''
gi = 'gobject-introspection'
settings = portage.settings
eoutput = EOutput()
spinner = stdout_spinner()


###############
## Functions ##
###############
def usage():
    print("""gir-rebuilder: Rebuilds gobject-introspection typelibs and girs in the following directories:
 %s

Usage: %s [--force] [ARGUMENTS TO EMERGE]

Arguments:
  --help            This text
  --force           Force rebuilding of *all* girs and typelibs""" % ("\n ".join(gir_dirs), sys.argv[0]))
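Note: the script imports vercmp to compare package versions; a small hedged demo of its contract (negative when the first version is older, zero when equal, positive when newer).

# Demo of portage.versions.vercmp ordering on sample version strings.
from portage.versions import vercmp

assert vercmp("1.0", "1.1") < 0           # 1.0 is older than 1.1
assert vercmp("1.64.1", "1.64.1") == 0    # equal versions compare as 0
assert vercmp("1.64.1-r1", "1.64.1") > 0  # a revision bump sorts newer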
Example #23
###################
REMOVE_OBSOLETE = True # Remove obsolete ebuilds?
GSTLIB = sys.argv[1]
GSTLIBVER = sys.argv[2]
GSTCOREVER = ''
GSTBASEVER = ''
if len(sys.argv) == 5:
    GSTCOREVER = sys.argv[3]
    GSTBASEVER = sys.argv[4]
elif len(sys.argv) == 4:
    GSTCOREVER = sys.argv[3]

##################
## Parse Config ##
##################
eoutput = EOutput()
PORTDIR = portage.settings["PORTDIR"]
portdb = portage.portdb
settings = portage.settings
portdb.porttrees = [PORTDIR]

GSTPREFIX = 'gst-plugins-'
GSTECLASS = GSTPREFIX + GSTLIB
GSTLIB = 'media-libs/' + GSTPREFIX + GSTLIB
GSTCAT = 'media-plugins'
GSTLIBS = {'media-libs/gstreamer': GSTCOREVER,
           'media-libs/gst-plugins-base': GSTBASEVER,
           GSTLIB: GSTLIBVER,}

###############
## Functions ##
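Note: with portdb.porttrees pinned to PORTDIR as above, the available versions of each tracked library can be enumerated; a hedged sketch (the atom is illustrative).

# List the versions of one of the GSTLIBS atoms visible in the tree.
import portage

portdb = portage.portdb
for cpv in portdb.cp_list("media-libs/gstreamer"):
    print(cpv)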
Example #24
class pkgsys_portage(package_system):
    pkgsysname = 'portage'

    def __init__(self):
        package_system.__init__(self)
        self.eout = EOutput()

    def begin(self, msg):
        self.eout.ebegin(msg)

    def end(self, success_ornot):
        if success_ornot: self.eout.eend(0)
        else: self.eout.eend(1)

    def info(self, msg):
        self.eout.einfo(msg)

    def error(self, msg):
        self.eout.eerror(msg)

    def init_options(self, options):
        options['--portage-distfiles'] = portage.settings['DISTDIR']
        options['--portage-dir'] = os.path.join('/usr/local/portage', pypi_dir)

    def finalize_options(self, options):
        map(lambda diropt: ensure_dir(options[diropt]),
            ['--portage-distfiles', '--portage-dir'])

    def setup_args(self, args):
        if args['license'] is not None and args['license'] != '':
            args['license'] = LicenseConvert(args['name'], args['license'])
        elif 'classifiers' in args and args['classifiers'] is not None:
            for cfline in args['classifiers']:
                cflist = map(lambda cf: cf.strip(), cfline.split('::'))
                if len(cflist) == 3 and \
                        cflist[0] == 'License' and cflist[1] == 'OSI Approved':
                    args['license'] = LicenseConvert(args['name'], cflist[2])
        if args['license'] == '': args['license'] = 'UNKNOWN'

        iuse_arr = ['doc'] + args['extras_require'].keys()
        args['iuse'] = string.join(iuse_arr)

        rdepend = map(lambda dep: DepConvert(dep), args['install_requires'])
        args['rdepend'] = string.join(rdepend, '\n\t')

        ereq = {}
        for k in args['extras_require']:
            if args['extras_require'][k] == []: continue
            ereq[k] = args['extras_require'][k]
        depend = []
        if args['name'] != 'setuptools':
            depend.append('dev-python/setuptools')
        depend.append('doc? ( dev-python/pudge dev-python/buildutils )')
        depend.extend(map(lambda extra: '%s? ( %s )' % \
                              (extra, string.join(map(lambda edep:
                                                          DepConvert(edep),
                                                      ereq[extra]))),
                          ereq.keys()))
        args['depend'] = string.join(depend, '\n\t')
        args['patchlist'] = \
            string.join(map(lambda p: 'epatch "${FILESDIR}/%s"' % p,
                            args['patches']), '\n\t')

        # Setup ebuild.
        fullname = MakeFullname(args['name'], args['version'])
        if 'rev' not in args:
            ebuild_fn = '%s.ebuild' % fullname
        else:
            ebuild_fn = '%s-r%s.ebuild' % (fullname, args['rev'])
        ebuild_dir = os.path.join(args['--portage-dir'],
                                  NameConvert(args['name']))
        ebuild_path = os.path.join(ebuild_dir, ebuild_fn)
        for pkgtype in ['setup.py', 'single.py']:
            if args['pkgtype'] == pkgtype:
                args['template'] = os.path.join('portage', '%s.tmpl' % pkgtype)
                break
        if 'template' not in args:
            raise RuntimeError('Unsupported package type %s' % args['pkgtype'])
        args['output'] = ebuild_path
        args['filedir'] = args['--portage-distfiles']
        args['patchdir'] = os.path.join(ebuild_dir, 'files')

    def process(self, args):
        # Call ebuild ... digest
        try:
            portage._doebuild_manifest_exempt_depend += 1
            pkgdir = os.path.dirname(args['output'])
            fetchlist_dict = portage.FetchlistDict(pkgdir, portage.settings,
                                                   portage.portdb)
            mf = Manifest(pkgdir,
                          args['--portage-distfiles'],
                          fetchlist_dict=fetchlist_dict,
                          manifest1_compat=False)
            mf.create(requiredDistfiles=None,
                      assumeDistHashesSometimes=True,
                      assumeDistHashesAlways=True)
            mf.write()
        finally:
            portage._doebuild_manifest_exempt_depend -= 1
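Note: a hedged sketch of how the begin()/end() wrappers above are meant to bracket an operation, assuming the package_system base class from the surrounding project is importable.

# Usage sketch for the pkgsys_portage status wrappers.
pkgsys = pkgsys_portage()
pkgsys.begin("Writing ebuild")
try:
    # ... generate and write the ebuild here ...
    pkgsys.end(True)
except Exception:
    pkgsys.end(False)
    raise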
Example #25
 def __init__(self):
     package_system.__init__(self)
     self.eout = EOutput()
Example #26
def digestcheck(myfiles, mysettings, strict=False, justmanifest=None, mf=None):
    """
    Verifies checksums. Assumes all files have been downloaded.
    @rtype: int
    @return: 1 on success and 0 on failure
    """

    if justmanifest is not None:
        warnings.warn(
            "The justmanifest parameter of the " +
            "portage.package.ebuild.digestcheck.digestcheck()" +
            " function is now unused.",
            DeprecationWarning,
            stacklevel=2,
        )
        justmanifest = None

    if mysettings.get("EBUILD_SKIP_MANIFEST") == "1":
        return 1
    pkgdir = mysettings["O"]
    hash_filter = _hash_filter(mysettings.get("PORTAGE_CHECKSUM_FILTER", ""))
    if hash_filter.transparent:
        hash_filter = None
    if mf is None:
        mf = mysettings.repositories.get_repo_for_location(
            os.path.dirname(os.path.dirname(pkgdir)))
        mf = mf.load_manifest(pkgdir, mysettings["DISTDIR"])
    eout = EOutput()
    eout.quiet = mysettings.get("PORTAGE_QUIET", None) == "1"
    try:
        if not mf.thin and strict and "PORTAGE_PARALLEL_FETCHONLY" not in mysettings:
            if mf.fhashdict.get("EBUILD"):
                eout.ebegin(_("checking ebuild checksums ;-)"))
                mf.checkTypeHashes("EBUILD", hash_filter=hash_filter)
                eout.eend(0)
            if mf.fhashdict.get("AUX"):
                eout.ebegin(_("checking auxfile checksums ;-)"))
                mf.checkTypeHashes("AUX", hash_filter=hash_filter)
                eout.eend(0)
            if mf.strict_misc_digests and mf.fhashdict.get("MISC"):
                eout.ebegin(_("checking miscfile checksums ;-)"))
                mf.checkTypeHashes("MISC",
                                   ignoreMissingFiles=True,
                                   hash_filter=hash_filter)
                eout.eend(0)
        for f in myfiles:
            eout.ebegin(_("checking %s ;-)") % f)
            ftype = mf.findFile(f)
            if ftype is None:
                if mf.allow_missing:
                    continue
                eout.eend(1)
                writemsg(_("\n!!! Missing digest for '%s'\n") % (f, ),
                         noiselevel=-1)
                return 0
            mf.checkFileHashes(ftype, f, hash_filter=hash_filter)
            eout.eend(0)
    except FileNotFound as e:
        eout.eend(1)
        writemsg(
            _("\n!!! A file listed in the Manifest could not be found: %s\n") %
            str(e),
            noiselevel=-1,
        )
        return 0
    except DigestException as e:
        eout.eend(1)
        writemsg(_("\n!!! Digest verification failed:\n"), noiselevel=-1)
        writemsg("!!! %s\n" % e.value[0], noiselevel=-1)
        writemsg(_("!!! Reason: %s\n") % e.value[1], noiselevel=-1)
        writemsg(_("!!! Got: %s\n") % e.value[2], noiselevel=-1)
        writemsg(_("!!! Expected: %s\n") % e.value[3], noiselevel=-1)
        return 0
    if mf.thin or mf.allow_missing:
        # In this case we ignore any missing digests that
        # would otherwise be detected below.
        return 1
    # Make sure that all of the ebuilds are actually listed in the Manifest.
    for f in os.listdir(pkgdir):
        pf = None
        if f[-7:] == ".ebuild":
            pf = f[:-7]
        if pf is not None and not mf.hasFile("EBUILD", f):
            writemsg(
                _("!!! A file is not listed in the Manifest: '%s'\n") %
                os.path.join(pkgdir, f),
                noiselevel=-1,
            )
            if strict:
                return 0
    # epatch will just grab all the patches out of a directory, so we have to
    # make sure there aren't any foreign files that it might grab.
    filesdir = os.path.join(pkgdir, "files")

    for parent, dirs, files in os.walk(filesdir):
        try:
            parent = _unicode_decode(parent,
                                     encoding=_encodings["fs"],
                                     errors="strict")
        except UnicodeDecodeError:
            parent = _unicode_decode(parent,
                                     encoding=_encodings["fs"],
                                     errors="replace")
            writemsg(
                _("!!! Path contains invalid "
                  "character(s) for encoding '%s': '%s'") %
                (_encodings["fs"], parent),
                noiselevel=-1,
            )
            if strict:
                return 0
            continue
        for d in dirs:
            d_bytes = d
            try:
                d = _unicode_decode(d,
                                    encoding=_encodings["fs"],
                                    errors="strict")
            except UnicodeDecodeError:
                d = _unicode_decode(d,
                                    encoding=_encodings["fs"],
                                    errors="replace")
                writemsg(
                    _("!!! Path contains invalid "
                      "character(s) for encoding '%s': '%s'") %
                    (_encodings["fs"], os.path.join(parent, d)),
                    noiselevel=-1,
                )
                if strict:
                    return 0
                dirs.remove(d_bytes)
                continue
            if d.startswith(".") or d == "CVS":
                dirs.remove(d_bytes)
        for f in files:
            try:
                f = _unicode_decode(f,
                                    encoding=_encodings["fs"],
                                    errors="strict")
            except UnicodeDecodeError:
                f = _unicode_decode(f,
                                    encoding=_encodings["fs"],
                                    errors="replace")
                if f.startswith("."):
                    continue
                f = os.path.join(parent, f)[len(filesdir) + 1:]
                writemsg(
                    _("!!! File name contains invalid "
                      "character(s) for encoding '%s': '%s'") %
                    (_encodings["fs"], f),
                    noiselevel=-1,
                )
                if strict:
                    return 0
                continue
            if f.startswith("."):
                continue
            f = os.path.join(parent, f)[len(filesdir) + 1:]
            file_type = mf.findFile(f)
            if file_type != "AUX" and not f.startswith("digest-"):
                writemsg(
                    _("!!! A file is not listed in the Manifest: '%s'\n") %
                    os.path.join(filesdir, f),
                    noiselevel=-1,
                )
                if strict:
                    return 0
    return 1
Example #27
###################
REMOVE_OBSOLETE = True  # Remove obsolete ebuilds?
GSTLIB = sys.argv[1]
GSTLIBVER = sys.argv[2]
GSTCOREVER = ''
GSTBASEVER = ''
if len(sys.argv) == 5:
    GSTCOREVER = sys.argv[3]
    GSTBASEVER = sys.argv[4]
elif len(sys.argv) == 4:
    GSTCOREVER = sys.argv[3]

##################
## Parse Config ##
##################
eoutput = EOutput()
PORTDIR = portage.settings["PORTDIR"]
portdb = portage.portdb
settings = portage.settings
portdb.porttrees = [PORTDIR]

GSTPREFIX = 'gst-plugins-'
GSTECLASS = GSTPREFIX + GSTLIB
GSTLIB = 'media-libs/' + GSTPREFIX + GSTLIB
GSTCAT = 'media-plugins'
GSTLIBS = {
    'media-libs/gstreamer': GSTCOREVER,
    'media-libs/gst-plugins-base': GSTBASEVER,
    GSTLIB: GSTLIBVER,
}
Example #28
def fetch(myuris, mysettings, listonly=0, fetchonly=0,
	locks_in_subdir=".locks", use_locks=1, try_mirrors=1, digests=None,
	allow_missing_digests=True):
	"fetch files.  Will use digest file if available."

	if not myuris:
		return 1

	features = mysettings.features
	restrict = mysettings.get("PORTAGE_RESTRICT","").split()

	userfetch = secpass >= 2 and "userfetch" in features
	userpriv = secpass >= 2 and "userpriv" in features

	# 'nomirror' is bad/negative logic. You restrict mirroring, not no-mirroring.
	restrict_mirror = "mirror" in restrict or "nomirror" in restrict
	if restrict_mirror:
		if ("mirror" in features) and ("lmirror" not in features):
			# lmirror should allow you to bypass mirror restrictions.
			# XXX: This is not a good thing, and is temporary at best.
			print(_(">>> \"mirror\" mode desired and \"mirror\" restriction found; skipping fetch."))
			return 1

	# Generally, downloading the same file repeatedly from
	# every single available mirror is a waste of bandwidth
	# and time, so there needs to be a cap.
	checksum_failure_max_tries = 5
	v = checksum_failure_max_tries
	try:
		v = int(mysettings.get("PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS",
			checksum_failure_max_tries))
	except (ValueError, OverflowError):
		writemsg(_("!!! Variable PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS"
			" contains non-integer value: '%s'\n") % \
			mysettings["PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS"], noiselevel=-1)
		writemsg(_("!!! Using PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS "
			"default value: %s\n") % checksum_failure_max_tries,
			noiselevel=-1)
		v = checksum_failure_max_tries
	if v < 1:
		writemsg(_("!!! Variable PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS"
			" contains value less than 1: '%s'\n") % v, noiselevel=-1)
		writemsg(_("!!! Using PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS "
			"default value: %s\n") % checksum_failure_max_tries,
			noiselevel=-1)
		v = checksum_failure_max_tries
	checksum_failure_max_tries = v
	del v

	fetch_resume_size_default = "350K"
	fetch_resume_size = mysettings.get("PORTAGE_FETCH_RESUME_MIN_SIZE")
	if fetch_resume_size is not None:
		fetch_resume_size = "".join(fetch_resume_size.split())
		if not fetch_resume_size:
			# If it's undefined or empty, silently use the default.
			fetch_resume_size = fetch_resume_size_default
		match = _fetch_resume_size_re.match(fetch_resume_size)
		if match is None or \
			(match.group(2).upper() not in _size_suffix_map):
			writemsg(_("!!! Variable PORTAGE_FETCH_RESUME_MIN_SIZE"
				" contains an unrecognized format: '%s'\n") % \
				mysettings["PORTAGE_FETCH_RESUME_MIN_SIZE"], noiselevel=-1)
			writemsg(_("!!! Using PORTAGE_FETCH_RESUME_MIN_SIZE "
				"default value: %s\n") % fetch_resume_size_default,
				noiselevel=-1)
			fetch_resume_size = None
	if fetch_resume_size is None:
		fetch_resume_size = fetch_resume_size_default
		match = _fetch_resume_size_re.match(fetch_resume_size)
	fetch_resume_size = int(match.group(1)) * \
		2 ** _size_suffix_map[match.group(2).upper()]

	# Behave like the package has RESTRICT="primaryuri" after a
	# couple of checksum failures, to increase the probability
	# of success before checksum_failure_max_tries is reached.
	checksum_failure_primaryuri = 2
	thirdpartymirrors = mysettings.thirdpartymirrors()

	# In the background parallel-fetch process, it's safe to skip checksum
	# verification of pre-existing files in $DISTDIR that have the correct
	# file size. The parent process will verify their checksums prior to
	# the unpack phase.

	parallel_fetchonly = "PORTAGE_PARALLEL_FETCHONLY" in mysettings
	if parallel_fetchonly:
		fetchonly = 1

	check_config_instance(mysettings)

	custommirrors = grabdict(os.path.join(mysettings["PORTAGE_CONFIGROOT"],
		CUSTOM_MIRRORS_FILE), recursive=1)

	mymirrors=[]

	if listonly or ("distlocks" not in features):
		use_locks = 0

	fetch_to_ro = 0
	if "skiprocheck" in features:
		fetch_to_ro = 1

	if not os.access(mysettings["DISTDIR"],os.W_OK) and fetch_to_ro:
		if use_locks:
			writemsg(colorize("BAD",
				_("!!! For fetching to a read-only filesystem, "
				"locking should be turned off.\n")), noiselevel=-1)
			writemsg(_("!!! This can be done by adding -distlocks to "
				"FEATURES in /etc/make.conf\n"), noiselevel=-1)
#			use_locks = 0

	# local mirrors are always added
	if "local" in custommirrors:
		mymirrors += custommirrors["local"]

	if restrict_mirror:
		# We don't add any mirrors.
		pass
	else:
		if try_mirrors:
			mymirrors += [x.rstrip("/") for x in mysettings["GENTOO_MIRRORS"].split() if x]

	hash_filter = _hash_filter(mysettings.get("PORTAGE_CHECKSUM_FILTER", ""))
	if hash_filter.transparent:
		hash_filter = None
	skip_manifest = mysettings.get("EBUILD_SKIP_MANIFEST") == "1"
	if skip_manifest:
		allow_missing_digests = True
	pkgdir = mysettings.get("O")
	if digests is None and not (pkgdir is None or skip_manifest):
		mydigests = mysettings.repositories.get_repo_for_location(
			os.path.dirname(os.path.dirname(pkgdir))).load_manifest(
			pkgdir, mysettings["DISTDIR"]).getTypeDigests("DIST")
	elif digests is None or skip_manifest:
		# no digests because fetch was not called for a specific package
		mydigests = {}
	else:
		mydigests = digests

	ro_distdirs = [x for x in \
		shlex_split(mysettings.get("PORTAGE_RO_DISTDIRS", "")) \
		if os.path.isdir(x)]

	fsmirrors = []
	for x in range(len(mymirrors)-1,-1,-1):
		if mymirrors[x] and mymirrors[x][0]=='/':
			fsmirrors += [mymirrors[x]]
			del mymirrors[x]

	restrict_fetch = "fetch" in restrict
	force_mirror = "force-mirror" in features and not restrict_mirror
	custom_local_mirrors = custommirrors.get("local", [])
	if restrict_fetch:
		# With fetch restriction, a normal uri may only be fetched from
		# custom local mirrors (if available).  A mirror:// uri may also
		# be fetched from specific mirrors (effectively overriding fetch
		# restriction, but only for specific mirrors).
		locations = custom_local_mirrors
	else:
		locations = mymirrors

	file_uri_tuples = []
	# Check for 'items' attribute since OrderedDict is not a dict.
	if hasattr(myuris, 'items'):
		for myfile, uri_set in myuris.items():
			for myuri in uri_set:
				file_uri_tuples.append((myfile, myuri))
	else:
		for myuri in myuris:
			file_uri_tuples.append((os.path.basename(myuri), myuri))

	filedict = OrderedDict()
	primaryuri_dict = {}
	thirdpartymirror_uris = {}
	for myfile, myuri in file_uri_tuples:
		if myfile not in filedict:
			filedict[myfile]=[]
			for y in range(0,len(locations)):
				filedict[myfile].append(locations[y]+"/distfiles/"+myfile)
		if myuri[:9]=="mirror://":
			eidx = myuri.find("/", 9)
			if eidx != -1:
				mirrorname = myuri[9:eidx]
				path = myuri[eidx+1:]

				# Try user-defined mirrors first
				if mirrorname in custommirrors:
					for cmirr in custommirrors[mirrorname]:
						filedict[myfile].append(
							cmirr.rstrip("/") + "/" + path)

				# now try the official mirrors
				if mirrorname in thirdpartymirrors:
					uris = [locmirr.rstrip("/") + "/" + path \
						for locmirr in thirdpartymirrors[mirrorname]]
					random.shuffle(uris)
					filedict[myfile].extend(uris)
					thirdpartymirror_uris.setdefault(myfile, []).extend(uris)

				if not filedict[myfile]:
					writemsg(_("No known mirror by the name: %s\n") % (mirrorname))
			else:
				writemsg(_("Invalid mirror definition in SRC_URI:\n"), noiselevel=-1)
				writemsg("  %s\n" % (myuri), noiselevel=-1)
		else:
			if restrict_fetch or force_mirror:
				# Only fetching from specific mirrors is allowed.
				continue
			primaryuris = primaryuri_dict.get(myfile)
			if primaryuris is None:
				primaryuris = []
				primaryuri_dict[myfile] = primaryuris
			primaryuris.append(myuri)

	# Order primaryuri_dict values to match that in SRC_URI.
	for uris in primaryuri_dict.values():
		uris.reverse()

	# Prefer thirdpartymirrors over normal mirrors in cases when
	# the file does not yet exist on the normal mirrors.
	for myfile, uris in thirdpartymirror_uris.items():
		primaryuri_dict.setdefault(myfile, []).extend(uris)

	# Now merge primaryuri values into filedict (includes mirrors
	# explicitly referenced in SRC_URI).
	if "primaryuri" in restrict:
		for myfile, uris in filedict.items():
			filedict[myfile] = primaryuri_dict.get(myfile, []) + uris
	else:
		for myfile in filedict:
			filedict[myfile] += primaryuri_dict.get(myfile, [])

	can_fetch=True

	if listonly:
		can_fetch = False

	if can_fetch and not fetch_to_ro:
		global _userpriv_test_write_file_cache
		dirmode  = 0o070
		filemode =   0o60
		modemask =    0o2
		dir_gid = portage_gid
		if "FAKED_MODE" in mysettings:
			# When inside fakeroot, directories with portage's gid appear
			# to have root's gid. Therefore, use root's gid instead of
			# portage's gid to avoid spurious permissions adjustments
			# when inside fakeroot.
			dir_gid = 0
		distdir_dirs = [""]
		try:
			
			for x in distdir_dirs:
				mydir = os.path.join(mysettings["DISTDIR"], x)
				write_test_file = os.path.join(
					mydir, ".__portage_test_write__")

				try:
					st = os.stat(mydir)
				except OSError:
					st = None

				if st is not None and stat.S_ISDIR(st.st_mode):
					if not (userfetch or userpriv):
						continue
					if _userpriv_test_write_file(mysettings, write_test_file):
						continue

				_userpriv_test_write_file_cache.pop(write_test_file, None)
				if ensure_dirs(mydir, gid=dir_gid, mode=dirmode, mask=modemask):
					if st is None:
						# The directory has just been created
						# and therefore it must be empty.
						continue
					writemsg(_("Adjusting permissions recursively: '%s'\n") % mydir,
						noiselevel=-1)
					def onerror(e):
						raise # bail out on the first error that occurs during recursion
					if not apply_recursive_permissions(mydir,
						gid=dir_gid, dirmode=dirmode, dirmask=modemask,
						filemode=filemode, filemask=modemask, onerror=onerror):
						raise OperationNotPermitted(
							_("Failed to apply recursive permissions for the portage group."))
		except PortageException as e:
			if not os.path.isdir(mysettings["DISTDIR"]):
				writemsg("!!! %s\n" % str(e), noiselevel=-1)
				writemsg(_("!!! Directory Not Found: DISTDIR='%s'\n") % mysettings["DISTDIR"], noiselevel=-1)
				writemsg(_("!!! Fetching will fail!\n"), noiselevel=-1)

	if can_fetch and \
		not fetch_to_ro and \
		not os.access(mysettings["DISTDIR"], os.W_OK):
		writemsg(_("!!! No write access to '%s'\n") % mysettings["DISTDIR"],
			noiselevel=-1)
		can_fetch = False

	distdir_writable = can_fetch and not fetch_to_ro
	failed_files = set()
	restrict_fetch_msg = False

	for myfile in filedict:
		"""
		fetched  status
		0        nonexistent
		1        partially downloaded
		2        completely downloaded
		"""
		fetched = 0

		orig_digests = mydigests.get(myfile, {})

		if not (allow_missing_digests or listonly):
			verifiable_hash_types = set(orig_digests).intersection(hashfunc_map)
			verifiable_hash_types.discard("size")
			if not verifiable_hash_types:
				expected = set(hashfunc_map)
				expected.discard("size")
				expected = " ".join(sorted(expected))
				got = set(orig_digests)
				got.discard("size")
				got = " ".join(sorted(got))
				reason = (_("Insufficient data for checksum verification"),
					got, expected)
				writemsg(_("!!! Fetched file: %s VERIFY FAILED!\n") % myfile,
					noiselevel=-1)
				writemsg(_("!!! Reason: %s\n") % reason[0],
					noiselevel=-1)
				writemsg(_("!!! Got:      %s\n!!! Expected: %s\n") % \
					(reason[1], reason[2]), noiselevel=-1)

				if fetchonly:
					failed_files.add(myfile)
					continue
				else:
					return 0

		size = orig_digests.get("size")
		if size == 0:
			# Zero-byte distfiles are always invalid, so discard their digests.
			del mydigests[myfile]
			orig_digests.clear()
			size = None
		pruned_digests = orig_digests
		if parallel_fetchonly:
			pruned_digests = {}
			if size is not None:
				pruned_digests["size"] = size

		myfile_path = os.path.join(mysettings["DISTDIR"], myfile)
		has_space = True
		has_space_superuser = True
		file_lock = None
		if listonly:
			writemsg_stdout("\n", noiselevel=-1)
		else:
			# check if there is enough space in DISTDIR to completely store myfile
			# overestimate the filesize so we aren't bitten by FS overhead
			vfs_stat = None
			if size is not None and hasattr(os, "statvfs"):
				try:
					vfs_stat = os.statvfs(mysettings["DISTDIR"])
				except OSError as e:
					writemsg_level("!!! statvfs('%s'): %s\n" %
						(mysettings["DISTDIR"], e),
						noiselevel=-1, level=logging.ERROR)
					del e

			if vfs_stat is not None:
				try:
					mysize = os.stat(myfile_path).st_size
				except OSError as e:
					if e.errno not in (errno.ENOENT, errno.ESTALE):
						raise
					del e
					mysize = 0
				if (size - mysize + vfs_stat.f_bsize) >= \
					(vfs_stat.f_bsize * vfs_stat.f_bavail):

					if (size - mysize + vfs_stat.f_bsize) >= \
						(vfs_stat.f_bsize * vfs_stat.f_bfree):
						has_space_superuser = False

					if not has_space_superuser:
						has_space = False
					elif secpass < 2:
						has_space = False
					elif userfetch:
						has_space = False

			if distdir_writable and use_locks:

				lock_kwargs = {}
				if fetchonly:
					lock_kwargs["flags"] = os.O_NONBLOCK

				try:
					file_lock = lockfile(myfile_path,
						wantnewlockfile=1, **lock_kwargs)
				except TryAgain:
					writemsg(_(">>> File '%s' is already locked by "
						"another fetcher. Continuing...\n") % myfile,
						noiselevel=-1)
					continue
		try:
			if not listonly:

				eout = EOutput()
				eout.quiet = mysettings.get("PORTAGE_QUIET") == "1"
				match, mystat = _check_distfile(
					myfile_path, pruned_digests, eout, hash_filter=hash_filter)
				if match:
					# Skip permission adjustment for symlinks, since we don't
					# want to modify anything outside of the primary DISTDIR,
					# and symlinks typically point to PORTAGE_RO_DISTDIRS.
					if distdir_writable and not os.path.islink(myfile_path):
						try:
							apply_secpass_permissions(myfile_path,
								gid=portage_gid, mode=0o664, mask=0o2,
								stat_cached=mystat)
						except PortageException as e:
							if not os.access(myfile_path, os.R_OK):
								writemsg(_("!!! Failed to adjust permissions:"
									" %s\n") % str(e), noiselevel=-1)
							del e
					continue

				if distdir_writable and mystat is None:
					# Remove broken symlinks if necessary.
					try:
						os.unlink(myfile_path)
					except OSError:
						pass

				if mystat is not None:
					if stat.S_ISDIR(mystat.st_mode):
						writemsg_level(
							_("!!! Unable to fetch file since "
							"a directory is in the way: \n"
							"!!!   %s\n") % myfile_path,
							level=logging.ERROR, noiselevel=-1)
						return 0

					if mystat.st_size == 0:
						if distdir_writable:
							try:
								os.unlink(myfile_path)
							except OSError:
								pass
					elif distdir_writable:
						if mystat.st_size < fetch_resume_size and \
							mystat.st_size < size:
							# If the file already exists and the size does not
							# match the existing digests, it may be that the
							# user is attempting to update the digest. In this
							# case, the digestgen() function will advise the
							# user to use `ebuild --force foo.ebuild manifest`
							# in order to force the old digests to be replaced.
							# Since the user may want to keep this file, rename
							# it instead of deleting it.
							writemsg(_(">>> Renaming distfile with size "
								"%d (smaller than " "PORTAGE_FETCH_RESU"
								"ME_MIN_SIZE)\n") % mystat.st_size)
							temp_filename = \
								_checksum_failure_temp_file(
								mysettings["DISTDIR"], myfile)
							writemsg_stdout(_("Refetching... "
								"File renamed to '%s'\n\n") % \
								temp_filename, noiselevel=-1)
						elif mystat.st_size >= size:
							temp_filename = \
								_checksum_failure_temp_file(
								mysettings["DISTDIR"], myfile)
							writemsg_stdout(_("Refetching... "
								"File renamed to '%s'\n\n") % \
								temp_filename, noiselevel=-1)

				if distdir_writable and ro_distdirs:
					readonly_file = None
					for x in ro_distdirs:
						filename = os.path.join(x, myfile)
						match, mystat = _check_distfile(
							filename, pruned_digests, eout, hash_filter=hash_filter)
						if match:
							readonly_file = filename
							break
					if readonly_file is not None:
						try:
							os.unlink(myfile_path)
						except OSError as e:
							if e.errno not in (errno.ENOENT, errno.ESTALE):
								raise
							del e
						os.symlink(readonly_file, myfile_path)
						continue

				# this message is shown only after we know that
				# the file is not already fetched
				if not has_space:
					writemsg(_("!!! Insufficient space to store %s in %s\n") % \
						(myfile, mysettings["DISTDIR"]), noiselevel=-1)

					if has_space_superuser:
						writemsg(_("!!! Insufficient privileges to use "
							"remaining space.\n"), noiselevel=-1)
						if userfetch:
							writemsg(_("!!! You may set FEATURES=\"-userfetch\""
								" in /etc/make.conf in order to fetch with\n"
								"!!! superuser privileges.\n"), noiselevel=-1)

				if fsmirrors and not os.path.exists(myfile_path) and has_space:
					for mydir in fsmirrors:
						mirror_file = os.path.join(mydir, myfile)
						try:
							shutil.copyfile(mirror_file, myfile_path)
							writemsg(_("Local mirror has file: %s\n") % myfile)
							break
						except (IOError, OSError) as e:
							if e.errno not in (errno.ENOENT, errno.ESTALE):
								raise
							del e

				try:
					mystat = os.stat(myfile_path)
				except OSError as e:
					if e.errno not in (errno.ENOENT, errno.ESTALE):
						raise
					del e
				else:
					# Skip permission adjustment for symlinks, since we don't
					# want to modify anything outside of the primary DISTDIR,
					# and symlinks typically point to PORTAGE_RO_DISTDIRS.
					if not os.path.islink(myfile_path):
						try:
							apply_secpass_permissions(myfile_path,
								gid=portage_gid, mode=0o664, mask=0o2,
								stat_cached=mystat)
						except PortageException as e:
							if not os.access(myfile_path, os.R_OK):
								writemsg(_("!!! Failed to adjust permissions:"
									" %s\n") % (e,), noiselevel=-1)

					# If the file is empty then it's obviously invalid. Remove
					# the empty file and try to download if possible.
					if mystat.st_size == 0:
						if distdir_writable:
							try:
								os.unlink(myfile_path)
							except EnvironmentError:
								pass
					elif myfile not in mydigests:
						# We don't have a digest, but the file exists.  We must
						# assume that it is fully downloaded.
						continue
					else:
						if mystat.st_size < mydigests[myfile]["size"] and \
							not restrict_fetch:
							fetched = 1 # Try to resume this download.
						elif parallel_fetchonly and \
							mystat.st_size == mydigests[myfile]["size"]:
							eout = EOutput()
							eout.quiet = \
								mysettings.get("PORTAGE_QUIET") == "1"
							eout.ebegin(
								"%s size ;-)" % (myfile, ))
							eout.eend(0)
							continue
						else:
							digests = _filter_unaccelarated_hashes(mydigests[myfile])
							if hash_filter is not None:
								digests = _apply_hash_filter(digests, hash_filter)
							verified_ok, reason = verify_all(myfile_path, digests)
							if not verified_ok:
								writemsg(_("!!! Previously fetched"
									" file: '%s'\n") % myfile, noiselevel=-1)
								writemsg(_("!!! Reason: %s\n") % reason[0],
									noiselevel=-1)
								writemsg(_("!!! Got:      %s\n"
									"!!! Expected: %s\n") % \
									(reason[1], reason[2]), noiselevel=-1)
								if reason[0] == _("Insufficient data for checksum verification"):
									return 0
								if distdir_writable:
									temp_filename = \
										_checksum_failure_temp_file(
										mysettings["DISTDIR"], myfile)
									writemsg_stdout(_("Refetching... "
										"File renamed to '%s'\n\n") % \
										temp_filename, noiselevel=-1)
							else:
								eout = EOutput()
								eout.quiet = \
									mysettings.get("PORTAGE_QUIET", None) == "1"
								if digests:
									digests = list(digests)
									digests.sort()
									eout.ebegin(
										"%s %s ;-)" % (myfile, " ".join(digests)))
									eout.eend(0)
								continue # fetch any remaining files

			# Create a reversed list since that is optimal for list.pop().
			uri_list = filedict[myfile][:]
			uri_list.reverse()
			checksum_failure_count = 0
			tried_locations = set()
			while uri_list:
				loc = uri_list.pop()
				# Eliminate duplicates here in case we've switched to
				# "primaryuri" mode on the fly due to a checksum failure.
				if loc in tried_locations:
					continue
				tried_locations.add(loc)
				if listonly:
					writemsg_stdout(loc+" ", noiselevel=-1)
					continue
				# allow different fetchcommands per protocol
				protocol = loc[0:loc.find("://")]

				global_config_path = GLOBAL_CONFIG_PATH
				if mysettings['EPREFIX']:
					global_config_path = os.path.join(mysettings['EPREFIX'],
							GLOBAL_CONFIG_PATH.lstrip(os.sep))

				missing_file_param = False
				fetchcommand_var = "FETCHCOMMAND_" + protocol.upper()
				fetchcommand = mysettings.get(fetchcommand_var)
				if fetchcommand is None:
					fetchcommand_var = "FETCHCOMMAND"
					fetchcommand = mysettings.get(fetchcommand_var)
					if fetchcommand is None:
						writemsg_level(
							_("!!! %s is unset. It should "
							"have been defined in\n!!! %s/make.globals.\n") \
							% (fetchcommand_var, global_config_path),
							level=logging.ERROR, noiselevel=-1)
						return 0
				if "${FILE}" not in fetchcommand:
					writemsg_level(
						_("!!! %s does not contain the required ${FILE}"
						" parameter.\n") % fetchcommand_var,
						level=logging.ERROR, noiselevel=-1)
					missing_file_param = True

				resumecommand_var = "RESUMECOMMAND_" + protocol.upper()
				resumecommand = mysettings.get(resumecommand_var)
				if resumecommand is None:
					resumecommand_var = "RESUMECOMMAND"
					resumecommand = mysettings.get(resumecommand_var)
					if resumecommand is None:
						writemsg_level(
							_("!!! %s is unset. It should "
							"have been defined in\n!!! %s/make.globals.\n") \
							% (resumecommand_var, global_config_path),
							level=logging.ERROR, noiselevel=-1)
						return 0
				if "${FILE}" not in resumecommand:
					writemsg_level(
						_("!!! %s does not contain the required ${FILE}"
						" parameter.\n") % resumecommand_var,
						level=logging.ERROR, noiselevel=-1)
					missing_file_param = True

				if missing_file_param:
					writemsg_level(
						_("!!! Refer to the make.conf(5) man page for "
						"information about how to\n!!! correctly specify "
						"FETCHCOMMAND and RESUMECOMMAND.\n"),
						level=logging.ERROR, noiselevel=-1)
					if myfile != os.path.basename(loc):
						return 0

				if not can_fetch:
					if fetched != 2:
						try:
							mysize = os.stat(myfile_path).st_size
						except OSError as e:
							if e.errno not in (errno.ENOENT, errno.ESTALE):
								raise
							del e
							mysize = 0

						if mysize == 0:
							writemsg(_("!!! File %s isn't fetched and we are unable to get it.\n") % myfile,
								noiselevel=-1)
						elif size is None or size > mysize:
							writemsg(_("!!! File %s isn't fully fetched and we are unable to complete it.\n") % myfile,
								noiselevel=-1)
						else:
							writemsg(_("!!! File %s has an incorrect size, "
								"but we are unable to retry.\n") % myfile, noiselevel=-1)
						return 0
					else:
						continue

				if fetched != 2 and has_space:
					#we either need to resume or start the download
					if fetched == 1:
						try:
							mystat = os.stat(myfile_path)
						except OSError as e:
							if e.errno not in (errno.ENOENT, errno.ESTALE):
								raise
							del e
							fetched = 0
						else:
							if mystat.st_size < fetch_resume_size:
								writemsg(_(">>> Deleting distfile with size "
									"%d (smaller than " "PORTAGE_FETCH_RESU"
									"ME_MIN_SIZE)\n") % mystat.st_size)
								try:
									os.unlink(myfile_path)
								except OSError as e:
									if e.errno not in \
										(errno.ENOENT, errno.ESTALE):
										raise
									del e
								fetched = 0
					if fetched == 1:
						#resume mode:
						writemsg(_(">>> Resuming download...\n"))
						locfetch=resumecommand
						command_var = resumecommand_var
					else:
						#normal mode:
						locfetch=fetchcommand
						command_var = fetchcommand_var
					writemsg_stdout(_(">>> Downloading '%s'\n") % \
						_hide_url_passwd(loc))
					variables = {
						"DISTDIR": mysettings["DISTDIR"],
						"URI":     loc,
						"FILE":    myfile
					}

					myfetch = shlex_split(locfetch)
					myfetch = [varexpand(x, mydict=variables) for x in myfetch]
					myret = -1
					try:

						myret = _spawn_fetch(mysettings, myfetch)

					finally:
						try:
							apply_secpass_permissions(myfile_path,
								gid=portage_gid, mode=0o664, mask=0o2)
						except FileNotFound:
							pass
						except PortageException as e:
							if not os.access(myfile_path, os.R_OK):
								writemsg(_("!!! Failed to adjust permissions:"
									" %s\n") % str(e), noiselevel=-1)
							del e

					# If the file is empty then it's obviously invalid.  Don't
					# trust the return value from the fetcher.  Remove the
					# empty file and try to download again.
					try:
						if os.stat(myfile_path).st_size == 0:
							os.unlink(myfile_path)
							fetched = 0
							continue
					except EnvironmentError:
						pass

					if mydigests is not None and myfile in mydigests:
						try:
							mystat = os.stat(myfile_path)
						except OSError as e:
							if e.errno not in (errno.ENOENT, errno.ESTALE):
								raise
							del e
							fetched = 0
						else:

							if stat.S_ISDIR(mystat.st_mode):
								# This can happen if FETCHCOMMAND erroneously
								# contains wget's -P option where it should
								# instead have -O.
								writemsg_level(
									_("!!! The command specified in the "
									"%s variable appears to have\n!!! "
									"created a directory instead of a "
									"normal file.\n") % command_var,
									level=logging.ERROR, noiselevel=-1)
								writemsg_level(
									_("!!! Refer to the make.conf(5) "
									"man page for information about how "
									"to\n!!! correctly specify "
									"FETCHCOMMAND and RESUMECOMMAND.\n"),
									level=logging.ERROR, noiselevel=-1)
								return 0

							# No exception, so the file exists. Let digestcheck()
							# report appropriately for size or checksum errors.

							# If the fetcher reported success and the file is
							# too small, it's probably because the digest is
							# bad (upstream changed the distfile).  In this
							# case we don't want to attempt to resume. Show a
							# digest verification failure so that the user gets
							# a clue about what just happened.
							if myret != os.EX_OK and \
								mystat.st_size < mydigests[myfile]["size"]:
								# Fetch failed... Try the next one... Kill 404 files though.
								if mystat[stat.ST_SIZE] < 100000 and len(myfile) > 4 and \
										not (myfile.endswith(".html") or myfile.endswith(".htm")):
									html404 = re.compile("<title>.*(not found|404).*</title>", re.I | re.M)
									with io.open(
										_unicode_encode(myfile_path,
										encoding=_encodings['fs'], errors='strict'),
										mode='r', encoding=_encodings['content'], errors='replace'
										) as f:
										if html404.search(f.read()):
											try:
												os.unlink(mysettings["DISTDIR"]+"/"+myfile)
												writemsg(_(">>> Deleting invalid distfile. (Improper 404 redirect from server.)\n"))
												fetched = 0
												continue
											except (IOError, OSError):
												pass
								fetched = 1
								continue
							if True:
								# File is the correct size--check the checksums for the fetched
								# file NOW, for those users who don't have a stable/continuous
								# net connection. This way we have a chance to try to download
								# from another mirror...
								digests = _filter_unaccelarated_hashes(mydigests[myfile])
								if hash_filter is not None:
									digests = _apply_hash_filter(digests, hash_filter)
								verified_ok, reason = verify_all(myfile_path, digests)
								if not verified_ok:
									writemsg(_("!!! Fetched file: %s VERIFY FAILED!\n") % myfile,
										noiselevel=-1)
									writemsg(_("!!! Reason: %s\n") % reason[0],
										noiselevel=-1)
									writemsg(_("!!! Got:      %s\n!!! Expected: %s\n") % \
										(reason[1], reason[2]), noiselevel=-1)
									if reason[0] == _("Insufficient data for checksum verification"):
										return 0
									temp_filename = \
										_checksum_failure_temp_file(
										mysettings["DISTDIR"], myfile)
									writemsg_stdout(_("Refetching... "
										"File renamed to '%s'\n\n") % \
										temp_filename, noiselevel=-1)
									fetched=0
									checksum_failure_count += 1
									if checksum_failure_count == \
										checksum_failure_primaryuri:
										# Switch to "primaryuri" mode in order
										# to increase the probability of success.
										primaryuris = \
											primaryuri_dict.get(myfile)
										if primaryuris:
											uri_list.extend(
												reversed(primaryuris))
									if checksum_failure_count >= \
										checksum_failure_max_tries:
										break
								else:
									eout = EOutput()
									eout.quiet = mysettings.get("PORTAGE_QUIET", None) == "1"
									if digests:
										eout.ebegin("%s %s ;-)" % \
											(myfile, " ".join(sorted(digests))))
										eout.eend(0)
									fetched=2
									break
					else:
						if not myret:
							fetched=2
							break
						elif mydigests is not None:
							writemsg(_("No digest file available and download failed.\n\n"),
								noiselevel=-1)
		finally:
			if use_locks and file_lock:
				unlockfile(file_lock)
				file_lock = None

		if listonly:
			writemsg_stdout("\n", noiselevel=-1)
		if fetched != 2:
			if restrict_fetch and not restrict_fetch_msg:
				restrict_fetch_msg = True
				msg = _("\n!!! %s/%s"
					" has fetch restriction turned on.\n"
					"!!! This probably means that this "
					"ebuild's files must be downloaded\n"
					"!!! manually.  See the comments in"
					" the ebuild for more information.\n\n") % \
					(mysettings["CATEGORY"], mysettings["PF"])
				writemsg_level(msg,
					level=logging.ERROR, noiselevel=-1)
			elif restrict_fetch:
				pass
			elif listonly:
				pass
			elif not filedict[myfile]:
				writemsg(_("Warning: No mirrors available for file"
					" '%s'\n") % (myfile), noiselevel=-1)
			else:
				writemsg(_("!!! Couldn't download '%s'. Aborting.\n") % myfile,
					noiselevel=-1)

			if listonly:
				failed_files.add(myfile)
				continue
			elif fetchonly:
				failed_files.add(myfile)
				continue
			return 0
	if failed_files:
		return 0
	return 1
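Note: the PORTAGE_FETCH_RESUME_MIN_SIZE handling near the top of this function turns values like "350K" into a byte count; below is a standalone, hedged re-implementation of that conversion (the regex and suffix map are assumptions mirroring _fetch_resume_size_re and _size_suffix_map).

# Hedged re-implementation of the "350K" -> bytes conversion used above.
import re

_size_re = re.compile(r"^(\d+)([KMGTP]?)$")
_suffix_exp = {"": 0, "K": 10, "M": 20, "G": 30, "T": 40, "P": 50}

def resume_min_bytes(value, default="350K"):
    value = "".join(value.split())  # drop embedded whitespace, as fetch() does
    match = _size_re.match(value) or _size_re.match(default)
    return int(match.group(1)) * 2 ** _suffix_exp[match.group(2).upper()]

print(resume_min_bytes("350K"))  # 358400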
Example #29
File: git.py Project: hugbubby/portage
    def verify_head(self):
        if (self.repo.module_specific_options.get(
                'sync-git-verify-commit-signature', 'false') != 'true'):
            return True

        if self.repo.sync_openpgp_key_path is not None:
            if gemato is None:
                writemsg_level(
                    "!!! Verifying against specified key requires gemato-11.0+ installed\n",
                    level=logging.ERROR,
                    noiselevel=-1)
                return False
            openpgp_env = gemato.openpgp.OpenPGPEnvironment()
        else:
            openpgp_env = None

        try:
            out = EOutput()
            env = None
            if openpgp_env is not None:
                try:
                    out.einfo('Using keys from %s' %
                              (self.repo.sync_openpgp_key_path, ))
                    with io.open(self.repo.sync_openpgp_key_path, 'rb') as f:
                        openpgp_env.import_key(f)
                    out.ebegin('Refreshing keys from keyserver')
                    openpgp_env.refresh_keys()
                    out.eend(0)
                except GematoException as e:
                    writemsg_level(
                        "!!! Verification impossible due to keyring problem:\n%s\n"
                        % (e, ),
                        level=logging.ERROR,
                        noiselevel=-1)
                    return False

                env = os.environ.copy()
                env['GNUPGHOME'] = openpgp_env.home

            rev_cmd = [self.bin_command, "log", "--pretty=format:%G?", "-1"]
            try:
                status = (portage._unicode_decode(
                    subprocess.check_output(rev_cmd,
                                            cwd=portage._unicode_encode(
                                                self.repo.location),
                                            env=env)).strip())
            except subprocess.CalledProcessError:
                return False

            if status == 'G':  # good signature is good
                out.einfo('Trusted signature found on top commit')
                return True
            elif status == 'U':  # untrusted
                out.ewarn('Top commit signature is valid but not trusted')
                return True
            else:
                if status == 'B':
                    expl = 'bad signature'
                elif status == 'X':
                    expl = 'expired signature'
                elif status == 'Y':
                    expl = 'expired key'
                elif status == 'R':
                    expl = 'revoked key'
                elif status == 'E':
                    expl = 'unable to verify signature (missing key?)'
                elif status == 'N':
                    expl = 'no signature'
                else:
                    expl = 'unknown issue'
                out.eerror('No valid signature found: %s' % (expl, ))
                return False
        finally:
            if openpgp_env is not None:
                openpgp_env.close()
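The key handling above boils down to importing a key into a throwaway gemato keyring and pointing git's gpg at it via GNUPGHOME. A hedged sketch of just that wiring, reusing only the gemato calls already seen in the snippet (the helper name run_git_with_keyring is an assumption):

import io
import os
import subprocess

import gemato.openpgp

def run_git_with_keyring(repo_dir, key_path, argv):
    """Run a git command with GNUPGHOME pointed at a temporary keyring
    seeded from key_path, mirroring the env setup in verify_head()."""
    openpgp_env = gemato.openpgp.OpenPGPEnvironment()
    try:
        with io.open(key_path, 'rb') as f:
            openpgp_env.import_key(f)
        env = os.environ.copy()
        env['GNUPGHOME'] = openpgp_env.home  # temp keyring, not the user's
        return subprocess.check_output(argv, cwd=repo_dir, env=env)
    finally:
        openpgp_env.close()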
Example #30
    def verify_head(self, revision="-1"):
        if self.repo.module_specific_options.get(
                "sync-git-verify-commit-signature",
                "false").lower() not in ("true", "yes"):
            return True

        if self.repo.sync_openpgp_key_path is not None and gemato is None:
            writemsg_level(
                "!!! Verifying against specified key requires gemato-14.5+ installed\n",
                level=logging.ERROR,
                noiselevel=-1,
            )
            return False

        openpgp_env = self._get_openpgp_env(self.repo.sync_openpgp_key_path)

        try:
            out = EOutput()
            env = None
            if openpgp_env is not None and self.repo.sync_openpgp_key_path is not None:
                try:
                    out.einfo("Using keys from %s" %
                              (self.repo.sync_openpgp_key_path, ))
                    with io.open(self.repo.sync_openpgp_key_path, "rb") as f:
                        openpgp_env.import_key(f)
                    self._refresh_keys(openpgp_env)
                except (GematoException, asyncio.TimeoutError) as e:
                    writemsg_level(
                        "!!! Verification impossible due to keyring problem:\n%s\n"
                        % (e, ),
                        level=logging.ERROR,
                        noiselevel=-1,
                    )
                    return False

                env = os.environ.copy()
                env["GNUPGHOME"] = openpgp_env.home

            rev_cmd = [
                self.bin_command, "log", "-n1", "--pretty=format:%G?", revision
            ]
            try:
                status = portage._unicode_decode(
                    subprocess.check_output(
                        rev_cmd,
                        cwd=portage._unicode_encode(self.repo.location),
                        env=env,
                    )).strip()
            except subprocess.CalledProcessError:
                return False

            if status == "G":  # good signature is good
                out.einfo("Trusted signature found on top commit")
                return True
            if status == "U":  # untrusted
                out.ewarn("Top commit signature is valid but not trusted")
                return True
            if status == "B":
                expl = "bad signature"
            elif status == "X":
                expl = "expired signature"
            elif status == "Y":
                expl = "expired key"
            elif status == "R":
                expl = "revoked key"
            elif status == "E":
                expl = "unable to verify signature (missing key?)"
            elif status == "N":
                expl = "no signature"
            else:
                expl = "unknown issue"
            out.eerror("No valid signature found: %s" % (expl, ))
            return False
        finally:
            if openpgp_env is not None:
                openpgp_env.close()
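Both verify_head() variants shell out to git log with --pretty=format:%G? and branch on a one-letter signature status. A standalone sketch of the same check; the status table comes from git-log(1), while the helper name is an illustrative assumption:

import subprocess

# Signature status codes emitted by `git log --pretty=format:%G?`,
# per git-log(1); both verify_head() variants above branch on these.
_GIT_SIG_STATUS = {
    "G": "good, trusted signature",
    "U": "good signature, untrusted",
    "B": "bad signature",
    "X": "good signature, expired",
    "Y": "good signature, expired key",
    "R": "good signature, revoked key",
    "E": "cannot be checked (missing key?)",
    "N": "no signature",
}

def head_signature_status(repo_dir, revision="HEAD"):
    """Return (code, description) for the signature on a commit."""
    out = subprocess.check_output(
        ["git", "log", "-n1", "--pretty=format:%G?", revision],
        cwd=repo_dir)
    code = out.decode("utf-8", "replace").strip()
    return code, _GIT_SIG_STATUS.get(code, "unknown status")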
Example #31
def lockfile(mypath, wantnewlockfile=0, unlinkfile=0,
	waiting_msg=None, flags=0):
	"""
	If wantnewlockfile is True then this creates a lockfile in the parent
	directory as the file: '.' + basename + '.portage_lockfile'.
	"""
	import fcntl

	if not mypath:
		raise InvalidData(_("Empty path given"))

	if isinstance(mypath, basestring) and mypath[-1] == '/':
		mypath = mypath[:-1]

	if hasattr(mypath, 'fileno'):
		mypath = mypath.fileno()
	if isinstance(mypath, int):
		lockfilename    = mypath
		wantnewlockfile = 0
		unlinkfile      = 0
	elif wantnewlockfile:
		base, tail = os.path.split(mypath)
		lockfilename = os.path.join(base, "." + tail + ".portage_lockfile")
		del base, tail
		unlinkfile   = 1
	else:
		lockfilename = mypath

	if isinstance(mypath, basestring):
		if not os.path.exists(os.path.dirname(mypath)):
			raise DirectoryNotFound(os.path.dirname(mypath))
		preexisting = os.path.exists(lockfilename)
		old_mask = os.umask(000)
		try:
			try:
				myfd = os.open(lockfilename, os.O_CREAT|os.O_RDWR, 0o660)
			except OSError as e:
				func_call = "open('%s')" % lockfilename
				if e.errno == OperationNotPermitted.errno:
					raise OperationNotPermitted(func_call)
				elif e.errno == PermissionDenied.errno:
					raise PermissionDenied(func_call)
				else:
					raise

			if not preexisting:
				try:
					if os.stat(lockfilename).st_gid != portage_gid:
						os.chown(lockfilename, -1, portage_gid)
				except OSError as e:
					if e.errno in (errno.ENOENT, errno.ESTALE):
						return lockfile(mypath,
							wantnewlockfile=wantnewlockfile,
							unlinkfile=unlinkfile, waiting_msg=waiting_msg,
							flags=flags)
					else:
						writemsg(_("Cannot chown a lockfile: '%s'\n") % \
							lockfilename, noiselevel=-1)

		finally:
			os.umask(old_mask)

	elif isinstance(mypath, int):
		myfd = mypath

	else:
		raise ValueError(_("Unknown type passed in '%s': '%s'") % \
			(type(mypath), mypath))

	# Try a non-blocking lock first; if it is already held, print a
	# message saying we are waiting, then fall back to a blocking attempt.
	locking_method = fcntl.lockf
	try:
		fcntl.lockf(myfd, fcntl.LOCK_EX | fcntl.LOCK_NB)
	except IOError as e:
		if "errno" not in dir(e):
			raise
		if e.errno in (errno.EACCES, errno.EAGAIN):
			# resource temp unavailable; eg, someone beat us to the lock.
			if flags & os.O_NONBLOCK:
				raise TryAgain(mypath)

			global _quiet
			out = EOutput()
			out.quiet = _quiet
			if waiting_msg is None:
				if isinstance(mypath, int):
					waiting_msg = _("waiting for lock on fd %i") % myfd
				else:
					waiting_msg = _("waiting for lock on %s\n") % lockfilename
			out.ebegin(waiting_msg)
			# try for the exclusive lock now.
			try:
				fcntl.lockf(myfd, fcntl.LOCK_EX)
			except EnvironmentError as e:
				out.eend(1, str(e))
				raise
			out.eend(os.EX_OK)
		elif e.errno == errno.ENOLCK:
			# We're not allowed to lock on this FS.
			os.close(myfd)
			link_success = False
			if lockfilename == str(lockfilename):
				if wantnewlockfile:
					try:
						if os.stat(lockfilename)[stat.ST_NLINK] == 1:
							os.unlink(lockfilename)
					except OSError:
						pass
					link_success = hardlink_lockfile(lockfilename)
			if not link_success:
				raise
			locking_method = None
			myfd = HARDLINK_FD
		else:
			raise

	if isinstance(lockfilename, basestring) and \
		myfd != HARDLINK_FD and _fstat_nlink(myfd) == 0:
		# The file was deleted on us... Keep trying to make one...
		os.close(myfd)
		writemsg(_("lockfile recurse\n"), 1)
		lockfilename, myfd, unlinkfile, locking_method = lockfile(
			mypath, wantnewlockfile=wantnewlockfile, unlinkfile=unlinkfile,
			waiting_msg=waiting_msg, flags=flags)

	writemsg(str((lockfilename, myfd, unlinkfile)) + "\n", 1)
	return (lockfilename, myfd, unlinkfile, locking_method)
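The heart of lockfile() is the try-non-blocking-then-block dance around fcntl.lockf, with the hardlink fallback reserved for filesystems that reject POSIX locks. A minimal sketch of just the locking pattern (simple_lock is an illustrative name, not portage API):

import errno
import fcntl
import os

def simple_lock(path):
    """Take an exclusive lock, trying non-blocking first so a
    'waiting for lock' message can be shown before blocking."""
    fd = os.open(path, os.O_CREAT | os.O_RDWR, 0o660)
    try:
        fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except OSError as e:
        if e.errno not in (errno.EACCES, errno.EAGAIN):
            raise
        print("waiting for lock on %s" % path)
        fcntl.lockf(fd, fcntl.LOCK_EX)  # blocks until the holder releases
    return fd

# Usage sketch:
#   fd = simple_lock("/tmp/demo.portage_lockfile")
#   ... critical section ...
#   fcntl.lockf(fd, fcntl.LOCK_UN)
#   os.close(fd)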
Example #32
class HookDirectory(object):
    def __init__(self,
                 phase,
                 settings,
                 myopts=None,
                 myaction=None,
                 mytargets=None):
        self.myopts = myopts
        self.myaction = myaction
        self.mytargets = mytargets
        check_config_instance(settings)
        self.settings = settings
        self.path = os.path.join(settings["PORTAGE_CONFIGROOT"], HOOKS_PATH,
                                 phase + '.d')
        self.output = EOutput()

    def execute(self, path=None):
        if "hooks" not in self.settings['FEATURES']:
            return

        if not path:
            path = self.path

        path = normalize_path(path)

        if not os.path.exists(path):
            if self.myopts and "--debug" in self.myopts:
                # behavior mimicked by hook.sh
                self.output.ewarn(
                    'This hook path could not be found; ignored: ' + path)
            return

        if os.path.isdir(path):
            command = [HOOKS_SH_BINARY]
            if self.myopts:
                for myopt in self.myopts:
                    command.extend(['--opt', myopt])
            if self.myaction:
                command.extend(['--action', self.myaction])
            if self.mytargets:
                for mytarget in self.mytargets:
                    command.extend(['--target', mytarget])

            command = [
                BASH_BINARY, '-c',
                'cd "' + path + '" && source "' + PORTAGE_BIN_PATH +
                '/isolated-functions.sh" && source ' + ' '.join(command)
            ]
            if self.myopts and "--verbose" in self.myopts:
                self.output.einfo('Executing hooks directory "' + self.path +
                                  '"...')
            code = spawn(mycommand=command, env=self.settings.environ())
            if code:  # if failure
                # behavior mimicked by hook.sh
                raise PortageException(
                    '!!! Hook directory %s failed with exit code %s' %
                    (self.path, code))

        else:
            raise InvalidLocation('This hook path ought to be a directory: ' +
                                  path)
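A hypothetical call site for the class above; the phase name, settings object, and the FEATURES="hooks" requirement are assumptions drawn from the snippet rather than a documented entry point:

import portage

# Runs every script under ${PORTAGE_CONFIGROOT}<HOOKS_PATH>/pre_sync.d,
# provided FEATURES contains "hooks".
hooks = HookDirectory(phase='pre_sync', settings=portage.settings,
                      myopts=['--verbose'])
hooks.execute()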
Example #33
import re
import sys

from itertools import chain
from collections import Counter

try:
    from lxml import etree
    from lxml.etree import ParserError
except (SystemExit, KeyboardInterrupt):
    raise
except (ImportError, SystemError, RuntimeError, Exception):
    # broken or missing xml support
    # https://bugs.python.org/issue14988
    msg = ["Please emerge dev-python/lxml in order to use repoman."]
    from portage.output import EOutput
    out = EOutput()
    for line in msg:
        out.eerror(line)
    sys.exit(1)

# import our initialized portage instance
from repoman._portage import portage
from repoman.metadata import metadata_dtd_uri
from repoman.modules.scan.scanbase import ScanBase

from portage.exception import InvalidAtom
from portage import os
from portage.dep import Atom
from portage.xml.metadata import parse_metadata_use

from .use_flags import USEFlagChecks
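These imports feed a repoman check that validates metadata.xml and cross-checks the USE flags it documents. As a rough illustration of what parse_metadata_use() yields (this sketch is not repoman's implementation; the function name and XPath here are assumptions):

from lxml import etree

def local_use_flags(metadata_xml_path):
    """Map each locally documented USE flag in metadata.xml to its
    whitespace-normalized description text."""
    doc = etree.parse(metadata_xml_path)
    flags = {}
    for flag in doc.findall('.//use/flag'):
        name = flag.get('name')
        if name:
            flags[name] = ' '.join(''.join(flag.itertext()).split())
    return flags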
Example #34
def debug(*strings):
    # Assumes a flatten() helper is importable in the enclosing module;
    # older portage trees provided one as portage.dep.flatten.
    from portage.output import EOutput
    ewarn = EOutput().ewarn
    ewarn(flatten(strings))
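A hypothetical usage, assuming flatten() collapses nested sequences into one flat list:

debug("unexpected atom:", ["foo/bar", "baz/qux"])
# ewarn() then receives ['unexpected atom:', 'foo/bar', 'baz/qux']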