def read_config(mandatory_opts):
    loader = KeyValuePairFileLoader(
        '/etc/dispatch-conf.conf', None)
    opts, errors = loader.load()
    if not opts:
        print(_('dispatch-conf: Error reading /etc/dispatch-conf.conf; fatal'), file=sys.stderr)
        sys.exit(1)

    # Handle quote removal here, since KeyValuePairFileLoader doesn't do that.
    quotes = "\"'"
    for k, v in opts.items():
        if v[:1] in quotes and v[:1] == v[-1:]:
            opts[k] = v[1:-1]

    for key in mandatory_opts:
        if key not in opts:
            if key == "merge":
                opts["merge"] = "sdiff --suppress-common-lines --output='%s' '%s' '%s'"
            else:
                print(_('dispatch-conf: Missing option "%s" in /etc/dispatch-conf.conf; fatal') % (key,), file=sys.stderr)

    if not os.path.exists(opts['archive-dir']):
        os.mkdir(opts['archive-dir'])
        # Use restrictive permissions by default, in order to protect
        # against vulnerabilities (like bug #315603 involving rcs).
        os.chmod(opts['archive-dir'], 0o700)
    elif not os.path.isdir(opts['archive-dir']):
        print(_('dispatch-conf: Config archive dir [%s] must exist; fatal') % (opts['archive-dir'],), file=sys.stderr)
        sys.exit(1)

    return opts
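
A minimal standalone sketch of the quote-stripping idiom used above, with hypothetical option values (KeyValuePairFileLoader itself leaves surrounding quotes intact):

opts = {"archive-dir": '"/etc/config-archive"', "diff": "diff -uN '%s' '%s'"}
quotes = "\"'"
for k, v in opts.items():
    if v[:1] in quotes and v[:1] == v[-1:]:
        opts[k] = v[1:-1]
# opts["archive-dir"] is now /etc/config-archive; the "diff" value is kept
# as-is because it is not wrapped in a matching quote pair.
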
Example #2
def read_config(mandatory_opts):
    loader = portage.env.loaders.KeyValuePairFileLoader(
        '/etc/dispatch-conf.conf', None)
    opts, errors = loader.load()
    if not opts:
        print(_('dispatch-conf: Error reading /etc/dispatch-conf.conf; fatal'), file=sys.stderr)
        sys.exit(1)

    # Handle quote removal here, since KeyValuePairFileLoader doesn't do that.
    quotes = "\"'"
    for k, v in opts.items():
        if v[:1] in quotes and v[:1] == v[-1:]:
            opts[k] = v[1:-1]

    for key in mandatory_opts:
        if key not in opts:
            if key == "merge":
                opts["merge"] = "sdiff --suppress-common-lines --output='%s' '%s' '%s'"
            else:
                print(_('dispatch-conf: Missing option "%s" in /etc/dispatch-conf.conf; fatal') % (key,), file=sys.stderr)

    if not os.path.exists(opts['archive-dir']):
        os.mkdir(opts['archive-dir'])
    elif not os.path.isdir(opts['archive-dir']):
        print(_('dispatch-conf: Config archive dir [%s] must exist; fatal') % (opts['archive-dir'],), file=sys.stderr)
        sys.exit(1)

    return opts
	def fetch_check(self, mypkg, useflags=None, mysettings=None, all=False):
		if all:
			useflags = None
		elif useflags is None:
			if mysettings:
				useflags = mysettings["USE"].split()
		myfiles = self.getFetchMap(mypkg, useflags=useflags)
		myebuild = self.findname(mypkg)
		if myebuild is None:
			raise AssertionError("ebuild not found for '%s'" % mypkg)
		pkgdir = os.path.dirname(myebuild)
		mf = Manifest(pkgdir, self.settings["DISTDIR"])
		mysums = mf.getDigests()

		failures = {}
		for x in myfiles:
			if not mysums or x not in mysums:
				ok     = False
				reason = _("digest missing")
			else:
				try:
					ok, reason = portage.checksum.verify_all(
						os.path.join(self.settings["DISTDIR"], x), mysums[x])
				except FileNotFound as e:
					ok = False
					reason = _("File Not Found: '%s'") % (e,)
			if not ok:
				failures[x] = reason
		if failures:
			return False
		return True
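
A hedged usage sketch for fetch_check, assuming a configured Gentoo system so that the global portdb is usable; the package atom is hypothetical:

import portage
ok = portage.portdb.fetch_check("sys-apps/which-2.21")
print("all distfiles verified" if ok else "at least one distfile failed")
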
def load_unpack_dependencies_configuration(repositories):
	repo_dict = {}
	for repo in repositories.repos_with_profiles():
		for eapi in _supported_eapis:
			if eapi_has_automatic_unpack_dependencies(eapi):
				file_name = os.path.join(repo.location, "profiles", "unpack_dependencies", eapi)
				lines = grabfile(file_name, recursive=True)
				for line in lines:
					elements = line.split()
					suffix = elements[0].lower()
					if len(elements) == 1:
						writemsg(_("--- Missing unpack dependencies for '%s' suffix in '%s'\n") % (suffix, file_name))
					depend = " ".join(elements[1:])
					try:
						use_reduce(depend, eapi=eapi)
					except InvalidDependString as e:
						writemsg(_("--- Invalid unpack dependencies for '%s' suffix in '%s': '%s'\n" % (suffix, file_name, e)))
					else:
						repo_dict.setdefault(repo.name, {}).setdefault(eapi, {})[suffix] = depend

	ret = {}
	for repo in repositories.repos_with_profiles():
		for repo_name in [x.name for x in repo.masters] + [repo.name]:
			for eapi in repo_dict.get(repo_name, {}):
				for suffix, depend in repo_dict.get(repo_name, {}).get(eapi, {}).items():
					ret.setdefault(repo.name, {}).setdefault(eapi, {})[suffix] = depend

	return ret
	def multiBuilder(self, options, settings, trees):
		rValue = {}
		directory = options.get("directory",
			os.path.join(settings["PORTAGE_CONFIGROOT"],
			USER_CONFIG_PATH, "sets"))
		name_pattern = options.get("name_pattern", "${name}")
		if not "$name" in name_pattern and not "${name}" in name_pattern:
			raise SetConfigError(_("name_pattern doesn't include ${name} placeholder"))
		greedy = get_boolean(options, "greedy", False)
		# look for repository path variables
		match = self._repopath_match.match(directory)
		if match:
			try:
				directory = self._repopath_sub.sub(trees["porttree"].dbapi.treemap[match.groupdict()["reponame"]], directory)
			except KeyError:
				raise SetConfigError(_("Could not find repository '%s'") % match.groupdict()["reponame"])

		try:
			directory = _unicode_decode(directory,
				encoding=_encodings['fs'], errors='strict')
			# Now verify that we can also encode it.
			_unicode_encode(directory,
				encoding=_encodings['fs'], errors='strict')
		except UnicodeError:
			directory = _unicode_decode(directory,
				encoding=_encodings['fs'], errors='replace')
			raise SetConfigError(
				_("Directory path contains invalid character(s) for encoding '%s': '%s'") \
				% (_encodings['fs'], directory))

		if os.path.isdir(directory):
			directory = normalize_path(directory)

			for parent, dirs, files in os.walk(directory):
				try:
					parent = _unicode_decode(parent,
						encoding=_encodings['fs'], errors='strict')
				except UnicodeDecodeError:
					continue
				for d in dirs[:]:
					if d[:1] == '.':
						dirs.remove(d)
				for filename in files:
					try:
						filename = _unicode_decode(filename,
							encoding=_encodings['fs'], errors='strict')
					except UnicodeDecodeError:
						continue
					if filename[:1] == '.':
						continue
					if filename.endswith(".metadata"):
						continue
					filename = os.path.join(parent,
						filename)[1 + len(directory):]
					myname = name_pattern.replace("$name", filename)
					myname = myname.replace("${name}", filename)
					rValue[myname] = StaticFileSet(
						os.path.join(directory, filename),
						greedy=greedy, dbapi=trees["vartree"].dbapi)
		return rValue
Example #6
def hardlock_cleanup(path, remove_all_locks=False):
	myhost = os.uname()[1]
	mydl = os.listdir(path)

	results = []
	mycount = 0

	mylist = {}
	for x in mydl:
		if os.path.isfile(path + "/" + x):
			parts = x.split(".hardlock-")
			if len(parts) == 2:
				filename = parts[0][1:]
				hostpid  = parts[1].split("-")
				host  = "-".join(hostpid[:-1])
				pid   = hostpid[-1]
				
				if filename not in mylist:
					mylist[filename] = {}
				if host not in mylist[filename]:
					mylist[filename][host] = []
				mylist[filename][host].append(pid)

				mycount += 1


	results.append(_("Found %(count)s locks") % {"count": mycount})
	
	for x in mylist:
		if myhost in mylist[x] or remove_all_locks:
			mylockname = hardlock_name(path + "/" + x)
			if hardlink_is_mine(mylockname, path + "/" + x) or \
			   not os.path.exists(path + "/" + x) or \
				 remove_all_locks:
				for y in mylist[x]:
					for z in mylist[x][y]:
						filename = path + "/." + x + ".hardlock-" + y + "-" + z
						if filename == mylockname:
							continue
						try:
							# We're sweeping through, unlinking everyone's locks.
							os.unlink(filename)
							results.append(_("Unlinked: ") + filename)
						except OSError:
							pass
				try:
					os.unlink(path + "/" + x)
					results.append(_("Unlinked: ") + path + "/" + x)
					os.unlink(mylockname)
					results.append(_("Unlinked: ") + mylockname)
				except OSError:
					pass
			else:
				try:
					os.unlink(mylockname)
					results.append(_("Unlinked: ") + mylockname)
				except OSError:
					pass

	return results
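
The name parsing above can be exercised in isolation; a standalone sketch with a hypothetical lock file name of the form ".<file>.hardlock-<host>-<pid>":

x = ".foo-1.0.tar.gz.hardlock-buildhost-1-12345"
parts = x.split(".hardlock-")
filename = parts[0][1:]        # "foo-1.0.tar.gz" (leading "." stripped)
hostpid = parts[1].split("-")
host = "-".join(hostpid[:-1])  # "buildhost-1" (hostnames may contain dashes)
pid = hostpid[-1]              # "12345"
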
def process(mysettings, key, logentries, fulltext):
	if "PORTAGE_ELOG_MAILURI" in mysettings:
		myrecipient = mysettings["PORTAGE_ELOG_MAILURI"].split()[0]
	else:
		myrecipient = "root@localhost"
	
	myfrom = mysettings["PORTAGE_ELOG_MAILFROM"]
	myfrom = myfrom.replace("${HOST}", socket.getfqdn())
	mysubject = mysettings["PORTAGE_ELOG_MAILSUBJECT"]
	mysubject = mysubject.replace("${PACKAGE}", key)
	mysubject = mysubject.replace("${HOST}", socket.getfqdn())

	# look at the phases listed in our logentries to figure out what action was performed
	action = _("merged")
	for phase in logentries:
		# if we found a *rm phase assume that the package was unmerged
		if phase in ["postrm", "prerm"]:
			action = _("unmerged")
	# if we think that the package was unmerged, make sure there was no unexpected
	# phase recorded to avoid misinformation
	if action == _("unmerged"):
		for phase in logentries:
			if phase not in ["postrm", "prerm", "other"]:
				action = _("unknown")

	mysubject = mysubject.replace("${ACTION}", action)

	mymessage = portage.mail.create_message(myfrom, myrecipient, mysubject, fulltext)
	try:
		portage.mail.send_mail(mysettings, mymessage)
	except PortageException as e:
		writemsg("%s\n" % str(e), noiselevel=-1)

	return
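
A standalone sketch of the ${...} template substitution used for the subject line above; the template and package key are hypothetical:

import socket
mysubject = "[portage] ebuild log for ${PACKAGE} on ${HOST}: ${ACTION}"
mysubject = mysubject.replace("${PACKAGE}", "app-misc/foo-1.0")
mysubject = mysubject.replace("${HOST}", socket.getfqdn())
mysubject = mysubject.replace("${ACTION}", "merged")
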
Example #8
	def _parse_repository_usealiases(self, repositories):
		ret = {}
		for repo in repositories.repos_with_profiles():
			file_name = os.path.join(repo.location, "profiles", "use.aliases")
			eapi = read_corresponding_eapi_file(
				file_name, default=repo.eapi)
			useflag_re = _get_useflag_re(eapi)
			raw_file_dict = grabdict(file_name, recursive=True)
			file_dict = {}
			for real_flag, aliases in raw_file_dict.items():
				if useflag_re.match(real_flag) is None:
					writemsg(_("--- Invalid real USE flag in '%s': '%s'\n") % (file_name, real_flag), noiselevel=-1)
				else:
					for alias in aliases:
						if useflag_re.match(alias) is None:
							writemsg(_("--- Invalid USE flag alias for '%s' real USE flag in '%s': '%s'\n") %
								(real_flag, file_name, alias), noiselevel=-1)
						else:
							if any(alias in v for k, v in file_dict.items() if k != real_flag):
								writemsg(_("--- Duplicated USE flag alias in '%s': '%s'\n") %
									(file_name, alias), noiselevel=-1)
							else:
								file_dict.setdefault(real_flag, []).append(alias)
			ret[repo.name] = file_dict
		return ret
Example #9
	def lineParser(self, line, line_num, data, errors):
		line = line.strip()
		if line.startswith('#'): # Skip commented lines
			return
		if not len(line): # skip empty lines
			return
		split = line.split()
		if len(split) < 1:
			errors.setdefault(self.fname, []).append(
				_("Malformed data at line: %s, data: %s")
				% (line_num + 1, line))
			return
		key = split[0]
		value = split[1:]
		if not self._validate(key):
			errors.setdefault(self.fname, []).append(
				_("Key validation failed at line: %s, data %s")
				% (line_num + 1, key))
			return
		if not self._valueValidate(value):
			errors.setdefault(self.fname, []).append(
				_("Value validation failed at line: %s, data %s")
				% (line_num + 1, value))
			return
		if key in data:
			data[key].append(value)
		else:
			data[key] = value
Example #10
	def _run(self):
		mf = self.repo_config.load_manifest(
			os.path.join(self.repo_config.location, self.cp),
			self.distdir, fetchlist_dict=self.fetchlist_dict)

		try:
			mf.create(assumeDistHashesAlways=True)
		except FileNotFound as e:
			portage.writemsg(_("!!! File %s doesn't exist, can't update "
				"Manifest\n") % e, noiselevel=-1)
			return 1

		except PortagePackageException as e:
			portage.writemsg(("!!! %s\n") % (e,), noiselevel=-1)
			return 1

		try:
			modified = mf.write(sign=False)
		except PermissionDenied as e:
			portage.writemsg("!!! %s: %s\n" % (_("Permission Denied"), e,),
				noiselevel=-1)
			return 1
		else:
			if modified:
				return self.MODIFIED
			else:
				return os.EX_OK
	def _addProfile(self, currentPath):
		parentsFile = os.path.join(currentPath, "parent")
		eapi_file = os.path.join(currentPath, "eapi")
		try:
			eapi = codecs.open(_unicode_encode(eapi_file,
				encoding=_encodings['fs'], errors='strict'),
				mode='r', encoding=_encodings['content'], errors='replace'
				).readline().strip()
		except IOError:
			pass
		else:
			if not eapi_is_supported(eapi):
				raise ParseError(_(
					"Profile contains unsupported "
					"EAPI '%s': '%s'") % \
					(eapi, os.path.realpath(eapi_file),))
		if os.path.exists(parentsFile):
			parents = grabfile(parentsFile)
			if not parents:
				raise ParseError(
					_("Empty parent file: '%s'") % parentsFile)
			for parentPath in parents:
				parentPath = normalize_path(os.path.join(
					currentPath, parentPath))
				if os.path.exists(parentPath):
					self._addProfile(parentPath)
				else:
					raise ParseError(
						_("Parent '%s' not found: '%s'") %  \
						(parentPath, parentsFile))
		self.profiles.append(currentPath)
Example #12
	def _expand_parent_colon(self, parentsFile, parentPath,
		repo_loc, repositories):
		colon = parentPath.find(":")
		if colon == -1:
			return parentPath

		if colon == 0:
			if repo_loc is None:
				raise ParseError(
					_("Parent '%s' not found: '%s'") %  \
					(parentPath, parentsFile))
			else:
				parentPath = normalize_path(os.path.join(
					repo_loc, 'profiles', parentPath[colon+1:]))
		else:
			p_repo_name = parentPath[:colon]
			try:
				p_repo_loc = repositories.get_location_for_name(p_repo_name)
			except KeyError:
				raise ParseError(
					_("Parent '%s' not found: '%s'") %  \
					(parentPath, parentsFile))
			else:
				parentPath = normalize_path(os.path.join(
					p_repo_loc, 'profiles', parentPath[colon+1:]))

		return parentPath
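
A behavior sketch for the leading-colon form handled above, using os.path.normpath as a stand-in for portage's normalize_path; the repository-qualified form ("reponame:path") needs a repositories object and is omitted here. Paths are hypothetical:

import os

repo_loc = "/var/db/repos/gentoo"
for parentPath in (":targets/desktop", "plain/relative/path"):
    colon = parentPath.find(":")
    if colon == 0:
        # expand relative to the current repository's profiles/ directory
        parentPath = os.path.normpath(
            os.path.join(repo_loc, "profiles", parentPath[colon + 1:]))
    print(parentPath)
# -> /var/db/repos/gentoo/profiles/targets/desktop
# -> plain/relative/path
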
def finalize(mysettings=None):
	"""The mysettings parameter is just for backward compatibility since
	an older version of portage will import the module from a newer version
	when it upgrades itself."""
	global _items
	printer = EOutput()
	for root, key, logentries in _items:
		print()
		if root == "/":
			printer.einfo(_("Messages for package %s:") %
				colorize("INFORM", key))
		else:
			printer.einfo(_("Messages for package %(pkg)s merged to %(root)s:") %
				{"pkg": colorize("INFORM", key), "root": root})
		print()
		for phase in EBUILD_PHASES:
			if phase not in logentries:
				continue
			for msgtype, msgcontent in logentries[phase]:
				fmap = {"INFO": printer.einfo,
						"WARN": printer.ewarn,
						"ERROR": printer.eerror,
						"LOG": printer.einfo,
						"QA": printer.ewarn}
				if isinstance(msgcontent, basestring):
					msgcontent = [msgcontent]
				for line in msgcontent:
					fmap[msgtype](line.strip("\n"))
	_items = []
	return
Example #14
    def lineParser(self, line, line_num, data, errors):
        line = line.strip()
        if line.startswith("#"):  # skip commented lines
            return
        if not len(line):  # skip empty lines
            return
        split = line.split("=", 1)
        if len(split) < 2:
            errors.setdefault(self.fname, []).append(_("Malformed data at line: %s, data %s") % (line_num + 1, line))
            return
        key = split[0].strip()
        value = split[1].strip()
        if not key:
            errors.setdefault(self.fname, []).append(_("Malformed key at line: %s, key %s") % (line_num + 1, key))
            return
        if not self._validate(key):
            errors.setdefault(self.fname, []).append(
                _("Key validation failed at line: %s, data %s") % (line_num + 1, key)
            )
            return
        if not self._valueValidate(value):
            errors.setdefault(self.fname, []).append(
                _("Value validation failed at line: %s, data %s") % (line_num + 1, value)
            )
            return
        data[key] = value
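
The split("=", 1) call above is what keeps any later '=' characters inside the value; a short illustration with a hypothetical config line:

line = "EMERGE_DEFAULT_OPTS=--jobs=4 --quiet"
key, value = (part.strip() for part in line.split("=", 1))
# key == "EMERGE_DEFAULT_OPTS", value == "--jobs=4 --quiet"
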
Example #15
	def _parse_file_to_dict(self, file_name, juststrings=False, recursive=True, eapi_filter=None):
		ret = {}
		location_dict = {}
		file_dict = grabdict_package(file_name, recursive=recursive, verify_eapi=True)
		eapi = read_corresponding_eapi_file(file_name)
		if eapi_filter is not None and not eapi_filter(eapi):
			if file_dict:
				writemsg(_("--- EAPI '%s' does not support '%s': '%s'\n") %
					(eapi, os.path.basename(file_name), file_name),
					noiselevel=-1)
			return ret
		useflag_re = _get_useflag_re(eapi)
		for k, v in file_dict.items():
			useflags = []
			for prefixed_useflag in v:
				if prefixed_useflag[:1] == "-":
					useflag = prefixed_useflag[1:]
				else:
					useflag = prefixed_useflag
				if useflag_re.match(useflag) is None:
					writemsg(_("--- Invalid USE flag for '%s' in '%s': '%s'\n") %
						(k, file_name, prefixed_useflag), noiselevel=-1)
				else:
					useflags.append(prefixed_useflag)
			location_dict.setdefault(k, []).extend(useflags)
		for k, v in location_dict.items():
			if juststrings:
				v = " ".join(v)
			else:
				v = tuple(v)
			ret.setdefault(k.cp, {})[k] = v
		return ret
Example #16
def _finalize():
	global _items
	printer = EOutput()
	for root, key, logentries, logfile in _items:
		print()
		if root == "/":
			printer.einfo(_("Messages for package %s:") %
				colorize("INFORM", key))
		else:
			printer.einfo(_("Messages for package %(pkg)s merged to %(root)s:") %
				{"pkg": colorize("INFORM", key), "root": root})
		if logfile is not None:
			printer.einfo(_("Log file: %s") % colorize("INFORM", logfile))
		print()
		for phase in EBUILD_PHASES:
			if phase not in logentries:
				continue
			for msgtype, msgcontent in logentries[phase]:
				fmap = {"INFO": printer.einfo,
						"WARN": printer.ewarn,
						"ERROR": printer.eerror,
						"LOG": printer.einfo,
						"QA": printer.ewarn}
				if isinstance(msgcontent, basestring):
					msgcontent = [msgcontent]
				for line in msgcontent:
					fmap[msgtype](line.strip("\n"))
	_items = []
	return
Example #17
def verify_all(filename, mydict, calc_prelink=0, strict=0):
    """
    Verify all checksums against a file.

    @param filename: File to run the checksums against
    @type filename: String
    @param mydict: Dictionary containing the expected file size ("size") and
        one or more expected checksums, keyed by hash name
    @type mydict: Dict
    @param calc_prelink: Whether or not to reverse prelink before running the checksum
    @type calc_prelink: Integer
    @param strict: Enable/Disable strict checking (which stops exactly at a checksum failure and throws an exception)
    @type strict: Integer
    @rtype: Tuple
    @return: Result of the checks and possible message:
        1) If size fails, False, and a tuple containing a message, the given size, and the actual size
        2) If there is an os error, False, and a tuple containing the system error followed by 2 nulls
        3) If a checksum fails, False and a tuple containing a message, the given hash, and the actual hash
        4) If all checks succeed, return True and a fake reason
    """
    # Dict relates to single file only.
    # returns: (passed,reason)
    file_is_ok = True
    reason = "Reason unknown"
    try:
        mysize = os.stat(filename)[stat.ST_SIZE]
        if mydict["size"] != mysize:
            return False, (_("Filesize does not match recorded size"), mysize, mydict["size"])
    except OSError as e:
        if e.errno == errno.ENOENT:
            raise portage.exception.FileNotFound(filename)
        return False, (str(e), None, None)

    verifiable_hash_types = set(mydict).intersection(hashfunc_map)
    verifiable_hash_types.discard("size")
    if not verifiable_hash_types:
        expected = set(hashfunc_map)
        expected.discard("size")
        expected = list(expected)
        expected.sort()
        expected = " ".join(expected)
        got = set(mydict)
        got.discard("size")
        got = list(got)
        got.sort()
        got = " ".join(got)
        return False, (_("Insufficient data for checksum verification"), got, expected)

    for x in sorted(mydict):
        if x == "size":
            continue
        elif x in hashfunc_map:
            myhash = perform_checksum(filename, x, calc_prelink=calc_prelink)[0]
            if mydict[x] != myhash:
                if strict:
                    raise portage.exception.DigestException(
                        ("Failed to verify '$(file)s' on " + "checksum type '%(type)s'") % {"file": filename, "type": x}
                    )
                else:
                    file_is_ok = False
                    reason = (("Failed on %s verification" % x), myhash, mydict[x])
                    break
    return file_is_ok, reason
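
A hedged usage sketch for the function above (also available as portage.checksum.verify_all); the path is hypothetical and the digest values are elided:

from portage.checksum import verify_all
mydict = {"size": 1024, "SHA512": "..."}  # expected digests, elided here
ok, reason = verify_all("/var/cache/distfiles/foo-1.0.tar.gz", mydict)
if not ok:
    print("verification failed:", reason)
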
def deprecated_profile_check(settings=None):
	config_root = "/"
	if settings is not None:
		config_root = settings["PORTAGE_CONFIGROOT"]
	deprecated_profile_file = os.path.join(config_root,
		DEPRECATED_PROFILE_FILE)
	if not os.access(deprecated_profile_file, os.R_OK):
		return False
	dcontent = codecs.open(_unicode_encode(deprecated_profile_file,
		encoding=_encodings['fs'], errors='strict'), 
		mode='r', encoding=_encodings['content'], errors='replace').readlines()
	writemsg(colorize("BAD", _("\n!!! Your current profile is "
		"deprecated and not supported anymore.")) + "\n", noiselevel=-1)
	writemsg(colorize("BAD", _("!!! Use eselect profile to update your "
		"profile.")) + "\n", noiselevel=-1)
	if not dcontent:
		writemsg(colorize("BAD", _("!!! Please refer to the "
			"Gentoo Upgrading Guide.")) + "\n", noiselevel=-1)
		return True
	newprofile = dcontent[0]
	writemsg(colorize("BAD", _("!!! Please upgrade to the "
		"following profile if possible:")) + "\n", noiselevel=-1)
	writemsg(8*" " + colorize("GOOD", newprofile) + "\n", noiselevel=-1)
	if len(dcontent) > 1:
		writemsg(_("To upgrade do the following steps:\n"), noiselevel=-1)
		for myline in dcontent[1:]:
			writemsg(myline, noiselevel=-1)
		writemsg("\n\n", noiselevel=-1)
	return True
Example #19
def collect_ebuild_messages(path):
	""" Collect elog messages generated by the bash logging function stored 
		at 'path'.
	"""
	mylogfiles = None
	try:
		mylogfiles = os.listdir(path)
	except OSError:
		pass
	# shortcut for packages without any messages
	if not mylogfiles:
		return {}
	# exploit listdir() file order so we process log entries in chronological order
	mylogfiles.reverse()
	logentries = {}
	for msgfunction in mylogfiles:
		filename = os.path.join(path, msgfunction)
		if msgfunction not in EBUILD_PHASES:
			writemsg(_("!!! can't process invalid log file: %s\n") % filename,
				noiselevel=-1)
			continue
		if not msgfunction in logentries:
			logentries[msgfunction] = []
		lastmsgtype = None
		msgcontent = []
		f = io.open(_unicode_encode(filename,
			encoding=_encodings['fs'], errors='strict'),
			mode='r', encoding=_encodings['repo.content'], errors='replace')
		for l in f:
			l = l.rstrip('\n')
			if not l:
				continue
			try:
				msgtype, msg = l.split(" ", 1)
			except ValueError:
				writemsg(_("!!! malformed entry in "
					"log file: '%s'\n") % filename, noiselevel=-1)
				continue

			if lastmsgtype is None:
				lastmsgtype = msgtype
			
			if msgtype == lastmsgtype:
				msgcontent.append(msg)
			else:
				if msgcontent:
					logentries[msgfunction].append((lastmsgtype, msgcontent))
				msgcontent = [msg]
			lastmsgtype = msgtype
		f.close()
		if msgcontent:
			logentries[msgfunction].append((lastmsgtype, msgcontent))

	# clean logfiles to avoid repetitions
	for f in mylogfiles:
		try:
			os.unlink(os.path.join(path, f))
		except OSError:
			pass
	return logentries
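
A self-contained sketch of the on-disk format the function consumes: each file under 'path' is named after an ebuild phase and holds one "<TYPE> <message>" entry per line (directory and content hypothetical):

import io, os, tempfile

path = tempfile.mkdtemp()
with io.open(os.path.join(path, "postinst"), "w") as f:
    f.write(u"INFO sample message\nWARN another message\n")
# collect_ebuild_messages(path) would return
# {"postinst": [("INFO", ["sample message"]), ("WARN", ["another message"])]}
# and unlink the processed log files afterwards.
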
		def onerror(e):
			if isinstance(e, OperationNotPermitted):
				writemsg(_("Operation Not Permitted: %s\n") % str(e),
					noiselevel=-1)
			elif isinstance(e, FileNotFound):
				writemsg(_("File Not Found: '%s'\n") % str(e), noiselevel=-1)
			else:
				raise
Example #21
    def display(self):
        self.out.write(
            "\r"
            + colorize("WARN", _("cache miss: '") + str(self.misses) + "'")
            + " --- "
            + colorize("GOOD", _("cache hit: '") + str(self.hits) + "'")
        )
        self.out.flush()
	def __init__(self, s):
		if isinstance(s, Atom):
			# This is an efficiency assertion, to ensure that the Atom
			# constructor is not called redundantly.
			raise TypeError(_("Expected %s, got %s") % \
				(_atom_base, type(s)))

		_atom_base.__init__(s)

		if "!" == s[:1]:
			blocker = self._blocker(forbid_overlap=("!" == s[1:2]))
			if blocker.overlap.forbid:
				s = s[2:]
			else:
				s = s[1:]
		else:
			blocker = False
		self.__dict__['blocker'] = blocker
		m = _atom_re.match(s)
		if m is None:
			raise InvalidAtom(self)

		if m.group('op') is not None:
			base = _atom_re.groupindex['op']
			op = m.group(base + 1)
			cpv = m.group(base + 2)
			cp = m.group(base + 3)
			if m.group(base + 4) is not None:
				raise InvalidAtom(self)
		elif m.group('star') is not None:
			base = _atom_re.groupindex['star']
			op = '=*'
			cpv = m.group(base + 1)
			cp = m.group(base + 2)
			if m.group(base + 3) is not None:
				raise InvalidAtom(self)
		elif m.group('simple') is not None:
			op = None
			cpv = cp = m.group(_atom_re.groupindex['simple'] + 1)
			if m.group(_atom_re.groupindex['simple'] + 2) is not None:
				raise InvalidAtom(self)
		else:
			raise AssertionError(_("required group not found in atom: '%s'") % self)
		self.__dict__['cp'] = cp
		self.__dict__['cpv'] = cpv
		self.__dict__['slot'] = m.group(_atom_re.groups - 1)
		self.__dict__['operator'] = op

		use_str = m.group(_atom_re.groups)
		if use_str is not None:
			use = _use_dep(dep_getusedeps(s))
			without_use = Atom(m.group('without_use'))
		else:
			use = None
			without_use = self

		self.__dict__['use'] = use
		self.__dict__['without_use'] = without_use
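
A hedged usage sketch for the Atom class as exposed at portage.dep.Atom; the atom string is hypothetical and the printed shapes are illustrative:

from portage.dep import Atom
a = Atom(">=dev-lang/python-3.9[sqlite]")
print(a.cp)        # dev-lang/python
print(a.operator)  # >=
print(a.cpv)       # dev-lang/python-3.9
print(a.use.enabled if a.use else None)  # frozenset({'sqlite'})
print(a.blocker)   # False
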
Example #23
	def check_auto_sync(self):
		'''Check the auto_sync setting'''
		if self.repo.auto_sync is None:
			writemsg_level("!!! %s\n" % _("Repository '%s' is missing auto_sync attribute")
				% self.repo.name, level=self.logger.ERROR, noiselevel=-1)
		elif self.repo.auto_sync.lower() not in ["yes", "true", "no", "false"]:
			writemsg_level("!!! %s\n" % _("Repository '%s' auto_sync attribute must be one of: %s")
				% (self.repo.name, '{yes, true, no, false}'),
				level=self.logger.ERROR, noiselevel=-1)
Example #24
	def singleBuilder(cls, options, settings, trees):
		mode = options.get("mode", "older")
		if str(mode).lower() not in ["newer", "older"]:
			raise SetConfigError(_("invalid 'mode' value %s (use either 'newer' or 'older')") % mode)
		try:
			age = int(options.get("age", "7"))
		except ValueError as e:
			raise SetConfigError(_("value of option 'age' is not an integer"))
		return AgeSet(vardb=trees["vartree"].dbapi, mode=mode, age=age)
Example #25
def parse_updates(mycontent):
	"""Valid updates are returned as a list of split update commands."""
	myupd = []
	errors = []
	mylines = mycontent.splitlines()
	for myline in mylines:
		mysplit = myline.split()
		if len(mysplit) == 0:
			continue
		if mysplit[0] not in ("move", "slotmove"):
			errors.append(_("ERROR: Update type not recognized '%s'") % myline)
			continue
		if mysplit[0] == "move":
			if len(mysplit) != 3:
				errors.append(_("ERROR: Update command invalid '%s'") % myline)
				continue
			valid = True
			for i in (1, 2):
				try:
					atom = Atom(mysplit[i])
				except InvalidAtom:
					atom = None
				else:
					if atom.blocker or atom != atom.cp:
						atom = None
				if atom is not None:
					mysplit[i] = atom
				else:
					errors.append(
						_("ERROR: Malformed update entry '%s'") % myline)
					valid = False
					break
			if not valid:
				continue

		if mysplit[0] == "slotmove":
			if len(mysplit)!=4:
				errors.append(_("ERROR: Update command invalid '%s'") % myline)
				continue
			pkg, origslot, newslot = mysplit[1], mysplit[2], mysplit[3]
			try:
				atom = Atom(pkg)
			except InvalidAtom:
				atom = None
			else:
				if atom.blocker:
					atom = None
			if atom is not None:
				mysplit[1] = atom
			else:
				errors.append(_("ERROR: Malformed update entry '%s'") % myline)
				continue

		# The list of valid updates is filtered by continue statements above.
		myupd.append(mysplit)
	return myupd, errors
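
A hedged usage sketch, assuming the function above is in scope; the update commands are hypothetical but well-formed:

content = "move media-libs/libpng media-libs/libpng-compat\nslotmove x11-libs/gtk+ 0 2\n"
myupd, errors = parse_updates(content)
# myupd[0] == ["move", Atom("media-libs/libpng"), Atom("media-libs/libpng-compat")]
# myupd[1] == ["slotmove", Atom("x11-libs/gtk+"), "0", "2"]
# errors == []
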
	def _read_dirVirtuals(self, profiles):
		"""
		Read the 'virtuals' file in all profiles.
		"""
		virtuals_list = []
		for x in profiles:
			virtuals_file = os.path.join(x, "virtuals")
			virtuals_dict = grabdict(virtuals_file)
			atoms_dict = {}
			for k, v in virtuals_dict.items():
				try:
					virt_atom = Atom(k)
				except InvalidAtom:
					virt_atom = None
				else:
					if virt_atom.blocker or \
						str(virt_atom) != str(virt_atom.cp):
						virt_atom = None
				if virt_atom is None:
					writemsg(_("--- Invalid virtuals atom in %s: %s\n") % \
						(virtuals_file, k), noiselevel=-1)
					continue
				providers = []
				for atom in v:
					atom_orig = atom
					if atom[:1] == '-':
						# allow incrementals
						atom = atom[1:]
					try:
						atom = Atom(atom)
					except InvalidAtom:
						atom = None
					else:
						if atom.blocker:
							atom = None
					if atom is None:
						writemsg(_("--- Invalid atom in %s: %s\n") % \
							(virtuals_file, atom_orig), noiselevel=-1)
					else:
						if atom_orig == str(atom):
							# normal atom, so return as Atom instance
							providers.append(atom)
						else:
							# atom has special prefix, so return as string
							providers.append(atom_orig)
				if providers:
					atoms_dict[virt_atom] = providers
			if atoms_dict:
				virtuals_list.append(atoms_dict)

		self._dirVirtuals = stack_dictlist(virtuals_list, incremental=True)

		for virt in self._dirVirtuals:
			# Preference for virtuals decreases from left to right.
			self._dirVirtuals[virt].reverse()
Example #27
def file_archive(archive, curconf, newconf, mrgconf):
    """Archive existing config to the archive-dir, bumping old versions
    out of the way into .# versions (log-rotate style). Then, if mrgconf
    was specified and there is a .dist version, merge the user's changes
    and the distributed changes and put the result into mrgconf.  Lastly,
    if newconf was specified, archive it as a .dist.new version (which
    gets moved to the .dist version at the end of the processing)."""

    try:
        os.makedirs(os.path.dirname(archive))
    except OSError:
        pass

    # Archive the current config file if it isn't already saved
    if os.path.exists(archive) and len(diffstatusoutput("diff -aq '%s' '%s'", curconf, archive)[1]) != 0:
        suf = 1
        while suf < 9 and os.path.exists(archive + "." + str(suf)):
            suf += 1

        while suf > 1:
            os.rename(archive + "." + str(suf - 1), archive + "." + str(suf))
            suf -= 1

        os.rename(archive, archive + ".1")

    if os.path.isfile(curconf):
        try:
            shutil.copy2(curconf, archive)
        except (IOError, os.error) as why:
            print(
                _("dispatch-conf: Error copying %(curconf)s to %(archive)s: %(reason)s; fatal")
                % {"curconf": curconf, "archive": archive, "reason": str(why)},
                file=sys.stderr,
            )

    if newconf != "":
        # Save off new config file in the archive dir with .dist.new suffix
        try:
            shutil.copy2(newconf, archive + ".dist.new")
        except (IOError, os.error) as why:
            print(
                _("dispatch-conf: Error copying %(newconf)s to %(archive)s: %(reason)s; fatal")
                % {"newconf": newconf, "archive": archive + ".dist.new", "reason": str(why)},
                file=sys.stderr,
            )

        ret = 0
        if mrgconf != "" and os.path.exists(archive + ".dist"):
            # This puts the results of the merge into mrgconf.
            ret = os.system(DIFF3_MERGE % (curconf, archive + ".dist", newconf, mrgconf))
            mystat = os.lstat(newconf)
            os.chmod(mrgconf, mystat.st_mode)
            os.chown(mrgconf, mystat.st_uid, mystat.st_gid)

        return ret
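
A pure-logic simulation of the ".1" through ".9" rotation above, with no filesystem access (names hypothetical):

existing = {"archive", "archive.1", "archive.2"}
suf = 1
while suf < 9 and ("archive.%d" % suf) in existing:
    suf += 1
# suf == 3; the renames then cascade: .2 -> .3, .1 -> .2, archive -> .1,
# leaving "archive" free for the fresh copy of curconf.
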
Example #28
	def _check_locations(self):
		"""Check if repositories location are correct and show a warning message if not"""
		for (name, r) in self.prepos.items():
			if name != 'DEFAULT':
				if r.location is None:
					writemsg(_("!!! Location not set for repository %s\n") % name, noiselevel=-1)
				else:
					if not isdir_raise_eaccess(r.location) and not portage._sync_mode:
						self.prepos_order.remove(name)
						writemsg(_("!!! Invalid Repository Location"
							" (not a dir): '%s'\n") % r.location, noiselevel=-1)
def setexec(ctx="\n"):
	ctx = _unicode_encode(ctx, encoding=_encodings['content'], errors='strict')
	if selinux.setexeccon(ctx) < 0:
		ctx = _unicode_decode(ctx, encoding=_encodings['content'],
			errors='replace')
		if selinux.security_getenforce() == 1:
			raise OSError(_("Failed setting exec() context \"%s\".") % ctx)
		else:
			portage.writemsg("!!! " + \
				_("Failed setting exec() context \"%s\".") % ctx, \
				noiselevel=-1)
Example #30
	def getfetchsizes(self, mypkg, useflags=None, debug=0, myrepo=None):
		# returns a filename:size dictionary of remaining downloads
		myebuild, mytree = self.findname2(mypkg, myrepo=myrepo)
		if myebuild is None:
			raise AssertionError(_("ebuild not found for '%s'") % mypkg)
		pkgdir = os.path.dirname(myebuild)
		mf = self.repositories.get_repo_for_location(
			os.path.dirname(os.path.dirname(pkgdir))).load_manifest(
				pkgdir, self.settings["DISTDIR"])
		checksums = mf.getDigests()
		if not checksums:
			if debug: 
				writemsg(_("[empty/missing/bad digest]: %s\n") % (mypkg,))
			return {}
		filesdict={}
		myfiles = self.getFetchMap(mypkg, useflags=useflags, mytree=mytree)
		#XXX: maybe this should be improved: take partial downloads
		# into account? check checksums?
		for myfile in myfiles:
			try:
				fetch_size = int(checksums[myfile]["size"])
			except (KeyError, ValueError):
				if debug:
					writemsg(_("[bad digest]: missing %(file)s for %(pkg)s\n") % {"file":myfile, "pkg":mypkg})
				continue
			file_path = os.path.join(self.settings["DISTDIR"], myfile)
			mystat = None
			try:
				mystat = os.stat(file_path)
			except OSError:
				pass
			if mystat is None:
				existing_size = 0
				ro_distdirs = self.settings.get("PORTAGE_RO_DISTDIRS")
				if ro_distdirs is not None:
					for x in shlex_split(ro_distdirs):
						try:
							mystat = os.stat(os.path.join(x, myfile))
						except OSError:
							pass
						else:
							if mystat.st_size == fetch_size:
								existing_size = fetch_size
								break
			else:
				existing_size = mystat.st_size
			remaining_size = fetch_size - existing_size
			if remaining_size > 0:
				# Assume the download is resumable.
				filesdict[myfile] = remaining_size
			elif remaining_size < 0:
				# The existing file is too large and therefore corrupt.
				filesdict[myfile] = int(checksums[myfile]["size"])
		return filesdict
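
A hedged usage sketch via the global portdb, assuming a configured Gentoo system; the package atom is hypothetical:

import portage
for filename, remaining in portage.portdb.getfetchsizes("sys-apps/which-2.21").items():
    print("%s: %d bytes still to fetch" % (filename, remaining))
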
Example #31
def unlockfile(mytuple):

    #XXX: Compatibility hack.
    if len(mytuple) == 3:
        lockfilename, myfd, unlinkfile = mytuple
        locking_method = fcntl.flock
    elif len(mytuple) == 4:
        lockfilename, myfd, unlinkfile, locking_method = mytuple
    else:
        raise InvalidData

    if (myfd == HARDLINK_FD):
        unhardlink_lockfile(lockfilename, unlinkfile=unlinkfile)
        return True

    # myfd may be None here due to myfd = mypath in lockfile()
    if isinstance(lockfilename, basestring) and \
     not os.path.exists(lockfilename):
        writemsg(_("lockfile does not exist '%s'\n") % lockfilename, 1)
        if myfd is not None:
            os.close(myfd)
            _open_fds.remove(myfd)
        return False

    try:
        if myfd is None:
            myfd = os.open(lockfilename, os.O_WRONLY, 0o660)
            unlinkfile = 1
        locking_method(myfd, fcntl.LOCK_UN)
    except OSError:
        if isinstance(lockfilename, basestring):
            os.close(myfd)
            _open_fds.remove(myfd)
        raise IOError(_("Failed to unlock file '%s'\n") % lockfilename)

    try:
        # This sleep call was added to allow other processes that are
        # waiting for a lock to be able to grab it before it is deleted.
        # lockfile() already accounts for this situation, however, and
        # the sleep here adds more time than is saved overall, so it
        # is commented out until proved necessary.
        #time.sleep(0.0001)
        if unlinkfile:
            locking_method(myfd, fcntl.LOCK_EX | fcntl.LOCK_NB)
            # We won the lock, so there isn't competition for it.
            # We can safely delete the file.
            writemsg(_("Got the lockfile...\n"), 1)
            if _fstat_nlink(myfd) == 1:
                os.unlink(lockfilename)
                writemsg(_("Unlinked lockfile...\n"), 1)
                locking_method(myfd, fcntl.LOCK_UN)
            else:
                writemsg(_("lockfile does not exist '%s'\n") % lockfilename, 1)
                os.close(myfd)
                _open_fds.remove(myfd)
                return False
    except SystemExit:
        raise
    except Exception as e:
        writemsg(_("Failed to get lock... someone took it.\n"), 1)
        writemsg(str(e) + "\n", 1)

    # why test lockfilename?  because we may have been handed an
    # fd originally, and the caller might not like having their
    # open fd closed automatically on them.
    if isinstance(lockfilename, basestring):
        os.close(myfd)
        _open_fds.remove(myfd)

    return True
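
A hedged usage sketch pairing unlockfile with portage.locks.lockfile, which returns the 4-tuple that unlockfile expects; the lock path is hypothetical:

from portage.locks import lockfile, unlockfile
mylock = lockfile("/tmp/example.lock", wantnewlockfile=1)
try:
    pass  # critical section
finally:
    unlockfile(mylock)
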
Example #32
    def rebuild(self,
                exclude_pkgs=None,
                include_file=None,
                preserve_paths=None):
        """
        Raises CommandNotFound if there are preserved libs
        and the scanelf binary is not available.

        @param exclude_pkgs: A set of packages that should be excluded from
                the LinkageMap, since they are being unmerged and their NEEDED
                entries are therefore irrelevant and would only serve to corrupt
                the LinkageMap.
        @type exclude_pkgs: set
        @param include_file: The path of a file containing NEEDED entries for
                a package which does not exist in the vardbapi yet because it is
                currently being merged.
        @type include_file: String
        @param preserve_paths: Libraries preserved by a package instance that
                is currently being merged. They need to be explicitly passed to the
                LinkageMap, since they are not registered in the
                PreservedLibsRegistry yet.
        @type preserve_paths: set
        """

        os = _os_merge
        root = self._root
        root_len = len(root) - 1
        self._clear_cache()
        self._defpath.update(
            getlibpaths(self._dbapi.settings["EROOT"],
                        env=self._dbapi.settings))
        libs = self._libs
        obj_properties = self._obj_properties

        lines = []

        # Data from include_file is processed first so that it
        # overrides any data from previously installed files.
        if include_file is not None:
            for line in grabfile(include_file):
                lines.append((None, include_file, line))

        aux_keys = [self._needed_aux_key]
        can_lock = os.access(os.path.dirname(self._dbapi._dbroot), os.W_OK)
        if can_lock:
            self._dbapi.lock()
        try:
            for cpv in self._dbapi.cpv_all():
                if exclude_pkgs is not None and cpv in exclude_pkgs:
                    continue
                needed_file = self._dbapi.getpath(
                    cpv, filename=self._needed_aux_key)
                for line in self._dbapi.aux_get(cpv, aux_keys)[0].splitlines():
                    lines.append((cpv, needed_file, line))
        finally:
            if can_lock:
                self._dbapi.unlock()

        # have to call scanelf for preserved libs here as they aren't
        # registered in NEEDED.ELF.2 files
        plibs = {}
        if preserve_paths is not None:
            plibs.update((x, None) for x in preserve_paths)
        if self._dbapi._plib_registry and self._dbapi._plib_registry.hasEntries(
        ):
            for cpv, items in self._dbapi._plib_registry.getPreservedLibs(
            ).items():
                if exclude_pkgs is not None and cpv in exclude_pkgs:
                    # These preserved libs will either be unmerged,
                    # rendering them irrelevant, or they will be
                    # preserved in the replacement package and are
                    # already represented via the preserve_paths
                    # parameter.
                    continue
                plibs.update((x, cpv) for x in items)
        if plibs:
            # We don't use scanelf -q, since that would omit libraries like
            # musl's /usr/lib/libc.so which do not have any DT_NEEDED or
            # DT_SONAME settings.
            args = [
                os.path.join(EPREFIX or "/", "usr/bin/scanelf"),
                "-BF",
                "%a;%F;%S;%r;%n",
            ]
            args.extend(
                os.path.join(root, x.lstrip("." + os.sep)) for x in plibs)
            try:
                proc = subprocess.Popen(args, stdout=subprocess.PIPE)
            except EnvironmentError as e:
                if e.errno != errno.ENOENT:
                    raise
                raise CommandNotFound(args[0])
            else:
                for l in proc.stdout:
                    try:
                        l = _unicode_decode(l,
                                            encoding=_encodings["content"],
                                            errors="strict")
                    except UnicodeDecodeError:
                        l = _unicode_decode(l,
                                            encoding=_encodings["content"],
                                            errors="replace")
                        writemsg_level(
                            _("\nError decoding characters "
                              "returned from scanelf: %s\n\n") % (l, ),
                            level=logging.ERROR,
                            noiselevel=-1,
                        )
                    l = l[3:].rstrip("\n")
                    if not l:
                        continue
                    try:
                        entry = NeededEntry.parse("scanelf", l)
                    except InvalidData as e:
                        writemsg_level("\n%s\n\n" % (e, ),
                                       level=logging.ERROR,
                                       noiselevel=-1)
                        continue
                    try:
                        with open(
                                _unicode_encode(
                                    entry.filename,
                                    encoding=_encodings["fs"],
                                    errors="strict",
                                ),
                                "rb",
                        ) as f:
                            elf_header = ELFHeader.read(f)
                    except EnvironmentError as e:
                        if e.errno != errno.ENOENT:
                            raise
                        # File removed concurrently.
                        continue

                    # Infer implicit soname from basename (bug 715162).
                    if not entry.soname:
                        try:
                            proc = subprocess.Popen(
                                [
                                    b"file",
                                    _unicode_encode(
                                        entry.filename,
                                        encoding=_encodings["fs"],
                                        errors="strict",
                                    ),
                                ],
                                stdout=subprocess.PIPE,
                            )
                            out, err = proc.communicate()
                            proc.wait()
                        except EnvironmentError:
                            pass
                        else:
                            if b"SB shared object" in out:
                                entry.soname = os.path.basename(entry.filename)

                    entry.multilib_category = compute_multilib_category(
                        elf_header)
                    entry.filename = entry.filename[root_len:]
                    owner = plibs.pop(entry.filename, None)
                    lines.append((owner, "scanelf", str(entry)))
                proc.wait()
                proc.stdout.close()

        if plibs:
            # Preserved libraries that did not appear in the scanelf output.
            # This is known to happen with statically linked libraries.
            # Generate dummy lines for these, so we can assume that every
            # preserved library has an entry in self._obj_properties. This
            # is important in order to prevent findConsumers from raising
            # an unwanted KeyError.
            for x, cpv in plibs.items():
                lines.append((cpv, "plibs", ";".join(["", x, "", "", ""])))

        # Share identical frozenset instances when available,
        # in order to conserve memory.
        frozensets = {}
        owner_entries = collections.defaultdict(list)

        while True:
            try:
                owner, location, l = lines.pop()
            except IndexError:
                break
            l = l.rstrip("\n")
            if not l:
                continue
            if "\0" in l:
                # os.stat() will raise "TypeError: must be encoded string
                # without NULL bytes, not str" in this case.
                writemsg_level(
                    _("\nLine contains null byte(s) "
                      "in %s: %s\n\n") % (location, l),
                    level=logging.ERROR,
                    noiselevel=-1,
                )
                continue
            try:
                entry = NeededEntry.parse(location, l)
            except InvalidData as e:
                writemsg_level("\n%s\n\n" % (e, ),
                               level=logging.ERROR,
                               noiselevel=-1)
                continue

            # If NEEDED.ELF.2 contains the new multilib category field,
            # then use that for categorization. Otherwise, if a mapping
            # exists, map e_machine (entry.arch) to an approximate
            # multilib category. If all else fails, use e_machine, just
            # as older versions of portage did.
            if entry.multilib_category is None:
                entry.multilib_category = _approx_multilib_categories.get(
                    entry.arch, entry.arch)

            entry.filename = normalize_path(entry.filename)
            expand = {"ORIGIN": os.path.dirname(entry.filename)}
            entry.runpaths = frozenset(
                normalize_path(
                    varexpand(
                        x, expand, error_leader=lambda: "%s: " % location))
                for x in entry.runpaths)
            entry.runpaths = frozensets.setdefault(entry.runpaths,
                                                   entry.runpaths)
            owner_entries[owner].append(entry)

        # In order to account for internal library resolution which a package
        # may implement (useful at least for handling of bundled libraries),
        # generate implicit runpath entries for any needed sonames which are
        # provided by the same owner package.
        for owner, entries in owner_entries.items():
            if owner is None:
                continue

            providers = {}
            for entry in entries:
                if entry.soname:
                    providers[SonameAtom(entry.multilib_category,
                                         entry.soname)] = entry

            for entry in entries:
                implicit_runpaths = []
                for soname in entry.needed:
                    soname_atom = SonameAtom(entry.multilib_category, soname)
                    provider = providers.get(soname_atom)
                    if provider is None:
                        continue
                    provider_dir = os.path.dirname(provider.filename)
                    if provider_dir not in entry.runpaths:
                        implicit_runpaths.append(provider_dir)

                if implicit_runpaths:
                    entry.runpaths = frozenset(
                        itertools.chain(entry.runpaths, implicit_runpaths))
                    entry.runpaths = frozensets.setdefault(
                        entry.runpaths, entry.runpaths)

        for owner, entry in ((owner, entry)
                             for (owner, entries) in owner_entries.items()
                             for entry in entries):
            arch = entry.multilib_category
            obj = entry.filename
            soname = entry.soname
            path = entry.runpaths
            needed = frozenset(entry.needed)

            needed = frozensets.setdefault(needed, needed)

            obj_key = self._obj_key(obj)
            indexed = True
            myprops = obj_properties.get(obj_key)
            if myprops is None:
                indexed = False
                myprops = self._obj_properties_class(arch, needed, path,
                                                     soname, [], owner)
                obj_properties[obj_key] = myprops
            # All object paths are added into the obj_properties tuple.
            myprops.alt_paths.append(obj)

            # Don't index the same file more than once since only one
            # set of data can be correct and therefore mixing data
            # may corrupt the index (include_file overrides previously
            # installed).
            if indexed:
                continue

            arch_map = libs.get(arch)
            if arch_map is None:
                arch_map = {}
                libs[arch] = arch_map
            if soname:
                soname_map = arch_map.get(soname)
                if soname_map is None:
                    soname_map = self._soname_map_class(providers=[],
                                                        consumers=[])
                    arch_map[soname] = soname_map
                soname_map.providers.append(obj_key)
            for needed_soname in needed:
                soname_map = arch_map.get(needed_soname)
                if soname_map is None:
                    soname_map = self._soname_map_class(providers=[],
                                                        consumers=[])
                    arch_map[needed_soname] = soname_map
                soname_map.consumers.append(obj_key)

        for arch, sonames in libs.items():
            for soname_node in sonames.values():
                soname_node.providers = tuple(set(soname_node.providers))
                soname_node.consumers = tuple(set(soname_node.consumers))
Example #33
def parse_layout_conf(repo_location, repo_name=None):
    eapi = read_corresponding_eapi_file(
        os.path.join(repo_location, REPO_NAME_LOC))

    layout_filename = os.path.join(repo_location, "metadata", "layout.conf")
    layout_file = KeyValuePairFileLoader(layout_filename, None, None)
    layout_data, layout_errors = layout_file.load()

    data = {}

    # None indicates absence of a masters setting, which later code uses
    # to trigger a backward compatibility fallback that sets an implicit
    # master. In order to avoid this fallback behavior, layout.conf can
    # explicitly set masters to an empty value, which will result in an
    # empty tuple here instead of None.
    masters = layout_data.get('masters')
    if masters is not None:
        masters = tuple(masters.split())
    data['masters'] = masters
    data['aliases'] = tuple(layout_data.get('aliases', '').split())

    data['allow-provide-virtual'] = \
     layout_data.get('allow-provide-virtuals', 'false').lower() == 'true'

    data['eapis-banned'] = tuple(layout_data.get('eapis-banned', '').split())
    data['eapis-deprecated'] = tuple(
        layout_data.get('eapis-deprecated', '').split())

    data['sign-commit'] = layout_data.get('sign-commits', 'false').lower() \
     == 'true'

    data['sign-manifest'] = layout_data.get('sign-manifests', 'true').lower() \
     == 'true'

    data['thin-manifest'] = layout_data.get('thin-manifests', 'false').lower() \
     == 'true'

    data['repo-name'] = _gen_valid_repo(layout_data.get('repo-name', ''))

    manifest_policy = layout_data.get('use-manifests', 'strict').lower()
    data['allow-missing-manifest'] = manifest_policy != 'strict'
    data['create-manifest'] = manifest_policy != 'false'
    data['disable-manifest'] = manifest_policy == 'false'

    # for compatibility w/ PMS, fallback to pms; but also check if the
    # cache exists or not.
    cache_formats = layout_data.get('cache-formats', '').lower().split()
    if not cache_formats:
        # Auto-detect cache formats, and prefer md5-cache if available.
        # This behavior was deployed in portage-2.1.11.14, so that the
        # default egencache format could eventually be changed to md5-dict
        # in portage-2.1.11.32. WARNING: Versions prior to portage-2.1.11.14
        # will NOT recognize md5-dict format unless it is explicitly
        # listed in layout.conf.
        cache_formats = []
        if os.path.isdir(os.path.join(repo_location, 'metadata', 'md5-cache')):
            cache_formats.append('md5-dict')
        if os.path.isdir(os.path.join(repo_location, 'metadata', 'cache')):
            cache_formats.append('pms')
    data['cache-formats'] = tuple(cache_formats)

    manifest_hashes = layout_data.get('manifest-hashes')
    if manifest_hashes is not None:
        manifest_hashes = frozenset(manifest_hashes.upper().split())
        if MANIFEST2_REQUIRED_HASH not in manifest_hashes:
            repo_name = _get_repo_name(repo_location, cached=repo_name)
            warnings.warn(
                (_("Repository named '%(repo_name)s' has a "
                   "'manifest-hashes' setting that does not contain "
                   "the '%(hash)s' hash which is required by this "
                   "portage version. You will have to upgrade portage "
                   "if you want to generate valid manifests for this "
                   "repository: %(layout_filename)s") % {
                       "repo_name": repo_name or 'unspecified',
                       "hash": MANIFEST2_REQUIRED_HASH,
                       "layout_filename": layout_filename
                   }), DeprecationWarning)
        unsupported_hashes = manifest_hashes.difference(
            MANIFEST2_HASH_FUNCTIONS)
        if unsupported_hashes:
            repo_name = _get_repo_name(repo_location, cached=repo_name)
            warnings.warn((
                _("Repository named '%(repo_name)s' has a "
                  "'manifest-hashes' setting that contains one "
                  "or more hash types '%(hashes)s' which are not supported by "
                  "this portage version. You will have to upgrade "
                  "portage if you want to generate valid manifests for "
                  "this repository: %(layout_filename)s") % {
                      "repo_name": repo_name or 'unspecified',
                      "hashes": " ".join(sorted(unsupported_hashes)),
                      "layout_filename": layout_filename
                  }), DeprecationWarning)
    data['manifest-hashes'] = manifest_hashes

    data['update-changelog'] = layout_data.get('update-changelog', 'false').lower() \
     == 'true'

    raw_formats = layout_data.get('profile-formats')
    if raw_formats is None:
        if eapi_allows_directories_on_profile_level_and_repository_level(eapi):
            raw_formats = ('portage-1', )
        else:
            raw_formats = ('portage-1-compat', )
    else:
        raw_formats = set(raw_formats.split())
        unknown = raw_formats.difference(_valid_profile_formats)
        if unknown:
            repo_name = _get_repo_name(repo_location, cached=repo_name)
            warnings.warn((_(
                "Repository named '%(repo_name)s' has unsupported "
                "profiles in use ('profile-formats = %(unknown_fmts)s' setting in "
                "'%(layout_filename)s; please upgrade portage.") %
                           dict(repo_name=repo_name or 'unspecified',
                                layout_filename=layout_filename,
                                unknown_fmts=" ".join(unknown))),
                          DeprecationWarning)
        raw_formats = tuple(raw_formats.intersection(_valid_profile_formats))
    data['profile-formats'] = raw_formats

    return data, layout_errors
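
A hedged usage sketch, assuming the function above is in scope; the repository location is hypothetical and the printed values are illustrative:

data, layout_errors = parse_layout_conf("/var/db/repos/gentoo")
print(data["masters"])          # e.g. () for a repository with "masters =" set empty
print(data["cache-formats"])    # e.g. ('md5-dict',)
print(data["profile-formats"])  # e.g. ('portage-2',)
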
def process(mysettings, key, logentries, fulltext):
    if mysettings.get("PORTAGE_LOGDIR"):
        logdir = normalize_path(mysettings["PORTAGE_LOGDIR"])
    else:
        logdir = os.path.join(os.sep, mysettings["EPREFIX"].lstrip(os.sep),
                              "var", "log", "portage")

    if not os.path.isdir(logdir):
        # Only initialize group/mode if the directory doesn't
        # exist, so that we don't override permissions if they
        # were previously set by the administrator.
        # NOTE: These permissions should be compatible with our
        # default logrotate config as discussed in bug 374287.
        logdir_uid = -1
        if portage.data.secpass >= 2:
            logdir_uid = portage_uid
        ensure_dirs(logdir, uid=logdir_uid, gid=portage_gid, mode=0o2770)

    elogdir = os.path.join(logdir, "elog")
    _ensure_log_subdirs(logdir, elogdir)

    # TODO: Locking
    elogfilename = elogdir + "/summary.log"
    try:
        elogfile = io.open(_unicode_encode(elogfilename,
                                           encoding=_encodings['fs'],
                                           errors='strict'),
                           mode='a',
                           encoding=_encodings['content'],
                           errors='backslashreplace')
    except IOError as e:
        func_call = "open('%s', 'a')" % elogfilename
        if e.errno == errno.EACCES:
            raise portage.exception.PermissionDenied(func_call)
        elif e.errno == errno.EPERM:
            raise portage.exception.OperationNotPermitted(func_call)
        elif e.errno == errno.EROFS:
            raise portage.exception.ReadOnlyFileSystem(func_call)
        else:
            raise

    # Copy group permission bits from parent directory.
    elogdir_st = os.stat(elogdir)
    elogdir_gid = elogdir_st.st_gid
    elogdir_grp_mode = 0o060 & elogdir_st.st_mode

    # Copy the uid from the parent directory if we have privileges
    # to do so, for compatibility with our default logrotate
    # config (see bug 378451). With the "su portage portage"
    # directive and logrotate-3.8.0, logrotate's chown call during
    # the compression phase will only succeed if the log file's uid
    # is portage_uid.
    logfile_uid = -1
    if portage.data.secpass >= 2:
        logfile_uid = elogdir_st.st_uid
    apply_permissions(elogfilename,
                      uid=logfile_uid,
                      gid=elogdir_gid,
                      mode=elogdir_grp_mode,
                      mask=0)

    time_fmt = "%Y-%m-%d %H:%M:%S %Z"
    if sys.hexversion < 0x3000000:
        time_fmt = _unicode_encode(time_fmt)
    time_str = time.strftime(time_fmt, time.localtime(time.time()))
    # Avoid potential UnicodeDecodeError in Python 2, since strftime
    # returns bytes in Python 2, and %Z may contain non-ascii chars.
    time_str = _unicode_decode(time_str,
                               encoding=_encodings['content'],
                               errors='replace')
    elogfile.write(
        _(">>> Messages generated by process "
          "%(pid)d on %(time)s for package %(pkg)s:\n\n") % {
              "pid": os.getpid(),
              "time": time_str,
              "pkg": key
          })
    elogfile.write(_unicode_decode(fulltext))
    elogfile.write("\n")
    elogfile.close()

    return elogfilename
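
Hypothetical usage sketch for process() above; the package key and log text are invented, and mysettings stands for an existing portage.config instance:

logfile = process(mysettings, 'app-misc/foo-1.0', logentries=None,
                  fulltext='LOG: postinst\neverything went fine\n')
print(logfile)  # e.g. /var/log/portage/elog/summary.log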
Example #35
    def _parse(paths, prepos, ignored_map, ignored_location_map, local_config,
               portdir):
        """Parse files in paths to load config"""
        parser = SafeConfigParser()

        # use read_file/readfp in order to control decoding of unicode
        try:
            # Python >=3.2
            read_file = parser.read_file
            source_kwarg = 'source'
        except AttributeError:
            read_file = parser.readfp
            source_kwarg = 'filename'

        recursive_paths = []
        for p in paths:
            if isinstance(p, basestring):
                recursive_paths.extend(_recursive_file_list(p))
            else:
                recursive_paths.append(p)

        for p in recursive_paths:
            if isinstance(p, basestring):
                f = None
                try:
                    f = io.open(_unicode_encode(p,
                                                encoding=_encodings['fs'],
                                                errors='strict'),
                                mode='r',
                                encoding=_encodings['repo.content'],
                                errors='replace')
                except EnvironmentError:
                    pass
                else:
                    # The 'source' keyword argument is needed since otherwise
                    # ConfigParser in Python <3.3.3 may throw a TypeError
                    # because it assumes that f.name is a native string rather
                    # than binary when constructing error messages.
                    kwargs = {source_kwarg: p}
                    read_file(f, **portage._native_kwargs(kwargs))
                finally:
                    if f is not None:
                        f.close()
            elif isinstance(p, io.StringIO):
                kwargs = {source_kwarg: "<io.StringIO>"}
                read_file(p, **portage._native_kwargs(kwargs))
            else:
                raise TypeError(
                    "Unsupported type %r of element %r of 'paths' argument" %
                    (type(p), p))

        prepos['DEFAULT'] = RepoConfig("DEFAULT",
                                       parser.defaults(),
                                       local_config=local_config)

        for sname in parser.sections():
            optdict = {}
            for oname in parser.options(sname):
                optdict[oname] = parser.get(sname, oname)

            repo = RepoConfig(sname, optdict, local_config=local_config)

            if repo.sync_type is not None and repo.sync_uri is None:
                writemsg_level("!!! %s\n" % _(
                    "Repository '%s' has sync-type attribute, but is missing sync-uri attribute"
                ) % sname,
                               level=logging.ERROR,
                               noiselevel=-1)
                continue

            if repo.sync_uri is not None and repo.sync_type is None:
                writemsg_level("!!! %s\n" % _(
                    "Repository '%s' has sync-uri attribute, but is missing sync-type attribute"
                ) % sname,
                               level=logging.ERROR,
                               noiselevel=-1)
                continue

            if repo.sync_type not in (None, "cvs", "git", "rsync"):
                writemsg_level("!!! %s\n" % _(
                    "Repository '%s' has sync-type attribute set to unsupported value: '%s'"
                ) % (sname, repo.sync_type),
                               level=logging.ERROR,
                               noiselevel=-1)
                continue

            if repo.sync_type == "cvs" and repo.sync_cvs_repo is None:
                writemsg_level("!!! %s\n" % _(
                    "Repository '%s' has sync-type=cvs, but is missing sync-cvs-repo attribute"
                ) % sname,
                               level=logging.ERROR,
                               noiselevel=-1)
                continue

            # For backward compatibility with locations set via PORTDIR and
            # PORTDIR_OVERLAY, delay validation of the location and repo.name
            # until after PORTDIR and PORTDIR_OVERLAY have been processed.
            prepos[sname] = repo
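
Sketch of the io.StringIO path through _parse() above; the section contents are invented. Note that a well-formed section needs matching sync-type and sync-uri attributes:

import io

cfg = io.StringIO(u"[gentoo]\n"
                  u"location = /var/db/repos/gentoo\n"
                  u"sync-type = rsync\n"
                  u"sync-uri = rsync://rsync.gentoo.org/gentoo-portage\n")
# _parse([cfg], prepos={}, ignored_map={}, ignored_location_map={},
#        local_config=True, portdir='')  # hypothetical call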
Example #36
def lchown(*pargs, **kwargs):
    writemsg(colorize("BAD", "!!!") +
             _(" It seems that os.lchown does not"
               " exist.  Please rebuild python.\n"),
             noiselevel=-1)
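
The stub above only makes sense as a fallback; a plausible wiring (an assumption, not necessarily portage's exact code) is:

import os

if hasattr(os, 'lchown'):
    lchown = os.lchown  # prefer the real call when the platform provides it
# otherwise the warning stub defined above remains bound to the name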
Example #37
def unmerge(root_config, myopts, unmerge_action,
	unmerge_files, ldpath_mtimes, autoclean=0,
	clean_world=1, clean_delay=1, ordered=0, raise_on_error=0,
	scheduler=None, writemsg_level=portage.util.writemsg_level):
	"""
	Returns os.EX_OK if no errors occur, 1 if an error occurs, and
	130 if interrupted due to a 'no' answer for --ask.
	"""

	if clean_world:
		clean_world = myopts.get('--deselect') != 'n'

	rval, pkgmap = _unmerge_display(root_config, myopts,
		unmerge_action, unmerge_files,
		clean_delay=clean_delay, ordered=ordered,
		writemsg_level=writemsg_level)

	if rval != os.EX_OK:
		return rval

	enter_invalid = '--ask-enter-invalid' in myopts
	vartree = root_config.trees["vartree"]
	sets = root_config.sets
	settings = root_config.settings
	mysettings = portage.config(clone=settings)
	xterm_titles = "notitles" not in settings.features

	if "--pretend" in myopts:
		#we're done... return
		return os.EX_OK
	if "--ask" in myopts:
		uq = UserQuery(myopts)
		if uq.query("Would you like to unmerge these packages?",
			enter_invalid) == "No":
			# enter pretend mode for correct formatting of results
			myopts["--pretend"] = True
			print()
			print("Quitting.")
			print()
			return 128 + signal.SIGINT

	if not vartree.dbapi.writable:
		writemsg_level("!!! %s\n" %
			_("Read-only file system: %s") % vartree.dbapi._dbroot,
			level=logging.ERROR, noiselevel=-1)
		return 1

	#The real unmerging begins, after a short delay unless we're rage-cleaning.
	if unmerge_action != "rage-clean" and clean_delay and not autoclean:
		countdown(int(settings["CLEAN_DELAY"]), ">>> Unmerging")

	all_selected = set()
	all_selected.update(*[x["selected"] for x in pkgmap])

	# Set counter variables
	curval = 1
	maxval = len(all_selected)

	for x in range(len(pkgmap)):
		for y in pkgmap[x]["selected"]:
			emergelog(xterm_titles, "=== Unmerging... ("+y+")")
			message = ">>> Unmerging ({0} of {1}) {2}...\n".format(
				colorize("MERGE_LIST_PROGRESS", str(curval)),
				colorize("MERGE_LIST_PROGRESS", str(maxval)),
				y)
			writemsg_level(message, noiselevel=-1)
			curval += 1

			mysplit = y.split("/")
			#unmerge...
			retval = portage.unmerge(mysplit[0], mysplit[1],
				settings=mysettings,
				vartree=vartree, ldpath_mtimes=ldpath_mtimes,
				scheduler=scheduler)

			if retval != os.EX_OK:
				emergelog(xterm_titles, " !!! unmerge FAILURE: "+y)
				if raise_on_error:
					raise UninstallFailure(retval)
				sys.exit(retval)
			else:
				if clean_world and hasattr(sets["selected"], "cleanPackage")\
						and hasattr(sets["selected"], "lock"):
					sets["selected"].lock()
					if hasattr(sets["selected"], "load"):
						sets["selected"].load()
					sets["selected"].cleanPackage(vartree.dbapi, y)
					sets["selected"].unlock()
				emergelog(xterm_titles, " >>> unmerge success: "+y)

	if clean_world and hasattr(sets["selected"], "remove")\
			and hasattr(sets["selected"], "lock"):
		sets["selected"].lock()
		# load is called inside remove()
		for s in root_config.setconfig.active:
			sets["selected"].remove(SETPREFIX + s)
		sets["selected"].unlock()

	return os.EX_OK
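
Sketch of interpreting the documented return values; the commented call is hypothetical and rval is stubbed so the snippet runs standalone:

import os, signal

# rval = unmerge(root_config, myopts, 'unmerge', ['app-misc/foo'],
#                ldpath_mtimes)  # hypothetical call
rval = os.EX_OK  # stand-in result for the demonstration
if rval == os.EX_OK:
    print('all selected packages were unmerged')
elif rval == 128 + signal.SIGINT:
    print("interrupted: user answered 'No' at the --ask prompt")
else:
    print('unmerge failed with %s' % rval)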
Example #38
    def _start(self):

        need_builddir = self.phase not in self._phases_without_builddir

        # This can happen if the pre-clean phase triggers
        # die_hooks for some reason, and PORTAGE_BUILDDIR
        # doesn't exist yet.
        if need_builddir and \
         not os.path.isdir(self.settings['PORTAGE_BUILDDIR']):
            msg = _("The ebuild phase '%s' has been aborted "
            "since PORTAGE_BUILDDIR does not exist: '%s'") % \
            (self.phase, self.settings['PORTAGE_BUILDDIR'])
            self._eerror(textwrap.wrap(msg, 72))
            self.returncode = 1
            self._async_wait()
            return

        # Check if the cgroup hierarchy is in place. If it's not, mount it.
        if (os.geteuid() == 0 and platform.system() == 'Linux'
                and 'cgroup' in self.settings.features
                and self.phase not in _global_pid_phases):
            cgroup_root = '/sys/fs/cgroup'
            cgroup_portage = os.path.join(cgroup_root, 'portage')

            try:
                # cgroup tmpfs
                if not os.path.ismount(cgroup_root):
                    # we expect /sys/fs to be there already
                    if not os.path.isdir(cgroup_root):
                        os.mkdir(cgroup_root, 0o755)
                    subprocess.check_call([
                        'mount', '-t', 'tmpfs', '-o',
                        'rw,nosuid,nodev,noexec,mode=0755', 'tmpfs',
                        cgroup_root
                    ])

                # portage subsystem
                if not os.path.ismount(cgroup_portage):
                    if not os.path.isdir(cgroup_portage):
                        os.mkdir(cgroup_portage, 0o755)
                    subprocess.check_call([
                        'mount', '-t', 'cgroup', '-o',
                        'rw,nosuid,nodev,noexec,none,name=portage', 'tmpfs',
                        cgroup_portage
                    ])
                    with open(os.path.join(cgroup_portage, 'release_agent'),
                              'w') as f:
                        f.write(
                            os.path.join(self.settings['PORTAGE_BIN_PATH'],
                                         'cgroup-release-agent'))
                    with open(
                            os.path.join(cgroup_portage, 'notify_on_release'),
                            'w') as f:
                        f.write('1')
                else:
                    # Update release_agent if it no longer exists, because
                    # it refers to a temporary path when portage is updating
                    # itself.
                    release_agent = os.path.join(cgroup_portage,
                                                 'release_agent')
                    try:
                        with open(release_agent) as f:
                            release_agent_path = f.readline().rstrip('\n')
                    except EnvironmentError:
                        release_agent_path = None

                    if (release_agent_path is None
                            or not os.path.exists(release_agent_path)):
                        with open(release_agent, 'w') as f:
                            f.write(
                                os.path.join(self.settings['PORTAGE_BIN_PATH'],
                                             'cgroup-release-agent'))

                cgroup_path = tempfile.mkdtemp(
                    dir=cgroup_portage,
                    prefix='%s:%s.' %
                    (self.settings["CATEGORY"], self.settings["PF"]))
            except (subprocess.CalledProcessError, OSError):
                pass
            else:
                self.cgroup = cgroup_path

        if self.background:
            # Automatically prevent color codes from showing up in logs,
            # since we're not displaying to a terminal anyway.
            self.settings['NOCOLOR'] = 'true'

        start_ipc_daemon = False
        if self._enable_ipc_daemon:
            self.settings.pop('PORTAGE_EBUILD_EXIT_FILE', None)
            if self.phase not in self._phases_without_builddir:
                start_ipc_daemon = True
                if 'PORTAGE_BUILDDIR_LOCKED' not in self.settings:
                    self._build_dir = EbuildBuildDir(scheduler=self.scheduler,
                                                     settings=self.settings)
                    self._start_future = self._build_dir.async_lock()
                    self._start_future.add_done_callback(
                        functools.partial(self._start_post_builddir_lock,
                                          start_ipc_daemon=start_ipc_daemon))
                    return
            else:
                self.settings.pop('PORTAGE_IPC_DAEMON', None)
        else:
            # Since the IPC daemon is disabled, use a simple tempfile based
            # approach to detect unexpected exit like in bug #190128.
            self.settings.pop('PORTAGE_IPC_DAEMON', None)
            if self.phase not in self._phases_without_builddir:
                exit_file = os.path.join(self.settings['PORTAGE_BUILDDIR'],
                                         '.exit_status')
                self.settings['PORTAGE_EBUILD_EXIT_FILE'] = exit_file
                try:
                    os.unlink(exit_file)
                except OSError:
                    if os.path.exists(exit_file):
                        # make sure it doesn't exist
                        raise
            else:
                self.settings.pop('PORTAGE_EBUILD_EXIT_FILE', None)

        self._start_post_builddir_lock(start_ipc_daemon=start_ipc_daemon)
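
Standalone sketch of the mount probing used in the cgroup branch above; it only inspects the paths from the code and performs no mounts:

import os

cgroup_root = '/sys/fs/cgroup'
cgroup_portage = os.path.join(cgroup_root, 'portage')
for path in (cgroup_root, cgroup_portage):
    state = 'mounted' if os.path.ismount(path) else 'not mounted'
    print('%s: %s' % (path, state))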
Example #39
def verify_all(filename, mydict, calc_prelink=0, strict=0):
    """
	Verify all checksums against a file.

	@param filename: File to run the checksums against
	@type filename: String
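	@param mydict: Dictionary of the expected file size and digests, keyed by 'size' and checksum names
	@type mydict: Dict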
	@param calc_prelink: Whether or not to reverse prelink before running the checksum
	@type calc_prelink: Integer
	@param strict: Enable/Disable strict checking (which stops exactly at a checksum failure and throws an exception)
	@type strict: Integer
	@rtype: Tuple
	@return: Result of the checks and possible message:
		1) If size fails, False, and a tuple containing a message, the given size, and the actual size
		2) If there is an os error, False, and a tuple containing the system error followed by 2 nulls
		3) If a checksum fails, False and a tuple containing a message, the given hash, and the actual hash
		4) If all checks succeed, return True and a fake reason
	"""
    # Dict relates to single file only.
    # returns: (passed,reason)
    file_is_ok = True
    reason = "Reason unknown"
    try:
        mysize = os.stat(filename)[stat.ST_SIZE]
        if mydict.get("size") is not None and mydict["size"] != mysize:
            return False, (_("Filesize does not match recorded size"), mysize,
                           mydict["size"])
    except OSError as e:
        if e.errno == errno.ENOENT:
            raise portage.exception.FileNotFound(filename)
        return False, (str(e), None, None)

    verifiable_hash_types = set(mydict).intersection(hashfunc_map)
    verifiable_hash_types.discard("size")
    if not verifiable_hash_types:
        expected = set(hashfunc_map)
        expected.discard("size")
        expected = list(expected)
        expected.sort()
        expected = " ".join(expected)
        got = set(mydict)
        got.discard("size")
        got = list(got)
        got.sort()
        got = " ".join(got)
        return False, (_("Insufficient data for checksum verification"), got,
                       expected)

    for x in sorted(mydict):
        if x == "size":
            continue
        elif x in hashfunc_map:
            myhash = perform_checksum(filename, x,
                                      calc_prelink=calc_prelink)[0]
            if mydict[x] != myhash:
                if strict:
                    raise portage.exception.DigestException(
                     ("Failed to verify '$(file)s' on " + \
                     "checksum type '%(type)s'") % \
                     {"file" : filename, "type" : x})
                else:
                    file_is_ok = False
                    reason = (("Failed on %s verification" % x), myhash,
                              mydict[x])
                    break

    return file_is_ok, reason
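
Hypothetical usage of verify_all(); the path, size and digest are invented, and 'SHA256' is assumed to be a key in hashfunc_map:

mydict = {
    'size': 1024,
    'SHA256': '9f86d081884c7d659a2feaa0c55ad015'
              'a3bf4f1b2b0b822cd15d6c15b0f00a08',
}
# raises portage.exception.FileNotFound if the path does not exist
ok, reason = verify_all('/usr/portage/distfiles/foo-1.0.tar.gz', mydict)
if not ok:
    print('verification failed: %s (got %s, expected %s)' % reason)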
Example #40
    def _read_dirVirtuals(self, profiles):
        """
        Read the 'virtuals' file in all profiles.
        """
        virtuals_list = []
        for x in profiles:
            virtuals_file = os.path.join(x, "virtuals")
            virtuals_dict = grabdict(virtuals_file)
            atoms_dict = {}
            for k, v in virtuals_dict.items():
                try:
                    virt_atom = Atom(k)
                except InvalidAtom:
                    virt_atom = None
                else:
                    if virt_atom.blocker or str(virt_atom) != str(
                            virt_atom.cp):
                        virt_atom = None
                if virt_atom is None:
                    writemsg(
                        _("--- Invalid virtuals atom in %s: %s\n") %
                        (virtuals_file, k),
                        noiselevel=-1,
                    )
                    continue
                providers = []
                for atom in v:
                    atom_orig = atom
                    if atom[:1] == "-":
                        # allow incrementals
                        atom = atom[1:]
                    try:
                        atom = Atom(atom)
                    except InvalidAtom:
                        atom = None
                    else:
                        if atom.blocker:
                            atom = None
                    if atom is None:
                        writemsg(
                            _("--- Invalid atom in %s: %s\n") %
                            (virtuals_file, atom_orig),
                            noiselevel=-1,
                        )
                    else:
                        if atom_orig == str(atom):
                            # normal atom, so return as Atom instance
                            providers.append(atom)
                        else:
                            # atom has special prefix, so return as string
                            providers.append(atom_orig)
                if providers:
                    atoms_dict[virt_atom] = providers
            if atoms_dict:
                virtuals_list.append(atoms_dict)

        self._dirVirtuals = stack_dictlist(virtuals_list, incremental=True)

        for virt in self._dirVirtuals:
            # Preference for virtuals decreases from left to right.
            self._dirVirtuals[virt].reverse()
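
For reference, a profile 'virtuals' file as parsed by grabdict() above maps one virtual atom per line to a whitespace-separated provider list; '-'-prefixed providers are incremental removals and are kept as plain strings (entries invented):

virtual/editor  app-editors/nano app-editors/vim
virtual/pager   sys-apps/less -app-misc/most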
Example #41
def parse_updates(mycontent):
    """Valid updates are returned as a list of split update commands."""
    eapi_attrs = _get_eapi_attrs(None)
    slot_re = _get_slot_re(eapi_attrs)
    myupd = []
    errors = []
    mylines = mycontent.splitlines()
    for myline in mylines:
        mysplit = myline.split()
        if len(mysplit) == 0:
            continue
        if mysplit[0] not in ("move", "slotmove"):
            errors.append(_("ERROR: Update type not recognized '%s'") % myline)
            continue
        if mysplit[0] == "move":
            if len(mysplit) != 3:
                errors.append(_("ERROR: Update command invalid '%s'") % myline)
                continue
            valid = True
            for i in (1, 2):
                try:
                    atom = Atom(mysplit[i])
                except InvalidAtom:
                    atom = None
                else:
                    if atom.blocker or atom != atom.cp:
                        atom = None
                if atom is not None:
                    mysplit[i] = atom
                else:
                    errors.append(
                        _("ERROR: Malformed update entry '%s'") % myline)
                    valid = False
                    break
            if not valid:
                continue

        if mysplit[0] == "slotmove":
            if len(mysplit) != 4:
                errors.append(_("ERROR: Update command invalid '%s'") % myline)
                continue
            pkg, origslot, newslot = mysplit[1], mysplit[2], mysplit[3]
            try:
                atom = Atom(pkg)
            except InvalidAtom:
                atom = None
            else:
                if atom.blocker:
                    atom = None
            if atom is not None:
                mysplit[1] = atom
            else:
                errors.append(_("ERROR: Malformed update entry '%s'") % myline)
                continue

            invalid_slot = False
            for slot in (origslot, newslot):
                m = slot_re.match(slot)
                if m is None:
                    invalid_slot = True
                    break
                if "/" in slot:
                    # EAPI 4-slot-abi style SLOT is currently not supported.
                    invalid_slot = True
                    break

            if invalid_slot:
                errors.append(_("ERROR: Malformed update entry '%s'") % myline)
                continue

        # The list of valid updates is filtered by continue statements above.
        myupd.append(mysplit)
    return myupd, errors
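
Hypothetical usage; the update commands are invented, and the third line demonstrates the error path:

content = ("move app-misc/foo app-misc/bar\n"
           "slotmove >=app-misc/baz-2 0 2\n"
           "frobnicate app-misc/qux\n")
updates, errors = parse_updates(content)
print(len(updates))  # 2 valid, split update commands
print(errors)        # ["ERROR: Update type not recognized 'frobnicate app-misc/qux'"]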
Example #42
def update_config_files(config_root,
                        protect,
                        protect_mask,
                        update_iter,
                        match_callback=None,
                        case_insensitive=False):
    """Perform global updates on /etc/portage/package.*, /etc/portage/profile/package.*,
	/etc/portage/profile/packages and /etc/portage/sets.
	config_root - location of files to update
	protect - list of paths from CONFIG_PROTECT
	protect_mask - list of paths from CONFIG_PROTECT_MASK
	update_iter - list of update commands as returned from parse_updates(),
		or dict of {repo_name: list}
	match_callback - a callback which will be called with three arguments:
		match_callback(repo_name, old_atom, new_atom)
	and should return boolean value determining whether to perform the update"""

    repo_dict = None
    if isinstance(update_iter, dict):
        repo_dict = update_iter
    if match_callback is None:

        def match_callback(repo_name, atoma, atomb):
            return True

    config_root = normalize_path(config_root)
    update_files = {}
    file_contents = {}
    myxfiles = [
        "package.accept_keywords", "package.env", "package.keywords",
        "package.license", "package.mask", "package.properties",
        "package.unmask", "package.use", "sets"
    ]
    myxfiles += [
        os.path.join("profile", x)
        for x in ("packages", "package.accept_keywords", "package.keywords",
                  "package.mask", "package.unmask", "package.use",
                  "package.use.force", "package.use.mask",
                  "package.use.stable.force", "package.use.stable.mask")
    ]
    abs_user_config = os.path.join(config_root, USER_CONFIG_PATH)
    recursivefiles = []
    for x in myxfiles:
        config_file = os.path.join(abs_user_config, x)
        if os.path.isdir(config_file):
            for parent, dirs, files in os.walk(config_file):
                try:
                    parent = _unicode_decode(parent,
                                             encoding=_encodings['fs'],
                                             errors='strict')
                except UnicodeDecodeError:
                    continue
                for y_enc in list(dirs):
                    try:
                        y = _unicode_decode(y_enc,
                                            encoding=_encodings['fs'],
                                            errors='strict')
                    except UnicodeDecodeError:
                        dirs.remove(y_enc)
                        continue
                    if y.startswith(".") or y in VCS_DIRS:
                        dirs.remove(y_enc)
                for y in files:
                    try:
                        y = _unicode_decode(y,
                                            encoding=_encodings['fs'],
                                            errors='strict')
                    except UnicodeDecodeError:
                        continue
                    if y.startswith("."):
                        continue
                    recursivefiles.append(
                        os.path.join(parent, y)[len(abs_user_config) + 1:])
        else:
            recursivefiles.append(x)
    myxfiles = recursivefiles
    for x in myxfiles:
        f = None
        try:
            f = io.open(_unicode_encode(os.path.join(abs_user_config, x),
                                        encoding=_encodings['fs'],
                                        errors='strict'),
                        mode='r',
                        encoding=_encodings['content'],
                        errors='replace')
            file_contents[x] = f.readlines()
        except IOError:
            continue
        finally:
            if f is not None:
                f.close()

    ignore_line_re = re.compile(r'^#|^\s*$')
    if repo_dict is None:
        update_items = [(None, update_iter)]
    else:
        update_items = [x for x in repo_dict.items() if x[0] != 'DEFAULT']
    for repo_name, update_iter in update_items:
        for update_cmd in update_iter:
            for x, contents in file_contents.items():
                skip_next = False
                for pos, line in enumerate(contents):
                    if skip_next:
                        skip_next = False
                        continue
                    if ignore_line_re.match(line):
                        continue
                    atom = line.split()[0]
                    if atom[:1] == "-":
                        # package.mask supports incrementals
                        atom = atom[1:]
                    if atom[:1] == "*":
                        # packages file supports "*"-prefixed atoms as indication of system packages.
                        atom = atom[1:]
                    if not isvalidatom(atom):
                        continue
                    new_atom = update_dbentry(update_cmd, atom)
                    if atom != new_atom:
                        if match_callback(repo_name, atom, new_atom):
                            # add a comment with the update command, so
                            # the user can clearly see what happened
                            contents[pos] = "# %s\n" % \
                             " ".join("%s" % (x,) for x in update_cmd)
                            contents.insert(
                                pos + 1,
                                line.replace("%s" % (atom, ),
                                             "%s" % (new_atom, ), 1))
                            # we've inserted an additional line, so we need to
                            # skip it when it's reached in the next iteration
                            skip_next = True
                            update_files[x] = 1
                            sys.stdout.write("p")
                            sys.stdout.flush()

    protect_obj = ConfigProtect(config_root,
                                protect,
                                protect_mask,
                                case_insensitive=case_insensitive)
    for x in update_files:
        updating_file = os.path.join(abs_user_config, x)
        if protect_obj.isprotected(updating_file):
            updating_file = new_protect_filename(updating_file)
        try:
            write_atomic(updating_file, "".join(file_contents[x]))
        except PortageException as e:
            writemsg("\n!!! %s\n" % str(e), noiselevel=-1)
            writemsg(_("!!! An error occurred while updating a config file:") + \
             " '%s'\n" % updating_file, noiselevel=-1)
            continue
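
Sketch of a custom match_callback for update_config_files(); returning False vetoes an individual rewrite. The call itself is commented out since it needs real CONFIG_PROTECT lists:

def log_and_accept(repo_name, old_atom, new_atom):
    print('%s: %s -> %s' % (repo_name or 'default', old_atom, new_atom))
    return True

# update_config_files('/', protect, protect_mask, update_iter,
#                     match_callback=log_and_accept)  # hypothetical call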
Example #43
def create_conn(baseurl, conn=None):
    """(baseurl,conn) --- Takes a protocol://site:port/address url, and an
	optional connection. If connection is already active, it is passed on.
	baseurl is reduced to address and is returned in tuple (conn,address)"""

    parts = baseurl.split("://", 1)
    if len(parts) != 2:
        raise ValueError(
            _("Provided URI does not "
              "contain protocol identifier. '%s'") % baseurl)
    protocol, url_parts = parts
    del parts

    url_parts = url_parts.split("/")
    host = url_parts[0]
    if len(url_parts) < 2:
        address = "/"
    else:
        address = "/" + "/".join(url_parts[1:])
    del url_parts

    userpass_host = host.split("@", 1)
    if len(userpass_host) == 1:
        host = userpass_host[0]
        userpass = ["anonymous"]
    else:
        host = userpass_host[1]
        userpass = userpass_host[0].split(":")
    del userpass_host

    if len(userpass) > 2:
        raise ValueError(_("Unable to interpret username/password provided."))
    elif len(userpass) == 2:
        username = userpass[0]
        password = userpass[1]
    elif len(userpass) == 1:
        username = userpass[0]
        password = None
    del userpass

    http_headers = {}
    http_params = {}
    if username and password:
        try:
            encodebytes = base64.encodebytes
        except AttributeError:
            # Python 2
            encodebytes = base64.encodestring
        http_headers = {
         b"Authorization": "Basic %s" % \
         encodebytes(_unicode_encode("%s:%s" % (username, password))).replace(
             b"\012",
             b""
           ),
        }

    if not conn:
        if protocol == "https":
            # Use local import since https typically isn't needed, and
            # this way we can usually avoid triggering the global scope
            # http.client ImportError handler (like during stage1 -> stage2
            # builds where USE=ssl is disabled for python).
            try:
                try:
                    from http.client import HTTPSConnection as http_client_HTTPSConnection
                except ImportError:
                    from httplib import HTTPSConnection as http_client_HTTPSConnection
            except ImportError:
                raise NotImplementedError(
                    _("python must have ssl enabled for https support"))
            conn = http_client_HTTPSConnection(host)
        elif protocol == "http":
            conn = http_client_HTTPConnection(host)
        elif protocol == "ftp":
            passive = 1
            if (host[-1] == "*"):
                passive = 0
                host = host[:-1]
            conn = ftplib.FTP(host)
            if password:
                conn.login(username, password)
            else:
                sys.stderr.write(colorize("WARN",
                 _(" * No password provided for username"))+" '%s'" % \
                 (username,) + "\n\n")
                conn.login(username)
            conn.set_pasv(passive)
            conn.set_debuglevel(0)
        elif protocol == "sftp":
            try:
                import paramiko
            except ImportError:
                raise NotImplementedError(
                    _("paramiko must be installed for sftp support"))
            t = paramiko.Transport(host)
            t.connect(username=username, password=password)
            conn = paramiko.SFTPClient.from_transport(t)
        else:
            raise NotImplementedError(
                _("%s is not a supported protocol.") % protocol)

    return (conn, protocol, address, http_params, http_headers)
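
Usage sketch matching the return tuple above; the host and credentials are invented, and the call is commented out because it would open a real connection:

# conn, protocol, address, http_params, http_headers = \
#     create_conn('ftp://user:secret@ftp.example.org/pub/distfiles')
# print(protocol, address)  # -> ftp /pub/distfiles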
Example #44
    def __init__(self,
                 repositories,
                 profiles,
                 abs_user_config,
                 user_config=True,
                 strict_umatched_removal=False):
        self._punmaskdict = ExtendedAtomDict(list)
        self._pmaskdict = ExtendedAtomDict(list)
        # Preserves atoms that are eliminated by negative
        # incrementals in user_pkgmasklines.
        self._pmaskdict_raw = ExtendedAtomDict(list)

        #Read profile/package.mask from every repo.
        #Repositories inherit masks from their parent profiles and
        #are able to remove mask from them with -atoms.
        #Such a removal affects only the current repo, but not the parent.
        #Add ::repo specs to every atom to make sure atoms only affect
        #packages from the current repo.

        # Cache the repository-wide package.mask files as a particular
        # repo may be often referenced by others as the master.
        pmask_cache = {}

        def grab_pmask(loc, repo_config):
            if loc not in pmask_cache:
                path = os.path.join(loc, 'profiles', 'package.mask')
                pmask_cache[loc] = grabfile_package(
                    path,
                    recursive=repo_config.portage1_profiles,
                    remember_source_file=True,
                    verify_eapi=True,
                    eapi_default=repo_config.eapi,
                    allow_build_id=("build-id" in repo_config.profile_formats))
                if repo_config.portage1_profiles_compat and os.path.isdir(
                        path):
                    warnings.warn(
                        _("Repository '%(repo_name)s' is implicitly using "
                          "'portage-1' profile format in its profiles/package.mask, but "
                          "the repository profiles are not marked as that format.  This will break "
                          "in the future.  Please either convert the following paths "
                          "to files, or add\nprofile-formats = portage-1\nto the "
                          "repository's layout.conf.\n") %
                        dict(repo_name=repo_config.name))

            return pmask_cache[loc]

        repo_pkgmasklines = []
        for repo in repositories.repos_with_profiles():
            lines = []
            repo_lines = grab_pmask(repo.location, repo)
            removals = frozenset(line[0][1:] for line in repo_lines
                                 if line[0][:1] == "-")
            matched_removals = set()
            for master in repo.masters:
                master_lines = grab_pmask(master.location, master)
                for line in master_lines:
                    if line[0] in removals:
                        matched_removals.add(line[0])
                # Since we don't stack masters recursively, there aren't any
                # atoms earlier in the stack to be matched by negative atoms in
                # master_lines. Also, repo_lines may contain negative atoms
                # that are intended to negate atoms from a different master
                # than the one with which we are currently stacking. Therefore,
                # we disable warn_for_unmatched_removal here (see bug #386569).
                lines.append(
                    stack_lists([master_lines, repo_lines],
                                incremental=1,
                                remember_source_file=True,
                                warn_for_unmatched_removal=False))

            # It's safe to warn for unmatched removal if masters have not
            # been overridden by the user, which is guaranteed when
            # user_config is false (when called by repoman).
            if repo.masters:
                unmatched_removals = removals.difference(matched_removals)
                if unmatched_removals and not user_config:
                    source_file = os.path.join(repo.location, "profiles",
                                               "package.mask")
                    unmatched_removals = list(unmatched_removals)
                    if len(unmatched_removals) > 3:
                        writemsg(_(
                            "--- Unmatched removal atoms in %s: %s and %s more\n"
                        ) % (source_file, ", ".join(
                            "-" + x for x in unmatched_removals[:3]),
                             len(unmatched_removals) - 3),
                                 noiselevel=-1)
                    else:
                        writemsg(
                            _("--- Unmatched removal atom(s) in %s: %s\n") %
                            (source_file, ", ".join(
                                "-" + x for x in unmatched_removals)),
                            noiselevel=-1)

            else:
                lines.append(
                    stack_lists([repo_lines],
                                incremental=1,
                                remember_source_file=True,
                                warn_for_unmatched_removal=not user_config,
                                strict_warn_for_unmatched_removal=
                                strict_umatched_removal))
            repo_pkgmasklines.extend(
                append_repo(stack_lists(lines),
                            repo.name,
                            remember_source_file=True))

        repo_pkgunmasklines = []
        for repo in repositories.repos_with_profiles():
            if not repo.portage1_profiles:
                continue
            repo_lines = grabfile_package(os.path.join(repo.location, "profiles", "package.unmask"), \
             recursive=1, remember_source_file=True,
             verify_eapi=True, eapi_default=repo.eapi,
             allow_build_id=("build-id" in repo.profile_formats))
            lines = stack_lists([repo_lines], incremental=1, \
             remember_source_file=True, warn_for_unmatched_removal=True,
             strict_warn_for_unmatched_removal=strict_umatched_removal)
            repo_pkgunmasklines.extend(
                append_repo(lines, repo.name, remember_source_file=True))

        #Read package.mask from the user's profile. Stack them in the end
        #to allow profiles to override masks from their parent profiles.
        profile_pkgmasklines = []
        profile_pkgunmasklines = []
        for x in profiles:
            profile_pkgmasklines.append(
                grabfile_package(os.path.join(x.location, "package.mask"),
                                 recursive=x.portage1_directories,
                                 remember_source_file=True,
                                 verify_eapi=True,
                                 eapi=x.eapi,
                                 eapi_default=None,
                                 allow_build_id=x.allow_build_id))
            if x.portage1_directories:
                profile_pkgunmasklines.append(
                    grabfile_package(os.path.join(x.location,
                                                  "package.unmask"),
                                     recursive=x.portage1_directories,
                                     remember_source_file=True,
                                     verify_eapi=True,
                                     eapi=x.eapi,
                                     eapi_default=None,
                                     allow_build_id=x.allow_build_id))
        profile_pkgmasklines = stack_lists(profile_pkgmasklines, incremental=1, \
         remember_source_file=True, warn_for_unmatched_removal=True,
         strict_warn_for_unmatched_removal=strict_umatched_removal)
        profile_pkgunmasklines = stack_lists(profile_pkgunmasklines, incremental=1, \
         remember_source_file=True, warn_for_unmatched_removal=True,
         strict_warn_for_unmatched_removal=strict_umatched_removal)

        #Read /etc/portage/package.mask. Don't stack it to allow the user to
        #remove mask atoms from everywhere with -atoms.
        user_pkgmasklines = []
        user_pkgunmasklines = []
        if user_config:
            user_pkgmasklines = grabfile_package(
             os.path.join(abs_user_config, "package.mask"), recursive=1, \
             allow_wildcard=True, allow_repo=True,
             remember_source_file=True, verify_eapi=False,
             allow_build_id=True)
            user_pkgunmasklines = grabfile_package(
             os.path.join(abs_user_config, "package.unmask"), recursive=1, \
             allow_wildcard=True, allow_repo=True,
             remember_source_file=True, verify_eapi=False,
             allow_build_id=True)

        #Stack everything together. At this point, only user_pkgmasklines may contain -atoms.
        #Don't warn for unmatched -atoms here, since we don't do it for any other user config file.
        raw_pkgmasklines = stack_lists([repo_pkgmasklines, profile_pkgmasklines], \
         incremental=1, remember_source_file=True, warn_for_unmatched_removal=False, ignore_repo=True)
        pkgmasklines = stack_lists([repo_pkgmasklines, profile_pkgmasklines, user_pkgmasklines], \
         incremental=1, remember_source_file=True, warn_for_unmatched_removal=False, ignore_repo=True)
        pkgunmasklines = stack_lists([repo_pkgunmasklines, profile_pkgunmasklines, user_pkgunmasklines], \
         incremental=1, remember_source_file=True, warn_for_unmatched_removal=False, ignore_repo=True)

        for x, source_file in raw_pkgmasklines:
            self._pmaskdict_raw.setdefault(x.cp, []).append(x)

        for x, source_file in pkgmasklines:
            self._pmaskdict.setdefault(x.cp, []).append(x)

        for x, source_file in pkgunmasklines:
            self._punmaskdict.setdefault(x.cp, []).append(x)

        for d in (self._pmaskdict_raw, self._pmaskdict, self._punmaskdict):
            for k, v in d.items():
                d[k] = tuple(v)
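
Illustration of the stacking semantics handled above (atoms invented): a repository-level mask can be dropped again by a matching '-' atom in the user's /etc/portage/package.mask:

# profiles/package.mask in a repository
>=app-misc/foo-2.0

# /etc/portage/package.mask (user config); removes the inherited mask
->=app-misc/foo-2.0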
Example #45
def _get_global(k):
    if k in _initialized_globals:
        return globals()[k]

    if k == 'secpass':

        unprivileged = False
        if hasattr(portage, 'settings'):
            unprivileged = "unprivileged" in portage.settings.features
        else:
            # The config class has equivalent code, but we also need to
            # do it here if _disable_legacy_globals() has been called.
            eroot_or_parent = first_existing(
                os.path.join(_target_root(),
                             _target_eprefix().lstrip(os.sep)))
            try:
                eroot_st = os.stat(eroot_or_parent)
            except OSError:
                pass
            else:
                unprivileged = _unprivileged_mode(eroot_or_parent, eroot_st)

        v = 0
        if uid == 0:
            v = 2
        elif unprivileged:
            v = 2
        elif portage_gid in os.getgroups():
            v = 1

    elif k in ('portage_gid', 'portage_uid'):

        #Discover the uid and gid of the portage user/group
        keyerror = False
        try:
            portage_uid = pwd.getpwnam(_get_global('_portage_username')).pw_uid
        except KeyError:
            keyerror = True
            portage_uid = 0

        try:
            portage_gid = grp.getgrnam(_get_global('_portage_grpname')).gr_gid
        except KeyError:
            keyerror = True
            portage_gid = 0

        # Suppress this error message if both PORTAGE_GRPNAME and
        # PORTAGE_USERNAME are set to "root", for things like
        # Android (see bug #454060).
        if keyerror and not (_get_global('_portage_username') == "root"
                             and _get_global('_portage_grpname') == "root"):
            writemsg(colorize(
                "BAD", _("portage: 'portage' user or group missing.")) + "\n",
                     noiselevel=-1)
            writemsg(_("         For the defaults, line 1 goes into passwd, "
                       "and 2 into group.\n"),
                     noiselevel=-1)
            writemsg(colorize("GOOD",
             "         portage:x:250:250:portage:/var/tmp/portage:/bin/false") \
             + "\n", noiselevel=-1)
            writemsg(colorize("GOOD", "         portage::250:portage") + "\n",
                     noiselevel=-1)
            portage_group_warning()

        globals()['portage_gid'] = portage_gid
        _initialized_globals.add('portage_gid')
        globals()['portage_uid'] = portage_uid
        _initialized_globals.add('portage_uid')

        if k == 'portage_gid':
            return portage_gid
        elif k == 'portage_uid':
            return portage_uid
        else:
            raise AssertionError('unknown name: %s' % k)

    elif k == 'userpriv_groups':
        v = [_get_global('portage_gid')]
        if secpass >= 2:
            # Get a list of group IDs for the portage user. Do not use
            # grp.getgrall() since it is known to trigger spurious
            # SIGPIPE problems with nss_ldap.
            cmd = ["id", "-G", _portage_username]

            if sys.hexversion < 0x3020000 and sys.hexversion >= 0x3000000:
                # Python 3.1 _execvp throws TypeError for non-absolute executable
                # path passed as bytes (see http://bugs.python.org/issue8513).
                fullname = portage.process.find_binary(cmd[0])
                if fullname is None:
                    globals()[k] = v
                    _initialized_globals.add(k)
                    return v
                cmd[0] = fullname

            encoding = portage._encodings['content']
            cmd = [
                portage._unicode_encode(x, encoding=encoding, errors='strict')
                for x in cmd
            ]
            proc = subprocess.Popen(cmd,
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.STDOUT)
            myoutput = proc.communicate()[0]
            status = proc.wait()
            if os.WIFEXITED(status) and os.WEXITSTATUS(status) == os.EX_OK:
                for x in portage._unicode_decode(myoutput,
                                                 encoding=encoding,
                                                 errors='strict').split():
                    try:
                        v.append(int(x))
                    except ValueError:
                        pass
                v = sorted(set(v))

    # Avoid instantiating portage.settings when the desired
    # variable is set in os.environ.
    elif k in ('_portage_grpname', '_portage_username'):
        v = None
        if k == '_portage_grpname':
            env_key = 'PORTAGE_GRPNAME'
        else:
            env_key = 'PORTAGE_USERNAME'

        if env_key in os.environ:
            v = os.environ[env_key]
        elif hasattr(portage, 'settings'):
            v = portage.settings.get(env_key)
        else:
            # The config class has equivalent code, but we also need to
            # do it here if _disable_legacy_globals() has been called.
            eroot_or_parent = first_existing(
                os.path.join(_target_root(),
                             _target_eprefix().lstrip(os.sep)))
            try:
                eroot_st = os.stat(eroot_or_parent)
            except OSError:
                pass
            else:
                if _unprivileged_mode(eroot_or_parent, eroot_st):
                    if k == '_portage_grpname':
                        try:
                            grp_struct = grp.getgrgid(eroot_st.st_gid)
                        except KeyError:
                            pass
                        else:
                            v = grp_struct.gr_name
                    else:
                        try:
                            pwd_struct = pwd.getpwuid(eroot_st.st_uid)
                        except KeyError:
                            pass
                        else:
                            v = pwd_struct.pw_name

        if v is None:
            v = 'portage'
    else:
        raise AssertionError('unknown name: %s' % k)

    globals()[k] = v
    _initialized_globals.add(k)
    return v
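
Usage sketch: each value is computed on first access and then served from module globals:

uid_of_portage = _get_global('portage_uid')
gid_of_portage = _get_global('portage_gid')
assert 'portage_uid' in _initialized_globals  # cached after first lookup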
Example #46
def movefile(src,
             dest,
             newmtime=None,
             sstat=None,
             mysettings=None,
             hardlink_candidates=None,
             encoding=_encodings['fs']):
    """moves a file from src to dest, preserving all permissions and attributes; mtime will
	be preserved even when moving across filesystems.  Returns mtime as integer on success
	and None on failure.  mtime is expressed in seconds in Python <3.3 and nanoseconds in
	Python >=3.3.  Move is atomic."""

    if mysettings is None:
        mysettings = portage.settings

    src_bytes = _unicode_encode(src, encoding=encoding, errors='strict')
    dest_bytes = _unicode_encode(dest, encoding=encoding, errors='strict')
    xattr_enabled = "xattr" in mysettings.features
    selinux_enabled = mysettings.selinux_enabled()
    if selinux_enabled:
        selinux = _unicode_module_wrapper(_selinux, encoding=encoding)
        _copyfile = selinux.copyfile
        _rename = selinux.rename
    else:
        _copyfile = copyfile
        _rename = _os.rename

    lchown = _unicode_func_wrapper(portage.data.lchown, encoding=encoding)
    os = _unicode_module_wrapper(_os,
                                 encoding=encoding,
                                 overrides=_os_overrides)

    try:
        if not sstat:
            sstat = os.lstat(src)

    except SystemExit as e:
        raise
    except Exception as e:
        writemsg("!!! %s\n" % _("Stating source file failed... movefile()"),
                 noiselevel=-1)
        writemsg("!!! %s\n" % (e, ), noiselevel=-1)
        return None

    destexists = 1
    try:
        dstat = os.lstat(dest)
    except (OSError, IOError):
        dstat = os.lstat(os.path.dirname(dest))
        destexists = 0

    if bsd_chflags:
        if destexists and dstat.st_flags != 0:
            bsd_chflags.lchflags(dest, 0)
        # Use normal stat/chflags for the parent since we want to
        # follow any symlinks to the real parent directory.
        pflags = os.stat(os.path.dirname(dest)).st_flags
        if pflags != 0:
            bsd_chflags.chflags(os.path.dirname(dest), 0)

    if destexists:
        if stat.S_ISLNK(dstat[stat.ST_MODE]):
            try:
                os.unlink(dest)
                destexists = 0
            except SystemExit as e:
                raise
            except Exception as e:
                pass

    if stat.S_ISLNK(sstat[stat.ST_MODE]):
        try:
            target = os.readlink(src)
            if mysettings and "D" in mysettings and \
             target.startswith(mysettings["D"]):
                target = target[len(mysettings["D"]) - 1:]
            if destexists and not stat.S_ISDIR(dstat[stat.ST_MODE]):
                os.unlink(dest)
            try:
                if selinux_enabled:
                    selinux.symlink(target, dest, src)
                else:
                    os.symlink(target, dest)
            except OSError as e:
                # Some programs will create symlinks automatically, so we have
                # to tolerate these links being recreated during the merge
                # process. In any case, if the link is pointing at the right
                # place, we're in good shape.
                if e.errno not in (errno.ENOENT, errno.EEXIST) or \
                 target != os.readlink(dest):
                    raise
            lchown(dest, sstat[stat.ST_UID], sstat[stat.ST_GID])

            try:
                _os.unlink(src_bytes)
            except OSError:
                pass

            try:
                os.utime(dest,
                         ns=(sstat.st_mtime_ns, sstat.st_mtime_ns),
                         follow_symlinks=False)
            except NotImplementedError:
                # utimensat() and lutimes() missing in libc.
                return os.stat(dest, follow_symlinks=False).st_mtime_ns
            else:
                return sstat.st_mtime_ns
        except SystemExit as e:
            raise
        except Exception as e:
            writemsg("!!! %s\n" % _("failed to properly create symlink:"),
                     noiselevel=-1)
            writemsg("!!! %s -> %s\n" % (dest, target), noiselevel=-1)
            writemsg("!!! %s\n" % (e, ), noiselevel=-1)
            return None

    hardlinked = False
    # Since identical files might be merged to multiple filesystems,
    # os.link() calls might fail for some paths, so try them all.
    # For atomic replacement, first create the link as a temp file
    # and then use os.rename() to replace the destination.
    if hardlink_candidates:
        head, tail = os.path.split(dest)
        hardlink_tmp = os.path.join(head, ".%s._portage_merge_.%s" % \
         (tail, portage.getpid()))
        try:
            os.unlink(hardlink_tmp)
        except OSError as e:
            if e.errno != errno.ENOENT:
                writemsg(_("!!! Failed to remove hardlink temp file: %s\n") % \
                 (hardlink_tmp,), noiselevel=-1)
                writemsg("!!! %s\n" % (e, ), noiselevel=-1)
                return None
            del e
        for hardlink_src in hardlink_candidates:
            try:
                os.link(hardlink_src, hardlink_tmp)
            except OSError:
                continue
            else:
                try:
                    os.rename(hardlink_tmp, dest)
                except OSError as e:
                    writemsg(_("!!! Failed to rename %s to %s\n") % \
                     (hardlink_tmp, dest), noiselevel=-1)
                    writemsg("!!! %s\n" % (e, ), noiselevel=-1)
                    return None
                hardlinked = True
                try:
                    _os.unlink(src_bytes)
                except OSError:
                    pass
                break

    renamefailed = 1
    if hardlinked:
        renamefailed = False
    if not hardlinked and (selinux_enabled or sstat.st_dev == dstat.st_dev):
        try:
            if selinux_enabled:
                selinux.rename(src, dest)
            else:
                os.rename(src, dest)
            renamefailed = 0
        except OSError as e:
            if e.errno != errno.EXDEV:
                # Some random error.
                writemsg("!!! %s\n" % _("Failed to move %(src)s to %(dest)s") %
                         {
                             "src": src,
                             "dest": dest
                         },
                         noiselevel=-1)
                writemsg("!!! %s\n" % (e, ), noiselevel=-1)
                return None
            # EXDEV: the rename crossed a device boundary (or a 'bind'
            # mount made it look that way), so fall back to copying.
    if renamefailed:
        if stat.S_ISREG(sstat[stat.ST_MODE]):
            dest_tmp = dest + "#new"
            dest_tmp_bytes = _unicode_encode(dest_tmp,
                                             encoding=encoding,
                                             errors='strict')
            success = False
            try:  # For safety copy then move it over.
                _copyfile(src_bytes, dest_tmp_bytes)
                _apply_stat(sstat, dest_tmp_bytes)
                if xattr_enabled:
                    try:
                        _copyxattr(src_bytes,
                                   dest_tmp_bytes,
                                   exclude=mysettings.get(
                                       "PORTAGE_XATTR_EXCLUDE", ""))
                    except SystemExit:
                        raise
                    except:
                        msg = _("Failed to copy extended attributes. "
                                "In order to avoid this error, set "
                                "FEATURES=\"-xattr\" in make.conf.")
                        msg = textwrap.wrap(msg, 65)
                        for line in msg:
                            writemsg("!!! %s\n" % (line, ), noiselevel=-1)
                        raise
                _rename(dest_tmp_bytes, dest_bytes)
                _os.unlink(src_bytes)
                success = True
            except Exception as e:
                writemsg("!!! %s\n" % _('copy %(src)s -> %(dest)s failed.') % {
                    "src": src,
                    "dest": dest
                },
                         noiselevel=-1)
                writemsg("!!! %s\n" % (e, ), noiselevel=-1)
                return None
            finally:
                if not success:
                    try:
                        _os.unlink(dest_tmp_bytes)
                    except OSError:
                        pass
        else:
            # We don't yet handle special files, so fall back to /bin/mv.
            a = spawn([MOVE_BINARY, '-f', src, dest], env=os.environ)
            if a != os.EX_OK:
                writemsg(_("!!! Failed to move special file:\n"),
                         noiselevel=-1)
                writemsg(_("!!! '%(src)s' to '%(dest)s'\n") % \
                 {"src": _unicode_decode(src, encoding=encoding),
                 "dest": _unicode_decode(dest, encoding=encoding)}, noiselevel=-1)
                writemsg("!!! %s\n" % a, noiselevel=-1)
                return None  # failure

    # Return the timestamp as integral nanoseconds (st_mtime_ns), since
    # the stat_obj.st_mtime float attribute rounds *up* if the nanosecond
    # part of the timestamp is 999999881 ns or greater.
    try:
        if hardlinked:
            newmtime = os.stat(dest).st_mtime_ns
        else:
            # Note: It is not possible to preserve nanosecond precision
            # (supported in POSIX.1-2008 via utimensat) with the IEEE 754
            # double precision float which only has a 53 bit significand.
            if newmtime is not None:
                os.utime(dest, ns=(newmtime, newmtime))
            else:
                newmtime = sstat.st_mtime_ns
                if renamefailed:
                    # If rename succeeded then timestamps are automatically
                    # preserved with complete precision because the source
                    # and destination inodes are the same. Otherwise, manually
                    # update timestamps with nanosecond precision.
                    os.utime(dest, ns=(newmtime, newmtime))
    except OSError:
        # The utime can fail here with EPERM even though the move succeeded.
        # Instead of failing, use stat to return the mtime if possible.
        try:
            newmtime = os.stat(dest).st_mtime_ns
        except OSError as e:
            writemsg(_("!!! Failed to stat in movefile()\n"), noiselevel=-1)
            writemsg("!!! %s\n" % dest, noiselevel=-1)
            writemsg("!!! %s\n" % str(e), noiselevel=-1)
            return None

    if bsd_chflags:
        # Restore the flags we saved before moving
        if pflags:
            bsd_chflags.chflags(os.path.dirname(dest), pflags)

    return newmtime
Beispiel #47
0
    def __init__(self, paths, settings):
        """Load config from files in paths"""

        prepos = {}
        location_map = {}
        treemap = {}
        ignored_map = {}
        ignored_location_map = {}

        if "PORTAGE_REPOSITORIES" in settings:
            portdir = ""
            portdir_overlay = ""
            portdir_sync = ""
        else:
            portdir = settings.get("PORTDIR", "")
            portdir_overlay = settings.get("PORTDIR_OVERLAY", "")
            portdir_sync = settings.get("SYNC", "")

        try:
            self._parse(paths, prepos, ignored_map, ignored_location_map,
                        settings.local_config, portdir)
        except ConfigParserError as e:
            writemsg(_("!!! Error while reading repo config file: %s\n") % e,
                     noiselevel=-1)
            # The configparser state is unreliable (prone to quirky
            # exceptions) after it has thrown an error, so use empty
            # config and try to fall back to PORTDIR{,_OVERLAY}.
            prepos.clear()
            prepos['DEFAULT'] = RepoConfig('DEFAULT', {},
                                           local_config=settings.local_config)
            location_map.clear()
            treemap.clear()
            ignored_map.clear()
            ignored_location_map.clear()

        default_portdir = os.path.join(os.sep,
                                       settings['EPREFIX'].lstrip(os.sep),
                                       'usr', 'portage')

        # If PORTDIR_OVERLAY contains a repo with the same repo_name as
        # PORTDIR, then PORTDIR is overridden.
        portdir = self._add_repositories(portdir, portdir_overlay, prepos,
                                         ignored_map, ignored_location_map,
                                         settings.local_config,
                                         default_portdir)
        if portdir and portdir.strip():
            portdir = os.path.realpath(portdir)

        ignored_repos = tuple((repo_name, tuple(paths)) \
         for repo_name, paths in ignored_map.items())

        self.missing_repo_names = frozenset(
            repo.location for repo in prepos.values()
            if repo.location is not None and repo.missing_repo_name)

        # Do this before expanding aliases, so that location_map and
        # treemap consistently map unaliased names whenever available.
        for repo_name, repo in list(prepos.items()):
            if repo.location is None:
                if repo_name != 'DEFAULT':
                    # Skip this warning for repoman (bug #474578).
                    if settings.local_config and paths:
                        writemsg_level("!!! %s\n" % _(
                            "Section '%s' in repos.conf is missing location attribute"
                        ) % repo.name,
                                       level=logging.ERROR,
                                       noiselevel=-1)
                    del prepos[repo_name]
                    continue
            else:
                if not portage._sync_mode:
                    if not isdir_raise_eaccess(repo.location):
                        writemsg_level("!!! %s\n" % _(
                            "Section '%s' in repos.conf has location attribute set "
                            "to nonexistent directory: '%s'") %
                                       (repo_name, repo.location),
                                       level=logging.ERROR,
                                       noiselevel=-1)

                        # Ignore missing directory for 'gentoo' so that
                        # first sync with emerge-webrsync is possible.
                        if repo.name != 'gentoo':
                            del prepos[repo_name]
                            continue

                    # After removing support for PORTDIR_OVERLAY, the following check can be:
                    # if repo.missing_repo_name:
                    if repo.missing_repo_name and repo.name != repo_name:
                        writemsg_level("!!! %s\n" % _(
                            "Section '%s' in repos.conf refers to repository "
                            "without repository name set in '%s'") %
                                       (repo_name,
                                        os.path.join(repo.location,
                                                     REPO_NAME_LOC)),
                                       level=logging.ERROR,
                                       noiselevel=-1)
                        del prepos[repo_name]
                        continue

                    if repo.name != repo_name:
                        writemsg_level("!!! %s\n" % _(
                            "Section '%s' in repos.conf has name different "
                            "from repository name '%s' set inside repository")
                                       % (repo_name, repo.name),
                                       level=logging.ERROR,
                                       noiselevel=-1)
                        del prepos[repo_name]
                        continue

                location_map[repo.location] = repo_name
                treemap[repo_name] = repo.location

        # Add alias mappings, but never replace unaliased mappings.
        for repo_name, repo in list(prepos.items()):
            names = set()
            names.add(repo_name)
            if repo.aliases:
                aliases = stack_lists([repo.aliases], incremental=True)
                names.update(aliases)

            for name in names:
                if name in prepos and prepos[name].location is not None:
                    if name == repo_name:
                        # unaliased names already handled earlier
                        continue
                    writemsg_level(_("!!! Repository name or alias '%s', " + \
                     "defined for repository '%s', overrides " + \
                     "existing alias or repository.\n") % (name, repo_name), level=logging.WARNING, noiselevel=-1)
                    # Never replace an unaliased mapping with
                    # an aliased mapping.
                    continue
                prepos[name] = repo
                if repo.location is not None:
                    if repo.location not in location_map:
                        # Never replace an unaliased mapping with
                        # an aliased mapping.
                        location_map[repo.location] = name
                    treemap[name] = repo.location

        main_repo = prepos['DEFAULT'].main_repo
        if main_repo is None or main_repo not in prepos:
            #setting main_repo if it was not set in repos.conf
            main_repo = location_map.get(portdir)
            if main_repo is not None:
                prepos['DEFAULT'].main_repo = main_repo
            else:
                prepos['DEFAULT'].main_repo = None
                if portdir and not portage._sync_mode:
                    writemsg(_(
                        "!!! main-repo not set in DEFAULT and PORTDIR is empty.\n"
                    ),
                             noiselevel=-1)

        if main_repo is not None and prepos[main_repo].priority is None:
            # This happens if main-repo has been set in repos.conf.
            prepos[main_repo].priority = -1000

        # Backward compatible SYNC support for mirrorselect.
        if portdir_sync and main_repo is not None:
            if portdir_sync.startswith("rsync://"):
                prepos[main_repo].sync_uri = portdir_sync
                prepos[main_repo].sync_type = "rsync"

        # Include repo.name in sort key, for predictable sorting
        # even when priorities are equal.
        prepos_order = sorted(prepos.items(),
                              key=lambda r: (r[1].priority or 0, r[1].name))

        # filter duplicates from aliases, by only including
        # items where repo.name == key
        prepos_order = [
            repo.name for (key, repo) in prepos_order if repo.name == key
            and key != 'DEFAULT' and repo.location is not None
        ]

        self.prepos = prepos
        self.prepos_order = prepos_order
        self.ignored_repos = ignored_repos
        self.location_map = location_map
        self.treemap = treemap
        self._prepos_changed = True
        self._repo_location_list = []

        #The 'masters' key currently contains repo names. Replace them with the matching RepoConfig.
        for repo_name, repo in prepos.items():
            if repo_name == "DEFAULT":
                continue
            if repo.masters is None:
                if self.mainRepo() and repo_name != self.mainRepo().name:
                    repo.masters = (self.mainRepo(),)
                else:
                    repo.masters = ()
            else:
                if repo.masters and isinstance(repo.masters[0], RepoConfig):
                    # This one has already been processed
                    # because it has an alias.
                    continue
                master_repos = []
                for master_name in repo.masters:
                    if master_name not in prepos:
                        layout_filename = os.path.join(repo.user_location,
                                                       "metadata",
                                                       "layout.conf")
                        writemsg_level(_("Unavailable repository '%s' " \
                         "referenced by masters entry in '%s'\n") % \
                         (master_name, layout_filename),
                         level=logging.ERROR, noiselevel=-1)
                    else:
                        master_repos.append(prepos[master_name])
                repo.masters = tuple(master_repos)

        #The 'eclass_overrides' key currently contains repo names. Replace them with the matching repo paths.
        for repo_name, repo in prepos.items():
            if repo_name == "DEFAULT":
                continue

            eclass_locations = []
            eclass_locations.extend(master_repo.location
                                    for master_repo in repo.masters)
            # Only append the current repo to eclass_locations if it's not
            # there already. This allows masters to have more control over
            # eclass override order, which may be useful for scenarios in
            # which there is a plan to migrate eclasses to a master repo.
            if repo.location not in eclass_locations:
                eclass_locations.append(repo.location)

            if repo.eclass_overrides:
                for other_repo_name in repo.eclass_overrides:
                    if other_repo_name in self.treemap:
                        eclass_locations.append(
                            self.get_location_for_name(other_repo_name))
                    else:
                        writemsg_level(_("Unavailable repository '%s' " \
                         "referenced by eclass-overrides entry for " \
                         "'%s'\n") % (other_repo_name, repo_name), \
                         level=logging.ERROR, noiselevel=-1)
            repo.eclass_locations = tuple(eclass_locations)

        eclass_dbs = {}
        for repo_name, repo in prepos.items():
            if repo_name == "DEFAULT":
                continue

            eclass_db = None
            for eclass_location in repo.eclass_locations:
                tree_db = eclass_dbs.get(eclass_location)
                if tree_db is None:
                    tree_db = eclass_cache.cache(eclass_location)
                    eclass_dbs[eclass_location] = tree_db
                if eclass_db is None:
                    eclass_db = tree_db.copy()
                else:
                    eclass_db.append(tree_db)
            repo.eclass_db = eclass_db

        for repo_name, repo in prepos.items():
            if repo_name == "DEFAULT":
                continue

            if repo._masters_orig is None and self.mainRepo() and \
             repo.name != self.mainRepo().name and not portage._sync_mode:
                # TODO: Delete masters code in pym/portage/tests/resolver/ResolverPlayground.py when deleting this warning.
                writemsg_level(
                    "!!! %s\n" %
                    _("Repository '%s' is missing masters attribute in '%s'") %
                    (repo.name,
                     os.path.join(repo.location, "metadata", "layout.conf")) +
                    "!!! %s\n" %
                    _("Set 'masters = %s' in this file for future compatibility"
                      ) % self.mainRepo().name,
                    level=logging.WARNING,
                    noiselevel=-1)

        self._prepos_changed = True
        self._repo_location_list = []

        self._check_locations()
Beispiel #48
0
	def _builderGetRepository(cls, options, repositories):
		repository = options.get("repository", "porttree")
		if repository not in repositories:
			raise SetConfigError(_("invalid repository class '%s'") % repository)
		return repository
Beispiel #49
0
    def _add_repositories(portdir, portdir_overlay, prepos, ignored_map,
                          ignored_location_map, local_config, default_portdir):
        """Add overlays in PORTDIR_OVERLAY as repositories"""
        overlays = []
        portdir_orig = None
        if portdir:
            portdir = normalize_path(portdir)
            portdir_orig = portdir
            overlays.append(portdir)
        try:
            port_ov = [normalize_path(i) for i in shlex_split(portdir_overlay)]
        except ValueError as e:
            #File "/usr/lib/python3.2/shlex.py", line 168, in read_token
            #	raise ValueError("No closing quotation")
            writemsg(_("!!! Invalid PORTDIR_OVERLAY:"
                       " %s: %s\n") % (e, portdir_overlay),
                     noiselevel=-1)
            port_ov = []
        overlays.extend(port_ov)
        default_repo_opts = {}
        if prepos['DEFAULT'].aliases is not None:
            default_repo_opts['aliases'] = \
             ' '.join(prepos['DEFAULT'].aliases)
        if prepos['DEFAULT'].eclass_overrides is not None:
            default_repo_opts['eclass-overrides'] = \
             ' '.join(prepos['DEFAULT'].eclass_overrides)
        if prepos['DEFAULT'].masters is not None:
            default_repo_opts['masters'] = \
             ' '.join(prepos['DEFAULT'].masters)

        if overlays:
            # We need a copy of the original repos.conf data, since we're
            # going to modify the prepos dict and some of the RepoConfig
            # objects that we put in prepos may have to be discarded if
            # they get overridden by a repository with the same name but
            # a different location. This is common with repoman, for example,
            # when temporarily overriding an rsync repo with another copy
            # of the same repo from CVS.
            repos_conf = prepos.copy()
                # Overlay priority is negative because we want overlays to be
                # searched before any other repo.
            base_priority = 0
            for ov in overlays:
                # Ignore missing directory for 'gentoo' so that
                # first sync with emerge-webrsync is possible.
                if isdir_raise_eaccess(ov) or \
                 (base_priority == 0 and ov is portdir):
                    repo_opts = default_repo_opts.copy()
                    repo_opts['location'] = ov
                    repo = RepoConfig(None,
                                      repo_opts,
                                      local_config=local_config)
                    # repos_conf_opts contains options from repos.conf
                    repos_conf_opts = repos_conf.get(repo.name)
                    if repos_conf_opts is not None:
                        # Selectively copy only the attributes which
                        # repos.conf is allowed to override.
                        for k in ('aliases', 'eclass_overrides', 'force',
                                  'masters', 'priority', 'sync_cvs_repo',
                                  'sync_type', 'sync_uri'):
                            v = getattr(repos_conf_opts, k, None)
                            if v is not None:
                                setattr(repo, k, v)

                    if repo.name in prepos:
                        # Silently ignore when PORTDIR overrides the location
                        # setting from the default repos.conf (bug #478544).
                        old_location = prepos[repo.name].location
                        if old_location is not None and \
                         old_location != repo.location and \
                         not (base_priority == 0 and
                         old_location == default_portdir):
                            ignored_map.setdefault(repo.name,
                                                   []).append(old_location)
                            ignored_location_map[old_location] = repo.name
                            if old_location == portdir:
                                portdir = repo.user_location

                    if repo.priority is None:
                        if base_priority == 0 and ov == portdir_orig:
                            # If it's the original PORTDIR setting and it's not
                            # in PORTDIR_OVERLAY, then it will be assigned a
                            # special priority setting later.
                            pass
                        else:
                            repo.priority = base_priority
                            base_priority += 1

                    prepos[repo.name] = repo
                else:

                    if not portage._sync_mode:
                        writemsg(
                            _("!!! Invalid PORTDIR_OVERLAY (not a dir): '%s'\n"
                              ) % ov,
                            noiselevel=-1)

        return portdir
Beispiel #50
0
def _lockfile_iteration(mypath,
                        wantnewlockfile=False,
                        unlinkfile=False,
                        waiting_msg=None,
                        flags=0):
    """
	Acquire a lock on mypath, without retry. Return None if the lockfile
	was removed by previous lock holder (caller must retry).

	@param mypath: lock file path
	@type mypath: str
	@param wantnewlockfile: use a separate new lock file
	@type wantnewlockfile: bool
	@param unlinkfile: remove lock file prior to unlock
	@type unlinkfile: bool
	@param waiting_msg: message to show before blocking
	@type waiting_msg: str
	@param flags: lock flags (only supports os.O_NONBLOCK)
	@type flags: int
	@rtype: tuple or None
	@return: unlockfile tuple on success, None if retry is needed
	"""
    if not mypath:
        raise InvalidData(_("Empty path given"))

    # Since Python 3.4, chown requires int type (no proxies).
    portage_gid = int(portage.data.portage_gid)

    # Support for file object or integer file descriptor parameters is
    # deprecated due to ambiguity in whether or not it's safe to close
    # the file descriptor, making it prone to "Bad file descriptor" errors
    # or file descriptor leaks.
    if isinstance(mypath, str) and mypath[-1] == '/':
        mypath = mypath[:-1]

    lockfilename_path = mypath
    if hasattr(mypath, 'fileno'):
        warnings.warn(
            "portage.locks.lockfile() support for "
            "file object parameters is deprecated. Use a file path instead.",
            DeprecationWarning,
            stacklevel=2)
        lockfilename_path = getattr(mypath, 'name', None)
        mypath = mypath.fileno()
    if isinstance(mypath, int):
        warnings.warn(
            "portage.locks.lockfile() support for integer file "
            "descriptor parameters is deprecated. Use a file path instead.",
            DeprecationWarning,
            stacklevel=2)
        lockfilename = mypath
        wantnewlockfile = 0
        unlinkfile = 0
    elif wantnewlockfile:
        base, tail = os.path.split(mypath)
        lockfilename = os.path.join(base, "." + tail + ".portage_lockfile")
        lockfilename_path = lockfilename
        unlinkfile = 1
    else:
        lockfilename = mypath

    if isinstance(mypath, str):
        if not os.path.exists(os.path.dirname(mypath)):
            raise DirectoryNotFound(os.path.dirname(mypath))
        preexisting = os.path.exists(lockfilename)
        old_mask = os.umask(0o000)
        try:
            while True:
                try:
                    myfd = os.open(lockfilename, os.O_CREAT | os.O_RDWR, 0o660)
                except OSError as e:
                    if e.errno in (errno.ENOENT,
                                   errno.ESTALE) and os.path.isdir(
                                       os.path.dirname(lockfilename)):
                        # Retry required for NFS (see bug 636798).
                        continue
                    else:
                        _raise_exc(e)
                else:
                    break

            if not preexisting:
                try:
                    if portage.data.secpass >= 1 and os.stat(
                            lockfilename).st_gid != portage_gid:
                        os.chown(lockfilename, -1, portage_gid)
                except OSError as e:
                    if e.errno in (errno.ENOENT, errno.ESTALE):
                        os.close(myfd)
                        return None
                    writemsg("%s: chown('%s', -1, %d)\n" % \
                     (e, lockfilename, portage_gid), noiselevel=-1)
                    writemsg(_("Cannot chown a lockfile: '%s'\n") % \
                     lockfilename, noiselevel=-1)
                    writemsg(_("Group IDs of current user: %s\n") % \
                     " ".join(str(n) for n in os.getgroups()),
                     noiselevel=-1)
        finally:
            os.umask(old_mask)

    elif isinstance(mypath, int):
        myfd = mypath

    else:
        raise ValueError(_("Unknown type passed in '%s': '%s'") % \
         (type(mypath), mypath))

    # Try a non-blocking lock first; if it's already held, print a
    # message that we're waiting on the lockfile and fall back to a
    # blocking attempt.
    locking_method = portage._eintr_func_wrapper(_get_lock_fn())
    try:
        if "__PORTAGE_TEST_HARDLINK_LOCKS" in os.environ:
            raise IOError(errno.ENOSYS, "Function not implemented")
        locking_method(myfd, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except IOError as e:
        if not hasattr(e, "errno"):
            raise
        if e.errno in (errno.EACCES, errno.EAGAIN, errno.ENOLCK):
            # resource temp unavailable; eg, someone beat us to the lock.
            if flags & os.O_NONBLOCK:
                os.close(myfd)
                raise TryAgain(mypath)

            global _quiet
            if _quiet:
                out = None
            else:
                out = portage.output.EOutput()
            if waiting_msg is None:
                if isinstance(mypath, int):
                    waiting_msg = _("waiting for lock on fd %i") % myfd
                else:
                    waiting_msg = _("waiting for lock on %s") % lockfilename
            if out is not None:
                out.ebegin(waiting_msg)
            # try for the exclusive lock now.
            enolock_msg_shown = False
            while True:
                try:
                    locking_method(myfd, fcntl.LOCK_EX)
                except EnvironmentError as e:
                    if e.errno == errno.ENOLCK:
                        # This is known to occur on Solaris NFS (see
                        # bug #462694). Assume that the error is due
                        # to temporary exhaustion of record locks,
                        # and loop until one becomes available.
                        if not enolock_msg_shown:
                            enolock_msg_shown = True
                            if isinstance(mypath, int):
                                context_desc = _("Error while waiting "
                                                 "to lock fd %i") % myfd
                            else:
                                context_desc = _("Error while waiting "
                                                 "to lock '%s'") % lockfilename
                            writemsg("\n!!! %s: %s\n" % (context_desc, e),
                                     noiselevel=-1)

                        time.sleep(_HARDLINK_POLL_LATENCY)
                        continue

                    if out is not None:
                        out.eend(1, str(e))
                    raise
                else:
                    break

            if out is not None:
                out.eend(os.EX_OK)
        elif e.errno in (errno.ENOSYS, ):
            # We're not allowed to lock on this FS.
            if not isinstance(lockfilename, int):
                # If a file object was passed in, it's not safe
                # to close the file descriptor because it may
                # still be in use.
                os.close(myfd)
            lockfilename_path = _unicode_decode(lockfilename_path,
                                                encoding=_encodings['fs'],
                                                errors='strict')
            if not isinstance(lockfilename_path, str):
                raise
            link_success = hardlink_lockfile(lockfilename_path,
                                             waiting_msg=waiting_msg,
                                             flags=flags)
            if not link_success:
                raise
            lockfilename = lockfilename_path
            locking_method = None
            myfd = HARDLINK_FD
        else:
            raise

    fstat_result = None
    if isinstance(lockfilename, str) and myfd != HARDLINK_FD and unlinkfile:
        try:
            (removed, fstat_result) = _lockfile_was_removed(myfd, lockfilename)
        except Exception:
            # Do not leak the file descriptor here.
            os.close(myfd)
            raise
        else:
            if removed:
                # Removed by previous lock holder... Caller will retry...
                os.close(myfd)
                return None

    if myfd != HARDLINK_FD:
        _lock_manager(myfd,
                      os.fstat(myfd) if fstat_result is None else fstat_result,
                      mypath)

    writemsg(str((lockfilename, myfd, unlinkfile)) + "\n", 1)
    return (lockfilename, myfd, unlinkfile, locking_method)
Beispiel #51
0
	def _start(self):
		tar_options = ""
		if "xattr" in self.features:
			process = subprocess.Popen(["tar", "--help"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
			output = process.communicate()[0]
			if b"--xattrs" in output:
				tar_options = ["--xattrs", "--xattrs-include='*'"]
				for x in portage.util.shlex_split(self.env.get("PORTAGE_XATTR_EXCLUDE", "")):
					tar_options.append(portage._shell_quote("--xattrs-exclude=%s" % x))
				tar_options = " ".join(tar_options)

		decomp = _compressors.get(compression_probe(self.pkg_path))
		if decomp is not None:
			decomp_cmd = decomp.get("decompress")
		else:
			decomp_cmd = None
		if decomp_cmd is None:
			self.scheduler.output("!!! %s\n" %
				_("File compression header unrecognized: %s") %
				self.pkg_path, log_path=self.logfile,
				background=self.background, level=logging.ERROR)
			self.returncode = 1
			self._async_wait()
			return

		try:
			decompression_binary = shlex_split(varexpand(decomp_cmd, mydict=self.env))[0]
		except IndexError:
			decompression_binary = ""

		if find_binary(decompression_binary) is None:
			# Try alternative command if it exists
			if _compressors.get(compression_probe(self.pkg_path)).get("decompress_alt"):
				decomp_cmd = _compressors.get(
					compression_probe(self.pkg_path)).get("decompress_alt")
			try:
				decompression_binary = shlex_split(varexpand(decomp_cmd, mydict=self.env))[0]
			except IndexError:
				decompression_binary = ""

			if find_binary(decompression_binary) is None:
				missing_package = _compressors.get(compression_probe(self.pkg_path)).get("package")
				self.scheduler.output("!!! %s\n" %
					_("File compression unsupported %s.\n Command was: %s.\n Maybe missing package: %s") %
					(self.pkg_path, varexpand(decomp_cmd, mydict=self.env), missing_package), log_path=self.logfile,
					background=self.background, level=logging.ERROR)
				self.returncode = 1
				self._async_wait()
				return

		# Add -q to decomp_cmd opts, in order to avoid "trailing garbage
		# after EOF ignored" warning messages due to xpak trailer.
		# SIGPIPE handling (128 + SIGPIPE) should be compatible with
		# assert_sigpipe_ok() that's used by the ebuild unpack() helper.
		self.args = [self._shell_binary, "-c",
			("%s -cq -- %s | tar -xp %s -C %s -f - ; " + \
			"p=(${PIPESTATUS[@]}) ; " + \
			"if [[ ${p[0]} != 0 && ${p[0]} != %d ]] ; then " % (128 + signal.SIGPIPE) + \
			"echo bzip2 failed with status ${p[0]} ; exit ${p[0]} ; fi ; " + \
			"if [ ${p[1]} != 0 ] ; then " + \
			"echo tar failed with status ${p[1]} ; exit ${p[1]} ; fi ; " + \
			"exit 0 ;") % \
			(decomp_cmd,
			portage._shell_quote(self.pkg_path),
			tar_options,
			portage._shell_quote(self.image_dir))]

		SpawnProcess._start(self)
Beispiel #52
0
def _parse_color_map(config_root='/', onerror=None):
    """
	Parse /etc/portage/color.map and return a dict of error codes.

	@param onerror: an optional callback to handle any ParseError that would
		otherwise be raised
	@type onerror: callable
	@rtype: dict
	@return: a dictionary mapping color classes to color codes
	"""
    global codes, _styles
    myfile = os.path.join(config_root, COLOR_MAP_FILE)
    ansi_code_pattern = re.compile("^[0-9;]*m$")
    quotes = '\'"'

    def strip_quotes(token):
        if token[0] in quotes and token[0] == token[-1]:
            token = token[1:-1]
        return token

    f = None
    try:
        f = io.open(_unicode_encode(myfile,
                                    encoding=_encodings['fs'],
                                    errors='strict'),
                    mode='r',
                    encoding=_encodings['content'],
                    errors='replace')
        lineno = 0
        for line in f:
            lineno += 1

            commenter_pos = line.find("#")
            line = line[:commenter_pos].strip()

            if len(line) == 0:
                continue

            split_line = line.split("=")
            if len(split_line) != 2:
                e = ParseError(_("'%s', line %s: expected exactly one occurrence of '=' operator") % \
                 (myfile, lineno))
                if onerror:
                    onerror(e)
                else:
                    raise e
                continue

            k = strip_quotes(split_line[0].strip())
            v = strip_quotes(split_line[1].strip())
            if k not in _styles and k not in codes:
                e = ParseError(_("'%s', line %s: Unknown variable: '%s'") % \
                 (myfile, lineno, k))
                if onerror:
                    onerror(e)
                else:
                    raise e
                continue
            if ansi_code_pattern.match(v):
                if k in _styles:
                    _styles[k] = (esc_seq + v, )
                elif k in codes:
                    codes[k] = esc_seq + v
            else:
                code_list = []
                for x in v.split():
                    if x in codes:
                        if k in _styles:
                            code_list.append(x)
                        elif k in codes:
                            code_list.append(codes[x])
                    else:
                        e = ParseError(_("'%s', line %s: Undefined: '%s'") % \
                         (myfile, lineno, x))
                        if onerror:
                            onerror(e)
                        else:
                            raise e
                if k in _styles:
                    _styles[k] = tuple(code_list)
                elif k in codes:
                    codes[k] = "".join(code_list)
    except (IOError, OSError) as e:
        if e.errno == errno.ENOENT:
            raise FileNotFound(myfile)
        elif e.errno == errno.EACCES:
            raise PermissionDenied(myfile)
        raise
    finally:
        if f is not None:
            f.close()
Beispiel #53
0
	def _start(self):
		tar_options = ""
		if "xattr" in self.features:
			process = subprocess.Popen(["tar", "--help"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
			output = process.communicate()[0]
			if b"--xattrs" in output:
				tar_options = ["--xattrs", "--xattrs-include='*'"]
				for x in portage.util.shlex_split(self.env.get("PORTAGE_XATTR_EXCLUDE", "")):
					tar_options.append(portage._shell_quote("--xattrs-exclude=%s" % x))
				tar_options = " ".join(tar_options)

		decomp = _compressors.get(compression_probe(self.pkg_path))
		if decomp is not None:
			decomp_cmd = decomp.get("decompress")
		elif tarfile.is_tarfile(portage._unicode_encode(self.pkg_path,
			encoding=portage._encodings['fs'], errors='strict')):
			decomp_cmd = 'cat'
			decomp = {
				'compress': 'cat',
				'package': 'sys-apps/coreutils',
			}
		else:
			decomp_cmd = None
		if decomp_cmd is None:
			self.scheduler.output("!!! %s\n" %
				_("File compression header unrecognized: %s") %
				self.pkg_path, log_path=self.logfile,
				background=self.background, level=logging.ERROR)
			self.returncode = 1
			self._async_wait()
			return

		try:
			decompression_binary = shlex_split(varexpand(decomp_cmd, mydict=self.env))[0]
		except IndexError:
			decompression_binary = ""

		if find_binary(decompression_binary) is None:
			# Try alternative command if it exists
			if decomp.get("decompress_alt"):
				decomp_cmd = decomp.get("decompress_alt")
			try:
				decompression_binary = shlex_split(varexpand(decomp_cmd, mydict=self.env))[0]
			except IndexError:
				decompression_binary = ""

			if find_binary(decompression_binary) is None:
				missing_package = decomp.get("package")
				self.scheduler.output("!!! %s\n" %
					_("File compression unsupported %s.\n Command was: %s.\n Maybe missing package: %s") %
					(self.pkg_path, varexpand(decomp_cmd, mydict=self.env), missing_package), log_path=self.logfile,
					background=self.background, level=logging.ERROR)
				self.returncode = 1
				self._async_wait()
				return

		pkg_xpak = portage.xpak.tbz2(self.pkg_path)
		pkg_xpak.scan()

		# SIGPIPE handling (128 + SIGPIPE) should be compatible with
		# assert_sigpipe_ok() that's used by the ebuild unpack() helper.
		self.args = [self._shell_binary, "-c",
			("cmd0=(head -c %d -- %s) cmd1=(%s) cmd2=(tar -xp %s -C %s -f -); " + \
			'"${cmd0[@]}" | "${cmd1[@]}" | "${cmd2[@]}"; ' + \
			"p=(${PIPESTATUS[@]}) ; for i in {0..2}; do " + \
			"if [[ ${p[$i]} != 0 && ${p[$i]} != %d ]] ; then " + \
			"echo command $(eval \"echo \\\"'\\${cmd$i[*]}'\\\"\") " + \
			"failed with status ${p[$i]} ; exit ${p[$i]} ; fi ; done; " + \
			"if [ ${p[$i]} != 0 ] ; then " + \
			"echo command $(eval \"echo \\\"'\\${cmd$i[*]}'\\\"\") " + \
			"failed with status ${p[$i]} ; exit ${p[$i]} ; fi ; " + \
			"exit 0 ;") % \
			(pkg_xpak.filestat.st_size - pkg_xpak.xpaksize,
			portage._shell_quote(self.pkg_path),
			decomp_cmd,
			tar_options,
			portage._shell_quote(self.image_dir),
			128 + signal.SIGPIPE)]

		SpawnProcess._start(self)
Beispiel #54
0
    def display(self):
        self.out.write("\r" + colorize("WARN",
         _("cache miss: '") + str(self.misses) + "'") + \
         " --- " + colorize("GOOD", _("cache hit: '") + str(self.hits) + "'"))
        self.out.flush()
Beispiel #55
0
    def listBrokenBinaries(self, debug=False):
        """
        Find binaries and their needed sonames, which have no providers.

        @param debug: Boolean to enable debug output
        @type debug: Boolean
        @rtype: dict (example: {'/usr/bin/foo': set(['libbar.so'])})
        @return: The return value is an object -> set-of-sonames mapping, where
                object is a broken binary and the set consists of sonames needed by
                object that have no corresponding libraries to fulfill the dependency.

        """

        os = _os_merge

        class _LibraryCache:
            """
            Caches properties associated with paths.

            The purpose of this class is to prevent multiple instances of
            _ObjectKey for the same paths.

            """
            def __init__(cache_self):
                cache_self.cache = {}

            def get(cache_self, obj):
                """
                Caches and returns properties associated with an object.

                @param obj: absolute path (can be symlink)
                @type obj: string (example: '/usr/lib/libfoo.so')
                @rtype: 4-tuple with types
                        (string or None, string or None, 2-tuple, Boolean)
                @return: 4-tuple with the following components:
                        1. arch as a string or None if it does not exist,
                        2. soname as a string or None if it does not exist,
                        3. obj_key as 2-tuple,
                        4. Boolean representing whether the object exists.
                        (example: ('x86_64', 'libfoo.so.1', (123, 456), True))

                """
                if obj in cache_self.cache:
                    return cache_self.cache[obj]

                obj_key = self._obj_key(obj)
                # Check that the library exists on the filesystem.
                if obj_key.file_exists():
                    # Get the arch and soname from LinkageMap._obj_properties if
                    # it exists. Otherwise, None.
                    obj_props = self._obj_properties.get(obj_key)
                    if obj_props is None:
                        arch = None
                        soname = None
                    else:
                        arch = obj_props.arch
                        soname = obj_props.soname
                    return cache_self.cache.setdefault(
                        obj, (arch, soname, obj_key, True))
                return cache_self.cache.setdefault(
                    obj, (None, None, obj_key, False))

        rValue = {}
        cache = _LibraryCache()
        providers = self.listProviders()

        # Iterate over all obj_keys and their providers.
        for obj_key, sonames in providers.items():
            obj_props = self._obj_properties[obj_key]
            arch = obj_props.arch
            path = obj_props.runpaths
            objs = obj_props.alt_paths
            path = path.union(self._defpath)
            # Iterate over each needed soname and the set of library paths that
            # fulfill the soname to determine if the dependency is broken.
            for soname, libraries in sonames.items():
                # validLibraries is used to store libraries, which satisfy soname,
                # so if no valid libraries are found, the soname is not satisfied
                # for obj_key.  If unsatisfied, objects associated with obj_key
                # must be emerged.
                validLibraries = set()
                # It could be the case that the library to satisfy the soname is
                # not in the obj's runpath, but a symlink to the library is (eg
                # libnvidia-tls.so.1 in nvidia-drivers).  Also, since LinkageMap
                # does not catalog symlinks, broken or missing symlinks may go
                # unnoticed.  As a result of these cases, check that a file with
                # the same name as the soname exists in obj's runpath.
                # XXX If we catalog symlinks in LinkageMap, this could be improved.
                for directory in path:
                    cachedArch, cachedSoname, cachedKey, cachedExists = cache.get(
                        os.path.join(directory, soname))
                    # Check that this library provides the needed soname.  Doing
                    # this, however, will cause consumers of libraries missing
                    # sonames to be unnecessarily emerged. (eg libmix.so)
                    if cachedSoname == soname and cachedArch == arch:
                        validLibraries.add(cachedKey)
                        if debug and cachedKey not in set(
                                map(self._obj_key_cache.get, libraries)):
                            # XXX This is most often due to soname symlinks not in
                            # a library's directory.  We could catalog symlinks in
                            # LinkageMap to avoid checking for this edge case here.
                            writemsg_level(
                                _("Found provider outside of findProviders:") +
                                (" %s -> %s %s\n" % (
                                    os.path.join(directory, soname),
                                    self._obj_properties[cachedKey].alt_paths,
                                    libraries,
                                )),
                                level=logging.DEBUG,
                                noiselevel=-1,
                            )
                        # A valid library has been found, so there is no need to
                        # continue.
                        break
                    if (debug and cachedArch == arch
                            and cachedKey in self._obj_properties):
                        writemsg_level(
                            (_("Broken symlink or missing/bad soname: " +
                               "%(dir_soname)s -> %(cachedKey)s " +
                               "with soname %(cachedSoname)s but expecting %(soname)s"
                               ) %
                             {
                                 "dir_soname": os.path.join(directory, soname),
                                 "cachedKey": self._obj_properties[cachedKey],
                                 "cachedSoname": cachedSoname,
                                 "soname": soname,
                             }) + "\n",
                            level=logging.DEBUG,
                            noiselevel=-1,
                        )
                # This conditional checks if there are no libraries to satisfy the
                # soname (empty set).
                if not validLibraries:
                    for obj in objs:
                        rValue.setdefault(obj, set()).add(soname)
                    # If no valid libraries have been found by this point, then
                    # there are no files named with the soname within obj's runpath,
                    # but if there are libraries (from the providers mapping), it is
                    # likely that soname symlinks or the actual libraries are
                    # missing or broken.  Thus those libraries are added to rValue
                    # in order to emerge corrupt library packages.
                    for lib in libraries:
                        rValue.setdefault(lib, set()).add(soname)
                        if debug:
                            if not os.path.isfile(lib):
                                writemsg_level(
                                    _("Missing library:") + " %s\n" % (lib, ),
                                    level=logging.DEBUG,
                                    noiselevel=-1,
                                )
                            else:
                                writemsg_level(
                                    _("Possibly missing symlink:") +
                                    "%s\n" % (os.path.join(
                                        os.path.dirname(lib), soname)),
                                    level=logging.DEBUG,
                                    noiselevel=-1,
                                )
        return rValue
Beispiel #56
0
def dir_get_metadata(baseurl,
                     conn=None,
                     chunk_size=3000,
                     verbose=1,
                     usingcache=1,
                     makepickle=None):
    """(baseurl,conn,chunk_size,verbose) -- 
	"""
    if not conn:
        keepconnection = 0
    else:
        keepconnection = 1

    cache_path = "/var/cache/edb"
    metadatafilename = os.path.join(cache_path, 'remote_metadata.pickle')

    if makepickle is None:
        makepickle = "/var/cache/edb/metadata.idx.most_recent"

    try:
        conn, protocol, address, params, headers = create_conn(baseurl, conn)
    except _all_errors as e:
        # ftplib.FTP(host) can raise errors like this:
        #   socket.error: (111, 'Connection refused')
        sys.stderr.write("!!! %s\n" % (e, ))
        return {}

    out = sys.stdout
    try:
        metadatafile = open(
            _unicode_encode(metadatafilename,
                            encoding=_encodings['fs'],
                            errors='strict'), 'rb')
        mypickle = pickle.Unpickler(metadatafile)
        try:
            mypickle.find_global = None
        except AttributeError:
            # TODO: If py3k, override Unpickler.find_class().
            pass
        metadata = mypickle.load()
        out.write(_("Loaded metadata pickle.\n"))
        out.flush()
        metadatafile.close()
    except (AttributeError, EOFError, EnvironmentError, ValueError,
            pickle.UnpicklingError):
        metadata = {}
    if baseurl not in metadata:
        metadata[baseurl] = {}
    if "indexname" not in metadata[baseurl]:
        metadata[baseurl]["indexname"] = ""
    if "timestamp" not in metadata[baseurl]:
        metadata[baseurl]["timestamp"] = 0
    if "unmodified" not in metadata[baseurl]:
        metadata[baseurl]["unmodified"] = 0
    if "data" not in metadata[baseurl]:
        metadata[baseurl]["data"] = {}

    if not os.access(cache_path, os.W_OK):
        sys.stderr.write(_("!!! Unable to write binary metadata to disk!\n"))
        sys.stderr.write(_("!!! Permission denied: '%s'\n") % cache_path)
        return metadata[baseurl]["data"]

    import portage.exception
    try:
        filelist = dir_get_list(baseurl, conn)
    except portage.exception.PortageException as e:
        sys.stderr.write(
            _("!!! Error connecting to '%s'.\n") % _hide_url_passwd(baseurl))
        sys.stderr.write("!!! %s\n" % str(e))
        del e
        return metadata[baseurl]["data"]
    tbz2list = match_in_array(filelist, suffix=".tbz2")
    metalist = match_in_array(filelist, prefix="metadata.idx")
    del filelist

    # Determine if our metadata file is current.
    metalist.sort()
    metalist.reverse()  # makes the order new-to-old.
    for mfile in metalist:
        if usingcache and \
           ((metadata[baseurl]["indexname"] != mfile) or \
           (metadata[baseurl]["timestamp"] < int(time.time()-(60*60*24)))):
            # Try to download new cache until we succeed on one.
            data = ""
            for trynum in [1, 2, 3]:
                mytempfile = tempfile.TemporaryFile()
                try:
                    file_get(baseurl + "/" + mfile, mytempfile, conn)
                    if mytempfile.tell() > len(data):
                        mytempfile.seek(0)
                        data = mytempfile.read()
                except ValueError as e:
                    sys.stderr.write("--- " + str(e) + "\n")
                    if trynum < 3:
                        sys.stderr.write(_("Retrying...\n"))
                    sys.stderr.flush()
                    mytempfile.close()
                    continue
                if match_in_array([mfile], suffix=".gz"):
                    out.write("gzip'd\n")
                    out.flush()
                    try:
                        import gzip
                        mytempfile.seek(0)
                        gzindex = gzip.GzipFile(mfile[:-3], 'rb', 9,
                                                mytempfile)
                        data = gzindex.read()
                    except SystemExit as e:
                        raise
                    except Exception as e:
                        mytempfile.close()
                        sys.stderr.write(
                            _("!!! Failed to use gzip: ") + str(e) + "\n")
                        sys.stderr.flush()
                    mytempfile.close()
                try:
                    metadata[baseurl]["data"] = pickle.loads(data)
                    del data
                    metadata[baseurl]["indexname"] = mfile
                    metadata[baseurl]["timestamp"] = int(time.time())
                    metadata[baseurl][
                        "modified"] = 0  # It's not, right after download.
                    out.write(_("Pickle loaded.\n"))
                    out.flush()
                    break
                except SystemExit as e:
                    raise
                except Exception as e:
                    sys.stderr.write(
                        _("!!! Failed to read data from index: ") +
                        str(mfile) + "\n")
                    sys.stderr.write("!!! " + str(e) + "\n")
                    sys.stderr.flush()
            try:
                metadatafile = open(
                    _unicode_encode(metadatafilename,
                                    encoding=_encodings['fs'],
                                    errors='strict'), 'wb')
                pickle.dump(metadata, metadatafile, protocol=2)
                metadatafile.close()
            except SystemExit as e:
                raise
            except Exception as e:
                sys.stderr.write(
                    _("!!! Failed to write binary metadata to disk!\n"))
                sys.stderr.write("!!! " + str(e) + "\n")
                sys.stderr.flush()
            break
    # We may have metadata... now we run through the tbz2 list and check.

    class CacheStats(object):
        from time import time

        def __init__(self, out):
            self.misses = 0
            self.hits = 0
            self.last_update = 0
            self.out = out
            self.min_display_latency = 0.2

        def update(self):
            cur_time = self.time()
            if cur_time - self.last_update >= self.min_display_latency:
                self.last_update = cur_time
                self.display()

        def display(self):
            self.out.write("\r"+colorize("WARN",
             _("cache miss: '")+str(self.misses)+"'") + \
             " --- "+colorize("GOOD", _("cache hit: '")+str(self.hits)+"'"))
            self.out.flush()

    cache_stats = CacheStats(out)
    have_tty = os.environ.get('TERM') != 'dumb' and out.isatty()
    if have_tty:
        cache_stats.display()
    binpkg_filenames = set()
    for x in tbz2list:
        x = os.path.basename(x)
        binpkg_filenames.add(x)
        if x not in metadata[baseurl]["data"]:
            cache_stats.misses += 1
            if have_tty:
                cache_stats.update()
            metadata[baseurl]["modified"] = 1
            myid = None
            for retry in range(3):
                try:
                    myid = file_get_metadata(
                        "/".join((baseurl.rstrip("/"), x.lstrip("/"))), conn,
                        chunk_size)
                    break
                except http_client_BadStatusLine:
                    # Sometimes this error is thrown from conn.getresponse() in
                    # make_http_request().  The docstring for this error in
                    # httplib.py says "Presumably, the server closed the
                    # connection before sending a valid response".
                    conn, protocol, address, params, headers = create_conn(
                        baseurl)
                except http_client_ResponseNotReady:
                    # With some http servers this error is known to be thrown
                    # from conn.getresponse() in make_http_request() when the
                    # remote file does not have appropriate read permissions.
                    # It may be possible to recover from this exception in
                    # some cases, though, so retry.
                    conn, protocol, address, params, headers = create_conn(
                        baseurl)

            if myid and myid[0]:
                metadata[baseurl]["data"][x] = make_metadata_dict(myid)
            elif verbose:
                sys.stderr.write(
                    colorize("BAD", _("!!! Failed to retrieve metadata on: "))
                    + str(x) + "\n")
                sys.stderr.flush()
        else:
            cache_stats.hits += 1
            if have_tty:
                cache_stats.update()
    cache_stats.display()
    # Cleanse stale cache for files that don't exist on the server anymore.
    stale_cache = set(metadata[baseurl]["data"]).difference(binpkg_filenames)
    if stale_cache:
        for x in stale_cache:
            del metadata[baseurl]["data"][x]
        metadata[baseurl]["modified"] = 1
    del stale_cache
    del binpkg_filenames
    out.write("\n")
    out.flush()

    try:
        if "modified" in metadata[baseurl] and metadata[baseurl]["modified"]:
            metadata[baseurl]["timestamp"] = int(time.time())
            metadatafile = open(
                _unicode_encode(metadatafilename,
                                encoding=_encodings['fs'],
                                errors='strict'), 'wb')
            pickle.dump(metadata, metadatafile, protocol=2)
            metadatafile.close()
        if makepickle:
            metadatafile = open(
                _unicode_encode(makepickle,
                                encoding=_encodings['fs'],
                                errors='strict'), 'wb')
            pickle.dump(metadata[baseurl]["data"], metadatafile, protocol=2)
            metadatafile.close()
    except SystemExit as e:
        raise
    except Exception as e:
        sys.stderr.write(_("!!! Failed to write binary metadata to disk!\n"))
        sys.stderr.write("!!! " + str(e) + "\n")
        sys.stderr.flush()

    if not keepconnection:
        conn.close()

    return metadata[baseurl]["data"]
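The function above keeps a per-mirror metadata cache as a pickled dict keyed by base URL, and rewrites the pickle only when the "modified" flag was set. A minimal sketch of that load/update/persist round trip, assuming a simplified cache layout (the path and helper names here are illustrative, not Portage's real cache format):

import pickle
import time

CACHE_PATH = "/tmp/binpkg_metadata.pickle"  # hypothetical location

def load_cache(path):
    # A missing or corrupt cache degrades to an empty dict, mirroring
    # the except/retry handling in the function above.
    try:
        with open(path, "rb") as f:
            return pickle.load(f)
    except (OSError, EOFError, pickle.UnpicklingError):
        return {}

def save_cache(path, cache, baseurl):
    # Rewrite the pickle only when something actually changed,
    # stamping the refresh time as the code above does.
    if cache.get(baseurl, {}).get("modified"):
        cache[baseurl]["timestamp"] = int(time.time())
        with open(path, "wb") as f:
            pickle.dump(cache, f, protocol=2)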
Beispiel #57
0
def cpv_expand(mycpv, mydb=None, use_cache=1, settings=None):
    """Given a string (packagename or virtual) expand it into a valid
	cat/package string. Virtuals use the mydb to determine which provided
	virtual is a valid choice and defaults to the first element when there
	are no installed/available candidates."""
    myslash = mycpv.split("/")
    mysplit = _pkgsplit(myslash[-1])
    if settings is None:
        settings = globals()["settings"]
    virts = settings.getvirtuals()
    virts_p = settings.get_virts_p()
    if len(myslash) > 2:
        # this is an illegal case.
        mysplit = []
        mykey = mycpv
    elif len(myslash) == 2:
        if mysplit:
            mykey = myslash[0] + "/" + mysplit[0]
        else:
            mykey = mycpv
        if mydb and virts and mykey in virts:
            writemsg("mydb.__class__: %s\n" % (mydb.__class__), 1)
            if hasattr(mydb, "cp_list"):
                if not mydb.cp_list(mykey, use_cache=use_cache):
                    writemsg("virts[%s]: %s\n" % (str(mykey), virts[mykey]), 1)
                    mykey_orig = mykey[:]
                    for vkey in virts[mykey]:
                        # The virtuals file can contain a versioned atom, so
                        # it may be necessary to remove the operator and
                        # version from the atom before it is passed into
                        # dbapi.cp_list().
                        if mydb.cp_list(vkey.cp):
                            mykey = str(vkey)
                            writemsg(_("virts chosen: %s\n") % (mykey), 1)
                            break
                    if mykey == mykey_orig:
                        mykey = str(virts[mykey][0])
                        writemsg(_("virts defaulted: %s\n") % (mykey), 1)
            # we only perform virtual expansion if we are passed a dbapi
    else:
        # specific cpv, no category, i.e. "foo-1.0"
        if mysplit:
            myp = mysplit[0]
        else:
            # "foo" ?
            myp = mycpv
        mykey = None
        matches = []
        if mydb and hasattr(mydb, "categories"):
            for x in mydb.categories:
                if mydb.cp_list(x + "/" + myp, use_cache=use_cache):
                    matches.append(x + "/" + myp)
        if len(matches) > 1:
            virtual_name_collision = False
            if len(matches) == 2:
                for x in matches:
                    if not x.startswith("virtual/"):
                        # Assume that the non-virtual is desired.  This helps
                        # avoid the ValueError for invalid deps that come from
                        # installed packages (during reverse blocker detection,
                        # for example).
                        mykey = x
                    else:
                        virtual_name_collision = True
            if not virtual_name_collision:
                # AmbiguousPackageName inherits from ValueError,
                # for backward compatibility with calling code
                # that already handles ValueError.
                raise AmbiguousPackageName(matches)
        elif matches:
            mykey = matches[0]

        if not mykey and not isinstance(mydb, list):
            if myp in virts_p:
                mykey = virts_p[myp][0]
            # again, we only perform virtual expansion if we have a dbapi (not a list)
        if not mykey:
            mykey = "null/" + myp
    if mysplit:
        if mysplit[2] == "r0":
            return mykey + "-" + mysplit[1]
        else:
            return mykey + "-" + mysplit[1] + "-" + mysplit[2]
    else:
        return mykey
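A usage sketch of cpv_expand(); the entry point and database handle assume the historical top-level portage API, and concrete results depend entirely on the local tree and virtuals configuration:

import portage
from portage.exception import AmbiguousPackageName

vardb = portage.db[portage.root]["vartree"].dbapi  # installed packages

try:
    # Expands a bare name to a full cat/pkg-version string, e.g.
    # "bash-5.1" -> "app-shells/bash-5.1" on a typical system.
    print(portage.cpv_expand("bash-5.1", mydb=vardb))
except AmbiguousPackageName as e:
    # Raised (a ValueError subclass) when more than one category
    # provides the name and it is not a virtual/non-virtual pair.
    print("ambiguous:", e)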
Beispiel #58
0
def lockfile(mypath,
             wantnewlockfile=0,
             unlinkfile=0,
             waiting_msg=None,
             flags=0):
    """
	If wantnewlockfile is True then this creates a lockfile in the parent
	directory as the file: '.' + basename + '.portage_lockfile'.
	"""

    if not mypath:
        raise InvalidData(_("Empty path given"))

    # Since Python 3.4, chown requires int type (no proxies).
    portage_gid = int(portage.data.portage_gid)

    # Support for file object or integer file descriptor parameters is
    # deprecated due to ambiguity in whether or not it's safe to close
    # the file descriptor, making it prone to "Bad file descriptor" errors
    # or file descriptor leaks.
    if isinstance(mypath, basestring) and mypath[-1] == '/':
        mypath = mypath[:-1]

    lockfilename_path = mypath
    if hasattr(mypath, 'fileno'):
        warnings.warn(
            "portage.locks.lockfile() support for "
            "file object parameters is deprecated. Use a file path instead.",
            DeprecationWarning,
            stacklevel=2)
        lockfilename_path = getattr(mypath, 'name', None)
        mypath = mypath.fileno()
    if isinstance(mypath, int):
        warnings.warn(
            "portage.locks.lockfile() support for integer file "
            "descriptor parameters is deprecated. Use a file path instead.",
            DeprecationWarning,
            stacklevel=2)
        lockfilename = mypath
        wantnewlockfile = 0
        unlinkfile = 0
    elif wantnewlockfile:
        base, tail = os.path.split(mypath)
        lockfilename = os.path.join(base, "." + tail + ".portage_lockfile")
        lockfilename_path = lockfilename
        unlinkfile = 1
    else:
        lockfilename = mypath

    if isinstance(mypath, basestring):
        if not os.path.exists(os.path.dirname(mypath)):
            raise DirectoryNotFound(os.path.dirname(mypath))
        preexisting = os.path.exists(lockfilename)
        old_mask = os.umask(000)
        try:
            try:
                myfd = os.open(lockfilename, os.O_CREAT | os.O_RDWR, 0o660)
            except OSError as e:
                func_call = "open('%s')" % lockfilename
                if e.errno == OperationNotPermitted.errno:
                    raise OperationNotPermitted(func_call)
                elif e.errno == PermissionDenied.errno:
                    raise PermissionDenied(func_call)
                elif e.errno == ReadOnlyFileSystem.errno:
                    raise ReadOnlyFileSystem(func_call)
                else:
                    raise

            if not preexisting:
                try:
                    if portage.data.secpass >= 1 and os.stat(
                            lockfilename).st_gid != portage_gid:
                        os.chown(lockfilename, -1, portage_gid)
                except OSError as e:
                    if e.errno in (errno.ENOENT, errno.ESTALE):
                        return lockfile(mypath,
                                        wantnewlockfile=wantnewlockfile,
                                        unlinkfile=unlinkfile,
                                        waiting_msg=waiting_msg,
                                        flags=flags)
                    else:
                        writemsg("%s: chown('%s', -1, %d)\n" % \
                         (e, lockfilename, portage_gid), noiselevel=-1)
                        writemsg(_("Cannot chown a lockfile: '%s'\n") % \
                         lockfilename, noiselevel=-1)
                        writemsg(_("Group IDs of current user: %s\n") % \
                         " ".join(str(n) for n in os.getgroups()),
                         noiselevel=-1)
        finally:
            os.umask(old_mask)

    elif isinstance(mypath, int):
        myfd = mypath

    else:
        raise ValueError(_("Unknown type passed in '%s': '%s'") % \
         (type(mypath), mypath))

    # Try a non-blocking lock first; if it's already held, print a message
    # that we're waiting on the lock, then fall back to a blocking attempt.
    locking_method = portage._eintr_func_wrapper(_get_lock_fn())
    try:
        if "__PORTAGE_TEST_HARDLINK_LOCKS" in os.environ:
            raise IOError(errno.ENOSYS, "Function not implemented")
        locking_method(myfd, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except IOError as e:
        if not hasattr(e, "errno"):
            raise
        if e.errno in (errno.EACCES, errno.EAGAIN, errno.ENOLCK):
            # resource temporarily unavailable; e.g., someone beat us to the lock.
            if flags & os.O_NONBLOCK:
                os.close(myfd)
                raise TryAgain(mypath)

            global _quiet
            if _quiet:
                out = None
            else:
                out = portage.output.EOutput()
            if waiting_msg is None:
                if isinstance(mypath, int):
                    waiting_msg = _("waiting for lock on fd %i") % myfd
                else:
                    waiting_msg = _("waiting for lock on %s") % lockfilename
            if out is not None:
                out.ebegin(waiting_msg)
            # try for the exclusive lock now.
            enolock_msg_shown = False
            while True:
                try:
                    locking_method(myfd, fcntl.LOCK_EX)
                except EnvironmentError as e:
                    if e.errno == errno.ENOLCK:
                        # This is known to occur on Solaris NFS (see
                        # bug #462694). Assume that the error is due
                        # to temporary exhaustion of record locks,
                        # and loop until one becomes available.
                        if not enolock_msg_shown:
                            enolock_msg_shown = True
                            if isinstance(mypath, int):
                                context_desc = _("Error while waiting "
                                                 "to lock fd %i") % myfd
                            else:
                                context_desc = _("Error while waiting "
                                                 "to lock '%s'") % lockfilename
                            writemsg("\n!!! %s: %s\n" % (context_desc, e),
                                     noiselevel=-1)

                        time.sleep(_HARDLINK_POLL_LATENCY)
                        continue

                    if out is not None:
                        out.eend(1, str(e))
                    raise
                else:
                    break

            if out is not None:
                out.eend(os.EX_OK)
        elif e.errno in (errno.ENOSYS, ):
            # We're not allowed to lock on this FS.
            if not isinstance(lockfilename, int):
                # If a file object was passed in, it's not safe
                # to close the file descriptor because it may
                # still be in use.
                os.close(myfd)
            lockfilename_path = _unicode_decode(lockfilename_path,
                                                encoding=_encodings['fs'],
                                                errors='strict')
            if not isinstance(lockfilename_path, basestring):
                raise
            link_success = hardlink_lockfile(lockfilename_path,
                                             waiting_msg=waiting_msg,
                                             flags=flags)
            if not link_success:
                raise
            lockfilename = lockfilename_path
            locking_method = None
            myfd = HARDLINK_FD
        else:
            raise


    if isinstance(lockfilename, basestring) and \
     myfd != HARDLINK_FD and _fstat_nlink(myfd) == 0:
        # The file was deleted on us... Keep trying to make one...
        os.close(myfd)
        writemsg(_("lockfile recurse\n"), 1)
        lockfilename, myfd, unlinkfile, locking_method = lockfile(
            mypath,
            wantnewlockfile=wantnewlockfile,
            unlinkfile=unlinkfile,
            waiting_msg=waiting_msg,
            flags=flags)

    if myfd != HARDLINK_FD:

        # FD_CLOEXEC is enabled by default in Python >=3.4.
        if sys.hexversion < 0x3040000:
            try:
                fcntl.FD_CLOEXEC
            except AttributeError:
                pass
            else:
                fcntl.fcntl(
                    myfd, fcntl.F_SETFD,
                    fcntl.fcntl(myfd, fcntl.F_GETFD) | fcntl.FD_CLOEXEC)

        _open_fds.add(myfd)

    writemsg(str((lockfilename, myfd, unlinkfile)) + "\n", 1)
    return (lockfilename, myfd, unlinkfile, locking_method)
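A minimal acquire/release sketch; the path is illustrative, and the 4-tuple returned by lockfile() is exactly what portage.locks.unlockfile() expects back:

import portage.locks

# Blocking acquisition via a sibling ".demo.portage_lockfile" file.
lock = portage.locks.lockfile("/tmp/demo", wantnewlockfile=1)
try:
    pass  # critical section: the resource is held exclusively here
finally:
    # unlockfile() consumes the (lockfilename, myfd, unlinkfile,
    # locking_method) tuple returned above.
    portage.locks.unlockfile(lock)

Passing flags=os.O_NONBLOCK instead makes a contended lock raise TryAgain immediately rather than printing the waiting message and blocking.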
Beispiel #59
0
def hardlink_lockfile(lockfilename,
                      max_wait=DeprecationWarning,
                      waiting_msg=None,
                      flags=0):
    """Does the NFS, hardlink shuffle to ensure locking on the disk.
	We create a PRIVATE hardlink to the real lockfile, that is just a
	placeholder on the disk.
	If our file has 2 references, then we have the lock. :)
	Otherwise we lather, rinse, and repeat.
	"""

    if max_wait is not DeprecationWarning:
        warnings.warn(
            "The 'max_wait' parameter of "
            "portage.locks.hardlink_lockfile() is now unused. Use "
            "flags=os.O_NONBLOCK instead.",
            DeprecationWarning,
            stacklevel=2)

    global _quiet
    out = None
    displayed_waiting_msg = False
    preexisting = os.path.exists(lockfilename)
    myhardlock = hardlock_name(lockfilename)

    # Since Python 3.4, chown requires int type (no proxies).
    portage_gid = int(portage.data.portage_gid)

    # myhardlock must not exist prior to our link() call, and we can
    # safely unlink it since its file name is unique to our PID
    try:
        os.unlink(myhardlock)
    except OSError as e:
        if e.errno in (errno.ENOENT, errno.ESTALE):
            pass
        else:
            func_call = "unlink('%s')" % myhardlock
            if e.errno == OperationNotPermitted.errno:
                raise OperationNotPermitted(func_call)
            elif e.errno == PermissionDenied.errno:
                raise PermissionDenied(func_call)
            else:
                raise

    while True:
        # create lockfilename if it doesn't exist yet
        try:
            myfd = os.open(lockfilename, os.O_CREAT | os.O_RDWR, 0o660)
        except OSError as e:
            func_call = "open('%s')" % lockfilename
            if e.errno == OperationNotPermitted.errno:
                raise OperationNotPermitted(func_call)
            elif e.errno == PermissionDenied.errno:
                raise PermissionDenied(func_call)
            elif e.errno == ReadOnlyFileSystem.errno:
                raise ReadOnlyFileSystem(func_call)
            else:
                raise
        else:
            myfd_st = None
            try:
                myfd_st = os.fstat(myfd)
                if not preexisting:
                    # Don't chown the file if it is preexisting, since we
                    # want to preserve existing permissions in that case.
                    if portage.data.secpass >= 1 and myfd_st.st_gid != portage_gid:
                        os.fchown(myfd, -1, portage_gid)
            except OSError as e:
                if e.errno not in (errno.ENOENT, errno.ESTALE):
                    writemsg("%s: fchown('%s', -1, %d)\n" % \
                     (e, lockfilename, portage_gid), noiselevel=-1)
                    writemsg(_("Cannot chown a lockfile: '%s'\n") % \
                     lockfilename, noiselevel=-1)
                    writemsg(_("Group IDs of current user: %s\n") % \
                     " ".join(str(n) for n in os.getgroups()),
                     noiselevel=-1)
                else:
                    # another process has removed the file, so we'll have
                    # to create it again
                    continue
            finally:
                os.close(myfd)

            # If fstat shows more than one hardlink, then it's extremely
            # unlikely that the following link call will result in a lock,
            # so optimize away the wasteful link call and sleep or raise
            # TryAgain.
            if myfd_st is not None and myfd_st.st_nlink < 2:
                try:
                    os.link(lockfilename, myhardlock)
                except OSError as e:
                    func_call = "link('%s', '%s')" % (lockfilename, myhardlock)
                    if e.errno == OperationNotPermitted.errno:
                        raise OperationNotPermitted(func_call)
                    elif e.errno == PermissionDenied.errno:
                        raise PermissionDenied(func_call)
                    elif e.errno in (errno.ESTALE, errno.ENOENT):
                        # another process has removed the file, so we'll have
                        # to create it again
                        continue
                    else:
                        raise
                else:
                    if hardlink_is_mine(myhardlock, lockfilename):
                        if out is not None:
                            out.eend(os.EX_OK)
                        break

                    try:
                        os.unlink(myhardlock)
                    except OSError as e:
                        # This should not happen, since the file name of
                        # myhardlock is unique to our host and PID,
                        # and the above link() call succeeded.
                        if e.errno not in (errno.ENOENT, errno.ESTALE):
                            raise
                        raise FileNotFound(myhardlock)

        if flags & os.O_NONBLOCK:
            raise TryAgain(lockfilename)

        if out is None and not _quiet:
            out = portage.output.EOutput()
        if out is not None and not displayed_waiting_msg:
            displayed_waiting_msg = True
            if waiting_msg is None:
                waiting_msg = _("waiting for lock on %s\n") % lockfilename
            out.ebegin(waiting_msg)

        time.sleep(_HARDLINK_POLL_LATENCY)

    return True
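The shuffle works because link() is atomic even over NFS: after linking a private name to the shared placeholder, a link count of exactly 2 proves no other host won the race. A standalone sketch of that core check with illustrative paths (the real code derives the private name from hostname and PID via hardlock_name()):

import os

def try_hardlink_lock(lockfile, private):
    # Make sure the shared placeholder exists.
    fd = os.open(lockfile, os.O_CREAT | os.O_RDWR, 0o660)
    os.close(fd)
    try:
        os.link(lockfile, private)  # atomic, even over NFS
    except OSError:
        return False  # placeholder vanished or link refused; retry later
    if os.stat(private).st_nlink == 2:
        # Two names, one inode: nobody else linked in between.
        return True
    os.unlink(private)  # lost the race; lather, rinse, repeat
    return False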
Beispiel #60
0
    def _killed_by_signal(self, signum):
        msg = _("The ebuild phase '%s' has been "
                "killed by signal %s.") % (self.phase, signum)
        self._eerror(textwrap.wrap(msg, 72))
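_eerror() receives a list of pre-wrapped lines, which is exactly what textwrap.wrap() returns; for illustration:

import textwrap

msg = "The ebuild phase 'compile' has been killed by signal 9."
for line in textwrap.wrap(msg, 72):
    print(line)  # one display line per element, each at most 72 chars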