Example #1
	def updateprotect(self):
		"""Update internal state for isprotected() calls.  Nonexistent paths
		are ignored."""

		os = _os_merge

		self.protect = []
		self._dirs = set()
		for x in self.protect_list:
			ppath = normalize_path(
				os.path.join(self.myroot, x.lstrip(os.path.sep)))
			try:
				if stat.S_ISDIR(os.stat(ppath).st_mode):
					self._dirs.add(ppath)
				self.protect.append(ppath)
			except OSError:
				# If it doesn't exist, there's no need to protect it.
				pass

		self.protectmask = []
		for x in self.mask_list:
			ppath = normalize_path(
				os.path.join(self.myroot, x.lstrip(os.path.sep)))
			try:
				# Use lstat so that anything, even a broken symlink, can be
				# protected.
				if stat.S_ISDIR(os.lstat(ppath).st_mode):
					self._dirs.add(ppath)
				self.protectmask.append(ppath)
				# Now use stat in case this is a symlink to a directory.
				if stat.S_ISDIR(os.stat(ppath).st_mode):
					self._dirs.add(ppath)
			except OSError:
				# If it doesn't exist, there's no need to mask it.
				pass
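
The mask loop above stats each path twice: os.lstat() so that even a broken symlink can be protected, then os.stat() so that a symlink pointing at a directory is also tracked as a directory. A minimal standalone sketch of that double check, outside of Portage (the collect_dirs helper and the sample paths are made up for illustration):

import os
import stat

def collect_dirs(paths):
    """Return the subset of paths that are directories, using the
    lstat-then-stat pattern so broken symlinks do not abort the scan."""
    dirs = set()
    for ppath in paths:
        try:
            # lstat classifies the link itself, so a dangling symlink
            # does not raise here and can still be recorded elsewhere.
            if stat.S_ISDIR(os.lstat(ppath).st_mode):
                dirs.add(ppath)
            # stat follows the link, catching symlinks to directories.
            if stat.S_ISDIR(os.stat(ppath).st_mode):
                dirs.add(ppath)
        except OSError:
            # Nonexistent paths are simply skipped, as in updateprotect().
            pass
    return dirs

print(collect_dirs(["/etc", "/etc/passwd", "/no/such/path"]))
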
Example #2
	def _same_device(path1, path2):
		try:
			st1 = os.stat(path1)
			st2 = os.stat(path2)
		except OSError:
			return False
		else:
			return st1.st_dev == st2.st_dev
Example #3
def hardlink_is_mine(link, lock):
    try:
        lock_st = os.stat(lock)
        if lock_st.st_nlink == 2:
            link_st = os.stat(link)
            return lock_st.st_ino == link_st.st_ino and lock_st.st_dev == link_st.st_dev
    except OSError:
        pass
    return False
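
A quick self-contained demonstration of how the link-count and inode/device comparison behaves, using a throwaway temp directory (the lock and link names are arbitrary):

import os
import tempfile

def hardlink_is_mine(link, lock):
    try:
        lock_st = os.stat(lock)
        if lock_st.st_nlink == 2:
            link_st = os.stat(link)
            return lock_st.st_ino == link_st.st_ino and lock_st.st_dev == link_st.st_dev
    except OSError:
        pass
    return False

with tempfile.TemporaryDirectory() as tmp:
    lock = os.path.join(tmp, "lockfile")
    link = os.path.join(tmp, "lockfile.myhost-1234")
    open(lock, "w").close()                 # st_nlink == 1: lock not held
    print(hardlink_is_mine(link, lock))     # False
    os.link(lock, link)                     # st_nlink == 2, same inode/device
    print(hardlink_is_mine(link, lock))     # True
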
Example #4
def exists_raise_eaccess(path):
	try:
		os.stat(path)
	except OSError as e:
		if e.errno == PermissionDenied.errno:
			raise PermissionDenied("stat('%s')" % path)
		return False
	else:
		return True
Example #5
def cacheddir(my_original_path, ignorecvs, ignorelist, EmptyOnError, followSymlinks=True):
	mypath = normalize_path(my_original_path)
	try:
		pathstat = os.stat(mypath)
		if not stat.S_ISDIR(pathstat.st_mode):
			raise DirectoryNotFound(mypath)
	except EnvironmentError as e:
		if e.errno == PermissionDenied.errno:
			raise PermissionDenied(mypath)
		del e
		return [], []
	except PortageException:
		return [], []
	else:
		try:
			fpaths = os.listdir(mypath)
		except EnvironmentError as e:
			if e.errno != errno.EACCES:
				raise
			del e
			raise PermissionDenied(mypath)
		ftype = []
		for x in fpaths:
			try:
				if followSymlinks:
					pathstat = os.stat(mypath+"/"+x)
				else:
					pathstat = os.lstat(mypath+"/"+x)

				if stat.S_ISREG(pathstat[stat.ST_MODE]):
					ftype.append(0)
				elif stat.S_ISDIR(pathstat[stat.ST_MODE]):
					ftype.append(1)
				elif stat.S_ISLNK(pathstat[stat.ST_MODE]):
					ftype.append(2)
				else:
					ftype.append(3)
			except (IOError, OSError):
				ftype.append(3)

	if ignorelist or ignorecvs:
		ret_list = []
		ret_ftype = []
		for file_path, file_type in zip(fpaths, ftype):
			if file_path in ignorelist:
				pass
			elif ignorecvs:
				if file_path[:2] != ".#" and \
					not (file_type == 1 and file_path in VCS_DIRS):
					ret_list.append(file_path)
					ret_ftype.append(file_type)
			else:
				ret_list.append(file_path)
				ret_ftype.append(file_type)
	else:
		ret_list = fpaths
		ret_ftype = ftype

	return ret_list, ret_ftype
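
The type codes used above (0 = regular file, 1 = directory, 2 = symlink, 3 = other or unreadable) can be reproduced with a much smaller helper. This sketch keeps the same encoding but drops Portage's caching, EmptyOnError handling, and ignore lists (classify is a hypothetical name):

import os
import stat

def classify(dirpath, follow_symlinks=True):
    """Map each entry of dirpath to 0 (file), 1 (dir), 2 (symlink), 3 (other)."""
    statfunc = os.stat if follow_symlinks else os.lstat
    types = {}
    for name in os.listdir(dirpath):
        try:
            mode = statfunc(os.path.join(dirpath, name)).st_mode
        except OSError:
            types[name] = 3
            continue
        if stat.S_ISREG(mode):
            types[name] = 0
        elif stat.S_ISDIR(mode):
            types[name] = 1
        elif stat.S_ISLNK(mode):
            types[name] = 2
        else:
            types[name] = 3
    return types

print(classify(".", follow_symlinks=False))
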
Example #6
	def getfetchsizes(self, mypkg, useflags=None, debug=0, myrepo=None):
		# returns a filename:size dictionary of remaining downloads
		myebuild, mytree = self.findname2(mypkg, myrepo=myrepo)
		if myebuild is None:
			raise AssertionError(_("ebuild not found for '%s'") % mypkg)
		pkgdir = os.path.dirname(myebuild)
		mf = self.repositories.get_repo_for_location(
			os.path.dirname(os.path.dirname(pkgdir))).load_manifest(
				pkgdir, self.settings["DISTDIR"])
		checksums = mf.getDigests()
		if not checksums:
			if debug: 
				writemsg(_("[empty/missing/bad digest]: %s\n") % (mypkg,))
			return {}
		filesdict={}
		myfiles = self.getFetchMap(mypkg, useflags=useflags, mytree=mytree)
		#XXX: maybe this should be improved: take partial downloads
		# into account? check checksums?
		for myfile in myfiles:
			try:
				fetch_size = int(checksums[myfile]["size"])
			except (KeyError, ValueError):
				if debug:
					writemsg(_("[bad digest]: missing %(file)s for %(pkg)s\n") % {"file":myfile, "pkg":mypkg})
				continue
			file_path = os.path.join(self.settings["DISTDIR"], myfile)
			mystat = None
			try:
				mystat = os.stat(file_path)
			except OSError:
				pass
			if mystat is None:
				existing_size = 0
				ro_distdirs = self.settings.get("PORTAGE_RO_DISTDIRS")
				if ro_distdirs is not None:
					for x in shlex_split(ro_distdirs):
						try:
							mystat = os.stat(os.path.join(x, myfile))
						except OSError:
							pass
						else:
							if mystat.st_size == fetch_size:
								existing_size = fetch_size
								break
			else:
				existing_size = mystat.st_size
			remaining_size = fetch_size - existing_size
			if remaining_size > 0:
				# Assume the download is resumable.
				filesdict[myfile] = remaining_size
			elif remaining_size < 0:
				# The existing file is too large and therefore corrupt.
				filesdict[myfile] = int(checksums[myfile]["size"])
		return filesdict
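
The per-file arithmetic reduces to: remaining = expected size minus whatever is already on disk, with an oversized (and therefore presumed corrupt) file counted as a full re-download. A sketch of just that step (the path and expected size below are placeholders):

import os

def remaining_bytes(file_path, expected_size):
    """Return how many bytes still need to be fetched for file_path."""
    try:
        existing = os.stat(file_path).st_size
    except OSError:
        existing = 0
    remaining = expected_size - existing
    if remaining < 0:
        # The existing file is larger than expected, so treat it as
        # corrupt and schedule a full re-download.
        return expected_size
    return remaining

print(remaining_bytes("/var/cache/distfiles/foo-1.0.tar.gz", 1048576))
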
Example #7
	def load(self):
		atoms = []
		nonatoms = []
		atoms_changed = False
		# load atoms and non-atoms from different files so the worldfile is 
		# backwards-compatible with older versions and other PMs, even though 
		# it's supposed to be private state data :/
		try:
			mtime = os.stat(self._filename).st_mtime
		except (OSError, IOError):
			mtime = None
		if (not self._loaded or self._mtime != mtime):
			try:
				data, errors = self.loader.load()
				for fname in errors:
					for e in errors[fname]:
						self.errors.append(fname+": "+e)
			except EnvironmentError as e:
				if e.errno != errno.ENOENT:
					raise
				del e
				data = {}
			atoms = list(data)
			self._mtime = mtime
			atoms_changed = True
		else:
			atoms.extend(self._atoms)
		try:
			mtime = os.stat(self._filename2).st_mtime
		except (OSError, IOError):
			mtime = None
		if (not self._loaded or self._mtime2 != mtime):
			try:
				data, errors = self.loader2.load()
				for fname in errors:
					for e in errors[fname]:
						self.errors.append(fname+": "+e)
			except EnvironmentError as e:
				if e.errno != errno.ENOENT:
					raise
				del e
				data = {}
			nonatoms = list(data)
			self._mtime2 = mtime
			atoms_changed = True
		else:
			nonatoms.extend(self._nonatoms)
		if atoms_changed:
			self._setAtoms(atoms+nonatoms)
Example #8
def _ensure_log_subdirs(logdir, subdir):
	"""
	This assumes that logdir exists, and creates subdirectories down
	to subdir as necessary. The gid of logdir is copied to all
	subdirectories, along with 0o2070 mode bits if present. Both logdir
	and subdir are assumed to be normalized absolute paths.
	"""
	st = os.stat(logdir)
	uid = -1
	gid = st.st_gid
	grp_mode = 0o2070 & st.st_mode

	# If logdir is writable by the portage group but its uid
	# is not portage_uid, then set the uid to portage_uid if
	# we have privileges to do so, for compatibility with our
	# default logrotate config (see bug 378451). With the
	# "su portage portage" directive and logrotate-3.8.0,
	# logrotate's chown call during the compression phase will
	# only succeed if the log file's uid is portage_uid.
	if grp_mode and gid == portage_gid and \
		portage.data.secpass >= 2:
		uid = portage_uid
		if st.st_uid != portage_uid:
			ensure_dirs(logdir, uid=uid)

	logdir_split_len = len(logdir.split(os.sep))
	subdir_split = subdir.split(os.sep)[logdir_split_len:]
	subdir_split.reverse()
	current = logdir
	while subdir_split:
		current = os.path.join(current, subdir_split.pop())
		ensure_dirs(current, uid=uid, gid=gid, mode=grp_mode, mask=0)
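
The loop at the end walks from logdir down to subdir one component at a time. A side-effect-free sketch of just that path decomposition, assuming both arguments are normalized absolute paths as the docstring requires (intermediate_dirs is an illustrative name):

import os

def intermediate_dirs(logdir, subdir):
    """Yield each directory from just below logdir down to subdir."""
    logdir_split_len = len(logdir.split(os.sep))
    current = logdir
    for part in subdir.split(os.sep)[logdir_split_len:]:
        current = os.path.join(current, part)
        yield current

print(list(intermediate_dirs("/var/log/portage", "/var/log/portage/elog/app-misc")))
# ['/var/log/portage/elog', '/var/log/portage/elog/app-misc']
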
Example #9
 def load(self):
     try:
         mtime = os.stat(self._filename).st_mtime
     except (OSError, IOError):
         mtime = None
     if not self._loaded or self._mtime != mtime:
         try:
             data, errors = self.loader.load()
             for fname in errors:
                 for e in errors[fname]:
                     self.errors.append(fname + ": " + e)
         except EnvironmentError as e:
             if e.errno != errno.ENOENT:
                 raise
             del e
             data = {}
         if self.greedy:
             atoms = []
             for a in data:
                 matches = self.dbapi.match(a)
                 for cpv in matches:
                     atoms.append("%s:%s" % (cpv_getkey(cpv), self.dbapi.aux_get(cpv, ["SLOT"])[0]))
                     # In addition to any installed slots, also try to pull
                     # in the latest new slot that may be available.
                 atoms.append(a)
         else:
             atoms = iter(data)
         self._setAtoms(atoms)
         self._mtime = mtime
Example #10
def have_ebuild_dir(path, maxdepth=3):
	""" 
	Try to figure out if 'path' or a subdirectory contains one or more
	ebuild files named appropriately for their parent directory.
	"""
	stack = [(normalize_path(path), 1)]
	while stack:
		path, depth = stack.pop()
		basename = os.path.basename(path)
		try:
			listdir = os.listdir(path)
		except OSError:
			continue
		for filename in listdir:
			abs_filename = os.path.join(path, filename)
			try:
				st = os.stat(abs_filename)
			except OSError:
				continue
			if stat.S_ISDIR(st.st_mode):
				if depth < maxdepth:
					stack.append((abs_filename, depth + 1))
			elif stat.S_ISREG(st.st_mode):
				if filename.endswith(".ebuild") and \
					filename.startswith(basename + "-"):
					return os.path.dirname(os.path.dirname(path))
Example #11
def grab_updates(updpath, prev_mtimes=None):
	"""Returns all the updates from the given directory as a sorted list of
	tuples, each containing (file_path, statobj, content).  If prev_mtimes is
	given then only updates with differing mtimes are considered."""
	try:
		mylist = os.listdir(updpath)
	except OSError as oe:
		if oe.errno == errno.ENOENT:
			raise DirectoryNotFound(updpath)
		raise
	if prev_mtimes is None:
		prev_mtimes = {}
	# validate the file name (filter out CVS directory, etc...)
	mylist = [myfile for myfile in mylist if len(myfile) == 7 and myfile[1:3] == "Q-"]
	if len(mylist) == 0:
		return []
	
	# update names are mangled to make them sort properly
	mylist = [myfile[3:]+"-"+myfile[:2] for myfile in mylist]
	mylist.sort()
	mylist = [myfile[5:]+"-"+myfile[:4] for myfile in mylist]

	update_data = []
	for myfile in mylist:
		file_path = os.path.join(updpath, myfile)
		mystat = os.stat(file_path)
		if file_path not in prev_mtimes or \
		long(prev_mtimes[file_path]) != mystat[stat.ST_MTIME]:
			content = codecs.open(_unicode_encode(file_path,
				encoding=_encodings['fs'], errors='strict'),
				mode='r', encoding=_encodings['repo.content'], errors='replace'
				).read()
			update_data.append((file_path, mystat, content))
	return update_data
Example #12
	def _apply_max_mtime(self, existing_st, entries):
		"""
		Set the Manifest mtime to the max mtime of all relevant files
		(the existing Manifest mtime is included in order to account for
		eclass modifications that change DIST entries). This results in a
		stable/predictable mtime, which is useful when converting thin
		manifests to thick manifests for distribution via rsync. For
		portability, the mtime is set with 1 second resolution.

		@param existing_st: stat result for existing Manifest
		@type existing_st: posix.stat_result
		@param entries: list of current Manifest2Entry instances
		@type entries: list
		"""
		# Use stat_result[stat.ST_MTIME] for 1 second resolution, since
		# it always rounds down. Note that stat_result.st_mtime will round
		# up from 0.999999999 to 1.0 when precision is lost during conversion
		# from nanosecond resolution to float.
		max_mtime = None if existing_st is None else existing_st[stat.ST_MTIME]
		for entry in entries:
			if entry.type == 'DIST':
				continue
			abs_path = (os.path.join(self.pkgdir, 'files', entry.name) if
				entry.type == 'AUX' else os.path.join(self.pkgdir, entry.name))
			mtime = os.stat(abs_path)[stat.ST_MTIME]
			if max_mtime is None or mtime > max_mtime:
				max_mtime = mtime

		if max_mtime is not None:
			os.utime(self.getFullname(), (max_mtime, max_mtime))
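
The 1-second-resolution detail is the interesting part: indexing a stat result with stat.ST_MTIME yields a truncated integer, while st_mtime is a float that can round up during the nanosecond-to-float conversion. A minimal sketch of pinning one file's mtime to the newest integer mtime among several others (pin_to_max_mtime is a made-up helper, not Portage API):

import os
import stat

def pin_to_max_mtime(target, sources):
    """Set target's mtime to the newest integer mtime among sources."""
    max_mtime = None
    for path in sources:
        mtime = os.stat(path)[stat.ST_MTIME]  # integer seconds, rounded down
        if max_mtime is None or mtime > max_mtime:
            max_mtime = mtime
    if max_mtime is not None:
        os.utime(target, (max_mtime, max_mtime))
    return max_mtime
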
Example #13
def _check_distfile(filename, digests, eout, show_errors=1):
	"""
	@return: a tuple of (match, stat_obj) where match is True if filename
	matches all given digests (if any) and stat_obj is a stat result, or
	None if the file does not exist.
	"""
	if digests is None:
		digests = {}
	size = digests.get("size")
	if size is not None and len(digests) == 1:
		digests = None

	try:
		st = os.stat(filename)
	except OSError:
		return (False, None)
	if size is not None and size != st.st_size:
		return (False, st)
	if not digests:
		if size is not None:
			eout.ebegin(_("%s size ;-)") % os.path.basename(filename))
			eout.eend(0)
		elif st.st_size == 0:
			# Zero-byte distfiles are always invalid.
			return (False, st)
	else:
		if _check_digests(filename, digests, show_errors=show_errors):
			eout.ebegin("%s %s ;-)" % (os.path.basename(filename),
				" ".join(sorted(digests))))
			eout.eend(0)
		else:
			return (False, st)
	return (True, st)
Example #14
	def sync_timestamp(self):
			# If possible, update the mtime to match the remote package if
			# the fetcher didn't already do it automatically.
			bintree = self.pkg.root_config.trees["bintree"]
			if bintree._remote_has_index:
				remote_mtime = bintree._remotepkgs[
					bintree.dbapi._instance_key(
					self.pkg.cpv)].get("_mtime_")
				if remote_mtime is not None:
					try:
						remote_mtime = long(remote_mtime)
					except ValueError:
						pass
					else:
						try:
							local_mtime = os.stat(self.pkg_path)[stat.ST_MTIME]
						except OSError:
							pass
						else:
							if remote_mtime != local_mtime:
								try:
									os.utime(self.pkg_path,
										(remote_mtime, remote_mtime))
								except OSError:
									pass
Example #15
	def _pkgindex_entry(self, cpv):
		"""
		Performs checksums and evaluates USE flag conditionals.
		Raises InvalidDependString if necessary.
		@rtype: dict
		@return: a dict containing an entry for the given cpv.
		"""

		pkg_path = self.getname(cpv)

		d = dict(zip(self._pkgindex_aux_keys,
			self.dbapi.aux_get(cpv, self._pkgindex_aux_keys)))

		d.update(perform_multiple_checksums(
			pkg_path, hashes=self._pkgindex_hashes))

		d["CPV"] = cpv
		st = os.stat(pkg_path)
		d["MTIME"] = str(st[stat.ST_MTIME])
		d["SIZE"] = str(st.st_size)

		rel_path = self._pkg_paths[cpv]
		# record location if it's non-default
		if rel_path != cpv + ".tbz2":
			d["PATH"] = rel_path

		self._eval_use_flags(cpv, d)
		return d
Example #16
	def load(self):
		atoms_changed = False
		try:
			mtime = os.stat(self._filename).st_mtime
		except (OSError, IOError):
			mtime = None
		if (not self._loaded or self._mtime != mtime):
			try:
				data, errors = self.loader.load()
				for fname in errors:
					for e in errors[fname]:
						self.errors.append(fname+": "+e)
			except EnvironmentError as e:
				if e.errno != errno.ENOENT:
					raise
				del e
				data = {}
			nonatoms = list(data)
			self._mtime = mtime
			atoms_changed = True
		else:
			nonatoms = list(self._nonatoms)

		if atoms_changed:
			self._setAtoms(nonatoms)
Example #17
	def close(self):
		"""Closes the temporary file, copies permissions (if possible),
		and performs the atomic replacement via os.rename().  If the abort()
		method has been called, then the temp file is closed and removed."""
		f = object.__getattribute__(self, '_file')
		real_name = object.__getattribute__(self, '_real_name')
		if not f.closed:
			try:
				f.close()
				if not object.__getattribute__(self, '_aborted'):
					try:
						apply_stat_permissions(f.name, os.stat(real_name))
					except OperationNotPermitted:
						pass
					except FileNotFound:
						pass
					except OSError as oe: # from the above os.stat call
						if oe.errno in (errno.ENOENT, errno.EPERM):
							pass
						else:
							raise
					os.rename(f.name, real_name)
			finally:
				# Make sure we cleanup the temp file
				# even if an exception is raised.
				try:
					os.unlink(f.name)
				except OSError as oe:
					pass
Example #18
def RecursiveFileLoader(filename):
    """
    If filename is of type file, return a generator that yields filename;
    else if filename is of type directory, return a generator that yields
    files in that directory.

    Ignore files beginning with . or ending in ~.
    Prune CVS directories.

    @param filename: name of a file/directory to traverse
    @rtype: generator
    @return: generator of files to process
    """

    try:
        st = os.stat(filename)
    except OSError:
        return
    if stat.S_ISDIR(st.st_mode):
        for root, dirs, files in os.walk(filename):
            for d in list(dirs):
                if d[:1] == "." or d == "CVS":
                    dirs.remove(d)
            for f in files:
                try:
                    f = _unicode_decode(f, encoding=_encodings["fs"], errors="strict")
                except UnicodeDecodeError:
                    continue
                if f[:1] == "." or f[-1:] == "~":
                    continue
                yield os.path.join(root, f)
    else:
        yield filename
Example #19
	def _set_returncode(self, wait_retval):
		SpawnProcess._set_returncode(self, wait_retval)
		if self.returncode == os.EX_OK:
			# If possible, update the mtime to match the remote package if
			# the fetcher didn't already do it automatically.
			bintree = self.pkg.root_config.trees["bintree"]
			if bintree._remote_has_index:
				remote_mtime = bintree._remotepkgs[self.pkg.cpv].get("MTIME")
				if remote_mtime is not None:
					try:
						remote_mtime = long(remote_mtime)
					except ValueError:
						pass
					else:
						try:
							local_mtime = os.stat(self.pkg_path)[stat.ST_MTIME]
						except OSError:
							pass
						else:
							if remote_mtime != local_mtime:
								try:
									os.utime(self.pkg_path,
										(remote_mtime, remote_mtime))
								except OSError:
									pass

		if self.locked:
			self.unlock()
Example #20
	def __getattr__(self, attr):
		if attr == 'mtime':
			# use stat.ST_MTIME; accessing .st_mtime gets you a float
			# depending on the python version, and long(float) introduces
			# some rounding issues that aren't present for people using
			# the straight c api.
			# thus use the defacto python compatibility work around;
			# access via index, which guarantees you get the raw long.
			try:
				self.mtime = obj = os.stat(self.location)[stat.ST_MTIME]
			except OSError as e:
				if e.errno in (errno.ENOENT, errno.ESTALE):
					raise FileNotFound(self.location)
				elif e.errno == PermissionDenied.errno:
					raise PermissionDenied(self.location)
				raise
			return obj
		if not attr.islower():
			# we don't care to allow .mD5 as an alias for .md5
			raise AttributeError(attr)
		hashname = attr.upper()
		if hashname not in checksum.hashfunc_map:
			raise AttributeError(attr)
		val = checksum.perform_checksum(self.location, hashname)[0]
		setattr(self, attr, val)
		return val
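
The same lazy-attribute pattern reduced to its core: compute the integer mtime on first access, cache it on the instance so __getattr__ is never consulted for it again, and translate ENOENT/ESTALE into a more specific exception. In this sketch FileNotFoundError merely stands in for Portage's FileNotFound:

import errno
import os
import stat

class LazyPathInfo:
    def __init__(self, location):
        self.location = location

    def __getattr__(self, attr):
        if attr == "mtime":
            try:
                # Index access returns the raw integer, unlike .st_mtime.
                value = os.stat(self.location)[stat.ST_MTIME]
            except OSError as e:
                if e.errno in (errno.ENOENT, errno.ESTALE):
                    raise FileNotFoundError(self.location)
                raise
            # Cache the value: later lookups bypass __getattr__ entirely.
            self.mtime = value
            return value
        raise AttributeError(attr)
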
Example #21
	def _fetch_fs(self, mirror_info):
		file_path = os.path.join(mirror_info.location, self.distfile)

		st = None
		size_ok = False
		try:
			st = os.stat(file_path)
		except OSError as e:
			if e.errno not in (errno.ENOENT, errno.ESTALE):
				msg = "%s stat failed in %s: %s" % \
					(self.distfile, mirror_info.name, e)
				self.scheduler.output(msg + '\n', background=True,
					log_path=self._log_path)
				logging.error(msg)
		else:
			size_ok = st.st_size == self.digests["size"]
			self._current_stat = st

		if size_ok:
			self._current_mirror = mirror_info
			self._start_task(
				FileDigester(file_path=file_path,
					hash_names=(self._select_hash(),),
					background=self.background,
					logfile=self._log_path),
				self._fs_mirror_digester_exit)
		else:
			self._try_next_mirror()
Example #22
	def _migrate(self, update_location):
		"""
		When repo.user_location is a normal directory, migrate it to
		storage so that it can be replaced with a symlink. After migration,
		commit the content as the latest snapshot.
		"""
		try:
			os.rename(self._user_location, update_location)
		except OSError:
			portage.util.ensure_dirs(update_location)
			portage.util.apply_stat_permissions(update_location,
				os.stat(self._user_location))
			# It's probably on a different device, so copy it.
			yield self._check_call(['rsync', '-a',
				self._user_location + '/', update_location + '/'])

			# Remove the old copy so that symlink can be created. Run with
			# maximum privileges, since removal requires write access to
			# the parent directory.
			yield self._check_call(['rm', '-rf', self._user_location], privileged=True)

		self._update_location = update_location

		# Make this copy the latest snapshot
		yield self.commit_update()
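
os.rename() fails with EXDEV when source and destination live on different devices, which is why _migrate falls back to copying. A minimal sketch of the same fallback using shutil in place of rsync (move_tree is an illustrative name, not Portage API):

import errno
import os
import shutil

def move_tree(src, dst):
    """Rename src to dst, copying instead when they are on different devices."""
    try:
        os.rename(src, dst)
    except OSError as e:
        if e.errno != errno.EXDEV:
            raise
        # Different filesystem: copy the tree, then remove the original.
        shutil.copytree(src, dst)
        shutil.rmtree(src)
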
Example #23
def verify_all(filename, mydict, calc_prelink=0, strict=0):
    """
	Verify all checksums against a file.

	@param filename: File to run the checksums against
	@type filename: String
	@param calc_prelink: Whether or not to reverse prelink before running the checksum
	@type calc_prelink: Integer
	@param strict: Enable/Disable strict checking (which stops exactly at a checksum failure and throws an exception)
	@type strict: Integer
	@rtype: Tuple
	@return: Result of the checks and possible message:
		1) If size fails, False, and a tuple containing a message, the given size, and the actual size
		2) If there is an os error, False, and a tuple containing the system error followed by 2 nulls
		3) If a checksum fails, False and a tuple containing a message, the given hash, and the actual hash
		4) If all checks succeed, return True and a fake reason
	"""
    # Dict relates to single file only.
    # returns: (passed,reason)
    file_is_ok = True
    reason = "Reason unknown"
    try:
        mysize = os.stat(filename)[stat.ST_SIZE]
        if mydict["size"] != mysize:
            return False, (_("Filesize does not match recorded size"), mysize, mydict["size"])
    except OSError as e:
        if e.errno == errno.ENOENT:
            raise portage.exception.FileNotFound(filename)
        return False, (str(e), None, None)

    verifiable_hash_types = set(mydict).intersection(hashfunc_map)
    verifiable_hash_types.discard("size")
    if not verifiable_hash_types:
        expected = set(hashfunc_map)
        expected.discard("size")
        expected = list(expected)
        expected.sort()
        expected = " ".join(expected)
        got = set(mydict)
        got.discard("size")
        got = list(got)
        got.sort()
        got = " ".join(got)
        return False, (_("Insufficient data for checksum verification"), got, expected)

    for x in sorted(mydict):
        if x == "size":
            continue
        elif x in hashfunc_map:
            myhash = perform_checksum(filename, x, calc_prelink=calc_prelink)[0]
            if mydict[x] != myhash:
                if strict:
                    raise portage.exception.DigestException(
                        ("Failed to verify '%(file)s' on " + "checksum type '%(type)s'") % {"file": filename, "type": x}
                    )
                else:
                    file_is_ok = False
                    reason = (("Failed on %s verification" % x), myhash, mydict[x])
                    break
    return file_is_ok, reason
Example #24
def process(mysettings, key, logentries, fulltext):
	if mysettings.get("PORT_LOGDIR"):
		logdir = normalize_path(mysettings["PORT_LOGDIR"])
	else:
		logdir = os.path.join(os.sep, mysettings["EPREFIX"].lstrip(os.sep),
			"var", "log", "portage")

	if not os.path.isdir(logdir):
		# Only initialize group/mode if the directory doesn't
		# exist, so that we don't override permissions if they
		# were previously set by the administrator.
		# NOTE: These permissions should be compatible with our
		# default logrotate config as discussed in bug 374287.
		logdir_uid = -1
		if portage.data.secpass >= 2:
			logdir_uid = portage_uid
		ensure_dirs(logdir, uid=logdir_uid, gid=portage_gid, mode=0o2770)

	elogdir = os.path.join(logdir, "elog")
	_ensure_log_subdirs(logdir, elogdir)

	# TODO: Locking
	elogfilename = elogdir+"/summary.log"
	elogfile = io.open(_unicode_encode(elogfilename,
		encoding=_encodings['fs'], errors='strict'),
		mode='a', encoding=_encodings['content'], errors='backslashreplace')

	# Copy group permission bits from parent directory.
	elogdir_st = os.stat(elogdir)
	elogdir_gid = elogdir_st.st_gid
	elogdir_grp_mode = 0o060 & elogdir_st.st_mode

	# Copy the uid from the parent directory if we have privileges
	# to do so, for compatibility with our default logrotate
	# config (see bug 378451). With the "su portage portage"
	# directive and logrotate-3.8.0, logrotate's chown call during
	# the compression phase will only succeed if the log file's uid
	# is portage_uid.
	logfile_uid = -1
	if portage.data.secpass >= 2:
		logfile_uid = elogdir_st.st_uid
	apply_permissions(elogfilename, uid=logfile_uid, gid=elogdir_gid,
		mode=elogdir_grp_mode, mask=0)

	time_str = time.strftime("%Y-%m-%d %H:%M:%S %Z",
		time.localtime(time.time()))
	# Avoid potential UnicodeDecodeError later.
	time_str = _unicode_decode(time_str,
		encoding=_encodings['content'], errors='replace')
	elogfile.write(_unicode_decode(
		_(">>> Messages generated by process " +
		"%(pid)d on %(time)s for package %(pkg)s:\n\n") %
		{"pid": os.getpid(), "time": time_str, "pkg": key}))
	elogfile.write(_unicode_decode(fulltext))
	elogfile.write(_unicode_decode("\n"))
	elogfile.close()

	return elogfilename
Example #25
def process(mysettings, key, logentries, fulltext):

	if mysettings.get("PORT_LOGDIR"):
		logdir = normalize_path(mysettings["PORT_LOGDIR"])
	else:
		logdir = os.path.join(os.sep, mysettings["EPREFIX"].lstrip(os.sep),
			"var", "log", "portage")

	if not os.path.isdir(logdir):
		# Only initialize group/mode if the directory doesn't
		# exist, so that we don't override permissions if they
		# were previously set by the administrator.
		# NOTE: These permissions should be compatible with our
		# default logrotate config as discussed in bug 374287.
		uid = -1
		if portage.data.secpass >= 2:
			uid = portage_uid
		ensure_dirs(logdir, uid=uid, gid=portage_gid, mode=0o2770)

	cat = mysettings['CATEGORY']
	pf = mysettings['PF']

	elogfilename = pf + ":" + _unicode_decode(
		time.strftime("%Y%m%d-%H%M%S", time.gmtime(time.time())),
		encoding=_encodings['content'], errors='replace') + ".log"

	if "split-elog" in mysettings.features:
		log_subdir = os.path.join(logdir, "elog", cat)
		elogfilename = os.path.join(log_subdir, elogfilename)
	else:
		log_subdir = os.path.join(logdir, "elog")
		elogfilename = os.path.join(log_subdir, cat + ':' + elogfilename)
	_ensure_log_subdirs(logdir, log_subdir)

	elogfile = io.open(_unicode_encode(elogfilename,
		encoding=_encodings['fs'], errors='strict'),
		mode='w', encoding=_encodings['content'], errors='backslashreplace')
	elogfile.write(_unicode_decode(fulltext))
	elogfile.close()

	# Copy group permission bits from parent directory.
	elogdir_st = os.stat(log_subdir)
	elogdir_gid = elogdir_st.st_gid
	elogdir_grp_mode = 0o060 & elogdir_st.st_mode

	# Copy the uid from the parent directory if we have privileges
	# to do so, for compatibility with our default logrotate
	# config (see bug 378451). With the "su portage portage"
	# directive and logrotate-3.8.0, logrotate's chown call during
	# the compression phase will only succeed if the log file's uid
	# is portage_uid.
	logfile_uid = -1
	if portage.data.secpass >= 2:
		logfile_uid = elogdir_st.st_uid
	apply_permissions(elogfilename, uid=logfile_uid, gid=elogdir_gid,
		mode=elogdir_grp_mode, mask=0)

	return elogfilename
Example #26
    def _filter(self, atom):

        cpv = self._db.match(atom)[0]
        path = self._db.getpath(cpv, filename="COUNTER")
        age = (time.time() - os.stat(path).st_mtime) / (3600 * 24)
        if (self._mode == "older" and age <= self._age) or (self._mode == "newer" and age >= self._age):
            return False
        else:
            return True
Example #27
def isdir_raise_eaccess(path):
	try:
		st = os.stat(path)
	except OSError as e:
		if e.errno == PermissionDenied.errno:
			raise PermissionDenied("stat('%s')" % path)
		return False
	else:
		return stat.S_ISDIR(st.st_mode)
Example #28
	def _apply_max_mtime(self, preserved_stats, entries):
		"""
		Set the Manifest mtime to the max mtime of all relevant files
		and directories. Directory mtimes account for file renames and
		removals. The existing Manifest mtime accounts for eclass
		modifications that change DIST entries. This results in a
		stable/predictable mtime, which is useful when converting thin
		manifests to thick manifests for distribution via rsync. For
		portability, the mtime is set with 1 second resolution.

		@param preserved_stats: maps paths to preserved stat results
			that should be used instead of os.stat() calls
		@type preserved_stats: dict
		@param entries: list of current Manifest2Entry instances
		@type entries: list
		"""
		# Use stat_result[stat.ST_MTIME] for 1 second resolution, since
		# it always rounds down. Note that stat_result.st_mtime will round
		# up from 0.999999999 to 1.0 when precision is lost during conversion
		# from nanosecond resolution to float.
		max_mtime = None
		_update_max = (lambda st: max_mtime if max_mtime is not None
			and max_mtime > st[stat.ST_MTIME] else st[stat.ST_MTIME])
		_stat = (lambda path: preserved_stats[path] if path in preserved_stats
			else os.stat(path))

		for stat_result in preserved_stats.values():
			max_mtime = _update_max(stat_result)

		for entry in entries:
			if entry.type == 'DIST':
				continue
			abs_path = (os.path.join(self.pkgdir, 'files', entry.name) if
				entry.type == 'AUX' else os.path.join(self.pkgdir, entry.name))
			max_mtime = _update_max(_stat(abs_path))

		if not self.thin:
			# Account for changes to all relevant nested directories.
			# This is not necessary for thin manifests because
			# self.pkgdir is already included via preserved_stats.
			for parent_dir, dirs, files in os.walk(self.pkgdir.rstrip(os.sep)):
				try:
					parent_dir = _unicode_decode(parent_dir,
						encoding=_encodings['fs'], errors='strict')
				except UnicodeDecodeError:
					# If an absolute path cannot be decoded, then it is
					# always excluded from the manifest (repoman will
					# report such problems).
					pass
				else:
					max_mtime = _update_max(_stat(parent_dir))

		if max_mtime is not None:
			for path in preserved_stats:
				os.utime(path, (max_mtime, max_mtime))
Example #29
def _checksum_failure_temp_file(settings, distdir, basename):
	"""
	First try to find a duplicate temp file with the same checksum and return
	that filename if available. Otherwise, use mkstemp to create a new unique
	filename._checksum_failure_.$RANDOM, rename the given file, and return the
	new filename. In any case, filename will be renamed or removed before this
	function returns a temp filename.
	"""

	filename = os.path.join(distdir, basename)
	if basename.endswith(_download_suffix):
		normal_basename = basename[:-len(_download_suffix)]
	else:
		normal_basename = basename
	size = os.stat(filename).st_size
	checksum = None
	tempfile_re = re.compile(re.escape(normal_basename) + r'\._checksum_failure_\..*')
	for temp_filename in os.listdir(distdir):
		if not tempfile_re.match(temp_filename):
			continue
		temp_filename = os.path.join(distdir, temp_filename)
		try:
			if size != os.stat(temp_filename).st_size:
				continue
		except OSError:
			continue
		try:
			temp_checksum = perform_md5(temp_filename)
		except FileNotFound:
			# Apparently the temp file disappeared. Let it go.
			continue
		if checksum is None:
			checksum = perform_md5(filename)
		if checksum == temp_checksum:
			os.unlink(filename)
			return temp_filename

	fd, temp_filename = \
		tempfile.mkstemp("", normal_basename + "._checksum_failure_.", distdir)
	os.close(fd)
	_movefile(filename, temp_filename, mysettings=settings)
	return temp_filename
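
The duplicate search above only hashes candidates whose size already matches, and hashes the original file lazily. That logic can be isolated into a small helper built on hashlib (both function names are made up):

import hashlib
import os

def md5_of(path, blocksize=65536):
    h = hashlib.md5()
    with open(path, "rb") as f:
        for block in iter(lambda: f.read(blocksize), b""):
            h.update(block)
    return h.hexdigest()

def find_duplicate(filename, candidates):
    """Return the first candidate with the same size and MD5 as filename."""
    size = os.stat(filename).st_size
    checksum = None
    for cand in candidates:
        try:
            if os.stat(cand).st_size != size:
                continue
        except OSError:
            continue
        if checksum is None:
            checksum = md5_of(filename)  # hash the original only when needed
        if md5_of(cand) == checksum:
            return cand
    return None
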
Example #30
def find_updated_config_files(target_root, config_protect):
	"""
	Return a tuple of configuration files that need to be updated.
	The tuple contains lists organized like this:
	[ protected_dir, file_list ]
	If the protected config isn't a protected_dir but a protected_file, the list is:
	[ protected_file, None ]
	If no configuration files need to be updated, nothing is yielded.
	"""

	os = _os_merge

	if config_protect:
		# directories with some protect files in them
		for x in config_protect:
			files = []

			x = os.path.join(target_root, x.lstrip(os.path.sep))
			if not os.access(x, os.W_OK):
				continue
			try:
				mymode = os.lstat(x).st_mode
			except OSError:
				continue

			if stat.S_ISLNK(mymode):
				# We want to treat it like a directory if it
				# is a symlink to an existing directory.
				try:
					real_mode = os.stat(x).st_mode
					if stat.S_ISDIR(real_mode):
						mymode = real_mode
				except OSError:
					pass

			if stat.S_ISDIR(mymode):
				mycommand = \
					"find '%s' -name '.*' -type d -prune -o -name '._cfg????_*'" % x
			else:
				mycommand = "find '%s' -maxdepth 1 -name '._cfg????_%s'" % \
						os.path.split(x.rstrip(os.path.sep))
			mycommand += " ! -name '.*~' ! -iname '.*.bak' -print0"
			a = subprocess_getstatusoutput(mycommand)

			if a[0] == 0:
				files = a[1].split('\0')
				# split always produces an empty string as the last element
				if files and not files[-1]:
					del files[-1]
				if files:
					if stat.S_ISDIR(mymode):
						yield (x, files)
					else:
						yield (x, None)
Example #31
    def pre_sync(self, repo):
        self.settings, self.trees, self.mtimedb = self.emerge_config
        self.xterm_titles = "notitles" not in self.settings.features
        msg = ">>> Syncing repository '%s' into '%s'..." \
         % (repo.name, repo.location)
        self.logger(self.xterm_titles, msg)
        writemsg_level(msg + "\n")
        self.portdb = self.trees[self.settings['EROOT']]['porttree'].dbapi
        try:
            st = os.stat(repo.location)
        except OSError:
            st = None

        self.usersync_uid = None
        spawn_kwargs = {}
        spawn_kwargs["env"] = self.settings.environ()
        if repo.sync_user is not None:

            def get_sync_user_data(sync_user):
                user = None
                group = None
                home = None
                logname = None

                spl = sync_user.split(':', 1)
                if spl[0]:
                    username = spl[0]
                    try:
                        try:
                            pw = pwd.getpwnam(username)
                        except KeyError:
                            pw = pwd.getpwuid(int(username))
                    except (ValueError, KeyError):
                        writemsg("!!! User '%s' invalid or does not exist\n" %
                                 username,
                                 noiselevel=-1)
                        return (logname, user, group, home)
                    user = pw.pw_uid
                    group = pw.pw_gid
                    home = pw.pw_dir
                    logname = pw.pw_name

                if len(spl) > 1:
                    groupname = spl[1]
                    try:
                        try:
                            gp = grp.getgrnam(groupname)
                        except KeyError:
                            gp = grp.getgrgid(int(groupname))
                    except (ValueError, KeyError):
                        writemsg("!!! Group '%s' invalid or does not exist\n" %
                                 groupname,
                                 noiselevel=-1)
                        return (logname, user, group, home)

                    group = gp.gr_gid

                return (logname, user, group, home)

            # user or user:group
            (logname, uid, gid, home) = get_sync_user_data(repo.sync_user)
            if uid is not None:
                spawn_kwargs["uid"] = uid
                self.usersync_uid = uid
            if gid is not None:
                spawn_kwargs["gid"] = gid
                spawn_kwargs["groups"] = [gid]
            if home is not None:
                spawn_kwargs["env"]["HOME"] = home
            if logname is not None:
                spawn_kwargs["env"]["LOGNAME"] = logname

        if st is None:
            perms = {'mode': 0o755}
            # respect sync-user if set
            if 'umask' in spawn_kwargs:
                perms['mode'] &= ~spawn_kwargs['umask']
            if 'uid' in spawn_kwargs:
                perms['uid'] = spawn_kwargs['uid']
            if 'gid' in spawn_kwargs:
                perms['gid'] = spawn_kwargs['gid']

            portage.util.ensure_dirs(repo.location, **perms)
            st = os.stat(repo.location)

        if (repo.sync_user is None and 'usersync' in self.settings.features
                and portage.data.secpass >= 2
                and (st.st_uid != os.getuid() and st.st_mode & 0o700
                     or st.st_gid != os.getgid() and st.st_mode & 0o070)):
            try:
                pw = pwd.getpwuid(st.st_uid)
            except KeyError:
                pass
            else:
                # Drop privileges when syncing, in order to match
                # existing uid/gid settings.
                self.usersync_uid = st.st_uid
                spawn_kwargs["uid"] = st.st_uid
                spawn_kwargs["gid"] = st.st_gid
                spawn_kwargs["groups"] = [st.st_gid]
                spawn_kwargs["env"]["HOME"] = pw.pw_dir
                spawn_kwargs["env"]["LOGNAME"] = pw.pw_name
                umask = 0o002
                if not st.st_mode & 0o020:
                    umask = umask | 0o020
                spawn_kwargs["umask"] = umask
        # override the defaults when sync_umask is set
        if repo.sync_umask is not None:
            spawn_kwargs["umask"] = int(repo.sync_umask, 8)
        self.spawn_kwargs = spawn_kwargs

        if self.usersync_uid is not None:
            # PORTAGE_TMPDIR is used below, so validate it and
            # bail out if necessary.
            rval = _check_temp_dir(self.settings)
            if rval != os.EX_OK:
                return rval

        os.umask(0o022)
        return os.EX_OK
Example #32
def _lockfile_iteration(mypath,
                        wantnewlockfile=False,
                        unlinkfile=False,
                        waiting_msg=None,
                        flags=0):
    """
	Acquire a lock on mypath, without retry. Return None if the lockfile
	was removed by previous lock holder (caller must retry).

	@param mypath: lock file path
	@type mypath: str
	@param wantnewlockfile: use a separate new lock file
	@type wantnewlockfile: bool
	@param unlinkfile: remove lock file prior to unlock
	@type unlinkfile: bool
	@param waiting_msg: message to show before blocking
	@type waiting_msg: str
	@param flags: lock flags (only supports os.O_NONBLOCK)
	@type flags: int
	@rtype: tuple or None
	@return: unlockfile tuple on success, None if retry is needed
	"""
    if not mypath:
        raise InvalidData(_("Empty path given"))

    # Since Python 3.4, chown requires int type (no proxies).
    portage_gid = int(portage.data.portage_gid)

    # Support for file object or integer file descriptor parameters is
    # deprecated due to ambiguity in whether or not it's safe to close
    # the file descriptor, making it prone to "Bad file descriptor" errors
    # or file descriptor leaks.
    if isinstance(mypath, str) and mypath[-1] == '/':
        mypath = mypath[:-1]

    lockfilename_path = mypath
    if hasattr(mypath, 'fileno'):
        warnings.warn(
            "portage.locks.lockfile() support for "
            "file object parameters is deprecated. Use a file path instead.",
            DeprecationWarning,
            stacklevel=2)
        lockfilename_path = getattr(mypath, 'name', None)
        mypath = mypath.fileno()
    if isinstance(mypath, int):
        warnings.warn(
            "portage.locks.lockfile() support for integer file "
            "descriptor parameters is deprecated. Use a file path instead.",
            DeprecationWarning,
            stacklevel=2)
        lockfilename = mypath
        wantnewlockfile = 0
        unlinkfile = 0
    elif wantnewlockfile:
        base, tail = os.path.split(mypath)
        lockfilename = os.path.join(base, "." + tail + ".portage_lockfile")
        lockfilename_path = lockfilename
        unlinkfile = 1
    else:
        lockfilename = mypath

    if isinstance(mypath, str):
        if not os.path.exists(os.path.dirname(mypath)):
            raise DirectoryNotFound(os.path.dirname(mypath))
        preexisting = os.path.exists(lockfilename)
        old_mask = os.umask(000)
        try:
            while True:
                try:
                    myfd = os.open(lockfilename, os.O_CREAT | os.O_RDWR, 0o660)
                except OSError as e:
                    if e.errno in (errno.ENOENT,
                                   errno.ESTALE) and os.path.isdir(
                                       os.path.dirname(lockfilename)):
                        # Retry required for NFS (see bug 636798).
                        continue
                    else:
                        _raise_exc(e)
                else:
                    break

            if not preexisting:
                try:
                    if portage.data.secpass >= 1 and os.stat(
                            lockfilename).st_gid != portage_gid:
                        os.chown(lockfilename, -1, portage_gid)
                except OSError as e:
                    if e.errno in (errno.ENOENT, errno.ESTALE):
                        os.close(myfd)
                        return None
                    writemsg("%s: chown('%s', -1, %d)\n" % \
                     (e, lockfilename, portage_gid), noiselevel=-1)
                    writemsg(_("Cannot chown a lockfile: '%s'\n") % \
                     lockfilename, noiselevel=-1)
                    writemsg(_("Group IDs of current user: %s\n") % \
                     " ".join(str(n) for n in os.getgroups()),
                     noiselevel=-1)
        finally:
            os.umask(old_mask)

    elif isinstance(mypath, int):
        myfd = mypath

    else:
        raise ValueError(_("Unknown type passed in '%s': '%s'") % \
         (type(mypath), mypath))

    # try for a non-blocking lock, if it's held, throw a message
    # we're waiting on lockfile and use a blocking attempt.
    locking_method = portage._eintr_func_wrapper(_get_lock_fn())
    try:
        if "__PORTAGE_TEST_HARDLINK_LOCKS" in os.environ:
            raise IOError(errno.ENOSYS, "Function not implemented")
        locking_method(myfd, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except IOError as e:
        if not hasattr(e, "errno"):
            raise
        if e.errno in (errno.EACCES, errno.EAGAIN, errno.ENOLCK):
            # resource temp unavailable; eg, someone beat us to the lock.
            if flags & os.O_NONBLOCK:
                os.close(myfd)
                raise TryAgain(mypath)

            global _quiet
            if _quiet:
                out = None
            else:
                out = portage.output.EOutput()
            if waiting_msg is None:
                if isinstance(mypath, int):
                    waiting_msg = _("waiting for lock on fd %i") % myfd
                else:
                    waiting_msg = _("waiting for lock on %s") % lockfilename
            if out is not None:
                out.ebegin(waiting_msg)
            # try for the exclusive lock now.
            enolock_msg_shown = False
            while True:
                try:
                    locking_method(myfd, fcntl.LOCK_EX)
                except EnvironmentError as e:
                    if e.errno == errno.ENOLCK:
                        # This is known to occur on Solaris NFS (see
                        # bug #462694). Assume that the error is due
                        # to temporary exhaustion of record locks,
                        # and loop until one becomes available.
                        if not enolock_msg_shown:
                            enolock_msg_shown = True
                            if isinstance(mypath, int):
                                context_desc = _("Error while waiting "
                                                 "to lock fd %i") % myfd
                            else:
                                context_desc = _("Error while waiting "
                                                 "to lock '%s'") % lockfilename
                            writemsg("\n!!! %s: %s\n" % (context_desc, e),
                                     noiselevel=-1)

                        time.sleep(_HARDLINK_POLL_LATENCY)
                        continue

                    if out is not None:
                        out.eend(1, str(e))
                    raise
                else:
                    break

            if out is not None:
                out.eend(os.EX_OK)
        elif e.errno in (errno.ENOSYS, ):
            # We're not allowed to lock on this FS.
            if not isinstance(lockfilename, int):
                # If a file object was passed in, it's not safe
                # to close the file descriptor because it may
                # still be in use.
                os.close(myfd)
            lockfilename_path = _unicode_decode(lockfilename_path,
                                                encoding=_encodings['fs'],
                                                errors='strict')
            if not isinstance(lockfilename_path, str):
                raise
            link_success = hardlink_lockfile(lockfilename_path,
                                             waiting_msg=waiting_msg,
                                             flags=flags)
            if not link_success:
                raise
            lockfilename = lockfilename_path
            locking_method = None
            myfd = HARDLINK_FD
        else:
            raise

    fstat_result = None
    if isinstance(lockfilename, str) and myfd != HARDLINK_FD and unlinkfile:
        try:
            (removed, fstat_result) = _lockfile_was_removed(myfd, lockfilename)
        except Exception:
            # Do not leak the file descriptor here.
            os.close(myfd)
            raise
        else:
            if removed:
                # Removed by previous lock holder... Caller will retry...
                os.close(myfd)
                return None

    if myfd != HARDLINK_FD:
        _lock_manager(myfd,
                      os.fstat(myfd) if fstat_result is None else fstat_result,
                      mypath)

    writemsg(str((lockfilename, myfd, unlinkfile)) + "\n", 1)
    return (lockfilename, myfd, unlinkfile, locking_method)
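
Stripped of the NFS retries and hardlink fallbacks, the core pattern is: open the lock file, try a non-blocking exclusive lock first, and only then fall back to a blocking one after telling the user why we are waiting. A POSIX-only sketch, not a drop-in replacement for portage.locks:

import fcntl
import os

def acquire_lock(path):
    """Open path and take an exclusive fcntl lock on it; return the fd."""
    fd = os.open(path, os.O_CREAT | os.O_RDWR, 0o660)
    try:
        try:
            fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            # Someone else holds the lock; report it and block until free.
            print("waiting for lock on %s" % path)
            fcntl.lockf(fd, fcntl.LOCK_EX)
        return fd
    except Exception:
        os.close(fd)
        raise
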
Example #33
def _lockfile_was_removed(lock_fd, lock_path):
    """
	Check if lock_fd still refers to a file located at lock_path, since
	the file may have been removed by a concurrent process that held the
	lock earlier. This implementation includes support for NFS, where
	stat is not reliable for removed files due to the default file
	attribute cache behavior ('ac' mount option).

	@param lock_fd: an open file descriptor for a lock file
	@type lock_fd: int
	@param lock_path: path of lock file
	@type lock_path: str
	@rtype: tuple
	@return: a tuple of (removed, fstat_result), where removed is True if
		lock_path does not correspond to lock_fd, and False otherwise
	"""
    try:
        fstat_st = os.fstat(lock_fd)
    except OSError as e:
        if e.errno not in (errno.ENOENT, errno.ESTALE):
            _raise_exc(e)
        return (True, None)

    # Since stat is not reliable for removed files on NFS with the default
    # file attribute cache behavior ('ac' mount option), create a temporary
    # hardlink in order to prove that the file path exists on the NFS server.
    hardlink_path = hardlock_name(lock_path)
    try:
        os.unlink(hardlink_path)
    except OSError as e:
        if e.errno not in (errno.ENOENT, errno.ESTALE):
            _raise_exc(e)
    try:
        try:
            os.link(lock_path, hardlink_path)
        except OSError as e:
            if e.errno not in (errno.ENOENT, errno.ESTALE):
                _raise_exc(e)
            return (True, None)

        hardlink_stat = os.stat(hardlink_path)
        if hardlink_stat.st_ino != fstat_st.st_ino or hardlink_stat.st_dev != fstat_st.st_dev:
            # Create another hardlink in order to detect whether or not
            # hardlink inode numbers are expected to match. For example,
            # inode numbers are not expected to match for sshfs.
            inode_test = hardlink_path + '-inode-test'
            try:
                os.unlink(inode_test)
            except OSError as e:
                if e.errno not in (errno.ENOENT, errno.ESTALE):
                    _raise_exc(e)
            try:
                os.link(hardlink_path, inode_test)
            except OSError as e:
                if e.errno not in (errno.ENOENT, errno.ESTALE):
                    _raise_exc(e)
                return (True, None)
            else:
                if not os.path.samefile(hardlink_path, inode_test):
                    # This implies that inode numbers are not expected
                    # to match for this file system, so use a simple
                    # stat call to detect if lock_path has been removed.
                    return (not os.path.exists(lock_path), fstat_st)
            finally:
                try:
                    os.unlink(inode_test)
                except OSError as e:
                    if e.errno not in (errno.ENOENT, errno.ESTALE):
                        _raise_exc(e)
            return (True, None)
    finally:
        try:
            os.unlink(hardlink_path)
        except OSError as e:
            if e.errno not in (errno.ENOENT, errno.ESTALE):
                _raise_exc(e)
    return (False, fstat_st)
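
The essential NFS trick is to prove that the path still exists on the server by hardlinking it and comparing the link's inode/device pair against the already-open descriptor. A reduced sketch without the sshfs inode-test special case (the probe suffix is arbitrary and assumed not to collide with an existing file):

import os

def fd_still_at_path(fd, path):
    """Return True if fd still refers to the file currently named path."""
    probe = path + ".probe"
    try:
        os.link(path, probe)  # fails with ENOENT if path was removed
    except OSError:
        return False
    try:
        probe_st = os.stat(probe)
        fd_st = os.fstat(fd)
        return probe_st.st_ino == fd_st.st_ino and probe_st.st_dev == fd_st.st_dev
    finally:
        os.unlink(probe)
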
Example #34
def emerge_main(args=None):
    """
    @param args: command arguments (default: sys.argv[1:])
    @type args: list
    """
    if args is None:
        args = sys.argv[1:]

    args = portage._decode_argv(args)

    # Use system locale.
    try:
        locale.setlocale(locale.LC_ALL, "")
    except locale.Error as e:
        writemsg_level("setlocale: %s\n" % e, level=logging.WARN)

    # Disable color until we're sure that it should be enabled (after
    # EMERGE_DEFAULT_OPTS has been parsed).
    portage.output.havecolor = 0

    # This first pass is just for options that need to be known as early as
    # possible, such as --config-root.  They will be parsed again later,
    # together with EMERGE_DEFAULT_OPTS (which may vary depending on the
    # the value of --config-root).
    myaction, myopts, myfiles = parse_opts(args, silent=True)
    if "--debug" in myopts:
        os.environ["PORTAGE_DEBUG"] = "1"
    if "--config-root" in myopts:
        os.environ["PORTAGE_CONFIGROOT"] = myopts["--config-root"]
    if "--sysroot" in myopts:
        os.environ["SYSROOT"] = myopts["--sysroot"]
    if "--root" in myopts:
        os.environ["ROOT"] = myopts["--root"]
    if "--prefix" in myopts:
        os.environ["EPREFIX"] = myopts["--prefix"]
    if "--accept-properties" in myopts:
        os.environ["ACCEPT_PROPERTIES"] = myopts["--accept-properties"]
    if "--accept-restrict" in myopts:
        os.environ["ACCEPT_RESTRICT"] = myopts["--accept-restrict"]

    # optimize --help (no need to load config / EMERGE_DEFAULT_OPTS)
    if myaction == "help":
        emerge_help()
        return os.EX_OK
    if myaction == "moo":
        print(COWSAY_MOO % platform.system())
        return os.EX_OK
    if myaction == "sync":
        # need to set this to True now in order for the repository config
        # loading to allow new repos with non-existent directories
        portage._sync_mode = True

    # Verify that /dev/null exists and is a device file as a cheap early
    # filter for obviously broken /dev/s.
    try:
        if os.stat(os.devnull).st_rdev == 0:
            writemsg_level(
                "Failed to validate a sane '/dev'.\n"
                "'/dev/null' is not a device file.\n",
                level=logging.ERROR,
                noiselevel=-1,
            )
            return 1
    except OSError:
        writemsg_level(
            "Failed to validate a sane '/dev'.\n"
            "'/dev/null' does not exist.\n",
            level=logging.ERROR,
            noiselevel=-1,
        )
        return 1

    # Verify that BASH process substitution works as another cheap early
    # filter. Process substitution uses '/dev/fd'.
    with open(os.devnull, "r+b") as dev_null:
        fd_pipes = {
            0: dev_null.fileno(),
            1: dev_null.fileno(),
            2: dev_null.fileno(),
        }
        if (portage.process.spawn_bash("[[ $(< <(echo foo) ) == foo ]]",
                                       fd_pipes=fd_pipes) != 0):
            writemsg_level(
                "Failed to validate a sane '/dev'.\n"
                "bash process substitution doesn't work; this may be an "
                "indication of a broken '/dev/fd'.\n",
                level=logging.ERROR,
                noiselevel=-1,
            )
            return 1

    # Portage needs to ensure a sane umask for the files it creates.
    os.umask(0o22)
    emerge_config = load_emerge_config(action=myaction,
                                       args=myfiles,
                                       opts=myopts)

    # Make locale variables from configuration files (make.defaults, make.conf) affect locale of emerge process.
    for locale_var_name in (
            "LANGUAGE",
            "LC_ALL",
            "LC_ADDRESS",
            "LC_COLLATE",
            "LC_CTYPE",
            "LC_IDENTIFICATION",
            "LC_MEASUREMENT",
            "LC_MESSAGES",
            "LC_MONETARY",
            "LC_NAME",
            "LC_NUMERIC",
            "LC_PAPER",
            "LC_TELEPHONE",
            "LC_TIME",
            "LANG",
    ):
        locale_var_value = emerge_config.running_config.settings.get(
            locale_var_name)
        if locale_var_value is not None:
            os.environ.setdefault(locale_var_name, locale_var_value)
    try:
        locale.setlocale(locale.LC_ALL, "")
    except locale.Error as e:
        writemsg_level("setlocale: %s\n" % e, level=logging.WARN)

    tmpcmdline = []
    if "--ignore-default-opts" not in myopts:
        tmpcmdline.extend(
            portage.util.shlex_split(
                emerge_config.target_config.settings.get(
                    "EMERGE_DEFAULT_OPTS", "")))
    tmpcmdline.extend(args)
    emerge_config.action, emerge_config.opts, emerge_config.args = parse_opts(
        tmpcmdline)

    try:
        return run_action(emerge_config)
    finally:
        # Call destructors for our portdbapi instances.
        for x in emerge_config.trees.values():
            if "porttree" in x.lazy_items:
                continue
            x["porttree"].dbapi.close_caches()
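
The /dev/null sanity check near the top of emerge_main() can also be expressed with stat.S_ISCHR, which reads more directly than testing st_rdev; a hedged equivalent for illustration only:

import os
import stat

def devnull_is_sane():
    """Return True if os.devnull exists and is a character device."""
    try:
        st = os.stat(os.devnull)
    except OSError:
        return False
    # A real /dev/null is a character device (non-zero st_rdev).
    return stat.S_ISCHR(st.st_mode)
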
Example #35
def UpdateChangeLog(pkgdir,
                    user,
                    msg,
                    skel_path,
                    category,
                    package,
                    new=(),
                    removed=(),
                    changed=(),
                    pretend=False,
                    quiet=False):
    """
	Write an entry to an existing ChangeLog, or create a new one.
	Updates copyright year on changed files, and updates the header of
	ChangeLog with the contents of skel.ChangeLog.
	"""

    if '<root@' in user:
        if not quiet:
            logging.critical('Please set ECHANGELOG_USER or run as non-root')
        return None

    # ChangeLog times are in UTC
    gmtime = time.gmtime()
    year = time.strftime('%Y', gmtime)
    date = time.strftime('%d %b %Y', gmtime)

    cl_path = os.path.join(pkgdir, 'ChangeLog')
    clold_lines = []
    clnew_lines = []
    old_header_lines = []
    header_lines = []

    clold_file = None
    try:
        clold_file = io.open(_unicode_encode(cl_path,
                                             encoding=_encodings['fs'],
                                             errors='strict'),
                             mode='r',
                             encoding=_encodings['repo.content'],
                             errors='replace')
    except EnvironmentError:
        pass

    f, clnew_path = tempfile.mkstemp()

    # construct correct header first
    try:
        if clold_file is not None:
            # retain header from old ChangeLog
            first_line = True
            for line in clold_file:
                line_strip = line.strip()
                if line_strip and line[:1] != "#":
                    clold_lines.append(line)
                    break
                # Always make sure cat/pkg is up-to-date in case we are moving
                # packages around, copying from another package, etc.
                if first_line:
                    if line.startswith('# ChangeLog for'):
                        line = '# ChangeLog for %s/%s\n' % (category, package)
                    first_line = False
                old_header_lines.append(line)
                header_lines.append(update_copyright_year(year, line))
                if not line_strip:
                    break

        clskel_file = None
        if not header_lines:
            # delay opening this until we find we need a header
            try:
                clskel_file = io.open(_unicode_encode(
                    skel_path, encoding=_encodings['fs'], errors='strict'),
                                      mode='r',
                                      encoding=_encodings['repo.content'],
                                      errors='replace')
            except EnvironmentError:
                pass

        if clskel_file is not None:
            # read skel.ChangeLog up to first empty line
            for line in clskel_file:
                line_strip = line.strip()
                if not line_strip:
                    break
                line = line.replace('<CATEGORY>', category)
                line = line.replace('<PACKAGE_NAME>', package)
                line = update_copyright_year(year, line)
                header_lines.append(line)
            header_lines.append('\n')
            clskel_file.close()

        # write new ChangeLog entry
        clnew_lines.extend(header_lines)
        newebuild = False
        for fn in new:
            if not fn.endswith('.ebuild'):
                continue
            ebuild = fn.split(os.sep)[-1][0:-7]
            clnew_lines.append('*%s (%s)\n' % (ebuild, date))
            newebuild = True
        if newebuild:
            clnew_lines.append('\n')
        trivial_files = ('ChangeLog', 'Manifest')
        display_new = ['+' + elem for elem in new if elem not in trivial_files]
        display_removed = ['-' + elem for elem in removed]
        display_changed = [
            elem for elem in changed if elem not in trivial_files
        ]
        if not (display_new or display_removed or display_changed):
            # If there's nothing else to display, show one of the
            # trivial files.
            for fn in trivial_files:
                if fn in new:
                    display_new = ['+' + fn]
                    break
                elif fn in changed:
                    display_changed = [fn]
                    break

        display_new.sort()
        display_removed.sort()
        display_changed.sort()

        mesg = '%s; %s %s:' % (date, user, ', '.join(
            chain(display_new, display_removed, display_changed)))
        for line in textwrap.wrap(mesg,
                                  80,
                                  initial_indent='  ',
                                  subsequent_indent='  ',
                                  break_on_hyphens=False):
            clnew_lines.append('%s\n' % line)
        for line in textwrap.wrap(msg,
                                  80,
                                  initial_indent='  ',
                                  subsequent_indent='  '):
            clnew_lines.append('%s\n' % line)
        # Don't append a trailing newline if the file is new.
        if clold_file is not None:
            clnew_lines.append('\n')

        f = io.open(f,
                    mode='w',
                    encoding=_encodings['repo.content'],
                    errors='backslashreplace')

        for line in clnew_lines:
            f.write(line)

        # append stuff from old ChangeLog
        if clold_file is not None:

            if clold_lines:
                # clold_lines may contain a saved non-header line
                # that we want to write first.
                # Also, append this line to clnew_lines so that the
                # unified_diff call doesn't show it as removed.
                for line in clold_lines:
                    f.write(line)
                    clnew_lines.append(line)

            else:
                # ensure that there is no more than one blank
                # line after our new entry
                for line in clold_file:
                    if line.strip():
                        f.write(line)
                        break

            # Now prepend old_header_lines to clold_lines, for use
            # in the unified_diff call below.
            clold_lines = old_header_lines + clold_lines

            # Trim any trailing newlines.
            lines = clold_file.readlines()
            clold_file.close()
            while lines and lines[-1] == '\n':
                del lines[-1]
            f.writelines(lines)
        f.close()

        # show diff
        if not quiet:
            for line in difflib.unified_diff(clold_lines,
                                             clnew_lines,
                                             fromfile=cl_path,
                                             tofile=cl_path,
                                             n=0):
                util.writemsg_stdout(line, noiselevel=-1)
            util.writemsg_stdout("\n", noiselevel=-1)

        if pretend:
            # remove what we've done
            os.remove(clnew_path)
        else:
            # rename to ChangeLog, and set permissions
            try:
                clold_stat = os.stat(cl_path)
            except OSError:
                clold_stat = None

            shutil.move(clnew_path, cl_path)

            if clold_stat is None:
                util.apply_permissions(cl_path, mode=0o644)
            else:
                util.apply_stat_permissions(cl_path, clold_stat)

        if clold_file is None:
            return True
        else:
            return False
    except IOError as e:
        err = 'Repoman is unable to create/write to Changelog.new file: %s' % (
            e, )
        logging.critical(err)
        # try to remove if possible
        try:
            os.remove(clnew_path)
        except OSError:
            pass
        return None
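
UpdateChangeLog writes the new content to a temporary file and only moves it over the real ChangeLog once everything succeeded, copying the old file's permissions when it existed. A minimal stdlib sketch of that write-temp-then-rename pattern (hypothetical helper, not the repoman code):

import os
import shutil
import tempfile

def replace_file_atomically(path, text):
    # Preserve the old mode if the file exists; fall back to 0o644 otherwise.
    try:
        old_mode = os.stat(path).st_mode & 0o7777
    except OSError:
        old_mode = 0o644
    fd, tmp_path = tempfile.mkstemp(dir=os.path.dirname(path) or ".")
    try:
        with os.fdopen(fd, "w") as f:
            f.write(text)
        os.chmod(tmp_path, old_mode)
        shutil.move(tmp_path, path)
    except BaseException:
        # Clean up the temporary file on any failure, like the except block above.
        try:
            os.remove(tmp_path)
        except OSError:
            pass
        raise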
Ejemplo n.º 36
0
def process(mysettings, key, logentries, fulltext):
    if mysettings.get("PORTAGE_LOGDIR"):
        logdir = normalize_path(mysettings["PORTAGE_LOGDIR"])
    else:
        logdir = os.path.join(os.sep, mysettings["EPREFIX"].lstrip(os.sep),
                              "var", "log", "portage")

    if not os.path.isdir(logdir):
        # Only initialize group/mode if the directory doesn't
        # exist, so that we don't override permissions if they
        # were previously set by the administrator.
        # NOTE: These permissions should be compatible with our
        # default logrotate config as discussed in bug 374287.
        logdir_uid = -1
        if portage.data.secpass >= 2:
            logdir_uid = portage_uid
        ensure_dirs(logdir, uid=logdir_uid, gid=portage_gid, mode=0o2770)

    elogdir = os.path.join(logdir, "elog")
    _ensure_log_subdirs(logdir, elogdir)

    # TODO: Locking
    elogfilename = elogdir + "/summary.log"
    try:
        elogfile = io.open(_unicode_encode(elogfilename,
                                           encoding=_encodings['fs'],
                                           errors='strict'),
                           mode='a',
                           encoding=_encodings['content'],
                           errors='backslashreplace')
    except IOError as e:
        func_call = "open('%s', 'a')" % elogfilename
        if e.errno == errno.EACCES:
            raise portage.exception.PermissionDenied(func_call)
        elif e.errno == errno.EPERM:
            raise portage.exception.OperationNotPermitted(func_call)
        elif e.errno == errno.EROFS:
            raise portage.exception.ReadOnlyFileSystem(func_call)
        else:
            raise

    # Copy group permission bits from parent directory.
    elogdir_st = os.stat(elogdir)
    elogdir_gid = elogdir_st.st_gid
    elogdir_grp_mode = 0o060 & elogdir_st.st_mode

    # Copy the uid from the parent directory if we have privileges
    # to do so, for compatibility with our default logrotate
    # config (see bug 378451). With the "su portage portage"
    # directive and logrotate-3.8.0, logrotate's chown call during
    # the compression phase will only succeed if the log file's uid
    # is portage_uid.
    logfile_uid = -1
    if portage.data.secpass >= 2:
        logfile_uid = elogdir_st.st_uid
    apply_permissions(elogfilename,
                      uid=logfile_uid,
                      gid=elogdir_gid,
                      mode=elogdir_grp_mode,
                      mask=0)

    time_fmt = "%Y-%m-%d %H:%M:%S %Z"
    if sys.hexversion < 0x3000000:
        time_fmt = _unicode_encode(time_fmt)
    time_str = time.strftime(time_fmt, time.localtime(time.time()))
    # Avoid potential UnicodeDecodeError in Python 2, since strftime
    # returns bytes in Python 2, and %Z may contain non-ascii chars.
    time_str = _unicode_decode(time_str,
                               encoding=_encodings['content'],
                               errors='replace')
    elogfile.write(
        _(">>> Messages generated by process "
          "%(pid)d on %(time)s for package %(pkg)s:\n\n") % {
              "pid": os.getpid(),
              "time": time_str,
              "pkg": key
          })
    elogfile.write(_unicode_decode(fulltext))
    elogfile.write("\n")
    elogfile.close()

    return elogfilename
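
The permission handling above copies the group and group-permission bits from the elog directory onto summary.log so logrotate can work with it. A minimal stdlib sketch of that idea (hypothetical helper, generic 0o070 group mask rather than the exact bits used above):

import os
import stat

def inherit_group_perms(log_path):
    parent_st = os.stat(os.path.dirname(log_path) or ".")
    grp_bits = parent_st.st_mode & 0o070          # group permission bits of the directory
    cur_mode = stat.S_IMODE(os.stat(log_path).st_mode)
    os.chmod(log_path, cur_mode | grp_bits)
    try:
        os.chown(log_path, -1, parent_st.st_gid)  # requires ownership or privileges
    except PermissionError:
        pass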
Ejemplo n.º 37
0
def lockfile(mypath,
             wantnewlockfile=0,
             unlinkfile=0,
             waiting_msg=None,
             flags=0):
    """
	If wantnewlockfile is True then this creates a lockfile in the parent
	directory as the file: '.' + basename + '.portage_lockfile'.
	"""

    if not mypath:
        raise InvalidData(_("Empty path given"))

    # Since Python 3.4, chown requires int type (no proxies).
    portage_gid = int(portage.data.portage_gid)

    # Support for file object or integer file descriptor parameters is
    # deprecated due to ambiguity in whether or not it's safe to close
    # the file descriptor, making it prone to "Bad file descriptor" errors
    # or file descriptor leaks.
    if isinstance(mypath, basestring) and mypath[-1] == '/':
        mypath = mypath[:-1]

    lockfilename_path = mypath
    if hasattr(mypath, 'fileno'):
        warnings.warn(
            "portage.locks.lockfile() support for "
            "file object parameters is deprecated. Use a file path instead.",
            DeprecationWarning,
            stacklevel=2)
        lockfilename_path = getattr(mypath, 'name', None)
        mypath = mypath.fileno()
    if isinstance(mypath, int):
        warnings.warn(
            "portage.locks.lockfile() support for integer file "
            "descriptor parameters is deprecated. Use a file path instead.",
            DeprecationWarning,
            stacklevel=2)
        lockfilename = mypath
        wantnewlockfile = 0
        unlinkfile = 0
    elif wantnewlockfile:
        base, tail = os.path.split(mypath)
        lockfilename = os.path.join(base, "." + tail + ".portage_lockfile")
        lockfilename_path = lockfilename
        unlinkfile = 1
    else:
        lockfilename = mypath

    if isinstance(mypath, basestring):
        if not os.path.exists(os.path.dirname(mypath)):
            raise DirectoryNotFound(os.path.dirname(mypath))
        preexisting = os.path.exists(lockfilename)
        old_mask = os.umask(000)
        try:
            try:
                myfd = os.open(lockfilename, os.O_CREAT | os.O_RDWR, 0o660)
            except OSError as e:
                func_call = "open('%s')" % lockfilename
                if e.errno == OperationNotPermitted.errno:
                    raise OperationNotPermitted(func_call)
                elif e.errno == PermissionDenied.errno:
                    raise PermissionDenied(func_call)
                elif e.errno == ReadOnlyFileSystem.errno:
                    raise ReadOnlyFileSystem(func_call)
                else:
                    raise

            if not preexisting:
                try:
                    if portage.data.secpass >= 1 and os.stat(
                            lockfilename).st_gid != portage_gid:
                        os.chown(lockfilename, -1, portage_gid)
                except OSError as e:
                    if e.errno in (errno.ENOENT, errno.ESTALE):
                        return lockfile(mypath,
                                        wantnewlockfile=wantnewlockfile,
                                        unlinkfile=unlinkfile,
                                        waiting_msg=waiting_msg,
                                        flags=flags)
                    else:
                        writemsg("%s: chown('%s', -1, %d)\n" % \
                         (e, lockfilename, portage_gid), noiselevel=-1)
                        writemsg(_("Cannot chown a lockfile: '%s'\n") % \
                         lockfilename, noiselevel=-1)
                        writemsg(_("Group IDs of current user: %s\n") % \
                         " ".join(str(n) for n in os.getgroups()),
                         noiselevel=-1)
        finally:
            os.umask(old_mask)

    elif isinstance(mypath, int):
        myfd = mypath

    else:
        raise ValueError(_("Unknown type passed in '%s': '%s'") % \
         (type(mypath), mypath))

    # Try a non-blocking lock first; if it is already held, print a message
    # saying we're waiting on the lockfile and fall back to a blocking attempt.
    locking_method = portage._eintr_func_wrapper(_get_lock_fn())
    try:
        if "__PORTAGE_TEST_HARDLINK_LOCKS" in os.environ:
            raise IOError(errno.ENOSYS, "Function not implemented")
        locking_method(myfd, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except IOError as e:
        if not hasattr(e, "errno"):
            raise
        if e.errno in (errno.EACCES, errno.EAGAIN, errno.ENOLCK):
            # Resource temporarily unavailable; e.g., someone beat us to the lock.
            if flags & os.O_NONBLOCK:
                os.close(myfd)
                raise TryAgain(mypath)

            global _quiet
            if _quiet:
                out = None
            else:
                out = portage.output.EOutput()
            if waiting_msg is None:
                if isinstance(mypath, int):
                    waiting_msg = _("waiting for lock on fd %i") % myfd
                else:
                    waiting_msg = _("waiting for lock on %s") % lockfilename
            if out is not None:
                out.ebegin(waiting_msg)
            # try for the exclusive lock now.
            enolock_msg_shown = False
            while True:
                try:
                    locking_method(myfd, fcntl.LOCK_EX)
                except EnvironmentError as e:
                    if e.errno == errno.ENOLCK:
                        # This is known to occur on Solaris NFS (see
                        # bug #462694). Assume that the error is due
                        # to temporary exhaustion of record locks,
                        # and loop until one becomes available.
                        if not enolock_msg_shown:
                            enolock_msg_shown = True
                            if isinstance(mypath, int):
                                context_desc = _("Error while waiting "
                                                 "to lock fd %i") % myfd
                            else:
                                context_desc = _("Error while waiting "
                                                 "to lock '%s'") % lockfilename
                            writemsg("\n!!! %s: %s\n" % (context_desc, e),
                                     noiselevel=-1)

                        time.sleep(_HARDLINK_POLL_LATENCY)
                        continue

                    if out is not None:
                        out.eend(1, str(e))
                    raise
                else:
                    break

            if out is not None:
                out.eend(os.EX_OK)
        elif e.errno in (errno.ENOSYS, ):
            # We're not allowed to lock on this FS.
            if not isinstance(lockfilename, int):
                # If a file object was passed in, it's not safe
                # to close the file descriptor because it may
                # still be in use.
                os.close(myfd)
            lockfilename_path = _unicode_decode(lockfilename_path,
                                                encoding=_encodings['fs'],
                                                errors='strict')
            if not isinstance(lockfilename_path, basestring):
                raise
            link_success = hardlink_lockfile(lockfilename_path,
                                             waiting_msg=waiting_msg,
                                             flags=flags)
            if not link_success:
                raise
            lockfilename = lockfilename_path
            locking_method = None
            myfd = HARDLINK_FD
        else:
            raise


    if isinstance(lockfilename, basestring) and \
     myfd != HARDLINK_FD and _fstat_nlink(myfd) == 0:
        # The file was deleted on us... Keep trying to make one...
        os.close(myfd)
        writemsg(_("lockfile recurse\n"), 1)
        lockfilename, myfd, unlinkfile, locking_method = lockfile(
            mypath,
            wantnewlockfile=wantnewlockfile,
            unlinkfile=unlinkfile,
            waiting_msg=waiting_msg,
            flags=flags)

    if myfd != HARDLINK_FD:

        # FD_CLOEXEC is enabled by default in Python >=3.4.
        if sys.hexversion < 0x3040000:
            try:
                fcntl.FD_CLOEXEC
            except AttributeError:
                pass
            else:
                fcntl.fcntl(
                    myfd, fcntl.F_SETFD,
                    fcntl.fcntl(myfd, fcntl.F_GETFD) | fcntl.FD_CLOEXEC)

        _open_fds.add(myfd)

    writemsg(str((lockfilename, myfd, unlinkfile)) + "\n", 1)
    return (lockfilename, myfd, unlinkfile, locking_method)
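
The core of lockfile() is an fcntl advisory lock: try a non-blocking lock first and, if someone else holds it, fall back to a blocking attempt (with hardlink locks as a last resort on filesystems that do not support fcntl). A minimal stdlib sketch of the fcntl part only (hypothetical helpers, none of the hardlink or recursion handling above):

import fcntl
import os

def take_lock(path):
    fd = os.open(path, os.O_CREAT | os.O_RDWR, 0o660)
    try:
        fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except OSError:
        # Lock is held by someone else; wait for it.
        fcntl.lockf(fd, fcntl.LOCK_EX)
    return fd

def release_lock(fd):
    fcntl.lockf(fd, fcntl.LOCK_UN)
    os.close(fd)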
Ejemplo n.º 38
0
def verify_all(filename, mydict, calc_prelink=0, strict=0):
    """
	Verify all checksums against a file.

	@param filename: File to run the checksums against
	@type filename: String
	@param mydict: Dictionary of expected checksum values keyed by hash name, plus an optional "size" entry
	@type mydict: Dictionary
	@param calc_prelink: Whether or not to reverse prelink before running the checksum
	@type calc_prelink: Integer
	@param strict: Enable/Disable strict checking (which stops exactly at a checksum failure and throws an exception)
	@type strict: Integer
	@rtype: Tuple
	@return: Result of the checks and possible message:
		1) If size fails, False, and a tuple containing a message, the given size, and the actual size
		2) If there is an os error, False, and a tuple containing the system error followed by 2 nulls
		3) If a checksum fails, False and a tuple containing a message, the given hash, and the actual hash
		4) If all checks succeed, return True and a fake reason
	"""
    # Dict relates to single file only.
    # returns: (passed,reason)
    file_is_ok = True
    reason = "Reason unknown"
    try:
        mysize = os.stat(filename)[stat.ST_SIZE]
        if mydict.get("size") is not None and mydict["size"] != mysize:
            return False, (_("Filesize does not match recorded size"), mysize,
                           mydict["size"])
    except OSError as e:
        if e.errno == errno.ENOENT:
            raise portage.exception.FileNotFound(filename)
        return False, (str(e), None, None)

    verifiable_hash_types = set(mydict).intersection(hashfunc_map)
    verifiable_hash_types.discard("size")
    if not verifiable_hash_types:
        expected = set(hashfunc_map)
        expected.discard("size")
        expected = list(expected)
        expected.sort()
        expected = " ".join(expected)
        got = set(mydict)
        got.discard("size")
        got = list(got)
        got.sort()
        got = " ".join(got)
        return False, (_("Insufficient data for checksum verification"), got,
                       expected)

    for x in sorted(mydict):
        if x == "size":
            continue
        elif x in hashfunc_map:
            myhash = perform_checksum(filename, x,
                                      calc_prelink=calc_prelink)[0]
            if mydict[x] != myhash:
                if strict:
                    raise portage.exception.DigestException(
                     ("Failed to verify '$(file)s' on " + \
                     "checksum type '%(type)s'") % \
                     {"file" : filename, "type" : x})
                else:
                    file_is_ok = False
                    reason = (("Failed on %s verification" % x), myhash,
                              mydict[x])
                    break

    return file_is_ok, reason
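
verify_all() first compares the file size and then each recorded digest. A minimal stdlib sketch of the same checks using hashlib directly (hypothetical helper; portage's perform_checksum also handles prelink):

import hashlib
import os

def verify_file(path, expected):
    """expected: e.g. {"size": 1234, "sha512": "<hex digest>"}."""
    size = os.stat(path).st_size
    if "size" in expected and expected["size"] != size:
        return False, ("size mismatch", size, expected["size"])
    for name, want in expected.items():
        if name == "size":
            continue
        h = hashlib.new(name)
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(65536), b""):
                h.update(chunk)
        if h.hexdigest() != want:
            return False, ("%s mismatch" % name, h.hexdigest(), want)
    return True, "ok"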
Ejemplo n.º 39
0
    def check(self, **kwargs):
        '''Checks the ebuild sources and files for errors

		@param xpkg: the package being checked
		@param checkdir: string, directory path
		@param checkdir_relative: repolevel determined path
		@returns: boolean
		'''
        xpkg = kwargs.get('xpkg')
        checkdir = kwargs.get('checkdir')
        checkdir_relative = kwargs.get('checkdir_relative')
        changed = kwargs.get('changed').changed
        new = kwargs.get('changed').new
        _digests = self.digests(checkdir)
        fetchlist_dict = portage.FetchlistDict(checkdir, self.repoman_settings,
                                               self.portdb)
        myfiles_all = []
        self._src_uri_error = False
        for mykey in fetchlist_dict:
            try:
                myfiles_all.extend(fetchlist_dict[mykey])
            except portage.exception.InvalidDependString as e:
                self._src_uri_error = True
                try:
                    self.portdb.aux_get(mykey, ["SRC_URI"])
                except KeyError:
                    # This will be reported as an "ebuild.syntax" error.
                    pass
                else:
                    self.qatracker.add_error(
                        "SRC_URI.syntax", "%s.ebuild SRC_URI: %s" % (mykey, e))
        del fetchlist_dict
        if not self._src_uri_error:
            # This test can produce false positives if SRC_URI could not
            # be parsed for one or more ebuilds. There's no point in
            # producing a false error here since the root cause will
            # produce a valid error elsewhere, such as "SRC_URI.syntax"
            # or "ebuild.sytax".
            myfiles_all = set(myfiles_all)
            for entry in _digests:
                if entry not in myfiles_all:
                    self.qatracker.add_error("digest.unused",
                                             checkdir + "::" + entry)
            for entry in myfiles_all:
                if entry not in _digests:
                    self.qatracker.add_error("digest.missing",
                                             checkdir + "::" + entry)
        del myfiles_all

        if os.path.exists(checkdir + "/files"):
            filesdirlist = os.listdir(checkdir + "/files")

            # Recurse through the files directory, using filesdirlist as a stack
            # and appending directories as needed, so people can't hide > 20k
            # files in a subdirectory.
            while filesdirlist:
                y = filesdirlist.pop(0)
                relative_path = os.path.join(xpkg, "files", y)
                full_path = os.path.join(self.repo_settings.repodir,
                                         relative_path)
                try:
                    mystat = os.stat(full_path)
                except OSError as oe:
                    if oe.errno == 2:
                        # Don't worry about it; it was likely removed via the fix above.
                        continue
                    else:
                        raise oe
                if S_ISDIR(mystat.st_mode):
                    if self.vcs_settings.status.isVcsDir(y):
                        continue
                    for z in os.listdir(checkdir + "/files/" + y):
                        if self.vcs_settings.status.isVcsDir(z):
                            continue
                        filesdirlist.append(y + "/" + z)
                # Current policy is no files over 20 KiB, these are the checks.
                # File size between 20 KiB and 60 KiB causes a warning,
                # while file size over 60 KiB causes an error.
                elif mystat.st_size > 61440:
                    self.qatracker.add_error(
                        "file.size-fatal", "(%d KiB) %s/files/%s" %
                        (mystat.st_size // 1024, xpkg, y))
                elif mystat.st_size > 20480:
                    self.qatracker.add_error(
                        "file.size", "(%d KiB) %s/files/%s" %
                        (mystat.st_size // 1024, xpkg, y))
                elif mystat.st_size == 0:
                    self.qatracker.add_error("file.empty",
                                             "%s/files/%s" % (xpkg, y))

                index = self.repo_settings.repo_config.find_invalid_path_char(
                    y)
                if index != -1:
                    y_relative = os.path.join(checkdir_relative, "files", y)
                    if self.vcs_settings.vcs is not None \
                     and not vcs_new_changed(y_relative, changed, new):
                        # If the file isn't in the VCS new or changed set, then
                        # assume that it's an irrelevant temporary file (Manifest
                        # entries are not generated for file names containing
                        # prohibited characters). See bug #406877.
                        index = -1
                if index != -1:
                    self.qatracker.add_error(
                        "file.name",
                        "%s/files/%s: char '%s'" % (checkdir, y, y[index]))
        return False
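
The files/ walk above uses a plain list as a work queue so that nested directories are also scanned, and applies the 20 KiB warning / 60 KiB error policy. A minimal stdlib sketch of that walk in isolation (hypothetical helper, without the VCS and filename checks):

import os
import stat

def scan_files_dir(files_dir):
    issues = []
    pending = os.listdir(files_dir)
    while pending:
        rel = pending.pop(0)
        st = os.stat(os.path.join(files_dir, rel))
        if stat.S_ISDIR(st.st_mode):
            pending.extend(os.path.join(rel, name)
                           for name in os.listdir(os.path.join(files_dir, rel)))
        elif st.st_size > 61440:
            issues.append(("file.size-fatal", rel, st.st_size // 1024))
        elif st.st_size > 20480:
            issues.append(("file.size", rel, st.st_size // 1024))
        elif st.st_size == 0:
            issues.append(("file.empty", rel))
    return issues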
Ejemplo n.º 40
0
def getentries(mydir, recursive=0):
    """(basedir,recursive=0)
	Scans the given directory and returns a datadict of all the entries in
	the directory separated as a dirs dict and a files dict."""
    myfn = mydir + "/CVS/Entries"
    # entries=[dirs, files]
    entries = {"dirs": {}, "files": {}}
    if not os.path.exists(mydir):
        return entries
    try:
        myfile = io.open(_unicode_encode(myfn,
                                         encoding=_encodings['fs'],
                                         errors='strict'),
                         mode='r',
                         encoding=_encodings['content'],
                         errors='strict')
        mylines = myfile.readlines()
        myfile.close()
    except SystemExit as e:
        raise
    except:
        mylines = []
    for line in mylines:
        if line and line[-1] == "\n":
            line = line[:-1]
        if not line:
            continue
        if line == "D":  # End of entries file
            break
        mysplit = line.split("/")
        if len(mysplit) != 6:
            print("Confused:", mysplit)
            continue
        if mysplit[0] == "D":
            entries["dirs"][mysplit[1]] = {
                "dirs": {},
                "files": {},
                "status": []
            }
            entries["dirs"][mysplit[1]]["status"] = ["cvs"]
            if os.path.isdir(mydir + "/" + mysplit[1]):
                entries["dirs"][mysplit[1]]["status"] += ["exists"]
                entries["dirs"][mysplit[1]]["flags"] = mysplit[2:]
                if recursive:
                    rentries = getentries(mydir + "/" + mysplit[1], recursive)
                    entries["dirs"][mysplit[1]]["dirs"] = rentries["dirs"]
                    entries["dirs"][mysplit[1]]["files"] = rentries["files"]
        else:
            # [D]/Name/revision/Date/Flags/Tags
            entries["files"][mysplit[1]] = {}
            entries["files"][mysplit[1]]["revision"] = mysplit[2]
            entries["files"][mysplit[1]]["date"] = mysplit[3]
            entries["files"][mysplit[1]]["flags"] = mysplit[4]
            entries["files"][mysplit[1]]["tags"] = mysplit[5]
            entries["files"][mysplit[1]]["status"] = ["cvs"]
            if entries["files"][mysplit[1]]["revision"][0] == "-":
                entries["files"][mysplit[1]]["status"] += ["removed"]

    for file in os.listdir(mydir):
        if file == "CVS":
            continue
        if os.path.isdir(mydir + "/" + file):
            if file not in entries["dirs"]:
                if ignore_list.match(file) is not None:
                    continue
                entries["dirs"][file] = {"dirs": {}, "files": {}}
                # It's normal for a directory to be unlisted in Entries
                # when checked out without -P (see bug #257660).
                rentries = getentries(mydir + "/" + file, recursive)
                entries["dirs"][file]["dirs"] = rentries["dirs"]
                entries["dirs"][file]["files"] = rentries["files"]
            if "status" in entries["dirs"][file]:
                if "exists" not in entries["dirs"][file]["status"]:
                    entries["dirs"][file]["status"] += ["exists"]
            else:
                entries["dirs"][file]["status"] = ["exists"]
        elif os.path.isfile(mydir + "/" + file):
            if file not in entries["files"]:
                if ignore_list.match(file) is not None:
                    continue
                entries["files"][file] = {
                    "revision": "",
                    "date": "",
                    "flags": "",
                    "tags": ""
                }
            if "status" in entries["files"][file]:
                if "exists" not in entries["files"][file]["status"]:
                    entries["files"][file]["status"] += ["exists"]
            else:
                entries["files"][file]["status"] = ["exists"]
            try:
                mystat = os.stat(mydir + "/" + file)
                mytime = time.asctime(time.gmtime(mystat[stat.ST_MTIME]))
                if "status" not in entries["files"][file]:
                    entries["files"][file]["status"] = []
                if mytime == entries["files"][file]["date"]:
                    entries["files"][file]["status"] += ["current"]
            except SystemExit as e:
                raise
            except Exception as e:
                print("failed to stat", file)
                print(e)
                return

        elif ignore_list.match(file) is not None:
            pass
        else:
            print()
            print("File of unknown type:", mydir + "/" + file)
            print()
    return entries
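
The parser above relies on the CVS/Entries line format "[D]/name/revision/date/flags/tags", which splits on "/" into exactly six fields. A minimal sketch of parsing a single line (hypothetical helper):

def parse_entries_line(line):
    fields = line.rstrip("\n").split("/")
    if len(fields) != 6:
        return None
    return {
        "type": "dir" if fields[0] == "D" else "file",
        "name": fields[1],
        "revision": fields[2],
        "date": fields[3],
        "flags": fields[4],
        "tags": fields[5],
    }

# parse_entries_line("/ChangeLog/1.5/Mon Apr  1 12:00:00 2013//")
# -> {'type': 'file', 'name': 'ChangeLog', 'revision': '1.5', ...}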
Ejemplo n.º 41
0
def fetch_metadata_xsd(metadata_xsd, repoman_settings):
	"""
	Fetch metadata.xsd if it doesn't exist or the ctime is older than
	metadata_xsd_ctime_interval.
	@rtype: bool
	@return: True if successful, otherwise False
	"""

	must_fetch = True
	metadata_xsd_st = None
	current_time = int(time.time())
	try:
		metadata_xsd_st = os.stat(metadata_xsd)
	except EnvironmentError as e:
		if e.errno not in (errno.ENOENT, errno.ESTALE):
			raise
		del e
	else:
		# Trigger fetch if the metadata.xsd ctime is old or the clock is wrong.
		if abs(current_time - metadata_xsd_st.st_ctime) \
			< metadata_xsd_ctime_interval:
			must_fetch = False

	if must_fetch:
		print()
		print(
			"%s the local copy of metadata.xsd "
			"needs to be refetched, doing that now" % green("***"))
		print()
		parsed_url = urlparse(metadata_xsd_uri)
		setting = 'FETCHCOMMAND_' + parsed_url.scheme.upper()
		fcmd = repoman_settings.get(setting)
		if not fcmd:
			fcmd = repoman_settings.get('FETCHCOMMAND')
			if not fcmd:
				logging.error("FETCHCOMMAND is unset")
				return False

		destdir = repoman_settings["DISTDIR"]
		fd, metadata_xsd_tmp = tempfile.mkstemp(
			prefix='metadata.xsd.', dir=destdir)
		os.close(fd)

		try:
			if not portage.getbinpkg.file_get(
				metadata_xsd_uri, destdir, fcmd=fcmd,
				filename=os.path.basename(metadata_xsd_tmp)):
				logging.error(
					"failed to fetch metadata.xsd from '%s'" % metadata_xsd_uri)
				return False

			try:
				portage.util.apply_secpass_permissions(
					metadata_xsd_tmp,
					gid=portage.data.portage_gid, mode=0o664, mask=0o2)
			except portage.exception.PortageException:
				pass

			shutil.move(metadata_xsd_tmp, metadata_xsd)
		finally:
			try:
				os.unlink(metadata_xsd_tmp)
			except OSError:
				pass

	return True
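
The freshness test above only refetches metadata.xsd when the file is missing or its ctime falls outside the configured interval. A minimal stdlib sketch of that check (hypothetical helper and interval constant):

import errno
import os
import time

CACHE_MAX_AGE = 60 * 60 * 24 * 7  # assumed one-week interval

def needs_refetch(path, max_age=CACHE_MAX_AGE):
    try:
        st = os.stat(path)
    except OSError as e:
        if e.errno not in (errno.ENOENT, errno.ESTALE):
            raise
        return True
    # abs() also forces a refetch if the system clock looks wrong.
    return abs(time.time() - st.st_ctime) >= max_age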
Ejemplo n.º 42
0
    def __init__(self, _unused_param=DeprecationWarning, mysettings=None):
        """
		@param _unused_param: deprecated, use mysettings['PORTDIR'] instead
		@type _unused_param: None
		@param mysettings: an immutable config instance
		@type mysettings: portage.config
		"""

        from portage import config
        if mysettings:
            self.settings = mysettings
        else:
            from portage import settings
            self.settings = config(clone=settings)

        if _unused_param is not DeprecationWarning:
            warnings.warn("The first parameter of the " + \
             "portage.dbapi.porttree.portdbapi" + \
             " constructor is unused since portage-2.1.8. " + \
             "mysettings['PORTDIR'] is used instead.",
             DeprecationWarning, stacklevel=2)

        self.repositories = self.settings.repositories
        self.treemap = self.repositories.treemap

        # This is strictly for use in aux_get() doebuild calls when metadata
        # is generated by the depend phase.  It's safest to use a clone for
        # this purpose because doebuild makes many changes to the config
        # instance that is passed in.
        self.doebuild_settings = config(clone=self.settings)
        self.depcachedir = os.path.realpath(self.settings.depcachedir)

        if os.environ.get("SANDBOX_ON") == "1":
            # Make api consumers exempt from sandbox violations
            # when doing metadata cache updates.
            sandbox_write = os.environ.get("SANDBOX_WRITE", "").split(":")
            if self.depcachedir not in sandbox_write:
                sandbox_write.append(self.depcachedir)
                os.environ["SANDBOX_WRITE"] = \
                 ":".join(filter(None, sandbox_write))

        self.porttrees = list(self.settings.repositories.repoLocationList())

        # This is used as sanity check for aux_get(). If there is no
        # root eclass dir, we assume that PORTDIR is invalid or
        # missing. This check allows aux_get() to detect a missing
        # portage tree and return early by raising a KeyError.
        self._have_root_eclass_dir = os.path.isdir(
            os.path.join(self.settings.repositories.mainRepoLocation(),
                         "eclass"))

        # If the portdbapi is "frozen", then we assume that we can cache
        # everything (i.e., that no updates to it are happening).
        self.xcache = {}
        self.frozen = 0

        # Keep a list of repo names, sorted by priority (highest priority first).
        self._ordered_repo_name_list = tuple(
            reversed(self.repositories.prepos_order))

        self.auxdbmodule = self.settings.load_best_module(
            "portdbapi.auxdbmodule")
        self.auxdb = {}
        self._pregen_auxdb = {}
        # If the current user doesn't have depcachedir write permission,
        # then the depcachedir cache is kept here read-only access.
        self._ro_auxdb = {}
        self._init_cache_dirs()
        try:
            depcachedir_st = os.stat(self.depcachedir)
            depcachedir_w_ok = os.access(self.depcachedir, os.W_OK)
        except OSError:
            depcachedir_st = None
            depcachedir_w_ok = False

        cache_kwargs = {}

        depcachedir_unshared = False
        if portage.data.secpass < 1 and \
         depcachedir_w_ok and \
         depcachedir_st is not None and \
         os.getuid() == depcachedir_st.st_uid and \
         os.getgid() == depcachedir_st.st_gid:
            # If this user owns depcachedir and is not in the
            # portage group, then don't bother to set permissions
            # on cache entries. This makes it possible to run
            # egencache without any need to be a member of the
            # portage group.
            depcachedir_unshared = True
        else:
            cache_kwargs.update({'gid': portage_gid, 'perms': 0o664})

        # If secpass < 1, we don't want to write to the cache
        # since then we won't be able to apply group permissions
        # to the cache entries/directories.
        if (secpass < 1 and not depcachedir_unshared) or not depcachedir_w_ok:
            for x in self.porttrees:
                self.auxdb[x] = volatile.database(self.depcachedir, x,
                                                  self._known_keys,
                                                  **cache_kwargs)
                try:
                    self._ro_auxdb[x] = self.auxdbmodule(self.depcachedir,
                                                         x,
                                                         self._known_keys,
                                                         readonly=True,
                                                         **cache_kwargs)
                except CacheError:
                    pass
        else:
            for x in self.porttrees:
                if x in self.auxdb:
                    continue
                # location, label, auxdbkeys
                self.auxdb[x] = self.auxdbmodule(self.depcachedir, x,
                                                 self._known_keys,
                                                 **cache_kwargs)
        if "metadata-transfer" not in self.settings.features:
            for x in self.porttrees:
                if x in self._pregen_auxdb:
                    continue
                cache = self._create_pregen_cache(x)
                if cache is not None:
                    self._pregen_auxdb[x] = cache
        # Selectively cache metadata in order to optimize dep matching.
        self._aux_cache_keys = set([
            "DEPEND", "EAPI", "HDEPEND", "INHERITED", "IUSE", "KEYWORDS",
            "LICENSE", "PDEPEND", "PROPERTIES", "PROVIDE", "RDEPEND",
            "repository", "RESTRICT", "SLOT", "DEFINED_PHASES", "REQUIRED_USE"
        ])

        self._aux_cache = {}
        self._broken_ebuilds = set()
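
The depcachedir handling above boils down to one question: is the cache directory writable and owned by the current user, or do group permissions need to be applied to every cache entry? A minimal stdlib sketch of that probe (hypothetical helper, POSIX only):

import os

def cache_dir_is_private(path):
    try:
        st = os.stat(path)
    except OSError:
        return False
    return (os.access(path, os.W_OK)
            and os.getuid() == st.st_uid
            and os.getgid() == st.st_gid)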
Ejemplo n.º 43
0
    def _update_recycle_db(self):

        start_time = self._config.start_time
        recycle_dir = self._config.options.recycle_dir
        recycle_db = self._config.recycle_db
        r_deletion_delay = self._config.options.recycle_deletion_delay

        # Use a dict to optimize access.
        recycle_db_cache = dict(recycle_db.items())

        for filename in os.listdir(recycle_dir):

            recycle_file = os.path.join(recycle_dir, filename)

            try:
                st = os.stat(recycle_file)
            except OSError as e:
                if e.errno not in (errno.ENOENT, errno.ESTALE):
                    logging.error(("stat failed for '%s' in "
                                   "recycle: %s") % (filename, e))
                continue

            value = recycle_db_cache.pop(filename, None)
            if value is None:
                logging.debug(("add '%s' to " "recycle db") % filename)
                recycle_db[filename] = (st.st_size, start_time)
            else:
                r_size, r_time = value
                if long(r_size) != st.st_size:
                    recycle_db[filename] = (st.st_size, start_time)
                elif r_time + r_deletion_delay < start_time:
                    if self._config.options.dry_run:
                        logging.info(("dry-run: delete '%s' from "
                                      "recycle") % filename)
                        logging.info(("drop '%s' from "
                                      "recycle db") % filename)
                    else:
                        try:
                            os.unlink(recycle_file)
                        except OSError as e:
                            if e.errno not in (errno.ENOENT, errno.ESTALE):
                                logging.error(
                                    ("delete '%s' from "
                                     "recycle failed: %s") % (filename, e))
                        else:
                            logging.debug(("delete '%s' from "
                                           "recycle") % filename)
                            try:
                                del recycle_db[filename]
                            except KeyError:
                                pass
                            else:
                                logging.debug(("drop '%s' from "
                                               "recycle db") % filename)

        # Existing files were popped from recycle_db_cache,
        # so any remaining entries are for files that no
        # longer exist.
        for filename in recycle_db_cache:
            try:
                del recycle_db[filename]
            except KeyError:
                pass
            else:
                logging.debug(("drop non-existent '%s' from "
                               "recycle db") % filename)
Ejemplo n.º 44
0
def FindPortdir(settings):
	""" Try to figure out what repo we are in and whether we are in a regular
	tree or an overlay.
	
	Basic logic is:
	
	1. Determine what directory we are in (supports symlinks).
	2. Build a list of directories from / to our current location
	3. Iterate over PORTDIR_OVERLAY, if we find a match, search for a profiles directory
		 in the overlay.  If it has one, make it portdir, otherwise make it portdir_overlay.
	4. If we didn't find an overlay in PORTDIR_OVERLAY, see if we are in PORTDIR; if so, set
		 portdir_overlay to PORTDIR.  If we aren't in PORTDIR, see if PWD has a profiles dir, if
		 so, set portdir_overlay and portdir to PWD, else make them False.
	5. If we haven't found portdir_overlay yet, it means the user is doing something odd, report
		 an error.
	6. If we haven't found a portdir yet, set portdir to PORTDIR.
	
	Args:
		settings - portage.config instance, preferably repoman_settings
	Returns:
		list(portdir, portdir_overlay, location)
	"""

	portdir = None
	portdir_overlay = None
	location = os.getcwd()
	pwd = os.environ.get('PWD', '')
	if pwd and pwd != location and os.path.realpath(pwd) == location:
		# getcwd() returns the canonical path but that makes it hard for repoman to
		# orient itself if the user has symlinks in their portage tree structure.
		# We use os.environ["PWD"], if available, to get the non-canonical path of
		# the current working directory (from the shell).
		location = pwd

	location = normalize_path(location)

	path_ids = {}
	p = location
	s = None
	while True:
		s = os.stat(p)
		path_ids[(s.st_dev, s.st_ino)] = p
		if p == "/":
			break
		p = os.path.dirname(p)
	if location[-1] != "/":
		location += "/"

	for overlay in settings["PORTDIR_OVERLAY"].split():
		overlay = os.path.realpath(overlay)
		try:
			s = os.stat(overlay)
		except OSError:
			continue
		overlay = path_ids.get((s.st_dev, s.st_ino))
		if overlay is None:
			continue
		if overlay[-1] != "/":
			overlay += "/"
		portdir_overlay = overlay
		subdir = location[len(overlay):]
		if subdir and subdir[-1] != "/":
			subdir += "/"
		if have_profile_dir(location, subdir.count("/")):
			portdir = portdir_overlay
		break

	# Couldn't match location with anything from PORTDIR_OVERLAY,
	# so fall back to have_profile_dir() checks alone. Assume that
	# an overlay will contain at least a "repo_name" file while a
	# master repo (portdir) will contain at least a "profiles.desc"
	# file.
	if not portdir_overlay:
		portdir_overlay = have_profile_dir(location, filename="repo_name")
		if portdir_overlay:
			subdir = location[len(portdir_overlay):]
			if subdir and subdir[-1] != os.sep:
				subdir += os.sep
			if have_profile_dir(location, subdir.count(os.sep)):
				portdir = portdir_overlay

	if not portdir_overlay:
		if (settings["PORTDIR"] + os.path.sep).startswith(location):
			portdir_overlay = settings["PORTDIR"]
		else:
			portdir_overlay = have_profile_dir(location)
		portdir = portdir_overlay
	
	if not portdir_overlay:
		msg = 'Repoman is unable to determine PORTDIR or PORTDIR_OVERLAY' + \
			' from the current working directory'
		logging.critical(msg)
		return (None, None, None)

	if not portdir:
		portdir = settings["PORTDIR"]

	if not portdir_overlay.endswith('/'):
		portdir_overlay += '/'
	
	if not portdir.endswith('/'):
		portdir += '/'

	return [normalize_path(x) for x in (portdir, portdir_overlay, location)]
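
FindPortdir matches directories by (st_dev, st_ino) rather than by string comparison, so a PORTDIR_OVERLAY entry is recognized even when the shell's working directory was reached through symlinks. A minimal stdlib sketch of that device/inode matching (hypothetical helper):

import os

def ancestor_by_identity(location, candidate):
    # Record every ancestor of location by (st_dev, st_ino).
    path_ids = {}
    p = os.path.abspath(location)
    while True:
        st = os.stat(p)
        path_ids[(st.st_dev, st.st_ino)] = p
        if p == "/":
            break
        p = os.path.dirname(p)
    try:
        st = os.stat(os.path.realpath(candidate))
    except OSError:
        return None
    # Return the ancestor path equivalent to candidate, or None.
    return path_ids.get((st.st_dev, st.st_ino))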
Ejemplo n.º 45
0
    def _start(self):

        if self.config.options.fetch_log_dir is not None and \
         not self.config.options.dry_run:
            self._log_path = os.path.join(self.config.options.fetch_log_dir,
                                          self.distfile + '.log')

        self._previously_added = True
        if self.config.distfiles_db is not None and \
         self.distfile not in self.config.distfiles_db:
            self._previously_added = False
            self.config.distfiles_db[self.distfile] = self.cpv

        if not self._have_needed_digests():
            msg = "incomplete digests: %s" % " ".join(self.digests)
            self.scheduler.output(msg,
                                  background=self.background,
                                  log_path=self._log_path)
            self.config.log_failure("%s\t%s\t%s" %
                                    (self.cpv, self.distfile, msg))
            self.config.file_failures[self.distfile] = self.cpv
            self.returncode = os.EX_OK
            self._async_wait()
            return

        st = None
        for layout in self.config.layouts:
            distfile_path = os.path.join(self.config.options.distfiles,
                                         layout.get_path(self.distfile))
            try:
                st = os.stat(distfile_path)
            except OSError as e:
                if e.errno not in (errno.ENOENT, errno.ESTALE):
                    msg = "%s stat failed in %s: %s" % \
                     (self.distfile, "distfiles", e)
                    self.scheduler.output(msg + '\n',
                                          background=True,
                                          log_path=self._log_path)
                    logging.error(msg)
            else:
                break

        size_ok = st is not None and st.st_size == self.digests["size"]

        if not size_ok:
            if self.config.options.dry_run:
                if st is not None:
                    logging.info(
                        ("dry-run: delete '%s' with "
                         "wrong size from distfiles") % (self.distfile, ))
            else:
                # Do the unlink in order to ensure that the path is clear,
                # even if stat raised ENOENT, since a broken symlink can
                # trigger ENOENT.
                unlink_success = True
                for layout in self.config.layouts:
                    unlink_path = os.path.join(self.config.options.distfiles,
                                               layout.get_path(self.distfile))
                    if self._unlink_file(unlink_path, "distfiles"):
                        if st is not None:
                            logging.debug(("delete '%s' with "
                                           "wrong size from distfiles") %
                                          (self.distfile, ))
                    else:
                        self.config.log_failure("%s\t%s\t%s" %
                                                (self.cpv, self.distfile,
                                                 "unlink failed in distfiles"))
                        unlink_success = False
                if not unlink_success:
                    self.returncode = os.EX_OK
                    self._async_wait()
                    return

        if size_ok:
            if self.config.options.verify_existing_digest:
                self._start_task(
                    FileDigester(file_path=distfile_path,
                                 hash_names=(self._select_hash(), ),
                                 background=self.background,
                                 logfile=self._log_path),
                    self._distfiles_digester_exit)
                return

            self._success()
            self.returncode = os.EX_OK
            self._async_wait()
            return

        self._start_fetch()
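
_start() treats a distfile as already present only when it can be stat()ed under one of the configured layouts and its size matches the recorded digest size; otherwise the stale copies are unlinked and a fetch is started. A minimal stdlib sketch of the size probe (hypothetical helper; layout_paths stands in for the layout objects used above):

import errno
import os

def find_distfile(distfiles_dir, layout_paths, expected_size):
    for rel_path in layout_paths:
        path = os.path.join(distfiles_dir, rel_path)
        try:
            st = os.stat(path)
        except OSError as e:
            if e.errno not in (errno.ENOENT, errno.ESTALE):
                raise
            continue
        if st.st_size == expected_size:
            return path
    return None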
Ejemplo n.º 46
0
	def testEbuildFetch(self):

		distfiles = {
			'bar': b'bar\n',
			'foo': b'foo\n',
		}

		ebuilds = {
			'dev-libs/A-1': {
				'EAPI': '7',
				'RESTRICT': 'primaryuri',
				'SRC_URI': '''{scheme}://{host}:{port}/distfiles/bar.txt -> bar
					{scheme}://{host}:{port}/distfiles/foo.txt -> foo''',
			},
		}

		loop = SchedulerInterface(global_event_loop())
		scheme = 'http'
		host = '127.0.0.1'
		content = {}
		for k, v in distfiles.items():
			content['/distfiles/{}.txt'.format(k)] = v

		with AsyncHTTPServer(host, content, loop) as server:
			ebuilds_subst = {}
			for cpv, metadata in ebuilds.items():
				metadata = metadata.copy()
				metadata['SRC_URI'] = metadata['SRC_URI'].format(
					scheme=scheme, host=host, port=server.server_port)
				ebuilds_subst[cpv] = metadata

			playground = ResolverPlayground(ebuilds=ebuilds_subst, distfiles=distfiles)
			ro_distdir = tempfile.mkdtemp()
			try:
				fetchcommand = portage.util.shlex_split(playground.settings['FETCHCOMMAND'])
				fetch_bin = portage.process.find_binary(fetchcommand[0])
				if fetch_bin is None:
					self.skipTest('FETCHCOMMAND not found: {}'.format(playground.settings['FETCHCOMMAND']))
				resumecommand = portage.util.shlex_split(playground.settings['RESUMECOMMAND'])
				resume_bin = portage.process.find_binary(resumecommand[0])
				if resume_bin is None:
					self.skipTest('RESUMECOMMAND not found: {}'.format(playground.settings['RESUMECOMMAND']))
				root_config = playground.trees[playground.eroot]['root_config']
				portdb = root_config.trees["porttree"].dbapi
				settings = config(clone=playground.settings)

				# Tests only work with one ebuild at a time, so the config
				# pool only needs a single config instance.
				class config_pool:
					@staticmethod
					def allocate():
						return settings
					@staticmethod
					def deallocate(settings):
						pass

				def async_fetch(pkg, ebuild_path):
					fetcher = EbuildFetcher(config_pool=config_pool, ebuild_path=ebuild_path,
						fetchonly=False, fetchall=True, pkg=pkg, scheduler=loop)
					fetcher.start()
					return fetcher.async_wait()

				for cpv in ebuilds:
					metadata = dict(zip(Package.metadata_keys,
						portdb.aux_get(cpv, Package.metadata_keys)))

					pkg = Package(built=False, cpv=cpv, installed=False,
						metadata=metadata, root_config=root_config,
						type_name='ebuild')

					settings.setcpv(pkg)
					ebuild_path = portdb.findname(pkg.cpv)
					portage.doebuild_environment(ebuild_path, 'fetch', settings=settings, db=portdb)

					# Test good files in DISTDIR
					for k in settings['AA'].split():
						os.stat(os.path.join(settings['DISTDIR'], k))
					self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
					for k in settings['AA'].split():
						with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
							self.assertEqual(f.read(), distfiles[k])

					# Test digestgen with fetch
					os.unlink(os.path.join(os.path.dirname(ebuild_path), 'Manifest'))
					for k in settings['AA'].split():
						os.unlink(os.path.join(settings['DISTDIR'], k))
					with ForkExecutor(loop=loop) as executor:
						self.assertTrue(bool(loop.run_until_complete(
							loop.run_in_executor(executor, functools.partial(
								digestgen, mysettings=settings, myportdb=portdb)))))
					for k in settings['AA'].split():
						with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
							self.assertEqual(f.read(), distfiles[k])

					# Test missing files in DISTDIR
					for k in settings['AA'].split():
						os.unlink(os.path.join(settings['DISTDIR'], k))
					self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
					for k in settings['AA'].split():
						with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
							self.assertEqual(f.read(), distfiles[k])

					# Test empty files in DISTDIR
					for k in settings['AA'].split():
						file_path = os.path.join(settings['DISTDIR'], k)
						with open(file_path, 'wb') as f:
							pass
						self.assertEqual(os.stat(file_path).st_size, 0)
					self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
					for k in settings['AA'].split():
						with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
							self.assertEqual(f.read(), distfiles[k])

					# Test non-empty files containing null bytes in DISTDIR
					for k in settings['AA'].split():
						file_path = os.path.join(settings['DISTDIR'], k)
						with open(file_path, 'wb') as f:
							f.write(len(distfiles[k]) * b'\0')
						self.assertEqual(os.stat(file_path).st_size, len(distfiles[k]))
					self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
					for k in settings['AA'].split():
						with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
							self.assertEqual(f.read(), distfiles[k])

					# Test PORTAGE_RO_DISTDIRS
					settings['PORTAGE_RO_DISTDIRS'] = '"{}"'.format(ro_distdir)
					orig_fetchcommand = settings['FETCHCOMMAND']
					orig_resumecommand = settings['RESUMECOMMAND']
					try:
						settings['FETCHCOMMAND'] = settings['RESUMECOMMAND'] = ''
						for k in settings['AA'].split():
							file_path = os.path.join(settings['DISTDIR'], k)
							os.rename(file_path, os.path.join(ro_distdir, k))
						self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
						for k in settings['AA'].split():
							file_path = os.path.join(settings['DISTDIR'], k)
							self.assertTrue(os.path.islink(file_path))
							with open(file_path, 'rb') as f:
								self.assertEqual(f.read(), distfiles[k])
							os.unlink(file_path)
					finally:
						settings.pop('PORTAGE_RO_DISTDIRS')
						settings['FETCHCOMMAND'] = orig_fetchcommand
						settings['RESUMECOMMAND'] = orig_resumecommand

					# Test local filesystem in GENTOO_MIRRORS
					orig_mirrors = settings['GENTOO_MIRRORS']
					orig_fetchcommand = settings['FETCHCOMMAND']
					try:
						settings['GENTOO_MIRRORS'] = ro_distdir
						settings['FETCHCOMMAND'] = settings['RESUMECOMMAND'] = ''
						self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
						for k in settings['AA'].split():
							with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
								self.assertEqual(f.read(), distfiles[k])
					finally:
						settings['GENTOO_MIRRORS'] = orig_mirrors
						settings['FETCHCOMMAND'] = orig_fetchcommand
						settings['RESUMECOMMAND'] = orig_resumecommand

					# Test readonly DISTDIR
					orig_distdir_mode = os.stat(settings['DISTDIR']).st_mode
					try:
						os.chmod(settings['DISTDIR'], 0o555)
						self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
						for k in settings['AA'].split():
							with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
								self.assertEqual(f.read(), distfiles[k])
					finally:
						os.chmod(settings['DISTDIR'], orig_distdir_mode)

					# Test parallel-fetch mode
					settings['PORTAGE_PARALLEL_FETCHONLY'] = '1'
					try:
						self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
						for k in settings['AA'].split():
							with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
								self.assertEqual(f.read(), distfiles[k])
						for k in settings['AA'].split():
							os.unlink(os.path.join(settings['DISTDIR'], k))
						self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
						for k in settings['AA'].split():
							with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
								self.assertEqual(f.read(), distfiles[k])
					finally:
						settings.pop('PORTAGE_PARALLEL_FETCHONLY')

					# Test RESUMECOMMAND
					orig_resume_min_size = settings['PORTAGE_FETCH_RESUME_MIN_SIZE']
					try:
						settings['PORTAGE_FETCH_RESUME_MIN_SIZE'] = '2'
						for k in settings['AA'].split():
							file_path = os.path.join(settings['DISTDIR'], k)
							os.unlink(file_path)
							with open(file_path + _download_suffix, 'wb') as f:
								f.write(distfiles[k][:2])
						self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
						for k in settings['AA'].split():
							with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
								self.assertEqual(f.read(), distfiles[k])
					finally:
						settings['PORTAGE_FETCH_RESUME_MIN_SIZE'] = orig_resume_min_size
			finally:
				shutil.rmtree(ro_distdir)
				playground.cleanup()
Ejemplo n.º 47
0
    def _check_already_fetched(self, settings, uri_map):
        digests = self._get_digests()
        distdir = settings["DISTDIR"]
        allow_missing = self._get_manifest().allow_missing

        for filename in uri_map:
            # Use stat rather than lstat since fetch() creates
            # symlinks when PORTAGE_RO_DISTDIRS is used.
            try:
                st = os.stat(os.path.join(distdir, filename))
            except OSError:
                return False
            if st.st_size == 0:
                return False
            expected_size = digests.get(filename, {}).get('size')
            if expected_size is None:
                continue
            if st.st_size != expected_size:
                return False

        hash_filter = _hash_filter(settings.get("PORTAGE_CHECKSUM_FILTER", ""))
        if hash_filter.transparent:
            hash_filter = None
        stdout_orig = sys.stdout
        stderr_orig = sys.stderr
        global_havecolor = portage.output.havecolor
        out = io.StringIO()
        eout = portage.output.EOutput()
        eout.quiet = settings.get("PORTAGE_QUIET") == "1"
        success = True
        try:
            sys.stdout = out
            sys.stderr = out
            if portage.output.havecolor:
                portage.output.havecolor = not self.background

            for filename in uri_map:
                mydigests = digests.get(filename)
                if mydigests is None:
                    if not allow_missing:
                        success = False
                        break
                    continue
                ok, st = _check_distfile(os.path.join(distdir, filename),
                                         mydigests,
                                         eout,
                                         show_errors=False,
                                         hash_filter=hash_filter)
                if not ok:
                    success = False
                    break
        except portage.exception.FileNotFound:
            # A file disappeared unexpectedly.
            return False
        finally:
            sys.stdout = stdout_orig
            sys.stderr = stderr_orig
            portage.output.havecolor = global_havecolor

        if success:
            # When returning unsuccessfully, no messages are produced, since
            # we assume that a fetcher process will later be executed in order
            # to produce such messages.
            msg = out.getvalue()
            if msg:
                self.scheduler.output(msg, log_path=self.logfile)

        return success
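
The helper below is an illustrative reduction of the pre-check above: it stats each expected distfile and rejects the set early when a file is missing, empty, or has a size that disagrees with its recorded digest entry. The mapping `expected_sizes` and the function name are assumptions for this sketch, not part of Portage's API.

import os

def plausible_distfiles(distdir, expected_sizes):
    """Return False if any expected file is missing, empty, or the wrong size."""
    for filename, expected_size in expected_sizes.items():
        try:
            st = os.stat(os.path.join(distdir, filename))
        except OSError:
            return False
        if st.st_size == 0:
            return False
        if expected_size is not None and st.st_size != expected_size:
            return False
    return True
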
Ejemplo n.º 48
0
    def pre_sync(self, repo):
        msg = ">>> Syncing repository '%s' into '%s'..." % (repo.name, repo.location)
        self.logger(self.xterm_titles, msg)
        writemsg_level(msg + "\n")
        try:
            st = os.stat(repo.location)
        except OSError:
            st = None

        self.usersync_uid = None
        spawn_kwargs = {}
        # Redirect command stderr to stdout, in order to prevent
        # spurious cron job emails (bug 566132).
        spawn_kwargs["fd_pipes"] = {
            0: portage._get_stdin().fileno(),
            1: sys.__stdout__.fileno(),
            2: sys.__stdout__.fileno(),
        }
        spawn_kwargs["env"] = self.settings.environ()
        if repo.sync_user is not None:

            def get_sync_user_data(sync_user):
                user = None
                group = None
                home = None
                logname = None

                spl = sync_user.split(":", 1)
                if spl[0]:
                    username = spl[0]
                    try:
                        try:
                            pw = pwd.getpwnam(username)
                        except KeyError:
                            pw = pwd.getpwuid(int(username))
                    except (ValueError, KeyError):
                        writemsg(
                            "!!! User '%s' invalid or does not exist\n" % username,
                            noiselevel=-1,
                        )
                        return (logname, user, group, home)
                    user = pw.pw_uid
                    group = pw.pw_gid
                    home = pw.pw_dir
                    logname = pw.pw_name

                if len(spl) > 1:
                    groupname = spl[1]
                    try:
                        try:
                            gp = grp.getgrnam(groupname)
                        except KeyError:
                            gp = grp.getgrgid(int(groupname))
                    except (ValueError, KeyError):
                        writemsg(
                            "!!! Group '%s' invalid or does not exist\n" % groupname,
                            noiselevel=-1,
                        )
                        return (logname, user, group, home)

                    group = gp.gr_gid

                return (logname, user, group, home)

            # user or user:group
            (logname, uid, gid, home) = get_sync_user_data(repo.sync_user)
            if uid is not None:
                spawn_kwargs["uid"] = uid
                self.usersync_uid = uid
            if gid is not None:
                spawn_kwargs["gid"] = gid
                spawn_kwargs["groups"] = [gid]
            if home is not None:
                spawn_kwargs["env"]["HOME"] = home
            if logname is not None:
                spawn_kwargs["env"]["LOGNAME"] = logname

        if st is None:
            perms = {"mode": 0o755}
            # respect sync-user if set
            if "umask" in spawn_kwargs:
                perms["mode"] &= ~spawn_kwargs["umask"]
            if "uid" in spawn_kwargs:
                perms["uid"] = spawn_kwargs["uid"]
            if "gid" in spawn_kwargs:
                perms["gid"] = spawn_kwargs["gid"]

            portage.util.ensure_dirs(repo.location, **perms)
            st = os.stat(repo.location)

        if (
            repo.sync_user is None
            and "usersync" in self.settings.features
            and portage.data.secpass >= 2
            and (
                st.st_uid != os.getuid()
                and st.st_mode & 0o700
                or st.st_gid != os.getgid()
                and st.st_mode & 0o070
            )
        ):
            try:
                pw = pwd.getpwuid(st.st_uid)
            except KeyError:
                pass
            else:
                # Drop privileges when syncing, in order to match
                # existing uid/gid settings.
                self.usersync_uid = st.st_uid
                spawn_kwargs["uid"] = st.st_uid
                spawn_kwargs["gid"] = st.st_gid
                spawn_kwargs["groups"] = [st.st_gid]
                spawn_kwargs["env"]["HOME"] = pw.pw_dir
                spawn_kwargs["env"]["LOGNAME"] = pw.pw_name
                umask = 0o002
                if not st.st_mode & 0o020:
                    umask = umask | 0o020
                spawn_kwargs["umask"] = umask
        # override the defaults when sync_umask is set
        if repo.sync_umask is not None:
            spawn_kwargs["umask"] = int(repo.sync_umask, 8)
        spawn_kwargs.setdefault("umask", 0o022)
        self.spawn_kwargs = spawn_kwargs

        if self.usersync_uid is not None:
            # PORTAGE_TMPDIR is used below, so validate it and
            # bail out if necessary.
            rval = _check_temp_dir(self.settings)
            if rval != os.EX_OK:
                return rval

        os.umask(0o022)
        return os.EX_OK
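
A minimal sketch of the privilege-matching idea above: read the ownership of the repository directory with os.stat() and derive the uid/gid and umask that a sync subprocess would run with. The helper name and return shape are hypothetical; the real code applies additional checks (secpass, FEATURES=usersync, sync-user and sync-umask overrides).

import os
import stat

def usersync_spawn_kwargs(repo_location):
    """Derive spawn parameters from the existing ownership of repo_location."""
    st = os.stat(repo_location)
    umask = 0o002
    if not st.st_mode & stat.S_IWGRP:
        # The directory is not group-writable, so keep new files that way too.
        umask |= 0o020
    return {
        "uid": st.st_uid,
        "gid": st.st_gid,
        "groups": [st.st_gid],
        "umask": umask,
    }
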
Ejemplo n.º 49
0
    def check_isebuild(self, **kwargs):
        '''Test the file for qualifications that it is an ebuild

		@param checkdirlist: list of files in the current package directory
		@param checkdir: current package directory path
		@param xpkg: current package directory being checked
		@param validity_future: Future instance
		@returns: dictionary, including {pkgs, can_force}
		'''
        checkdirlist = kwargs.get('checkdirlist').get()
        checkdir = kwargs.get('checkdir')
        xpkg = kwargs.get('xpkg')
        fuse = kwargs.get('validity_future')
        can_force = kwargs.get('can_force')
        self.continue_ = False
        ebuildlist = []
        pkgs = {}
        for y in checkdirlist:
            file_is_ebuild = y.endswith(".ebuild")
            file_should_be_non_executable = (
                y in self.repo_settings.qadata.no_exec or file_is_ebuild)

            if file_should_be_non_executable:
                file_is_executable = stat.S_IMODE(
                    os.stat(os.path.join(checkdir, y)).st_mode) & 0o111

                if file_is_executable:
                    self.qatracker.add_error("file.executable",
                                             os.path.join(checkdir, y))
            if file_is_ebuild:
                pf = y[:-7]
                ebuildlist.append(pf)
                catdir = xpkg.split("/")[0]
                cpv = "%s/%s" % (catdir, pf)
                allvars = self.repo_settings.qadata.allvars
                try:
                    myaux = dict(
                        zip(allvars, self.portdb.aux_get(cpv, allvars)))
                except KeyError:
                    fuse.set(False, ignore_InvalidState=True)
                    self.qatracker.add_error("ebuild.syntax",
                                             os.path.join(xpkg, y))
                    continue
                except IOError:
                    fuse.set(False, ignore_InvalidState=True)
                    self.qatracker.add_error("ebuild.output",
                                             os.path.join(xpkg, y))
                    continue
                except InvalidPackageName:
                    fuse.set(False, ignore_InvalidState=True)
                    self.qatracker.add_error("ebuild.invalidname",
                                             os.path.join(xpkg, y))
                    continue
                if not portage.eapi_is_supported(myaux["EAPI"]):
                    fuse.set(False, ignore_InvalidState=True)
                    self.qatracker.add_error("EAPI.unsupported",
                                             os.path.join(xpkg, y))
                    continue
                pkgs[pf] = Package(cpv=cpv,
                                   metadata=myaux,
                                   root_config=self.root_config,
                                   type_name="ebuild")

        if len(pkgs) != len(ebuildlist):
            # If we can't access all the metadata then it's totally unsafe to
            # commit since there's no way to generate a correct Manifest.
            # Do not try to do any more QA checks on this package since missing
            # metadata leads to false positives for several checks, and false
            # positives confuse users.
            self.continue_ = True
            can_force.set(False, ignore_InvalidState=True)
        self.pkgs = pkgs
        # set our updated data
        dyn_pkgs = kwargs.get('pkgs')
        dyn_pkgs.set(pkgs)
        return self.continue_
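
The executable-bit test used above reduces to the following sketch: take the permission bits from os.stat() and check whether any of the user/group/other execute bits are set. The helper name is hypothetical.

import os
import stat

def is_executable(path):
    """True if any execute bit (user, group, or other) is set on path."""
    return bool(stat.S_IMODE(os.stat(path).st_mode) & 0o111)
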
Ejemplo n.º 50
0
def getsize(filename):
    size = os.stat(filename).st_size
    return (size, size)
Ejemplo n.º 51
0
    def _testEbuildFetch(
        self,
        loop,
        scheme,
        host,
        orig_distfiles,
        ebuilds,
        content,
        server,
        playground,
        ro_distdir,
    ):
        mirror_layouts = (
            (
                "[structure]",
                "0=filename-hash BLAKE2B 8",
                "1=flat",
            ),
            (
                "[structure]",
                "1=filename-hash BLAKE2B 8",
                "0=flat",
            ),
            (
                "[structure]",
                "0=content-hash SHA512 8:8:8",
                "1=flat",
            ),
        )

        fetchcommand = portage.util.shlex_split(
            playground.settings["FETCHCOMMAND"])
        fetch_bin = portage.process.find_binary(fetchcommand[0])
        if fetch_bin is None:
            self.skipTest("FETCHCOMMAND not found: {}".format(
                playground.settings["FETCHCOMMAND"]))
        eubin = os.path.join(playground.eprefix, "usr", "bin")
        os.symlink(fetch_bin, os.path.join(eubin, os.path.basename(fetch_bin)))
        resumecommand = portage.util.shlex_split(
            playground.settings["RESUMECOMMAND"])
        resume_bin = portage.process.find_binary(resumecommand[0])
        if resume_bin is None:
            self.skipTest("RESUMECOMMAND not found: {}".format(
                playground.settings["RESUMECOMMAND"]))
        if resume_bin != fetch_bin:
            os.symlink(resume_bin,
                       os.path.join(eubin, os.path.basename(resume_bin)))
        root_config = playground.trees[playground.eroot]["root_config"]
        portdb = root_config.trees["porttree"].dbapi

        def run_async(func, *args, **kwargs):
            with ForkExecutor(loop=loop) as executor:
                return loop.run_until_complete(
                    loop.run_in_executor(
                        executor, functools.partial(func, *args, **kwargs)))

        for layout_lines in mirror_layouts:
            settings = config(clone=playground.settings)
            layout_data = "".join("{}\n".format(line) for line in layout_lines)
            mirror_conf = MirrorLayoutConfig()
            mirror_conf.read_from_file(io.StringIO(layout_data))
            layouts = mirror_conf.get_all_layouts()
            content["/distfiles/layout.conf"] = layout_data.encode("utf8")
            distfiles = {}
            for k, v in orig_distfiles.items():
                filename = DistfileName(
                    k,
                    digests=dict((algo, checksum_str(v, hashname=algo))
                                 for algo in MANIFEST2_HASH_DEFAULTS),
                )
                distfiles[filename] = v

                # mirror path
                for layout in layouts:
                    content["/distfiles/" + layout.get_path(filename)] = v
                # upstream path
                content["/distfiles/{}.txt".format(k)] = v

            shutil.rmtree(settings["DISTDIR"])
            os.makedirs(settings["DISTDIR"])
            with open(os.path.join(settings['DISTDIR'], 'layout.conf'),
                      'wt') as f:
                f.write(layout_data)

            if any(
                    isinstance(layout, ContentHashLayout)
                    for layout in layouts):
                content_db = os.path.join(playground.eprefix,
                                          'var/db/emirrordist/content.db')
                os.makedirs(os.path.dirname(content_db), exist_ok=True)
                try:
                    os.unlink(content_db)
                except OSError:
                    pass
            else:
                content_db = None

            # Demonstrate that fetch preserves a stale file in DISTDIR when no digests are given.
            foo_uri = {
                'foo': ('{scheme}://{host}:{port}/distfiles/foo'.format(
                    scheme=scheme, host=host, port=server.server_port), )
            }
            foo_path = os.path.join(settings['DISTDIR'], 'foo')
            foo_stale_content = b'stale content\n'
            with open(foo_path, 'wb') as f:
                f.write(b'stale content\n')

            self.assertTrue(
                bool(run_async(fetch, foo_uri, settings, try_mirrors=False)))

            with open(foo_path, 'rb') as f:
                self.assertEqual(f.read(), foo_stale_content)
            with open(foo_path, 'rb') as f:
                self.assertNotEqual(f.read(), distfiles['foo'])

            # Use force=True to update the stale file.
            self.assertTrue(
                bool(
                    run_async(fetch,
                              foo_uri,
                              settings,
                              try_mirrors=False,
                              force=True)))

            with open(foo_path, 'rb') as f:
                self.assertEqual(f.read(), distfiles['foo'])

            # Test force=True with FEATURES=skiprocheck, using read-only DISTDIR.
            # FETCHCOMMAND is set to temporarily chmod +w DISTDIR. Note that
            # FETCHCOMMAND must perform atomic rename itself due to read-only
            # DISTDIR.
            with open(foo_path, 'wb') as f:
                f.write(b'stale content\n')
            orig_fetchcommand = settings['FETCHCOMMAND']
            orig_distdir_mode = os.stat(settings['DISTDIR']).st_mode
            temp_fetchcommand = os.path.join(eubin, 'fetchcommand')
            with open(temp_fetchcommand, 'w') as f:
                f.write("""
					set -e
					URI=$1
					DISTDIR=$2
					FILE=$3
					trap 'chmod a-w "${DISTDIR}"' EXIT
					chmod ug+w "${DISTDIR}"
					%s
					mv -f "${DISTDIR}/${FILE}.__download__" "${DISTDIR}/${FILE}"
				""" % orig_fetchcommand.replace('${FILE}', '${FILE}.__download__'))
            settings[
                'FETCHCOMMAND'] = '"%s" "%s" "${URI}" "${DISTDIR}" "${FILE}"' % (
                    BASH_BINARY, temp_fetchcommand)
            settings.features.add('skiprocheck')
            settings.features.remove('distlocks')
            os.chmod(settings['DISTDIR'], 0o555)
            try:
                self.assertTrue(
                    bool(
                        run_async(fetch,
                                  foo_uri,
                                  settings,
                                  try_mirrors=False,
                                  force=True)))
            finally:
                settings['FETCHCOMMAND'] = orig_fetchcommand
                os.chmod(settings['DISTDIR'], orig_distdir_mode)
                settings.features.remove('skiprocheck')
                settings.features.add('distlocks')
                os.unlink(temp_fetchcommand)

            with open(foo_path, 'rb') as f:
                self.assertEqual(f.read(), distfiles['foo'])

            # Test emirrordist invocation.
            emirrordist_cmd = (portage._python_interpreter, '-b', '-Wd',
                               os.path.join(self.bindir, 'emirrordist'),
                               '--distfiles', settings['DISTDIR'],
                               '--config-root', settings['EPREFIX'],
                               '--delete', '--repositories-configuration',
                               settings.repositories.config_string(), '--repo',
                               'test_repo', '--mirror')

            if content_db is not None:
                emirrordist_cmd = emirrordist_cmd + (
                    '--content-db',
                    content_db,
                )

            env = settings.environ()
            env['PYTHONPATH'] = ':'.join(
                filter(None, [PORTAGE_PYM_PATH] +
                       os.environ.get('PYTHONPATH', '').split(':')))

            for k in distfiles:
                try:
                    os.unlink(os.path.join(settings['DISTDIR'], k))
                except OSError:
                    pass

            proc = loop.run_until_complete(
                asyncio.create_subprocess_exec(*emirrordist_cmd, env=env))
            self.assertEqual(loop.run_until_complete(proc.wait()), 0)

            for k in distfiles:
                with open(
                        os.path.join(settings['DISTDIR'],
                                     layouts[0].get_path(k)), 'rb') as f:
                    self.assertEqual(f.read(), distfiles[k])

            if content_db is not None:
                loop.run_until_complete(
                    self._test_content_db(
                        emirrordist_cmd,
                        env,
                        layouts,
                        content_db,
                        distfiles,
                        settings,
                        portdb,
                    ))

            # Tests only work with one ebuild at a time, so the config
            # pool only needs a single config instance.
            class config_pool:
                @staticmethod
                def allocate():
                    return settings

                @staticmethod
                def deallocate(settings):
                    pass

            def async_fetch(pkg, ebuild_path):
                fetcher = EbuildFetcher(config_pool=config_pool,
                                        ebuild_path=ebuild_path,
                                        fetchonly=False,
                                        fetchall=True,
                                        pkg=pkg,
                                        scheduler=loop)
                fetcher.start()
                return fetcher.async_wait()

            for cpv in ebuilds:
                metadata = dict(
                    zip(Package.metadata_keys,
                        portdb.aux_get(cpv, Package.metadata_keys)))

                pkg = Package(built=False,
                              cpv=cpv,
                              installed=False,
                              metadata=metadata,
                              root_config=root_config,
                              type_name='ebuild')

                settings.setcpv(pkg)
                ebuild_path = portdb.findname(pkg.cpv)
                portage.doebuild_environment(ebuild_path,
                                             'fetch',
                                             settings=settings,
                                             db=portdb)

                # Test good files in DISTDIR
                for k in settings['AA'].split():
                    os.stat(os.path.join(settings['DISTDIR'], k))
                self.assertEqual(
                    loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
                for k in settings['AA'].split():
                    with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
                        self.assertEqual(f.read(), distfiles[k])

                # Test digestgen with fetch
                os.unlink(
                    os.path.join(os.path.dirname(ebuild_path), 'Manifest'))
                for k in settings['AA'].split():
                    os.unlink(os.path.join(settings['DISTDIR'], k))
                with ForkExecutor(loop=loop) as executor:
                    self.assertTrue(
                        bool(
                            loop.run_until_complete(
                                loop.run_in_executor(
                                    executor,
                                    functools.partial(digestgen,
                                                      mysettings=settings,
                                                      myportdb=portdb)))))
                for k in settings['AA'].split():
                    with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
                        self.assertEqual(f.read(), distfiles[k])

                # Test missing files in DISTDIR
                for k in settings['AA'].split():
                    os.unlink(os.path.join(settings['DISTDIR'], k))
                self.assertEqual(
                    loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
                for k in settings['AA'].split():
                    with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
                        self.assertEqual(f.read(), distfiles[k])

                # Test empty files in DISTDIR
                for k in settings['AA'].split():
                    file_path = os.path.join(settings['DISTDIR'], k)
                    with open(file_path, 'wb') as f:
                        pass
                    self.assertEqual(os.stat(file_path).st_size, 0)
                self.assertEqual(
                    loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
                for k in settings['AA'].split():
                    with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
                        self.assertEqual(f.read(), distfiles[k])

                # Test non-empty files containing null bytes in DISTDIR
                for k in settings['AA'].split():
                    file_path = os.path.join(settings['DISTDIR'], k)
                    with open(file_path, 'wb') as f:
                        f.write(len(distfiles[k]) * b'\0')
                    self.assertEqual(
                        os.stat(file_path).st_size, len(distfiles[k]))
                self.assertEqual(
                    loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
                for k in settings['AA'].split():
                    with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
                        self.assertEqual(f.read(), distfiles[k])

                # Test PORTAGE_RO_DISTDIRS
                settings['PORTAGE_RO_DISTDIRS'] = '"{}"'.format(ro_distdir)
                orig_fetchcommand = settings['FETCHCOMMAND']
                orig_resumecommand = settings['RESUMECOMMAND']
                try:
                    settings['FETCHCOMMAND'] = settings['RESUMECOMMAND'] = ''
                    for k in settings['AA'].split():
                        file_path = os.path.join(settings['DISTDIR'], k)
                        os.rename(file_path, os.path.join(ro_distdir, k))
                    self.assertEqual(
                        loop.run_until_complete(async_fetch(pkg, ebuild_path)),
                        0)
                    for k in settings['AA'].split():
                        file_path = os.path.join(settings['DISTDIR'], k)
                        self.assertTrue(os.path.islink(file_path))
                        with open(file_path, 'rb') as f:
                            self.assertEqual(f.read(), distfiles[k])
                        os.unlink(file_path)
                finally:
                    settings.pop('PORTAGE_RO_DISTDIRS')
                    settings['FETCHCOMMAND'] = orig_fetchcommand
                    settings['RESUMECOMMAND'] = orig_resumecommand

                # Test local filesystem in GENTOO_MIRRORS
                orig_mirrors = settings['GENTOO_MIRRORS']
                orig_fetchcommand = settings['FETCHCOMMAND']
                try:
                    settings['GENTOO_MIRRORS'] = ro_distdir
                    settings['FETCHCOMMAND'] = settings['RESUMECOMMAND'] = ''
                    self.assertEqual(
                        loop.run_until_complete(async_fetch(pkg, ebuild_path)),
                        0)
                    for k in settings['AA'].split():
                        with open(os.path.join(settings['DISTDIR'], k),
                                  'rb') as f:
                            self.assertEqual(f.read(), distfiles[k])
                finally:
                    settings['GENTOO_MIRRORS'] = orig_mirrors
                    settings['FETCHCOMMAND'] = orig_fetchcommand
                    settings['RESUMECOMMAND'] = orig_resumecommand

                # Test readonly DISTDIR
                orig_distdir_mode = os.stat(settings['DISTDIR']).st_mode
                try:
                    os.chmod(settings['DISTDIR'], 0o555)
                    self.assertEqual(
                        loop.run_until_complete(async_fetch(pkg, ebuild_path)),
                        0)
                    for k in settings['AA'].split():
                        with open(os.path.join(settings['DISTDIR'], k),
                                  'rb') as f:
                            self.assertEqual(f.read(), distfiles[k])
                finally:
                    os.chmod(settings['DISTDIR'], orig_distdir_mode)

                # Test parallel-fetch mode
                settings['PORTAGE_PARALLEL_FETCHONLY'] = '1'
                try:
                    self.assertEqual(
                        loop.run_until_complete(async_fetch(pkg, ebuild_path)),
                        0)
                    for k in settings['AA'].split():
                        with open(os.path.join(settings['DISTDIR'], k),
                                  'rb') as f:
                            self.assertEqual(f.read(), distfiles[k])
                    for k in settings['AA'].split():
                        os.unlink(os.path.join(settings['DISTDIR'], k))
                    self.assertEqual(
                        loop.run_until_complete(async_fetch(pkg, ebuild_path)),
                        0)
                    for k in settings['AA'].split():
                        with open(os.path.join(settings['DISTDIR'], k),
                                  'rb') as f:
                            self.assertEqual(f.read(), distfiles[k])
                finally:
                    settings.pop('PORTAGE_PARALLEL_FETCHONLY')

                # Test RESUMECOMMAND
                orig_resume_min_size = settings[
                    'PORTAGE_FETCH_RESUME_MIN_SIZE']
                try:
                    settings['PORTAGE_FETCH_RESUME_MIN_SIZE'] = '2'
                    for k in settings['AA'].split():
                        file_path = os.path.join(settings['DISTDIR'], k)
                        os.unlink(file_path)
                        with open(file_path + _download_suffix, 'wb') as f:
                            f.write(distfiles[k][:2])
                    self.assertEqual(
                        loop.run_until_complete(async_fetch(pkg, ebuild_path)),
                        0)
                    for k in settings['AA'].split():
                        with open(os.path.join(settings['DISTDIR'], k),
                                  'rb') as f:
                            self.assertEqual(f.read(), distfiles[k])
                finally:
                    settings[
                        'PORTAGE_FETCH_RESUME_MIN_SIZE'] = orig_resume_min_size

                # Test readonly DISTDIR + skiprocheck, with FETCHCOMMAND set to temporarily chmod DISTDIR
                orig_fetchcommand = settings['FETCHCOMMAND']
                orig_distdir_mode = os.stat(settings['DISTDIR']).st_mode
                for k in settings['AA'].split():
                    os.unlink(os.path.join(settings['DISTDIR'], k))
                try:
                    os.chmod(settings['DISTDIR'], 0o555)
                    settings[
                        'FETCHCOMMAND'] = '"%s" -c "chmod ug+w \\"${DISTDIR}\\"; %s; status=\\$?; chmod a-w \\"${DISTDIR}\\"; exit \\$status"' % (
                            BASH_BINARY, orig_fetchcommand.replace('"', '\\"'))
                    settings.features.add('skiprocheck')
                    settings.features.remove('distlocks')
                    self.assertEqual(
                        loop.run_until_complete(async_fetch(pkg, ebuild_path)),
                        0)
                finally:
                    settings['FETCHCOMMAND'] = orig_fetchcommand
                    os.chmod(settings['DISTDIR'], orig_distdir_mode)
                    settings.features.remove('skiprocheck')
                    settings.features.add('distlocks')
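
The RESUMECOMMAND portion of the test above seeds a truncated partial download (the `.__download__` suffix comes from the FETCHCOMMAND wrapper used earlier in the test) and relies on the fetch code comparing its size against PORTAGE_FETCH_RESUME_MIN_SIZE. A hedged sketch of that size check follows; the helper and threshold handling are simplified assumptions, not the exact fetch() logic.

import os

_download_suffix = '.__download__'

def should_resume(distdir, filename, resume_min_size):
    """Resume only if a partial download exists and is at least resume_min_size bytes."""
    try:
        partial = os.path.join(distdir, filename + _download_suffix)
        partial_size = os.stat(partial).st_size
    except OSError:
        return False
    return partial_size >= resume_min_size
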
Ejemplo n.º 52
0
def process(mysettings, key, logentries, fulltext):

    if mysettings.get("PORTAGE_LOGDIR"):
        logdir = normalize_path(mysettings["PORTAGE_LOGDIR"])
    else:
        logdir = os.path.join(
            os.sep, mysettings["EPREFIX"].lstrip(os.sep), "var", "log", "portage"
        )

    if not os.path.isdir(logdir):
        # Only initialize group/mode if the directory doesn't
        # exist, so that we don't override permissions if they
        # were previously set by the administrator.
        # NOTE: These permissions should be compatible with our
        # default logrotate config as discussed in bug 374287.
        uid = -1
        if portage.data.secpass >= 2:
            uid = portage_uid
        ensure_dirs(logdir, uid=uid, gid=portage_gid, mode=0o2770)

    cat, pf = portage.catsplit(key)

    elogfilename = (
        pf
        + ":"
        + _unicode_decode(
            time.strftime("%Y%m%d-%H%M%S", time.gmtime(time.time())),
            encoding=_encodings["content"],
            errors="replace",
        )
        + ".log"
    )

    if "split-elog" in mysettings.features:
        log_subdir = os.path.join(logdir, "elog", cat)
        elogfilename = os.path.join(log_subdir, elogfilename)
    else:
        log_subdir = os.path.join(logdir, "elog")
        elogfilename = os.path.join(log_subdir, cat + ":" + elogfilename)
    _ensure_log_subdirs(logdir, log_subdir)

    try:
        with io.open(
            _unicode_encode(elogfilename, encoding=_encodings["fs"], errors="strict"),
            mode="w",
            encoding=_encodings["content"],
            errors="backslashreplace",
        ) as elogfile:
            elogfile.write(_unicode_decode(fulltext))
    except IOError as e:
        func_call = "open('%s', 'w')" % elogfilename
        if e.errno == errno.EACCES:
            raise portage.exception.PermissionDenied(func_call)
        elif e.errno == errno.EPERM:
            raise portage.exception.OperationNotPermitted(func_call)
        elif e.errno == errno.EROFS:
            raise portage.exception.ReadOnlyFileSystem(func_call)
        else:
            raise

    # Copy group permission bits from parent directory.
    elogdir_st = os.stat(log_subdir)
    elogdir_gid = elogdir_st.st_gid
    elogdir_grp_mode = 0o060 & elogdir_st.st_mode

    # Copy the uid from the parent directory if we have privileges
    # to do so, for compatibility with our default logrotate
    # config (see bug 378451). With the "su portage portage"
    # directive and logrotate-3.8.0, logrotate's chown call during
    # the compression phase will only succeed if the log file's uid
    # is portage_uid.
    logfile_uid = -1
    if portage.data.secpass >= 2:
        logfile_uid = elogdir_st.st_uid
    apply_permissions(
        elogfilename, uid=logfile_uid, gid=elogdir_gid, mode=elogdir_grp_mode, mask=0
    )

    return elogfilename
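
A simplified sketch of the permission-copying step above: give a freshly created log file the same gid and group read/write bits as its parent directory, leaving the remaining permission bits alone. The helper name is hypothetical and error handling is omitted.

import os
import stat

def match_parent_group(path):
    """Copy the parent directory's gid and group r/w bits onto path."""
    parent_st = os.stat(os.path.dirname(path))
    mode = stat.S_IMODE(os.stat(path).st_mode)
    os.chown(path, -1, parent_st.st_gid)
    os.chmod(path, (mode & ~0o070) | (parent_st.st_mode & 0o060))
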
Ejemplo n.º 53
0
def collect_libraries_from_dir(dirs, mask, logger):
	''' Collects all libraries from the specified list of directories.
		mask is a list of paths omitted from scanning; each entry can be either a single file or an entire directory.
		Returns a tuple composed of: the set of library files, the set of .la files,
		and the set of symlinks (used for resolving dependencies).
	'''

	# contains list of directories found
	# allows us to reduce number of fnc calls
	found_directories = set()
	found_files = set()
	found_symlinks = set()
	found_la_files = set() # la libraries

	for _dir in dirs:
		if _dir in mask:
			continue

		try:
			for _listing in os.listdir(_dir):
				listing = os.path.join(_dir, _listing)
				if listing in mask or _listing in mask:
					continue

				if os.path.isdir(listing):
					if os.path.islink(listing):
						#we do not want scan symlink-directories
						pass
					else:
						found_directories.add(listing)
				elif os.path.isfile(listing):
					if (listing.endswith('.so') or
						listing.endswith('.a') or
						'.so.' in listing
						):

						if os.path.islink(listing):
							found_symlinks.add(listing)
						else:
							found_files.add(listing)
						continue
					elif listing.endswith('.la'):
						if listing in found_la_files:
							continue

						found_la_files.add(listing)
					else:
						# sometimes there are binaries in libs' subdir,
						# for example in nagios
						if not os.path.islink(listing):
							#if listing in found_files or listing in found_symlinks:
								#continue
							prv = os.stat(listing)[stat.ST_MODE]
							if prv & stat.S_IXUSR == stat.S_IXUSR or \
									prv & stat.S_IXGRP == stat.S_IXGRP or \
									prv & stat.S_IXOTH == stat.S_IXOTH:
								found_files.add(listing)
		except Exception as ex:
			logger.debug('\t' +
				yellow('Exception collecting libraries: ' +
				blue('%s') % str(ex)))

	if found_directories:
		_file, la_file, link = \
			collect_libraries_from_dir(found_directories, mask, logger)
		found_files.update(_file)
		found_la_files.update(la_file)
		found_symlinks.update(link)
	return (found_files, found_la_files, found_symlinks)
Ejemplo n.º 54
0
def digestgen(myarchives=None, mysettings=None, myportdb=None):
	"""
	Generates a digest file if missing. Fetches files if necessary.
	NOTE: myarchives and mysettings used to be positional arguments,
		so their order must be preserved for backward compatibility.
	@param mysettings: the ebuild config (mysettings["O"] must correspond
		to the ebuild's parent directory)
	@type mysettings: config
	@param myportdb: a portdbapi instance
	@type myportdb: portdbapi
	@rtype: int
	@return: 1 on success and 0 on failure
	"""
	if mysettings is None or myportdb is None:
		raise TypeError("portage.digestgen(): 'mysettings' and 'myportdb' parameter are required.")

	try:
		portage._doebuild_manifest_exempt_depend += 1
		distfiles_map = {}
		fetchlist_dict = FetchlistDict(mysettings["O"], mysettings, myportdb)
		for cpv in fetchlist_dict:
			try:
				for myfile in fetchlist_dict[cpv]:
					distfiles_map.setdefault(myfile, []).append(cpv)
			except InvalidDependString as e:
				writemsg("!!! %s\n" % str(e), noiselevel=-1)
				del e
				return 0
		mytree = os.path.dirname(os.path.dirname(mysettings["O"]))
		try:
			mf = mysettings.repositories.get_repo_for_location(mytree)
		except KeyError:
			# backward compatibility
			mytree = os.path.realpath(mytree)
			mf = mysettings.repositories.get_repo_for_location(mytree)

		mf = mf.load_manifest(mysettings["O"], mysettings["DISTDIR"],
			fetchlist_dict=fetchlist_dict)

		if not mf.allow_create:
			writemsg_stdout(_(">>> Skipping creating Manifest for %s; "
				"repository is configured to not use them\n") % mysettings["O"])
			return 1

		# Don't require all hashes since that can trigger excessive
		# fetches when sufficient digests already exist.  To ease transition
		# while Manifest 1 is being removed, only require hashes that will
		# exist before and after the transition.
		required_hash_types = set()
		required_hash_types.add("size")
		required_hash_types.add(MANIFEST2_REQUIRED_HASH)
		dist_hashes = mf.fhashdict.get("DIST", {})

		# To avoid accidental regeneration of digests with the incorrect
		# files (such as partially downloaded files), trigger the fetch
		# code if the file exists and its size doesn't match the current
		# manifest entry. If there really is a legitimate reason for the
		# digest to change, `ebuild --force digest` can be used to avoid
		# triggering this code (or else the old digests can be manually
		# removed from the Manifest).
		missing_files = []
		for myfile in distfiles_map:
			myhashes = dist_hashes.get(myfile)
			if not myhashes:
				try:
					st = os.stat(os.path.join(mysettings["DISTDIR"], myfile))
				except OSError:
					st = None
				if st is None or st.st_size == 0:
					missing_files.append(myfile)
				continue
			size = myhashes.get("size")

			try:
				st = os.stat(os.path.join(mysettings["DISTDIR"], myfile))
			except OSError as e:
				if e.errno != errno.ENOENT:
					raise
				del e
				if size == 0:
					missing_files.append(myfile)
					continue
				if required_hash_types.difference(myhashes):
					missing_files.append(myfile)
					continue
			else:
				if st.st_size == 0 or size is not None and size != st.st_size:
					missing_files.append(myfile)
					continue

		if missing_files:
				for myfile in missing_files:
					uris = set()
					all_restrict = set()
					for cpv in distfiles_map[myfile]:
						uris.update(myportdb.getFetchMap(
							cpv, mytree=mytree)[myfile])
						restrict = myportdb.aux_get(cpv, ['RESTRICT'],
							mytree=mytree)[0]
						# Here we ignore conditional parts of RESTRICT since
						# they don't apply unconditionally. Assume such
						# conditionals only apply on the client side where
						# digestgen() does not need to be called.
						all_restrict.update(use_reduce(restrict,
							flat=True, matchnone=True))

						# fetch() uses CATEGORY and PF to display a message
						# when fetch restriction is triggered.
						cat, pf = catsplit(cpv)
						mysettings["CATEGORY"] = cat
						mysettings["PF"] = pf

					# fetch() uses PORTAGE_RESTRICT to control fetch
					# restriction, which is only applied to files that
					# are not fetchable via a mirror:// URI.
					mysettings["PORTAGE_RESTRICT"] = " ".join(all_restrict)

					try:
						st = os.stat(os.path.join(
							mysettings["DISTDIR"],myfile))
					except OSError:
						st = None

					if not fetch({myfile : uris}, mysettings):
						myebuild = os.path.join(mysettings["O"],
							catsplit(cpv)[1] + ".ebuild")
						spawn_nofetch(myportdb, myebuild)
						writemsg(_("!!! Fetch failed for %s, can't update "
							"Manifest\n") % myfile, noiselevel=-1)
						if myfile in dist_hashes and \
							st is not None and st.st_size > 0:
							# stat result is obtained before calling fetch(),
							# since fetch may rename the existing file if the
							# digest does not match.
							writemsg(_("!!! If you would like to "
								"forcefully replace the existing "
								"Manifest entry\n!!! for %s, use "
								"the following command:\n") % myfile + \
								"!!!    " + colorize("INFORM",
								"ebuild --force %s manifest" % \
								os.path.basename(myebuild)) + "\n",
								noiselevel=-1)
						return 0
		writemsg_stdout(_(">>> Creating Manifest for %s\n") % mysettings["O"])
		try:
			mf.create(assumeDistHashesSometimes=True,
				assumeDistHashesAlways=(
				"assume-digests" in mysettings.features))
		except FileNotFound as e:
			writemsg(_("!!! File %s doesn't exist, can't update "
				"Manifest\n") % e, noiselevel=-1)
			return 0
		except PortagePackageException as e:
			writemsg(("!!! %s\n") % (e,), noiselevel=-1)
			return 0
		try:
			mf.write(sign=False)
		except PermissionDenied as e:
			writemsg(_("!!! Permission Denied: %s\n") % (e,), noiselevel=-1)
			return 0
		if "assume-digests" not in mysettings.features:
			distlist = list(mf.fhashdict.get("DIST", {}))
			distlist.sort()
			auto_assumed = []
			for filename in distlist:
				if not os.path.exists(
					os.path.join(mysettings["DISTDIR"], filename)):
					auto_assumed.append(filename)
			if auto_assumed:
				cp = os.path.sep.join(mysettings["O"].split(os.path.sep)[-2:])
				pkgs = myportdb.cp_list(cp, mytree=mytree)
				pkgs.sort()
				writemsg_stdout("  digest.assumed" + colorize("WARN",
					str(len(auto_assumed)).rjust(18)) + "\n")
				for pkg_key in pkgs:
					fetchlist = myportdb.getFetchMap(pkg_key, mytree=mytree)
					pv = pkg_key.split("/")[1]
					for filename in auto_assumed:
						if filename in fetchlist:
							writemsg_stdout(
								"   %s::%s\n" % (pv, filename))
		return 1
	finally:
		portage._doebuild_manifest_exempt_depend -= 1
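
The decision of when to refetch before regenerating digests boils down to a size comparison against the existing Manifest entry; a reduced, illustrative version is shown below (the helper name is an assumption).

import os

def needs_refetch(distdir, filename, manifest_size):
    """Refetch when the file is absent, empty, or its size disagrees with the Manifest."""
    try:
        st = os.stat(os.path.join(distdir, filename))
    except OSError:
        return True
    if st.st_size == 0:
        return True
    return manifest_size is not None and st.st_size != manifest_size
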
Ejemplo n.º 55
0
def hardlink_is_mine(link, lock):
    try:
        return os.stat(link).st_nlink == 2
    except OSError:
        return False
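
For context, hardlink-based locking works by linking a per-process name to a shared lock file and then checking the link count; a minimal sketch follows (names are hypothetical, and the shared lock file is assumed to already exist).

import os

def try_hardlink_lock(lock_path, my_link_path):
    """Claim the lock only if, after linking, the lock file has exactly two links."""
    try:
        os.link(lock_path, my_link_path)
    except OSError:
        return False
    try:
        return os.stat(my_link_path).st_nlink == 2
    except OSError:
        return False
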
Ejemplo n.º 56
0
def lockfile(mypath,
             wantnewlockfile=0,
             unlinkfile=0,
             waiting_msg=None,
             flags=0):
    """
	If wantnewlockfile is True then this creates a lockfile in the parent
	directory as the file: '.' + basename + '.portage_lockfile'.
	"""
    import fcntl

    if not mypath:
        raise InvalidData(_("Empty path given"))

    if isinstance(mypath, basestring) and mypath[-1] == '/':
        mypath = mypath[:-1]

    if hasattr(mypath, 'fileno'):
        mypath = mypath.fileno()
    if isinstance(mypath, int):
        lockfilename = mypath
        wantnewlockfile = 0
        unlinkfile = 0
    elif wantnewlockfile:
        base, tail = os.path.split(mypath)
        lockfilename = os.path.join(base, "." + tail + ".portage_lockfile")
        del base, tail
        unlinkfile = 1
    else:
        lockfilename = mypath

    if isinstance(mypath, basestring):
        if not os.path.exists(os.path.dirname(mypath)):
            raise DirectoryNotFound(os.path.dirname(mypath))
        preexisting = os.path.exists(lockfilename)
        old_mask = os.umask(000)
        try:
            try:
                myfd = os.open(lockfilename, os.O_CREAT | os.O_RDWR, 0o660)
            except OSError as e:
                func_call = "open('%s')" % lockfilename
                if e.errno == OperationNotPermitted.errno:
                    raise OperationNotPermitted(func_call)
                elif e.errno == PermissionDenied.errno:
                    raise PermissionDenied(func_call)
                else:
                    raise

            if not preexisting:
                try:
                    if os.stat(lockfilename).st_gid != portage_gid:
                        os.chown(lockfilename, -1, portage_gid)
                except OSError as e:
                    if e.errno in (errno.ENOENT, errno.ESTALE):
                        return lockfile(mypath,
                                        wantnewlockfile=wantnewlockfile,
                                        unlinkfile=unlinkfile,
                                        waiting_msg=waiting_msg,
                                        flags=flags)
                    else:
                        writemsg(_("Cannot chown a lockfile: '%s'\n") % \
                         lockfilename, noiselevel=-1)

        finally:
            os.umask(old_mask)

    elif isinstance(mypath, int):
        myfd = mypath

    else:
        raise ValueError(_("Unknown type passed in '%s': '%s'") % \
         (type(mypath), mypath))

    # try for a non-blocking lock, if it's held, throw a message
    # we're waiting on lockfile and use a blocking attempt.
    locking_method = fcntl.lockf
    try:
        fcntl.lockf(myfd, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except IOError as e:
        if "errno" not in dir(e):
            raise
        if e.errno in (errno.EACCES, errno.EAGAIN):
            # resource temp unavailable; eg, someone beat us to the lock.
            if flags & os.O_NONBLOCK:
                raise TryAgain(mypath)

            global _quiet
            out = EOutput()
            out.quiet = _quiet
            if waiting_msg is None:
                if isinstance(mypath, int):
                    waiting_msg = _("waiting for lock on fd %i") % myfd
                else:
                    waiting_msg = _("waiting for lock on %s\n") % lockfilename
            out.ebegin(waiting_msg)
            # try for the exclusive lock now.
            try:
                fcntl.lockf(myfd, fcntl.LOCK_EX)
            except EnvironmentError as e:
                out.eend(1, str(e))
                raise
            out.eend(os.EX_OK)
        elif e.errno == errno.ENOLCK:
            # We're not allowed to lock on this FS.
            os.close(myfd)
            link_success = False
            if lockfilename == str(lockfilename):
                if wantnewlockfile:
                    try:
                        if os.stat(lockfilename)[stat.ST_NLINK] == 1:
                            os.unlink(lockfilename)
                    except OSError:
                        pass
                    link_success = hardlink_lockfile(lockfilename)
            if not link_success:
                raise
            locking_method = None
            myfd = HARDLINK_FD
        else:
            raise


    if isinstance(lockfilename, basestring) and \
     myfd != HARDLINK_FD and _fstat_nlink(myfd) == 0:
        # The file was deleted on us... Keep trying to make one...
        os.close(myfd)
        writemsg(_("lockfile recurse\n"), 1)
        lockfilename, myfd, unlinkfile, locking_method = lockfile(
            mypath,
            wantnewlockfile=wantnewlockfile,
            unlinkfile=unlinkfile,
            waiting_msg=waiting_msg,
            flags=flags)

    writemsg(str((lockfilename, myfd, unlinkfile)) + "\n", 1)
    return (lockfilename, myfd, unlinkfile, locking_method)
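
A self-contained sketch of the fcntl strategy used above: attempt a non-blocking exclusive lock first, and only fall back to a blocking lockf() call (or raise, when non-blocking behaviour is requested) if the lock is currently held. The ENOLCK/hardlink fallback and ownership fixups are intentionally omitted.

import errno
import fcntl
import os

def acquire_lock(path, nonblock=False):
    """Open path and take an exclusive fcntl lock, returning the locked fd."""
    fd = os.open(path, os.O_CREAT | os.O_RDWR, 0o660)
    try:
        fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except OSError as e:
        if e.errno not in (errno.EACCES, errno.EAGAIN) or nonblock:
            os.close(fd)
            raise
        fcntl.lockf(fd, fcntl.LOCK_EX)  # blocking wait
    return fd
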
Ejemplo n.º 57
0
def _env_update(makelinks, target_root, prev_mtimes, contents, env,
                writemsg_level):
    if writemsg_level is None:
        writemsg_level = portage.util.writemsg_level
    if target_root is None:
        target_root = portage.settings["ROOT"]
    if prev_mtimes is None:
        prev_mtimes = portage.mtimedb["ldpath"]
    if env is None:
        settings = portage.settings
    else:
        settings = env

    eprefix = settings.get("EPREFIX", "")
    eprefix_lstrip = eprefix.lstrip(os.sep)
    eroot = normalize_path(os.path.join(target_root, eprefix_lstrip)).rstrip(
        os.sep) + os.sep
    envd_dir = os.path.join(eroot, "etc", "env.d")
    ensure_dirs(envd_dir, mode=0o755)
    fns = listdir(envd_dir, EmptyOnError=1)
    fns.sort()
    templist = []
    for x in fns:
        if len(x) < 3:
            continue
        if not x[0].isdigit() or not x[1].isdigit():
            continue
        if x.startswith(".") or x.endswith("~") or x.endswith(".bak"):
            continue
        templist.append(x)
    fns = templist
    del templist

    space_separated = set(["CONFIG_PROTECT", "CONFIG_PROTECT_MASK"])
    colon_separated = set([
        "ADA_INCLUDE_PATH", "ADA_OBJECTS_PATH", "CLASSPATH", "INFODIR",
        "INFOPATH", "KDEDIRS", "LDPATH", "MANPATH", "PATH", "PKG_CONFIG_PATH",
        "PRELINK_PATH", "PRELINK_PATH_MASK", "PYTHONPATH", "ROOTPATH"
    ])

    config_list = []

    for x in fns:
        file_path = os.path.join(envd_dir, x)
        try:
            myconfig = getconfig(file_path, expand=False)
        except ParseError as e:
            writemsg("!!! '%s'\n" % str(e), noiselevel=-1)
            del e
            continue
        if myconfig is None:
            # broken symlink or file removed by a concurrent process
            writemsg("!!! File Not Found: '%s'\n" % file_path, noiselevel=-1)
            continue

        config_list.append(myconfig)
        if "SPACE_SEPARATED" in myconfig:
            space_separated.update(myconfig["SPACE_SEPARATED"].split())
            del myconfig["SPACE_SEPARATED"]
        if "COLON_SEPARATED" in myconfig:
            colon_separated.update(myconfig["COLON_SEPARATED"].split())
            del myconfig["COLON_SEPARATED"]

    env = {}
    specials = {}
    for var in space_separated:
        mylist = []
        for myconfig in config_list:
            if var in myconfig:
                for item in myconfig[var].split():
                    if item and not item in mylist:
                        mylist.append(item)
                del myconfig[var]  # prepare for env.update(myconfig)
        if mylist:
            env[var] = " ".join(mylist)
        specials[var] = mylist

    for var in colon_separated:
        mylist = []
        for myconfig in config_list:
            if var in myconfig:
                for item in myconfig[var].split(":"):
                    if item and not item in mylist:
                        mylist.append(item)
                del myconfig[var]  # prepare for env.update(myconfig)
        if mylist:
            env[var] = ":".join(mylist)
        specials[var] = mylist

    for myconfig in config_list:
        """Cumulative variables have already been deleted from myconfig so that
		they won't be overwritten by this dict.update call."""
        env.update(myconfig)

    ldsoconf_path = os.path.join(eroot, "etc", "ld.so.conf")
    try:
        myld = io.open(_unicode_encode(ldsoconf_path,
                                       encoding=_encodings['fs'],
                                       errors='strict'),
                       mode='r',
                       encoding=_encodings['content'],
                       errors='replace')
        myldlines = myld.readlines()
        myld.close()
        oldld = []
        for x in myldlines:
            #each line has at least one char (a newline)
            if x[:1] == "#":
                continue
            oldld.append(x[:-1])
    except (IOError, OSError) as e:
        if e.errno != errno.ENOENT:
            raise
        oldld = None

    newld = specials["LDPATH"]
    if oldld != newld:
        #ld.so.conf needs updating and ldconfig needs to be run
        myfd = atomic_ofstream(ldsoconf_path)
        myfd.write(
            "# ld.so.conf autogenerated by env-update; make all changes to\n")
        myfd.write("# contents of /etc/env.d directory\n")
        for x in specials["LDPATH"]:
            myfd.write(x + "\n")
        myfd.close()

    potential_lib_dirs = set()
    for lib_dir_glob in ('usr/lib*', 'lib*'):
        x = os.path.join(eroot, lib_dir_glob)
        for y in glob.glob(
                _unicode_encode(x, encoding=_encodings['fs'],
                                errors='strict')):
            try:
                y = _unicode_decode(y,
                                    encoding=_encodings['fs'],
                                    errors='strict')
            except UnicodeDecodeError:
                continue
            if os.path.basename(y) != 'libexec':
                potential_lib_dirs.add(y[len(eroot):])

    # Update prelink.conf if we are prelink-enabled
    if prelink_capable:
        prelink_d = os.path.join(eroot, 'etc', 'prelink.conf.d')
        ensure_dirs(prelink_d)
        newprelink = atomic_ofstream(os.path.join(prelink_d, 'portage.conf'))
        newprelink.write(
            "# prelink.conf autogenerated by env-update; make all changes to\n"
        )
        newprelink.write("# contents of /etc/env.d directory\n")

        for x in sorted(potential_lib_dirs) + ['bin', 'sbin']:
            newprelink.write('-l /%s\n' % (x, ))
        prelink_paths = set()
        prelink_paths |= set(specials.get('LDPATH', []))
        prelink_paths |= set(specials.get('PATH', []))
        prelink_paths |= set(specials.get('PRELINK_PATH', []))
        prelink_path_mask = specials.get('PRELINK_PATH_MASK', [])
        for x in prelink_paths:
            if not x:
                continue
            if x[-1:] != '/':
                x += "/"
            plmasked = 0
            for y in prelink_path_mask:
                if not y:
                    continue
                if y[-1] != '/':
                    y += "/"
                if y == x[0:len(y)]:
                    plmasked = 1
                    break
            if not plmasked:
                newprelink.write("-h %s\n" % (x, ))
        for x in prelink_path_mask:
            newprelink.write("-b %s\n" % (x, ))
        newprelink.close()

        # Migration code path.  If /etc/prelink.conf was generated by us, then
        # point it to the new stuff until the prelink package re-installs.
        prelink_conf = os.path.join(eroot, 'etc', 'prelink.conf')
        try:
            with open(
                    _unicode_encode(prelink_conf,
                                    encoding=_encodings['fs'],
                                    errors='strict'), 'rb') as f:
                if f.readline(
                ) == b'# prelink.conf autogenerated by env-update; make all changes to\n':
                    f = atomic_ofstream(prelink_conf)
                    f.write('-c /etc/prelink.conf.d/*.conf\n')
                    f.close()
        except IOError as e:
            if e.errno != errno.ENOENT:
                raise

    current_time = int(time.time())
    mtime_changed = False

    lib_dirs = set()
    for lib_dir in set(specials['LDPATH']) | potential_lib_dirs:
        x = os.path.join(eroot, lib_dir.lstrip(os.sep))
        try:
            newldpathtime = os.stat(x)[stat.ST_MTIME]
            lib_dirs.add(normalize_path(x))
        except OSError as oe:
            if oe.errno == errno.ENOENT:
                try:
                    del prev_mtimes[x]
                except KeyError:
                    pass
                # ignore this path because it doesn't exist
                continue
            raise
        if newldpathtime == current_time:
            # Reset mtime to avoid the potential ambiguity of times that
            # differ by less than 1 second.
            newldpathtime -= 1
            os.utime(x, (newldpathtime, newldpathtime))
            prev_mtimes[x] = newldpathtime
            mtime_changed = True
        elif x in prev_mtimes:
            if prev_mtimes[x] == newldpathtime:
                pass
            else:
                prev_mtimes[x] = newldpathtime
                mtime_changed = True
        else:
            prev_mtimes[x] = newldpathtime
            mtime_changed = True

    if makelinks and \
     not mtime_changed and \
     contents is not None:
        libdir_contents_changed = False
        for mypath, mydata in contents.items():
            if mydata[0] not in ("obj", "sym"):
                continue
            head, tail = os.path.split(mypath)
            if head in lib_dirs:
                libdir_contents_changed = True
                break
        if not libdir_contents_changed:
            makelinks = False

    if "CHOST" in settings and "CBUILD" in settings and \
     settings["CHOST"] != settings["CBUILD"]:
        ldconfig = find_binary("%s-ldconfig" % settings["CHOST"])
    else:
        ldconfig = os.path.join(eroot, "sbin", "ldconfig")

    if ldconfig is None:
        pass
    elif not (os.access(ldconfig, os.X_OK) and os.path.isfile(ldconfig)):
        ldconfig = None

    # Only run ldconfig as needed
    if makelinks and ldconfig:
        # ldconfig has very different behaviour between FreeBSD and Linux
        if ostype == "Linux" or ostype.lower().endswith("gnu"):
            # We can't update links if we haven't cleaned other versions first, as
            # an older package installed ON TOP of a newer version will cause ldconfig
            # to overwrite the symlinks we just made. -X means no links. After 'clean'
            # we can safely create links.
            writemsg_level(_(">>> Regenerating %setc/ld.so.cache...\n") % \
             (target_root,))
            os.system("cd / ; %s -X -r '%s'" % (ldconfig, target_root))
        elif ostype in ("FreeBSD", "DragonFly"):
            writemsg_level(_(">>> Regenerating %svar/run/ld-elf.so.hints...\n") % \
             target_root)
            os.system(("cd / ; %s -elf -i " + \
             "-f '%svar/run/ld-elf.so.hints' '%setc/ld.so.conf'") % \
             (ldconfig, target_root, target_root))

    del specials["LDPATH"]

    notice = "# THIS FILE IS AUTOMATICALLY GENERATED BY env-update.\n"
    notice += "# DO NOT EDIT THIS FILE."
    penvnotice = notice + " CHANGES TO STARTUP PROFILES\n"
    cenvnotice = penvnotice[:]
    penvnotice += "# GO INTO /etc/profile NOT /etc/profile.env\n\n"
    cenvnotice += "# GO INTO /etc/csh.cshrc NOT /etc/csh.env\n\n"

    #create /etc/profile.env for bash support
    profile_env_path = os.path.join(eroot, "etc", "profile.env")
    outfile = atomic_ofstream(profile_env_path)
    outfile.write(penvnotice)

    env_keys = [x for x in env if x != "LDPATH"]
    env_keys.sort()
    for k in env_keys:
        v = env[k]
        if v.startswith('$') and not v.startswith('${'):
            outfile.write("export %s=$'%s'\n" % (k, v[1:]))
        else:
            outfile.write("export %s='%s'\n" % (k, v))
    outfile.close()
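    # Illustrative result (keys and values are hypothetical): profile.env ends
    # up with plain single-quoted exports such as
    #   export CONFIG_PROTECT='/etc /usr/share/config'
    #   export PATH='/opt/bin:/usr/local/bin'
    # Values that begin with '$' (but not '${') are written with bash's $'...'
    # ANSI-C quoting instead, with the leading '$' dropped.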

    # Create the systemd user environment configuration file
    # /etc/environment.d/10-gentoo-env.conf with the
    # environment configuration from /etc/env.d.
    systemd_environment_dir = os.path.join(eroot, "etc", "environment.d")
    os.makedirs(systemd_environment_dir, exist_ok=True)

    systemd_gentoo_env_path = os.path.join(systemd_environment_dir,
                                           "10-gentoo-env.conf")
    systemd_gentoo_env = atomic_ofstream(systemd_gentoo_env_path)
    try:
        senvnotice = notice + "\n\n"
        systemd_gentoo_env.write(senvnotice)

        for env_key in env_keys:
            env_key_value = env[env_key]

            # Skip variables with the empty string
            # as value. Those sometimes appear in
            # profile.env (e.g. "export GCC_SPECS=''"),
            # but are invalid in systemd's syntax.
            if not env_key_value:
                continue

            # Transform into systemd environment.d
            # conf syntax, basically shell variable
            # assignment (without "export ").
            line = f"{env_key}={env_key_value}\n"

            systemd_gentoo_env.write(line)
    except:
        systemd_gentoo_env.abort()
        raise
    systemd_gentoo_env.close()
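    # Illustrative result (values are hypothetical): 10-gentoo-env.conf carries
    # the same assignments without the "export" prefix and without quoting,
    # e.g. LANG=en_US.UTF-8, and empty-valued variables are omitted entirely.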

    #create /etc/csh.env for (t)csh support
    outfile = atomic_ofstream(os.path.join(eroot, "etc", "csh.env"))
    outfile.write(cenvnotice)
    for x in env_keys:
        outfile.write("setenv %s '%s'\n" % (x, env[x]))
    outfile.close()
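
The function shown above is what Portage runs during its env-update step. A minimal usage sketch, assuming a standard Portage installation where it is importable from portage.util.env_update (the exact module path may differ between Portage versions) and that the caller has enough privileges to write under /etc:

from portage.util.env_update import env_update

# Rebuild /etc/profile.env, /etc/csh.env and the systemd environment.d file
# from /etc/env.d, and refresh the dynamic linker cache when library
# directories have changed.
env_update(makelinks=1)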
Ejemplo n.º 58
0
def cacheddir(my_original_path,
              ignorecvs,
              ignorelist,
              EmptyOnError,
              followSymlinks=True):
    global cacheHit, cacheMiss, cacheStale
    mypath = normalize_path(my_original_path)
    if mypath in dircache:
        cacheHit += 1
        cached_mtime, list, ftype = dircache[mypath]
    else:
        cacheMiss += 1
        cached_mtime, list, ftype = -1, [], []
    try:
        pathstat = os.stat(mypath)
        if stat.S_ISDIR(pathstat[stat.ST_MODE]):
            mtime = pathstat.st_mtime
        else:
            raise DirectoryNotFound(mypath)
    except EnvironmentError as e:
        if e.errno == PermissionDenied.errno:
            raise PermissionDenied(mypath)
        del e
        return [], []
    except PortageException:
        return [], []
    # Python returns mtime in whole seconds, so a directory modified within the
    # last few seconds may still have a stale cached listing; re-list it then.
    if mtime != cached_mtime or time.time() - mtime < 4:
        if mypath in dircache:
            cacheStale += 1
        try:
            list = os.listdir(mypath)
        except EnvironmentError as e:
            if e.errno != errno.EACCES:
                raise
            del e
            raise PermissionDenied(mypath)
        ftype = []
        for x in list:
            try:
                if followSymlinks:
                    pathstat = os.stat(mypath + "/" + x)
                else:
                    pathstat = os.lstat(mypath + "/" + x)

                if stat.S_ISREG(pathstat[stat.ST_MODE]):
                    ftype.append(0)
                elif stat.S_ISDIR(pathstat[stat.ST_MODE]):
                    ftype.append(1)
                elif stat.S_ISLNK(pathstat[stat.ST_MODE]):
                    ftype.append(2)
                else:
                    ftype.append(3)
            except (IOError, OSError):
                ftype.append(3)
        dircache[mypath] = mtime, list, ftype

    ret_list = []
    ret_ftype = []
    for x in range(0, len(list)):
        if list[x] in ignorelist:
            pass
        elif ignorecvs:
            if list[x][:2] != ".#" and \
             not (ftype[x] == 1 and list[x] in _ignorecvs_dirs):
                ret_list.append(list[x])
                ret_ftype.append(ftype[x])
        else:
            ret_list.append(list[x])
            ret_ftype.append(ftype[x])

    writemsg(
        "cacheddirStats: H:%d/M:%d/S:%d\n" % (cacheHit, cacheMiss, cacheStale),
        10)
    return ret_list, ret_ftype
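
The numeric type codes are the only contract callers rely on here. Below is a simplified, self-contained sketch of that classification step (a hypothetical helper, not the cached Portage implementation): it maps each directory entry to the same codes cacheddir() uses.

import os
import stat

def classify_entries(path, follow_symlinks=True):
    """Map each entry of ``path`` to cacheddir()-style type codes:
    0 = regular file, 1 = directory, 2 = symlink, 3 = other/unreadable."""
    entries = sorted(os.listdir(path))
    ftypes = []
    for name in entries:
        full = os.path.join(path, name)
        try:
            st = os.stat(full) if follow_symlinks else os.lstat(full)
        except OSError:
            # Broken symlink, permission problem, or the entry vanished.
            ftypes.append(3)
            continue
        if stat.S_ISREG(st.st_mode):
            ftypes.append(0)
        elif stat.S_ISDIR(st.st_mode):
            ftypes.append(1)
        elif stat.S_ISLNK(st.st_mode):
            ftypes.append(2)
        else:
            ftypes.append(3)
    return entries, ftypes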
Ejemplo n.º 59
0
    def singleBuilder(cls, options, settings, trees):
        vardbapi = trees["vartree"].dbapi
        mode = options.get("mode", "older")
        if str(mode).lower() not in ["newer", "older"]:
            raise SetConfigError(
                _("invalid 'mode' value %s (use either 'newer' or 'older')") %
                mode)

        formats = []
        if options.get("package") is not None:
            formats.append("package")
        if options.get("filestamp") is not None:
            formats.append("filestamp")
        if options.get("seconds") is not None:
            formats.append("seconds")
        if options.get("date") is not None:
            formats.append("date")

        if not formats:
            raise SetConfigError(
                _("none of these options specified: 'package', 'filestamp', 'seconds', 'date'"
                  ))
        elif len(formats) > 1:
            raise SetConfigError(
                _("no more than one of these options is allowed: 'package', 'filestamp', 'seconds', 'date'"
                  ))

        setformat = formats[0]

        if (setformat == "package"):
            package = options.get("package")
            try:
                cpv = vardbapi.match(package)[0]
                date, = vardbapi.aux_get(cpv, ('BUILD_TIME', ))
                date = int(date)
            except (IndexError, KeyError, ValueError):
                raise SetConfigError(
                    _("cannot determine installation date of package %s") %
                    package)
        elif (setformat == "filestamp"):
            filestamp = options.get("filestamp")
            try:
                date = int(os.stat(filestamp).st_mtime)
            except (OSError, ValueError):
                raise SetConfigError(
                    _("cannot determine 'filestamp' of '%s'") % filestamp)
        elif (setformat == "seconds"):
            try:
                date = int(options.get("seconds"))
            except ValueError:
                raise SetConfigError(_("option 'seconds' must be an integer"))
        else:
            dateopt = options.get("date")
            try:
                dateformat = options.get("dateformat", "%x %X")
                date = int(time.mktime(time.strptime(dateopt, dateformat)))
            except ValueError:
                raise SetConfigError(
                    _("'date=%s' does not match 'dateformat=%s'") %
                    (dateopt, dateformat))
        return DateSet(vardb=vardbapi, date=date, mode=mode)
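
For context, singleBuilder() receives its options from the package-set configuration (normally /etc/portage/sets.conf). A hypothetical entry selecting packages older than a fixed UNIX timestamp might look like the following; the section name and values are illustrative and assume the usual sets.conf syntax:

[built-before-cutoff]
class = portage.sets.dbapi.DateSet
mode = older
seconds = 1704067200

With mode = older the resulting set matches installed packages whose build time is earlier than the given date; mode = newer selects the opposite.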
Ejemplo n.º 60
0
def update_copyright(fn_path, year, pretend=False):
    """
    Check file for a Copyright statement, and update its year.  The
    patterns used for replacing copyrights are taken from echangelog.
    Only the first lines of each file that start with a hash ('#') are
    considered, until a line is found that doesn't start with a hash.
    Files are read and written in binary mode, so that this function
    will work correctly with files encoded in any character set, as
    long as the copyright statements consist of plain ASCII.

    @param fn_path: file path
    @type str
    @param year: current year
    @type str
    @param pretend: pretend mode
    @type bool
    @rtype: bool
    @return: True if copyright update was needed, False otherwise
    """

    try:
        fn_hdl = io.open(_unicode_encode(fn_path,
                                         encoding=_encodings['fs'],
                                         errors='strict'),
                         mode='rb')
    except EnvironmentError:
        return False

    orig_header = []
    new_header = []

    for line in fn_hdl:
        line_strip = line.strip()
        orig_header.append(line)
        if not line_strip or line_strip[:1] != b'#':
            new_header.append(line)
            break

        line = update_copyright_year(year, line)
        new_header.append(line)

    difflines = 0
    for diffline in difflib.unified_diff(
            [_unicode_decode(diffline) for diffline in orig_header],
            [_unicode_decode(diffline) for diffline in new_header],
            fromfile=fn_path,
            tofile=fn_path,
            n=0):
        util.writemsg_stdout(diffline, noiselevel=-1)
        difflines += 1
    util.writemsg_stdout("\n", noiselevel=-1)

    # unified diff has three lines to start with
    if difflines > 3 and not pretend:
        # write new file with changed header
        fnnew_fd, fnnew_path = mkstemp()
        f = io.open(fnnew_fd, mode='wb')
        for line in new_header:
            f.write(line)
        for line in fn_hdl:
            f.write(line)
        f.close()
        try:
            fn_stat = os.stat(fn_path)
        except OSError:
            fn_stat = None

        shutil.move(fnnew_path, fn_path)

        if fn_stat is None:
            util.apply_permissions(fn_path, mode=0o644)
        else:
            util.apply_stat_permissions(fn_path, fn_stat)
    fn_hdl.close()
    return difflines > 3
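
The year replacement itself is delegated to update_copyright_year(), which is not shown here. A simplified, self-contained sketch of that kind of rewrite (a hypothetical helper, not Portage's implementation), operating on the same binary header lines:

import re

def bump_copyright_year(line: bytes, year: str) -> bytes:
    """Rewrite "# Copyright 1999-2019 ..." to end in ``year``; a single-year
    statement such as "# Copyright 2019 ..." gains a range instead."""
    year_b = year.encode("ascii")
    # "# Copyright YYYY-YYYY ..." -> replace the second year.
    line = re.sub(rb"^(# Copyright \d{4})-\d{4}", rb"\1-" + year_b, line)
    # "# Copyright YYYY ..." (no range yet) -> turn it into a range.
    line = re.sub(rb"^(# Copyright \d{4}) ", rb"\1-" + year_b + rb" ", line)
    return line

# Example:
#   bump_copyright_year(b"# Copyright 1999-2019 Gentoo Authors\n", "2024")
#   -> b"# Copyright 1999-2024 Gentoo Authors\n"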