Example #1
    def _setitem(self, cpv, values):
        with tempfile.NamedTemporaryFile(delete=False,
                                         dir=self.location,
                                         prefix=cpv.replace('/', '_')) as temp:
            temp.close()
            try:
                with io.open(temp.name,
                             mode='w',
                             encoding=_encodings['repo.content'],
                             errors='backslashreplace') as myf:
                    for k in self._write_keys:
                        v = values.get(k)
                        if not v:
                            continue
                        # NOTE: This format string requires unicode_literals, so that
                        # k and v are coerced to unicode, in order to prevent TypeError
                        # when writing raw bytes to TextIOWrapper with Python 2.
                        myf.write("%s=%s\n" % (k, v))

                self._ensure_access(temp.name)

                # Update written, we can move it.
                new_fp = os.path.join(self.location, cpv)
                try:
                    os.rename(temp.name, new_fp)
                except OSError as e:
                    if e.errno == errno.ENOENT:
                        self._ensure_dirs(cpv)
                        os.rename(temp.name, new_fp)
                    else:
                        raise cache_errors.CacheCorruption(cpv, e)

            except EnvironmentError as e:
                os.remove(temp.name)
                raise cache_errors.CacheCorruption(cpv, e)
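
Every _setitem variant on this page uses the same write-to-a-temp-then-rename pattern: the temporary file is created inside the destination directory so that the final os.rename() is a same-filesystem, atomic replacement. A minimal standalone sketch of the idea (hypothetical helper, not portage code):

import os
import tempfile

def atomic_write(path, data):
	# Create the temp file next to the target so the final
	# os.rename() stays on one filesystem and is atomic.
	fd, tmp = tempfile.mkstemp(dir=os.path.dirname(path) or ".")
	try:
		with os.fdopen(fd, "w") as f:
			f.write(data)
		os.rename(tmp, path)  # atomically replaces any existing file
	except OSError:
		os.unlink(tmp)
		raise
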
Example #2
    def _chpathtool_exit(self, chpathtool):
        if self._final_exit(chpathtool) != os.EX_OK:
            self._unlock_builddir()
            self._writemsg_level("!!! Error Adjusting Prefix to %s\n" %
                                 (self.settings["EPREFIX"], ),
                                 noiselevel=-1,
                                 level=logging.ERROR)
            self.wait()
            return

        # We want to install in "our" prefix, not the binary one
        with io.open(_unicode_encode(os.path.join(self._infloc, "EPREFIX"),
                                     encoding=_encodings['fs'],
                                     errors='strict'),
                     mode='w',
                     encoding=_encodings['repo.content'],
                     errors='strict') as f:
            f.write(self.settings["EPREFIX"] + "\n")

        # Move the files to the correct location for merge.
        image_tmp_dir = os.path.join(self.settings["PORTAGE_BUILDDIR"],
                                     "image_tmp")
        build_d = os.path.join(self.settings["D"],
                               self._build_prefix.lstrip(os.sep))
        if not os.path.isdir(build_d):
            # Assume this is a virtual package or something.
            shutil.rmtree(self._image_dir)
            ensure_dirs(self.settings["ED"])
        else:
            os.rename(build_d, image_tmp_dir)
            shutil.rmtree(self._image_dir)
            ensure_dirs(os.path.dirname(self.settings["ED"].rstrip(os.sep)))
            os.rename(image_tmp_dir, self.settings["ED"])

        self.wait()
Example #3
def file_archive_post_process(archive):
    """Rename the archive file with the .dist.new suffix to a .dist suffix"""
    if os.path.lexists(archive + ".dist.new"):
        dest = "%s.dist" % archive
        if os.path.isdir(dest) and not os.path.islink(dest):
            _file_archive_rotate(dest)
        os.rename(archive + ".dist.new", dest)
Example #4
	def _migrate(self, update_location):
		"""
		When repo.user_location is a normal directory, migrate it to
		storage so that it can be replaced with a symlink. After migration,
		commit the content as the latest snapshot.
		"""
		try:
			os.rename(self._user_location, update_location)
		except OSError:
			portage.util.ensure_dirs(update_location)
			portage.util.apply_stat_permissions(update_location,
				os.stat(self._user_location))
			# It's probably on a different device, so copy it.
			yield self._check_call(['rsync', '-a',
				self._user_location + '/', update_location + '/'])

			# Remove the old copy so that symlink can be created. Run with
			# maximum privileges, since removal requires write access to
			# the parent directory.
			yield self._check_call(['rm', '-rf', self._user_location], privileged=True)

		self._update_location = update_location

		# Make this copy the latest snapshot
		yield self.commit_update()
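
The bare os.rename() above fails when update_location is on a different filesystem (errno EXDEV), which is what the except branch works around by copying with rsync and then removing the original. A hedged sketch of the same fallback using only the standard library:

import errno
import os
import shutil

def move_tree(src, dst):
	try:
		os.rename(src, dst)
	except OSError as e:
		if e.errno != errno.EXDEV:
			raise
		# Different device: copy the tree, then remove the original.
		shutil.copytree(src, dst, symlinks=True)
		shutil.rmtree(src)
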
Example #5
def file_archive_post_process(archive):
	"""Rename the archive file with the .dist.new suffix to a .dist suffix"""
	if os.path.lexists(archive + '.dist.new'):
		dest = "%s.dist" % archive
		if os.path.isdir(dest) and not os.path.islink(dest):
			_file_archive_rotate(dest)
		os.rename(archive + '.dist.new', dest)
Example #6
	def recompose_mem(self, xpdata, break_hardlinks=True):
		"""
		Update the xpak segment.
		@param xpdata: A new xpak segment to be written, like that returned
			from the xpak_mem() function.
		@param break_hardlinks: If hardlinks exist, create a copy in order
			to break them. This makes it safe to use hardlinks to create
			cheap snapshots of the repository, which is useful for solving
			race conditions on binhosts as described here:
			http://code.google.com/p/chromium-os/issues/detail?id=3225.
			Default is True.
		"""
		self.scan() # Don't care about condition... We'll rewrite the data anyway.

		if break_hardlinks and self.filestat.st_nlink > 1:
			tmp_fname = "%s.%d" % (self.file, os.getpid())
			shutil.copyfile(self.file, tmp_fname)
			try:
				portage.util.apply_stat_permissions(self.file, self.filestat)
			except portage.exception.OperationNotPermitted:
				pass
			os.rename(tmp_fname, self.file)

		myfile = open(_unicode_encode(self.file,
			encoding=_encodings['fs'], errors='strict'), 'ab+')
		if not myfile:
			raise IOError
		myfile.seek(-self.xpaksize,2) # 0,2 or -0,2 just mean EOF.
		myfile.truncate()
		myfile.write(xpdata+encodeint(len(xpdata)) + b'STOP')
		myfile.flush()
		myfile.close()
		return 1
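
recompose_mem() depends on the xpak trailer layout at the end of a binary package: the segment data, then a 4-byte length, then the literal bytes b'STOP'. Seeking to -self.xpaksize from EOF and truncating removes the old segment before the new one is appended. A hedged sketch of reading that trailer back, assuming the length field is a 32-bit big-endian integer as portage's encodeint()/decodeint() imply:

import struct

def read_xpak(path):
	with open(path, "rb") as f:
		f.seek(-8, 2)  # 4-byte length + b"STOP"
		length_data = f.read(4)
		if f.read(4) != b"STOP":
			raise ValueError("no xpak trailer found")
		(xpak_len,) = struct.unpack(">I", length_data)
		f.seek(-(8 + xpak_len), 2)  # back to the start of the segment
		return f.read(xpak_len)
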
Example #7
	def close(self):
		"""Closes the temporary file, copies permissions (if possible),
		and performs the atomic replacement via os.rename().  If the abort()
		method has been called, then the temp file is closed and removed."""
		f = object.__getattribute__(self, '_file')
		real_name = object.__getattribute__(self, '_real_name')
		if not f.closed:
			try:
				f.close()
				if not object.__getattribute__(self, '_aborted'):
					try:
						apply_stat_permissions(f.name, os.stat(real_name))
					except OperationNotPermitted:
						pass
					except FileNotFound:
						pass
					except OSError as oe: # from the above os.stat call
						if oe.errno in (errno.ENOENT, errno.EPERM):
							pass
						else:
							raise
					os.rename(f.name, real_name)
			finally:
				# Make sure we cleanup the temp file
				# even if an exception is raised.
				try:
					os.unlink(f.name)
				except OSError as oe:
					pass
Example #8
	def close(self):
		"""Closes the temporary file, copies permissions (if possible),
		and performs the atomic replacement via os.rename().  If the abort()
		method has been called, then the temp file is closed and removed."""
		f = object.__getattribute__(self, '_file')
		real_name = object.__getattribute__(self, '_real_name')
		if not f.closed:
			try:
				f.close()
				if not object.__getattribute__(self, '_aborted'):
					try:
						apply_stat_permissions(f.name, os.stat(real_name))
					except OperationNotPermitted:
						pass
					except FileNotFound:
						pass
					except OSError as oe: # from the above os.stat call
						if oe.errno in (errno.ENOENT, errno.EPERM):
							pass
						else:
							raise
					os.rename(f.name, real_name)
			finally:
				# Make sure we cleanup the temp file
				# even if an exception is raised.
				try:
					os.unlink(f.name)
				except OSError as oe:
					pass
Example #9
	def _setitem(self, cpv, values):
		s = cpv.rfind("/")
		fp=os.path.join(self.location,cpv[:s],".update.%i.%s" % (os.getpid(), cpv[s+1:]))
		try:
			myf = codecs.open(_unicode_encode(fp,
				encoding=_encodings['fs'], errors='strict'),
				mode='w', encoding=_encodings['repo.content'],
				errors='backslashreplace')
		except (OSError, IOError) as e:
			if errno.ENOENT == e.errno:
				try:
					self._ensure_dirs(cpv)
					myf = codecs.open(_unicode_encode(fp,
						encoding=_encodings['fs'], errors='strict'),
						mode='w', encoding=_encodings['repo.content'],
						errors='backslashreplace')
				except (OSError, IOError) as e:
					raise cache_errors.CacheCorruption(cpv, e)
			else:
				raise cache_errors.CacheCorruption(cpv, e)

		for x in self.auxdbkey_order:
			myf.write(values.get(x,"")+"\n")

		myf.close()
		self._ensure_access(fp, mtime=values["_mtime_"])
		#update written.  now we move it.
		new_fp = os.path.join(self.location,cpv)
		try:
			os.rename(fp, new_fp)
		except (OSError, IOError) as e:
			os.remove(fp)
			raise cache_errors.CacheCorruption(cpv, e)
Example #10
    def _migrate(self, update_location):
        """
		When repo.user_location is a normal directory, migrate it to
		storage so that it can be replaced with a symlink. After migration,
		commit the content as the latest snapshot.
		"""
        try:
            os.rename(self._user_location, update_location)
        except OSError:
            portage.util.ensure_dirs(update_location)
            portage.util.apply_stat_permissions(update_location,
                                                os.stat(self._user_location))
            # It's probably on a different device, so copy it.
            yield self._check_call([
                'rsync', '-a', self._user_location + '/', update_location + '/'
            ])

            # Remove the old copy so that symlink can be created. Run with
            # maximum privileges, since removal requires write access to
            # the parent directory.
            yield self._check_call(['rm', '-rf', self._user_location],
                                   privileged=True)

        self._update_location = update_location

        # Make this copy the latest snapshot
        yield self.commit_update()
Example #11
	def _hardlink_atomic(self, src, dest, dir_info):

		head, tail = os.path.split(dest)
		hardlink_tmp = os.path.join(head, ".%s._mirrordist_hardlink_.%s" % \
			(tail, os.getpid()))

		try:
			try:
				os.link(src, hardlink_tmp)
			except OSError as e:
				if e.errno != errno.EXDEV:
					msg = "hardlink %s from %s failed: %s" % \
						(self.distfile, dir_info, e)
					self.scheduler.output(msg + '\n', background=True,
						log_path=self._log_path)
					logging.error(msg)
				return False

			try:
				os.rename(hardlink_tmp, dest)
			except OSError as e:
				msg = "hardlink rename '%s' from %s failed: %s" % \
					(self.distfile, dir_info, e)
				self.scheduler.output(msg + '\n', background=True,
					log_path=self._log_path)
				logging.error(msg)
				return False
		finally:
			try:
				os.unlink(hardlink_tmp)
			except OSError:
				pass

		return True
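
The temporary-name-then-rename dance is the same atomicity trick as in the cache writers: dest is either absent or a complete link, never half-made, and the finally block's unlink is a no-op once the rename has consumed the temp link. A condensed standalone sketch (hypothetical helper):

import errno
import os

def hardlink_atomic(src, dest):
	tmp = "%s._hardlink_tmp_.%s" % (dest, os.getpid())
	try:
		os.link(src, tmp)
		os.rename(tmp, dest)  # atomic replace on the same filesystem
		return True
	except OSError as e:
		if e.errno == errno.EXDEV:
			return False  # different filesystem; caller must copy instead
		raise
	finally:
		try:
			os.unlink(tmp)  # ENOENT here is the normal success case
		except OSError:
			pass
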
Example #12
	def recompose_mem(self, xpdata, break_hardlinks=True):
		"""
		Update the xpak segment.
		@param xpdata: A new xpak segment to be written, like that returned
			from the xpak_mem() function.
		@param break_hardlinks: If hardlinks exist, create a copy in order
			to break them. This makes it safe to use hardlinks to create
			cheap snapshots of the repository, which is useful for solving
			race conditions on binhosts as described here:
			https://crbug.com/185031
			Default is True.
		"""
		self.scan() # Don't care about condition... We'll rewrite the data anyway.

		if break_hardlinks and self.filestat and self.filestat.st_nlink > 1:
			tmp_fname = "%s.%d" % (self.file, os.getpid())
			shutil.copyfile(self.file, tmp_fname)
			try:
				portage.util.apply_stat_permissions(self.file, self.filestat)
			except portage.exception.OperationNotPermitted:
				pass
			os.rename(tmp_fname, self.file)

		myfile = open(_unicode_encode(self.file,
			encoding=_encodings['fs'], errors='strict'), 'ab+')
		if not myfile:
			raise IOError
		myfile.seek(-self.xpaksize, 2) # 0,2 or -0,2 just mean EOF.
		myfile.truncate()
		myfile.write(xpdata + encodeint(len(xpdata)) + b'STOP')
		myfile.flush()
		myfile.close()
		return 1
Example #13
	def _chpathtool_exit(self, chpathtool):
		if self._final_exit(chpathtool) != os.EX_OK:
			self._unlock_builddir()
			self._writemsg_level("!!! Error Adjusting Prefix to %s\n" %
				(self.settings["EPREFIX"],),
				noiselevel=-1, level=logging.ERROR)
			self.wait()
			return

		# We want to install in "our" prefix, not the binary one
		with io.open(_unicode_encode(os.path.join(self._infloc, "EPREFIX"),
			encoding=_encodings['fs'], errors='strict'), mode='w',
			encoding=_encodings['repo.content'], errors='strict') as f:
			f.write(self.settings["EPREFIX"] + "\n")

		# Move the files to the correct location for merge.
		image_tmp_dir = os.path.join(
			self.settings["PORTAGE_BUILDDIR"], "image_tmp")
		build_d = os.path.join(self.settings["D"],
			self._build_prefix.lstrip(os.sep))
		if not os.path.isdir(build_d):
			# Assume this is a virtual package or something.
			shutil.rmtree(self._image_dir)
			ensure_dirs(self.settings["ED"])
		else:
			os.rename(build_d, image_tmp_dir)
			shutil.rmtree(self._image_dir)
			ensure_dirs(os.path.dirname(self.settings["ED"].rstrip(os.sep)))
			os.rename(image_tmp_dir, self.settings["ED"])

		self.wait()
Example #14
    def _fetch_digester_exit(self, digester):

        self._assert_current(digester)
        if self._was_cancelled():
            self.wait()
            return

        if digester.returncode != os.EX_OK:
            msg = "%s %s digester failed unexpectedly" % (
                self.distfile,
                self._fetch_tmp_dir_info,
            )
            self.scheduler.output(msg + "\n",
                                  background=True,
                                  log_path=self._log_path)
            logging.error(msg)
        else:
            bad_digest = self._find_bad_digest(digester.digests)
            if bad_digest is not None:
                msg = "%s has bad %s digest: expected %s, got %s" % (
                    self.distfile,
                    bad_digest,
                    self.digests[bad_digest],
                    digester.digests[bad_digest],
                )
                self.scheduler.output(msg + "\n",
                                      background=True,
                                      log_path=self._log_path)
                try:
                    os.unlink(self._fetch_tmp_file)
                except OSError:
                    pass
            else:
                dest = os.path.join(
                    self.config.options.distfiles,
                    self.config.layouts[0].get_path(self.distfile),
                )
                ensure_dirs(os.path.dirname(dest))
                try:
                    os.rename(self._fetch_tmp_file, dest)
                except OSError:
                    self._start_task(
                        FileCopier(
                            src_path=self._fetch_tmp_file,
                            dest_path=dest,
                            background=(self.background
                                        and self._log_path is not None),
                            logfile=self._log_path,
                        ),
                        self._fetch_copier_exit,
                    )
                    return
                else:
                    self._make_layout_links()
                    return

        self._try_next_mirror()
Example #15
    def _start(self):

        distfile_path = os.path.join(self.config.options.distfiles,
                                     self.distfile)

        if self.config.options.recycle_dir is not None:
            distfile_path = os.path.join(self.config.options.distfiles,
                                         self.distfile)
            recycle_path = os.path.join(self.config.options.recycle_dir,
                                        self.distfile)
            if self.config.options.dry_run:
                logging.info(("dry-run: move '%s' from "
                              "distfiles to recycle") % self.distfile)
            else:
                logging.debug(("move '%s' from "
                               "distfiles to recycle") % self.distfile)
                try:
                    os.rename(distfile_path, recycle_path)
                except OSError as e:
                    if e.errno != errno.EXDEV:
                        logging.error(
                            ("rename %s from distfiles to "
                             "recycle failed: %s") % (self.distfile, e))
                else:
                    self.returncode = os.EX_OK
                    self._async_wait()
                    return

                self._start_task(
                    FileCopier(src_path=distfile_path,
                               dest_path=recycle_path,
                               background=False), self._recycle_copier_exit)
                return

        success = True

        if self.config.options.dry_run:
            logging.info(("dry-run: delete '%s' from "
                          "distfiles") % self.distfile)
        else:
            logging.debug(("delete '%s' from " "distfiles") % self.distfile)
            try:
                os.unlink(distfile_path)
            except OSError as e:
                if e.errno not in (errno.ENOENT, errno.ESTALE):
                    logging.error("%s unlink failed in distfiles: %s" %
                                  (self.distfile, e))
                    success = False

        if success:
            self._success()
            self.returncode = os.EX_OK
        else:
            self.returncode = 1

        self._async_wait()
Example #16
	def _start(self):

		distfile_path = os.path.join(
			self.config.options.distfiles, self.distfile)

		if self.config.options.recycle_dir is not None:
			distfile_path = os.path.join(self.config.options.distfiles, self.distfile)
			recycle_path = os.path.join(
				self.config.options.recycle_dir, self.distfile)
			if self.config.options.dry_run:
				logging.info(("dry-run: move '%s' from "
					"distfiles to recycle") % self.distfile)
			else:
				logging.debug(("move '%s' from "
					"distfiles to recycle") % self.distfile)
				try:
					os.rename(distfile_path, recycle_path)
				except OSError as e:
					if e.errno != errno.EXDEV:
						logging.error(("rename %s from distfiles to "
							"recycle failed: %s") % (self.distfile, e))
				else:
					self.returncode = os.EX_OK
					self._async_wait()
					return

				self._start_task(
					FileCopier(src_path=distfile_path,
						dest_path=recycle_path,
						background=False),
					self._recycle_copier_exit)
				return

		success = True

		if self.config.options.dry_run:
			logging.info(("dry-run: delete '%s' from "
				"distfiles") % self.distfile)
		else:
			logging.debug(("delete '%s' from "
				"distfiles") % self.distfile)
			try:
				os.unlink(distfile_path)
			except OSError as e:
				if e.errno not in (errno.ENOENT, errno.ESTALE):
					logging.error("%s unlink failed in distfiles: %s" %
						(self.distfile, e))
					success = False

		if success:
			self._success()
			self.returncode = os.EX_OK
		else:
			self.returncode = 1

		self._async_wait()
Example #17
def rcs_archive(archive, curconf, newconf, mrgconf):
	"""Archive existing config in rcs (on trunk). Then, if mrgconf is
	specified and an old branch version exists, merge the user's changes
	and the distributed changes and put the result into mrgconf.  Lastly,
	if newconf was specified, leave it in the archive dir with a .dist.new
	suffix along with the last 1.1.1 branch version with a .dist suffix."""

	try:
		os.makedirs(os.path.dirname(archive))
	except OSError:
		pass

	try:
		curconf_st = os.lstat(curconf)
	except OSError:
		curconf_st = None

	if curconf_st is not None and \
		(stat.S_ISREG(curconf_st.st_mode) or
		stat.S_ISLNK(curconf_st.st_mode)):
		_archive_copy(curconf_st, curconf, archive)

	if os.path.lexists(archive + ',v'):
		os.system(RCS_LOCK + ' ' + archive)
	os.system(RCS_PUT + ' ' + archive)

	ret = 0
	mystat = None
	if newconf:
		try:
			mystat = os.lstat(newconf)
		except OSError:
			pass

	if mystat is not None and \
		(stat.S_ISREG(mystat.st_mode) or
		stat.S_ISLNK(mystat.st_mode)):
		os.system(RCS_GET + ' -r' + RCS_BRANCH + ' ' + archive)
		has_branch = os.path.lexists(archive)
		if has_branch:
			os.rename(archive, archive + '.dist')

		_archive_copy(mystat, newconf, archive)

		if has_branch:
			if mrgconf and os.path.isfile(archive) and \
				os.path.isfile(mrgconf):
				# This puts the results of the merge into mrgconf.
				ret = os.system(RCS_MERGE % (archive, mrgconf))
				os.chmod(mrgconf, mystat.st_mode)
				os.chown(mrgconf, mystat.st_uid, mystat.st_gid)
		os.rename(archive, archive + '.dist.new')

	return ret
Example #18
def rcs_archive(archive, curconf, newconf, mrgconf):
	"""Archive existing config in rcs (on trunk). Then, if mrgconf is
	specified and an old branch version exists, merge the user's changes
	and the distributed changes and put the result into mrgconf.  Lastly,
	if newconf was specified, leave it in the archive dir with a .dist.new
	suffix along with the last 1.1.1 branch version with a .dist suffix."""

	try:
		os.makedirs(os.path.dirname(archive))
	except OSError:
		pass

	try:
		curconf_st = os.lstat(curconf)
	except OSError:
		curconf_st = None

	if curconf_st is not None and \
		(stat.S_ISREG(curconf_st.st_mode) or
		stat.S_ISLNK(curconf_st.st_mode)):
		_archive_copy(curconf_st, curconf, archive)

	if os.path.lexists(archive + ',v'):
		os.system(RCS_LOCK + ' ' + archive)
	os.system(RCS_PUT + ' ' + archive)

	ret = 0
	mystat = None
	if newconf:
		try:
			mystat = os.lstat(newconf)
		except OSError:
			pass

	if mystat is not None and \
		(stat.S_ISREG(mystat.st_mode) or
		stat.S_ISLNK(mystat.st_mode)):
		os.system(RCS_GET + ' -r' + RCS_BRANCH + ' ' + archive)
		has_branch = os.path.lexists(archive)
		if has_branch:
			os.rename(archive, archive + '.dist')

		_archive_copy(mystat, newconf, archive)

		if has_branch:
			if mrgconf and os.path.isfile(archive) and \
				os.path.isfile(mrgconf):
				# This puts the results of the merge into mrgconf.
				ret = os.system(RCS_MERGE % (archive, mrgconf))
				os.chmod(mrgconf, mystat.st_mode)
				os.chown(mrgconf, mystat.st_uid, mystat.st_gid)
		os.rename(archive, archive + '.dist.new')

	return ret
Example #19
def rcs_archive_post_process(archive):
    """Check in the archive file with the .dist.new suffix on the branch
    and remove the one with the .dist suffix."""
    os.rename(archive + ".dist.new", archive)
    if os.path.lexists(archive + ".dist"):
        # Commit the last-distributed version onto the branch.
        os.system(RCS_LOCK + RCS_BRANCH + " " + archive)
        os.system(RCS_PUT + " -r" + RCS_BRANCH + " " + archive)
        os.unlink(archive + ".dist")
    else:
        # Forcefully commit the last-distributed version onto the branch.
        os.system(RCS_PUT + " -f -r" + RCS_BRANCH + " " + archive)
Example #20
def rcs_archive_post_process(archive):
	"""Check in the archive file with the .dist.new suffix on the branch
	and remove the one with the .dist suffix."""
	os.rename(archive + '.dist.new', archive)
	if os.path.lexists(archive + '.dist'):
		# Commit the last-distributed version onto the branch.
		os.system(RCS_LOCK + RCS_BRANCH + ' ' + archive)
		os.system(RCS_PUT + ' -r' + RCS_BRANCH + ' ' + archive)
		os.unlink(archive + '.dist')
	else:
		# Forcefully commit the last-distributed version onto the branch.
		os.system(RCS_PUT + ' -f -r' + RCS_BRANCH + ' ' + archive)
Example #21
def gpgsign(filename, repoman_settings, options):
	gpgcmd = repoman_settings.get("PORTAGE_GPG_SIGNING_COMMAND")
	if gpgcmd in [None, '']:
		raise MissingParameter("PORTAGE_GPG_SIGNING_COMMAND is unset!"
			" Is make.globals missing?")
	if "${PORTAGE_GPG_KEY}" in gpgcmd and \
		"PORTAGE_GPG_KEY" not in repoman_settings:
		raise MissingParameter("PORTAGE_GPG_KEY is unset!")
	if "${PORTAGE_GPG_DIR}" in gpgcmd:
		if "PORTAGE_GPG_DIR" not in repoman_settings:
			repoman_settings["PORTAGE_GPG_DIR"] = \
				os.path.expanduser("~/.gnupg")
			logging.info(
				"Automatically setting PORTAGE_GPG_DIR to '%s'" %
				repoman_settings["PORTAGE_GPG_DIR"])
		else:
			repoman_settings["PORTAGE_GPG_DIR"] = \
				os.path.expanduser(repoman_settings["PORTAGE_GPG_DIR"])
		if not os.access(repoman_settings["PORTAGE_GPG_DIR"], os.X_OK):
			raise portage.exception.InvalidLocation(
				"Unable to access directory: PORTAGE_GPG_DIR='%s'" %
				repoman_settings["PORTAGE_GPG_DIR"])
	gpgvars = {"FILE": filename}
	for k in ("PORTAGE_GPG_DIR", "PORTAGE_GPG_KEY"):
		v = repoman_settings.get(k)
		if v is not None:
			gpgvars[k] = v
	gpgcmd = portage.util.varexpand(gpgcmd, mydict=gpgvars)
	if options.pretend:
		print("(" + gpgcmd + ")")
	else:
		# Encode unicode manually for bug #310789.
		gpgcmd = portage.util.shlex_split(gpgcmd)

		if sys.hexversion < 0x3020000 and sys.hexversion >= 0x3000000 and \
			not os.path.isabs(gpgcmd[0]):
			# Python 3.1 _execvp throws TypeError for non-absolute executable
			# path passed as bytes (see http://bugs.python.org/issue8513).
			fullname = find_binary(gpgcmd[0])
			if fullname is None:
				raise portage.exception.CommandNotFound(gpgcmd[0])
			gpgcmd[0] = fullname

		gpgcmd = [
			_unicode_encode(arg, encoding=_encodings['fs'], errors='strict')
			for arg in gpgcmd]
		rValue = subprocess.call(gpgcmd)
		if rValue == os.EX_OK:
			os.rename(filename + ".asc", filename)
		else:
			raise portage.exception.PortageException(
				"!!! gpg exited with '" + str(rValue) + "' status")
Example #22
def gpgsign(filename, repoman_settings, options):
    gpgcmd = repoman_settings.get("PORTAGE_GPG_SIGNING_COMMAND")
    if gpgcmd in [None, '']:
        raise MissingParameter("PORTAGE_GPG_SIGNING_COMMAND is unset!"
                               " Is make.globals missing?")
    if "${PORTAGE_GPG_KEY}" in gpgcmd and \
     "PORTAGE_GPG_KEY" not in repoman_settings:
        raise MissingParameter("PORTAGE_GPG_KEY is unset!")
    if "${PORTAGE_GPG_DIR}" in gpgcmd:
        if "PORTAGE_GPG_DIR" not in repoman_settings:
            repoman_settings["PORTAGE_GPG_DIR"] = \
             os.path.expanduser("~/.gnupg")
            logging.info("Automatically setting PORTAGE_GPG_DIR to '%s'" %
                         repoman_settings["PORTAGE_GPG_DIR"])
        else:
            repoman_settings["PORTAGE_GPG_DIR"] = \
             os.path.expanduser(repoman_settings["PORTAGE_GPG_DIR"])
        if not os.access(repoman_settings["PORTAGE_GPG_DIR"], os.X_OK):
            raise portage.exception.InvalidLocation(
                "Unable to access directory: PORTAGE_GPG_DIR='%s'" %
                repoman_settings["PORTAGE_GPG_DIR"])
    gpgvars = {"FILE": filename}
    for k in ("PORTAGE_GPG_DIR", "PORTAGE_GPG_KEY"):
        v = repoman_settings.get(k)
        if v is not None:
            gpgvars[k] = v
    gpgcmd = portage.util.varexpand(gpgcmd, mydict=gpgvars)
    if options.pretend:
        print("(" + gpgcmd + ")")
    else:
        # Encode unicode manually for bug #310789.
        gpgcmd = portage.util.shlex_split(gpgcmd)

        if sys.hexversion < 0x3020000 and sys.hexversion >= 0x3000000 and \
         not os.path.isabs(gpgcmd[0]):
            # Python 3.1 _execvp throws TypeError for non-absolute executable
            # path passed as bytes (see https://bugs.python.org/issue8513).
            fullname = find_binary(gpgcmd[0])
            if fullname is None:
                raise portage.exception.CommandNotFound(gpgcmd[0])
            gpgcmd[0] = fullname

        gpgcmd = [
            _unicode_encode(arg, encoding=_encodings['fs'], errors='strict')
            for arg in gpgcmd
        ]
        rValue = subprocess.call(gpgcmd)
        if rValue == os.EX_OK:
            os.rename(filename + ".asc", filename)
        else:
            raise portage.exception.PortageException("!!! gpg exited with '" +
                                                     str(rValue) + "' status")
Example #23
    def _setitem(self, cpv, values):
        s = cpv.rfind("/")
        fp = os.path.join(self.location, cpv[:s],
                          ".update.%i.%s" % (os.getpid(), cpv[s + 1:]))
        try:
            myf = io.open(_unicode_encode(fp,
                                          encoding=_encodings['fs'],
                                          errors='strict'),
                          mode='w',
                          encoding=_encodings['repo.content'],
                          errors='backslashreplace')
        except (IOError, OSError) as e:
            if errno.ENOENT == e.errno:
                try:
                    self._ensure_dirs(cpv)
                    myf = io.open(_unicode_encode(fp,
                                                  encoding=_encodings['fs'],
                                                  errors='strict'),
                                  mode='w',
                                  encoding=_encodings['repo.content'],
                                  errors='backslashreplace')
                except (OSError, IOError) as e:
                    raise cache_errors.CacheCorruption(cpv, e)
            else:
                raise cache_errors.CacheCorruption(cpv, e)

        try:
            for k in self._write_keys:
                v = values.get(k)
                if not v:
                    continue
                # NOTE: This format string requires unicode_literals, so that
                # k and v are coerced to unicode, in order to prevent TypeError
                # when writing raw bytes to TextIOWrapper with Python 2.
                myf.write("%s=%s\n" % (k, v))
        finally:
            myf.close()
        self._ensure_access(fp)

        #update written.  now we move it.

        new_fp = os.path.join(self.location, cpv)
        try:
            os.rename(fp, new_fp)
        except (OSError, IOError) as e:
            os.remove(fp)
            raise cache_errors.CacheCorruption(cpv, e)
Example #24
def gpgsign(filename, repoman_settings, options):
    gpgcmd = repoman_settings.get("PORTAGE_GPG_SIGNING_COMMAND")
    if gpgcmd in [None, ""]:
        raise MissingParameter(
            "PORTAGE_GPG_SIGNING_COMMAND is unset!" " Is make.globals missing?"
        )
    if "${PORTAGE_GPG_KEY}" in gpgcmd and "PORTAGE_GPG_KEY" not in repoman_settings:
        raise MissingParameter("PORTAGE_GPG_KEY is unset!")
    if "${PORTAGE_GPG_DIR}" in gpgcmd:
        if "PORTAGE_GPG_DIR" not in repoman_settings:
            repoman_settings["PORTAGE_GPG_DIR"] = os.path.expanduser("~/.gnupg")
            logging.info(
                "Automatically setting PORTAGE_GPG_DIR to '%s'"
                % repoman_settings["PORTAGE_GPG_DIR"]
            )
        else:
            repoman_settings["PORTAGE_GPG_DIR"] = os.path.expanduser(
                repoman_settings["PORTAGE_GPG_DIR"]
            )
        if not os.access(repoman_settings["PORTAGE_GPG_DIR"], os.X_OK):
            raise portage.exception.InvalidLocation(
                "Unable to access directory: PORTAGE_GPG_DIR='%s'"
                % repoman_settings["PORTAGE_GPG_DIR"]
            )
    gpgvars = {"FILE": filename}
    for k in ("PORTAGE_GPG_DIR", "PORTAGE_GPG_KEY"):
        v = repoman_settings.get(k)
        if v is not None:
            gpgvars[k] = v
    gpgcmd = portage.util.varexpand(gpgcmd, mydict=gpgvars)
    if options.pretend:
        print("(" + gpgcmd + ")")
    else:
        # Encode unicode manually for bug #310789.
        gpgcmd = portage.util.shlex_split(gpgcmd)

        gpgcmd = [
            _unicode_encode(arg, encoding=_encodings["fs"], errors="strict")
            for arg in gpgcmd
        ]
        rValue = subprocess.call(gpgcmd)
        if rValue == os.EX_OK:
            os.rename(filename + ".asc", filename)
        else:
            raise portage.exception.PortageException(
                "!!! gpg exited with '" + str(rValue) + "' status"
            )
Example #25
    def _setitem(self, cpv, values):
        s = cpv.rfind("/")
        fp = os.path.join(self.location, cpv[:s], ".update.%i.%s" % (os.getpid(), cpv[s + 1 :]))
        try:
            myf = io.open(
                _unicode_encode(fp, encoding=_encodings["fs"], errors="strict"),
                mode="w",
                encoding=_encodings["repo.content"],
                errors="backslashreplace",
            )
        except (IOError, OSError) as e:
            if errno.ENOENT == e.errno:
                try:
                    self._ensure_dirs(cpv)
                    myf = io.open(
                        _unicode_encode(fp, encoding=_encodings["fs"], errors="strict"),
                        mode="w",
                        encoding=_encodings["repo.content"],
                        errors="backslashreplace",
                    )
                except (OSError, IOError) as e:
                    raise cache_errors.CacheCorruption(cpv, e)
            else:
                raise cache_errors.CacheCorruption(cpv, e)

        try:
            for k in self._write_keys:
                v = values.get(k)
                if not v:
                    continue
                # NOTE: This format string requires unicode_literals, so that
                # k and v are coerced to unicode, in order to prevent TypeError
                # when writing raw bytes to TextIOWrapper with Python 2.
                myf.write("%s=%s\n" % (k, v))
        finally:
            myf.close()
        self._ensure_access(fp)

        # update written.  now we move it.

        new_fp = os.path.join(self.location, cpv)
        try:
            os.rename(fp, new_fp)
        except (OSError, IOError) as e:
            os.remove(fp)
            raise cache_errors.CacheCorruption(cpv, e)
Example #26
	def _fetch_digester_exit(self, digester):

		self._assert_current(digester)
		if self._was_cancelled():
			self.wait()
			return

		if digester.returncode != os.EX_OK:
			msg = "%s %s digester failed unexpectedly" % \
			(self.distfile, self._fetch_tmp_dir_info)
			self.scheduler.output(msg + '\n', background=True,
				log_path=self._log_path)
			logging.error(msg)
		else:
			bad_digest = self._find_bad_digest(digester.digests)
			if bad_digest is not None:
				msg = "%s has bad %s digest: expected %s, got %s" % \
					(self.distfile, bad_digest,
					self.digests[bad_digest], digester.digests[bad_digest])
				self.scheduler.output(msg + '\n', background=True,
					log_path=self._log_path)
				try:
					os.unlink(self._fetch_tmp_file)
				except OSError:
					pass
			else:
				dest = os.path.join(self.config.options.distfiles, self.distfile)
				try:
					os.rename(self._fetch_tmp_file, dest)
				except OSError:
					self._start_task(
						FileCopier(src_path=self._fetch_tmp_file,
							dest_path=dest,
							background=(self.background and
								self._log_path is not None),
							logfile=self._log_path),
						self._fetch_copier_exit)
					return
				else:
					self._success()
					self.returncode = os.EX_OK
					self.wait()
					return

		self._try_next_mirror()
Example #27
    def _hardlink_atomic(self, src, dest, dir_info, symlink=False):

        head, tail = os.path.split(dest)
        hardlink_tmp = os.path.join(
            head, ".%s._mirrordist_hardlink_.%s" % (tail, portage.getpid()))

        try:
            try:
                if symlink:
                    os.symlink(src, hardlink_tmp)
                else:
                    os.link(src, hardlink_tmp)
            except OSError as e:
                if e.errno != errno.EXDEV:
                    msg = "hardlink %s from %s failed: %s" % (
                        self.distfile,
                        dir_info,
                        e,
                    )
                    self.scheduler.output(msg + "\n",
                                          background=True,
                                          log_path=self._log_path)
                    logging.error(msg)
                return False

            try:
                os.rename(hardlink_tmp, dest)
            except OSError as e:
                msg = "hardlink rename '%s' from %s failed: %s" % (
                    self.distfile,
                    dir_info,
                    e,
                )
                self.scheduler.output(msg + "\n",
                                      background=True,
                                      log_path=self._log_path)
                logging.error(msg)
                return False
        finally:
            try:
                os.unlink(hardlink_tmp)
            except OSError:
                pass

        return True
Example #28
    def _setitem(self, cpv, values):
        #		import pdb;pdb.set_trace()
        s = cpv.rfind("/")
        fp = os.path.join(self.location, cpv[:s],
                          ".update.%i.%s" % (os.getpid(), cpv[s + 1:]))
        try:
            myf = codecs.open(_unicode_encode(fp,
                                              encoding=_encodings['fs'],
                                              errors='strict'),
                              mode='w',
                              encoding=_encodings['repo.content'],
                              errors='backslashreplace')
        except (IOError, OSError) as e:
            if errno.ENOENT == e.errno:
                try:
                    self._ensure_dirs(cpv)
                    myf = codecs.open(_unicode_encode(
                        fp, encoding=_encodings['fs'], errors='strict'),
                                      mode='w',
                                      encoding=_encodings['repo.content'],
                                      errors='backslashreplace')
                except (OSError, IOError) as e:
                    raise cache_errors.CacheCorruption(cpv, e)
            else:
                raise cache_errors.CacheCorruption(cpv, e)

        try:
            for k in self._write_keys:
                v = values.get(k)
                if not v:
                    continue
                myf.write("%s=%s\n" % (k, v))
        finally:
            myf.close()
        self._ensure_access(fp)

        #update written.  now we move it.

        new_fp = os.path.join(self.location, cpv)
        try:
            os.rename(fp, new_fp)
        except (OSError, IOError) as e:
            os.remove(fp)
            raise cache_errors.CacheCorruption(cpv, e)
Example #29
    def _setitem(self, cpv, values):
        try:
            fd, fp = tempfile.mkstemp(dir=self.location)
        except EnvironmentError as e:
            raise cache_errors.CacheCorruption(cpv, e)

        with io.open(fd,
                     mode='w',
                     encoding=_encodings['repo.content'],
                     errors='backslashreplace') as myf:
            for k in self._write_keys:
                v = values.get(k)
                if not v:
                    continue
                # NOTE: This format string requires unicode_literals, so that
                # k and v are coerced to unicode, in order to prevent TypeError
                # when writing raw bytes to TextIOWrapper with Python 2.
                myf.write("%s=%s\n" % (k, v))

        self._ensure_access(fp)

        #update written.  now we move it.

        new_fp = os.path.join(self.location, cpv)
        try:
            os.rename(fp, new_fp)
        except EnvironmentError as e:
            success = False
            try:
                if errno.ENOENT == e.errno:
                    try:
                        self._ensure_dirs(cpv)
                        os.rename(fp, new_fp)
                        success = True
                    except EnvironmentError as e:
                        raise cache_errors.CacheCorruption(cpv, e)
                else:
                    raise cache_errors.CacheCorruption(cpv, e)
            finally:
                if not success:
                    os.remove(fp)
Example #30
    def _gpg_proc_exit(self, gpg_proc):
        if self._default_exit(gpg_proc) != os.EX_OK:
            self.wait()
            return

        rename_args = (self._manifest_path + ".asc", self._manifest_path)
        try:
            os.rename(*rename_args)
        except OSError as e:
            writemsg("!!! rename('%s', '%s'): %s\n" % (rename_args + (e,)),
                     noiselevel=-1)
            try:
                os.unlink(self._manifest_path + ".asc")
            except OSError:
                pass
            self.returncode = 1
        else:
            self.returncode = os.EX_OK

        self._current_task = None
        self.wait()
Example #31
	def _gpg_proc_exit(self, gpg_proc):
		if self._default_exit(gpg_proc) != os.EX_OK:
			self.wait()
			return

		rename_args = (self._manifest_path + ".asc", self._manifest_path)
		try:
			os.rename(*rename_args)
		except OSError as e:
			writemsg("!!! rename('%s', '%s'): %s\n" % (rename_args + (e,)),
				noiselevel=-1)
			try:
				os.unlink(self._manifest_path + ".asc")
			except OSError:
				pass
			self.returncode = 1
		else:
			self.returncode = os.EX_OK

		self._current_task = None
		self.wait()
Example #32
	def _setitem(self, cpv, values):
		try:
			fd, fp = tempfile.mkstemp(dir=self.location)
		except EnvironmentError as e:
			raise cache_errors.CacheCorruption(cpv, e)

		with io.open(fd, mode='w',
			encoding=_encodings['repo.content'],
			errors='backslashreplace') as myf:
			for k in self._write_keys:
				v = values.get(k)
				if not v:
					continue
				# NOTE: This format string requires unicode_literals, so that
				# k and v are coerced to unicode, in order to prevent TypeError
				# when writing raw bytes to TextIOWrapper with Python 2.
				myf.write("%s=%s\n" % (k, v))

		self._ensure_access(fp)

		#update written.  now we move it.

		new_fp = os.path.join(self.location,cpv)
		try:
			os.rename(fp, new_fp)
		except EnvironmentError as e:
			success = False
			try:
				if errno.ENOENT == e.errno:
					try:
						self._ensure_dirs(cpv)
						os.rename(fp, new_fp)
						success = True
					except EnvironmentError as e:
						raise cache_errors.CacheCorruption(cpv, e)
				else:
					raise cache_errors.CacheCorruption(cpv, e)
			finally:
				if not success:
					os.remove(fp)
Example #33
	def _setitem(self, cpv, values):
#		import pdb;pdb.set_trace()
		s = cpv.rfind("/")
		fp = os.path.join(self.location,cpv[:s],".update.%i.%s" % (os.getpid(), cpv[s+1:]))
		try:
			myf = codecs.open(_unicode_encode(fp,
				encoding=_encodings['fs'], errors='strict'),
				mode='w', encoding=_encodings['repo.content'],
				errors='backslashreplace')
		except (IOError, OSError) as e:
			if errno.ENOENT == e.errno:
				try:
					self._ensure_dirs(cpv)
					myf = codecs.open(_unicode_encode(fp,
						encoding=_encodings['fs'], errors='strict'),
						mode='w', encoding=_encodings['repo.content'],
						errors='backslashreplace')
				except (OSError, IOError) as e:
					raise cache_errors.CacheCorruption(cpv, e)
			else:
				raise cache_errors.CacheCorruption(cpv, e)

		try:
			for k in self._write_keys:
				v = values.get(k)
				if not v:
					continue
				myf.write("%s=%s\n" % (k, v))
		finally:
			myf.close()
		self._ensure_access(fp)

		#update written.  now we move it.

		new_fp = os.path.join(self.location,cpv)
		try:
			os.rename(fp, new_fp)
		except (OSError, IOError) as e:
			os.remove(fp)
			raise cache_errors.CacheCorruption(cpv, e)
Example #34
def _checksum_failure_temp_file(distdir, basename):
	"""
	First try to find a duplicate temp file with the same checksum and return
	that filename if available. Otherwise, use mkstemp to create a new unique
	filename._checksum_failure_.$RANDOM, rename the given file, and return the
	new filename. In any case, filename will be renamed or removed before this
	function returns a temp filename.
	"""

	filename = os.path.join(distdir, basename)
	size = os.stat(filename).st_size
	checksum = None
	tempfile_re = re.compile(re.escape(basename) + r'\._checksum_failure_\..*')
	for temp_filename in os.listdir(distdir):
		if not tempfile_re.match(temp_filename):
			continue
		temp_filename = os.path.join(distdir, temp_filename)
		try:
			if size != os.stat(temp_filename).st_size:
				continue
		except OSError:
			continue
		try:
			temp_checksum = perform_md5(temp_filename)
		except FileNotFound:
			# Apparently the temp file disappeared. Let it go.
			continue
		if checksum is None:
			checksum = perform_md5(filename)
		if checksum == temp_checksum:
			os.unlink(filename)
			return temp_filename

	fd, temp_filename = \
		tempfile.mkstemp("", basename + "._checksum_failure_.", distdir)
	os.close(fd)
	os.rename(filename, temp_filename)
	return temp_filename
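
perform_md5() here is portage's own checksum helper; a stand-in built on hashlib (hypothetical, chunked so large distfiles are not read into memory at once) would behave like:

import hashlib

def perform_md5(path, chunk_size=64 * 1024):
	md5 = hashlib.md5()
	with open(path, "rb") as f:
		for chunk in iter(lambda: f.read(chunk_size), b""):
			md5.update(chunk)
	return md5.hexdigest()
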
Example #35
    def _setitem(self, cpv, values):
        try:
            fd, fp = tempfile.mkstemp(dir=self.location)
        except EnvironmentError as e:
            raise cache_errors.CacheCorruption(cpv, e)

        with io.open(fd,
                     mode="w",
                     encoding=_encodings["repo.content"],
                     errors="backslashreplace") as myf:
            for k in self._write_keys:
                v = values.get(k)
                if not v:
                    continue
                myf.write("%s=%s\n" % (k, v))

        self._ensure_access(fp)

        # update written.  now we move it.

        new_fp = os.path.join(self.location, cpv)
        try:
            os.rename(fp, new_fp)
        except EnvironmentError as e:
            success = False
            try:
                if errno.ENOENT == e.errno:
                    try:
                        self._ensure_dirs(cpv)
                        os.rename(fp, new_fp)
                        success = True
                    except EnvironmentError as e:
                        raise cache_errors.CacheCorruption(cpv, e)
                else:
                    raise cache_errors.CacheCorruption(cpv, e)
            finally:
                if not success:
                    os.remove(fp)
Example #36
def _checksum_failure_temp_file(distdir, basename):
	"""
	First try to find a duplicate temp file with the same checksum and return
	that filename if available. Otherwise, use mkstemp to create a new unique
	filename._checksum_failure_.$RANDOM, rename the given file, and return the
	new filename. In any case, filename will be renamed or removed before this
	function returns a temp filename.
	"""

	filename = os.path.join(distdir, basename)
	size = os.stat(filename).st_size
	checksum = None
	tempfile_re = re.compile(re.escape(basename) + r'\._checksum_failure_\..*')
	for temp_filename in os.listdir(distdir):
		if not tempfile_re.match(temp_filename):
			continue
		temp_filename = os.path.join(distdir, temp_filename)
		try:
			if size != os.stat(temp_filename).st_size:
				continue
		except OSError:
			continue
		try:
			temp_checksum = perform_md5(temp_filename)
		except FileNotFound:
			# Apparently the temp file disappeared. Let it go.
			continue
		if checksum is None:
			checksum = perform_md5(filename)
		if checksum == temp_checksum:
			os.unlink(filename)
			return temp_filename

	fd, temp_filename = \
		tempfile.mkstemp("", basename + "._checksum_failure_.", distdir)
	os.close(fd)
	os.rename(filename, temp_filename)
	return temp_filename
Example #37
    def commit_update(self):
        update_location = self.current_update
        self._update_location = None
        try:
            snapshots = [int(name) for name in os.listdir(self._snapshots_dir)]
        except OSError:
            snapshots = []
            portage.util.ensure_dirs(self._snapshots_dir)
            portage.util.apply_stat_permissions(
                self._snapshots_dir, os.stat(self._storage_location))
        if snapshots:
            new_id = max(snapshots) + 1
        else:
            new_id = 1
        os.rename(update_location,
                  os.path.join(self._snapshots_dir, str(new_id)))
        new_symlink = self._latest_symlink + '.new'
        try:
            os.unlink(new_symlink)
        except OSError:
            pass
        os.symlink('snapshots/{}'.format(new_id), new_symlink)

        # If SyncManager.pre_sync creates an empty directory where
        # self._latest_symlink is suppose to be (which is normal if
        # sync-rcu-store-dir has been removed), then we need to remove
        # the directory or else rename will raise IsADirectoryError
        # when we try to replace the directory with a symlink.
        try:
            os.rmdir(self._latest_symlink)
        except OSError:
            pass

        os.rename(new_symlink, self._latest_symlink)

        try:
            user_location_correct = os.path.samefile(self._user_location,
                                                     self._latest_symlink)
        except OSError:
            user_location_correct = False

        if not user_location_correct:
            new_symlink = self._user_location + '.new'
            try:
                os.unlink(new_symlink)
            except OSError:
                pass
            os.symlink(self._latest_symlink, new_symlink)
            os.rename(new_symlink, self._user_location)

        coroutine_return()
        yield None
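
A symlink cannot be repointed in place, because os.symlink() refuses to overwrite an existing path; that is why commit_update() builds each link under a '.new' name and then os.rename()s it over the old one. A minimal sketch of the pattern:

import os

def replace_symlink(target, linkpath):
	tmp = linkpath + ".new"
	try:
		os.unlink(tmp)  # discard a stale temp link from a previous run
	except OSError:
		pass
	os.symlink(target, tmp)
	os.rename(tmp, linkpath)  # atomically repoints linkpath to target
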
Example #38
def _file_archive_rotate(archive):
	"""
	Rename archive to archive + '.1', and perform similar rotation
	for files up to archive + '.9'.

	@param archive: file path to archive
	@type archive: str
	"""

	max_suf = 0
	try:
		for max_suf, max_st, max_path in (
			(suf, os.lstat(path), path) for suf, path in (
			(suf, "%s.%s" % (archive, suf)) for suf in range(
			1, _ARCHIVE_ROTATE_MAX + 1))):
			pass
	except OSError as e:
		if e.errno not in (errno.ENOENT, errno.ESTALE):
			raise
		# There's already an unused suffix.
	else:
		# Free the max suffix in order to avoid possible problems
		# when we rename another file or directory to the same
		# location (see bug 256376).
		if stat.S_ISDIR(max_st.st_mode):
			# Removing a directory might destroy something important,
			# so rename it instead.
			head, tail = os.path.split(archive)
			placeholder = tempfile.NamedTemporaryFile(
				prefix="%s." % tail,
				dir=head)
			placeholder.close()
			os.rename(max_path, placeholder.name)
		else:
			os.unlink(max_path)

		# The max suffix is now unused.
		max_suf -= 1

	for suf in range(max_suf + 1, 1, -1):
		os.rename("%s.%s" % (archive, suf - 1), "%s.%s" % (archive, suf))

	os.rename(archive, "%s.1" % (archive,))
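
A hedged usage sketch for the rotation above (hypothetical paths; assumes _ARCHIVE_ROTATE_MAX caps the suffixes at '.9' as the docstring describes): rotate the existing archive out of the way before writing a fresh copy.

import os
import shutil

archive = "/var/lib/config-archive/make.conf"  # hypothetical path
if os.path.lexists(archive):
	# make.conf -> make.conf.1, make.conf.1 -> make.conf.2, ...
	_file_archive_rotate(archive)
shutil.copy2("/etc/make.conf", archive)
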
Example #39
def _file_archive_rotate(archive):
    """
    Rename archive to archive + '.1', and perform similar rotation
    for files up to archive + '.9'.

    @param archive: file path to archive
    @type archive: str
    """

    max_suf = 0
    try:
        for max_suf, max_st, max_path in (
            (suf, os.lstat(path), path)
                for suf, path in ((suf, "%s.%s" % (archive, suf))
                                  for suf in range(1, _ARCHIVE_ROTATE_MAX +
                                                   1))):
            pass
    except OSError as e:
        if e.errno not in (errno.ENOENT, errno.ESTALE):
            raise
        # There's already an unused suffix.
    else:
        # Free the max suffix in order to avoid possible problems
        # when we rename another file or directory to the same
        # location (see bug 256376).
        if stat.S_ISDIR(max_st.st_mode):
            # Removing a directory might destroy something important,
            # so rename it instead.
            head, tail = os.path.split(archive)
            placeholder = tempfile.NamedTemporaryFile(prefix="%s." % tail,
                                                      dir=head)
            placeholder.close()
            os.rename(max_path, placeholder.name)
        else:
            os.unlink(max_path)

        # The max suffix is now unused.
        max_suf -= 1

    for suf in range(max_suf + 1, 1, -1):
        os.rename("%s.%s" % (archive, suf - 1), "%s.%s" % (archive, suf))

    os.rename(archive, "%s.1" % (archive, ))
Example #40
    def commit_update(self):
        update_location = self.current_update
        self._update_location = None
        try:
            snapshots = [int(name) for name in os.listdir(self._snapshots_dir)]
        except OSError:
            snapshots = []
            portage.util.ensure_dirs(self._snapshots_dir)
            portage.util.apply_stat_permissions(
                self._snapshots_dir, os.stat(self._storage_location))
        if snapshots:
            new_id = max(snapshots) + 1
        else:
            new_id = 1
        os.rename(update_location,
                  os.path.join(self._snapshots_dir, str(new_id)))
        new_symlink = self._latest_symlink + '.new'
        try:
            os.unlink(new_symlink)
        except OSError:
            pass
        os.symlink('snapshots/{}'.format(new_id), new_symlink)
        os.rename(new_symlink, self._latest_symlink)

        try:
            user_location_correct = os.path.samefile(self._user_location,
                                                     self._latest_symlink)
        except OSError:
            user_location_correct = False

        if not user_location_correct:
            new_symlink = self._user_location + '.new'
            try:
                os.unlink(new_symlink)
            except OSError:
                pass
            os.symlink(self._latest_symlink, new_symlink)
            os.rename(new_symlink, self._user_location)

        coroutine_return()
        yield None
Example #41
	def commit_update(self):
		update_location = self.current_update
		self._update_location = None
		try:
			snapshots = [int(name) for name in os.listdir(self._snapshots_dir)]
		except OSError:
			snapshots = []
			portage.util.ensure_dirs(self._snapshots_dir)
			portage.util.apply_stat_permissions(self._snapshots_dir,
				os.stat(self._storage_location))
		if snapshots:
			new_id = max(snapshots) + 1
		else:
			new_id = 1
		os.rename(update_location, os.path.join(self._snapshots_dir, str(new_id)))
		new_symlink = self._latest_symlink + '.new'
		try:
			os.unlink(new_symlink)
		except OSError:
			pass
		os.symlink('snapshots/{}'.format(new_id), new_symlink)
		os.rename(new_symlink, self._latest_symlink)

		try:
			user_location_correct = os.path.samefile(self._user_location, self._latest_symlink)
		except OSError:
			user_location_correct = False

		if not user_location_correct:
			new_symlink = self._user_location + '.new'
			try:
				os.unlink(new_symlink)
			except OSError:
				pass
			os.symlink(self._latest_symlink, new_symlink)
			os.rename(new_symlink, self._user_location)

		coroutine_return()
		yield None
Example #42
    def _setitem(self, cpv, values):
        if "_eclasses_" in values:
            values = ProtectedDict(values)
            values["INHERITED"] = ' '.join(sorted(values["_eclasses_"]))

        new_content = []
        for k in self.auxdbkey_order:
            new_content.append(values.get(k, ''))
            new_content.append('\n')
        for i in range(magic_line_count - len(self.auxdbkey_order)):
            new_content.append('\n')
        new_content = ''.join(new_content)
        new_content = _unicode_encode(new_content,
                                      _encodings['repo.content'],
                                      errors='backslashreplace')

        new_fp = os.path.join(self.location, cpv)
        try:
            f = open(
                _unicode_encode(new_fp,
                                encoding=_encodings['fs'],
                                errors='strict'), 'rb')
        except EnvironmentError:
            pass
        else:
            try:
                try:
                    existing_st = os.fstat(f.fileno())
                    existing_content = f.read()
                finally:
                    f.close()
            except EnvironmentError:
                pass
            else:
                existing_mtime = existing_st[stat.ST_MTIME]
                if values['_mtime_'] == existing_mtime and \
                 existing_content == new_content:
                    return

                if self.raise_stat_collision and \
                 values['_mtime_'] == existing_mtime and \
                 len(new_content) == existing_st.st_size:
                    raise cache_errors.StatCollision(cpv, new_fp,
                                                     existing_mtime,
                                                     existing_st.st_size)

        s = cpv.rfind("/")
        fp = os.path.join(self.location, cpv[:s],
                          ".update.%i.%s" % (os.getpid(), cpv[s + 1:]))
        try:
            myf = open(
                _unicode_encode(fp, encoding=_encodings['fs'],
                                errors='strict'), 'wb')
        except EnvironmentError as e:
            if errno.ENOENT == e.errno:
                try:
                    self._ensure_dirs(cpv)
                    myf = open(
                        _unicode_encode(fp,
                                        encoding=_encodings['fs'],
                                        errors='strict'), 'wb')
                except EnvironmentError as e:
                    raise cache_errors.CacheCorruption(cpv, e)
            else:
                raise cache_errors.CacheCorruption(cpv, e)

        try:
            myf.write(new_content)
        finally:
            myf.close()
        self._ensure_access(fp, mtime=values["_mtime_"])

        try:
            os.rename(fp, new_fp)
        except EnvironmentError as e:
            try:
                os.unlink(fp)
            except EnvironmentError:
                pass
            raise cache_errors.CacheCorruption(cpv, e)
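
The writer above treats equal mtime plus equal byte length as a hazard: consumers validate flat-file cache entries by (mtime, size), so an update that matches both but differs in content would go unnoticed, and raise_stat_collision surfaces it as StatCollision instead. A minimal sketch of that predicate, assuming the caller has already ruled out byte-identical content (the helper name is illustrative):

    import os
    import stat

    def is_stat_collision(path, new_content, new_mtime):
        """True if new_content would look unchanged to a validator
        that compares only mtime and size."""
        try:
            st = os.stat(path)
        except OSError:
            return False  # nothing on disk, so no collision is possible
        return (st[stat.ST_MTIME] == new_mtime
                and st.st_size == len(new_content))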
Example #43
    def testSyncLocal(self):
        debug = False

        skip_reason = self._must_skip()
        if skip_reason:
            self.portage_skip = skip_reason
            self.assertFalse(True, skip_reason)
            return

        repos_conf = textwrap.dedent("""
			[test_repo]
			location = %(EPREFIX)s/var/repositories/test_repo
			sync-type = %(sync-type)s
			sync-uri = file://%(EPREFIX)s/var/repositories/test_repo_sync
			auto-sync = yes
		""")

        profile = {
            "eapi": ("5", ),
            "package.use.stable.mask": ("dev-libs/A flag", )
        }

        ebuilds = {"dev-libs/A-0": {}}

        playground = ResolverPlayground(ebuilds=ebuilds,
                                        profile=profile,
                                        user_config={},
                                        debug=debug)
        settings = playground.settings
        eprefix = settings["EPREFIX"]
        eroot = settings["EROOT"]
        homedir = os.path.join(eroot, "home")
        distdir = os.path.join(eprefix, "distdir")
        repo = settings.repositories["test_repo"]
        metadata_dir = os.path.join(repo.location, "metadata")

        cmds = {}
        for cmd in ("emerge", "emaint"):
            cmds[cmd] = (portage._python_interpreter, "-b", "-Wd",
                         os.path.join(self.bindir, cmd))

        git_binary = find_binary("git")
        git_cmd = (git_binary, )

        committer_name = "Gentoo Dev"
        committer_email = "*****@*****.**"

        def change_sync_type(sync_type):
            env["PORTAGE_REPOSITORIES"] = repos_conf % \
             {"EPREFIX": eprefix, "sync-type": sync_type}

        sync_cmds = (
            (homedir, cmds["emerge"] + ("--sync", )),
            (homedir, lambda: self.assertTrue(
                os.path.exists(os.path.join(repo.location, "dev-libs", "A")),
                "dev-libs/A expected, but missing")),
            (homedir, cmds["emaint"] + ("sync", "-A")),
        )

        rename_repo = (
            (homedir,
             lambda: os.rename(repo.location, repo.location + "_sync")), )

        delete_sync_repo = ((homedir,
                             lambda: shutil.rmtree(repo.location + "_sync")), )

        git_repo_create = (
            (repo.location, git_cmd + (
                "config",
                "--global",
                "user.name",
                committer_name,
            )),
            (repo.location, git_cmd + (
                "config",
                "--global",
                "user.email",
                committer_email,
            )),
            (repo.location, git_cmd + ("init-db", )),
            (repo.location, git_cmd + ("add", ".")),
            (repo.location,
             git_cmd + ("commit", "-a", "-m", "add whole repo")),
        )

        sync_type_git = ((homedir, lambda: change_sync_type("git")), )

        pythonpath = os.environ.get("PYTHONPATH")
        if pythonpath is not None and not pythonpath.strip():
            pythonpath = None
        if pythonpath is not None and \
         pythonpath.split(":")[0] == PORTAGE_PYM_PATH:
            pass
        else:
            if pythonpath is None:
                pythonpath = ""
            else:
                pythonpath = ":" + pythonpath
            pythonpath = PORTAGE_PYM_PATH + pythonpath

        env = {
            "PORTAGE_OVERRIDE_EPREFIX": eprefix,
            "DISTDIR": distdir,
            "GENTOO_COMMITTER_NAME": committer_name,
            "GENTOO_COMMITTER_EMAIL": committer_email,
            "HOME": homedir,
            "PATH": os.environ["PATH"],
            "PORTAGE_GRPNAME": os.environ["PORTAGE_GRPNAME"],
            "PORTAGE_USERNAME": os.environ["PORTAGE_USERNAME"],
            "PORTAGE_REPOSITORIES": repos_conf % {
                "EPREFIX": eprefix,
                "sync-type": "rsync"
            },
            "PYTHONPATH": pythonpath,
        }

        if os.environ.get("SANDBOX_ON") == "1":
            # avoid problems from nested sandbox instances
            env["FEATURES"] = "-sandbox -usersandbox"

        dirs = [homedir, metadata_dir]
        try:
            for d in dirs:
                ensure_dirs(d)

            timestamp_path = os.path.join(metadata_dir, 'timestamp.chk')
            with open(timestamp_path, 'w') as f:
                f.write(time.strftime('%s\n' % TIMESTAMP_FORMAT,
                                      time.gmtime()))

            if debug:
                # The subprocess inherits both stdout and stderr, for
                # debugging purposes.
                stdout = None
            else:
                # The subprocess inherits stderr so that any warnings
                # triggered by python -Wd will be visible.
                stdout = subprocess.PIPE

            for cwd, cmd in rename_repo + sync_cmds + \
             delete_sync_repo + git_repo_create + sync_type_git + \
             rename_repo + sync_cmds:

                if hasattr(cmd, '__call__'):
                    cmd()
                    continue

                abs_cwd = os.path.join(repo.location, cwd)
                proc = subprocess.Popen(cmd,
                                        cwd=abs_cwd,
                                        env=env,
                                        stdout=stdout)

                if debug:
                    proc.wait()
                else:
                    output = proc.stdout.readlines()
                    proc.wait()
                    proc.stdout.close()
                    if proc.returncode != os.EX_OK:
                        for line in output:
                            sys.stderr.write(_unicode_decode(line))

                self.assertEqual(os.EX_OK, proc.returncode,
                                 "%s failed in %s" % (
                                     cmd,
                                     cwd,
                                 ))

        finally:
            playground.cleanup()
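
The sync and simple tests normalize PYTHONPATH the same way: keep whatever the caller exported, but guarantee that PORTAGE_PYM_PATH is the first entry so the subprocesses import the portage under test. The inline logic, extracted as a small helper (illustrative; the tests repeat it inline):

    import os

    def pym_first_pythonpath(pym_path):
        """Return a PYTHONPATH value with pym_path as the first entry."""
        current = os.environ.get("PYTHONPATH")
        if current is not None and not current.strip():
            current = None  # treat a blank value as unset
        if current is not None and current.split(":")[0] == pym_path:
            return current  # already first, keep untouched
        if current is None:
            return pym_path
        return pym_path + ":" + current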
Example #44
    def _testEbuildFetch(
        self,
        loop,
        scheme,
        host,
        orig_distfiles,
        ebuilds,
        content,
        server,
        playground,
        ro_distdir,
    ):
        mirror_layouts = (
            (
                "[structure]",
                "0=filename-hash BLAKE2B 8",
                "1=flat",
            ),
            (
                "[structure]",
                "1=filename-hash BLAKE2B 8",
                "0=flat",
            ),
            (
                "[structure]",
                "0=content-hash SHA512 8:8:8",
                "1=flat",
            ),
        )

        fetchcommand = portage.util.shlex_split(
            playground.settings["FETCHCOMMAND"])
        fetch_bin = portage.process.find_binary(fetchcommand[0])
        if fetch_bin is None:
            self.skipTest("FETCHCOMMAND not found: {}".format(
                playground.settings["FETCHCOMMAND"]))
        eubin = os.path.join(playground.eprefix, "usr", "bin")
        os.symlink(fetch_bin, os.path.join(eubin, os.path.basename(fetch_bin)))
        resumecommand = portage.util.shlex_split(
            playground.settings["RESUMECOMMAND"])
        resume_bin = portage.process.find_binary(resumecommand[0])
        if resume_bin is None:
            self.skipTest("RESUMECOMMAND not found: {}".format(
                playground.settings["RESUMECOMMAND"]))
        if resume_bin != fetch_bin:
            os.symlink(resume_bin,
                       os.path.join(eubin, os.path.basename(resume_bin)))
        root_config = playground.trees[playground.eroot]["root_config"]
        portdb = root_config.trees["porttree"].dbapi

        def run_async(func, *args, **kwargs):
            with ForkExecutor(loop=loop) as executor:
                return loop.run_until_complete(
                    loop.run_in_executor(
                        executor, functools.partial(func, *args, **kwargs)))

        for layout_lines in mirror_layouts:
            settings = config(clone=playground.settings)
            layout_data = "".join("{}\n".format(line) for line in layout_lines)
            mirror_conf = MirrorLayoutConfig()
            mirror_conf.read_from_file(io.StringIO(layout_data))
            layouts = mirror_conf.get_all_layouts()
            content["/distfiles/layout.conf"] = layout_data.encode("utf8")
            distfiles = {}
            for k, v in orig_distfiles.items():
                filename = DistfileName(
                    k,
                    digests=dict((algo, checksum_str(v, hashname=algo))
                                 for algo in MANIFEST2_HASH_DEFAULTS),
                )
                distfiles[filename] = v

                # mirror path
                for layout in layouts:
                    content["/distfiles/" + layout.get_path(filename)] = v
                # upstream path
                content["/distfiles/{}.txt".format(k)] = v

            shutil.rmtree(settings["DISTDIR"])
            os.makedirs(settings["DISTDIR"])
            with open(os.path.join(settings["DISTDIR"], "layout.conf"),
                      "wt") as f:
                f.write(layout_data)

            if any(
                    isinstance(layout, ContentHashLayout)
                    for layout in layouts):
                content_db = os.path.join(playground.eprefix,
                                          "var/db/emirrordist/content.db")
                os.makedirs(os.path.dirname(content_db), exist_ok=True)
                try:
                    os.unlink(content_db)
                except OSError:
                    pass
            else:
                content_db = None

            # Demonstrate that fetch preserves a stale file in DISTDIR when no digests are given.
            foo_uri = {
                "foo": ("{scheme}://{host}:{port}/distfiles/foo".format(
                    scheme=scheme, host=host, port=server.server_port), )
            }
            foo_path = os.path.join(settings["DISTDIR"], "foo")
            foo_stale_content = b"stale content\n"
            with open(foo_path, "wb") as f:
                f.write(b"stale content\n")

            self.assertTrue(
                bool(run_async(fetch, foo_uri, settings, try_mirrors=False)))

            with open(foo_path, "rb") as f:
                self.assertEqual(f.read(), foo_stale_content)
            with open(foo_path, "rb") as f:
                self.assertNotEqual(f.read(), distfiles["foo"])

            # Use force=True to update the stale file.
            self.assertTrue(
                bool(
                    run_async(fetch,
                              foo_uri,
                              settings,
                              try_mirrors=False,
                              force=True)))

            with open(foo_path, "rb") as f:
                self.assertEqual(f.read(), distfiles["foo"])

            # Test force=True with FEATURES=skiprocheck, using read-only DISTDIR.
            # FETCHCOMMAND is set to temporarily chmod +w DISTDIR. Note that
            # FETCHCOMMAND must perform atomic rename itself due to read-only
            # DISTDIR.
            with open(foo_path, "wb") as f:
                f.write(b"stale content\n")
            orig_fetchcommand = settings["FETCHCOMMAND"]
            orig_distdir_mode = os.stat(settings["DISTDIR"]).st_mode
            temp_fetchcommand = os.path.join(eubin, "fetchcommand")
            with open(temp_fetchcommand, "w") as f:
                f.write("""
					set -e
					URI=$1
					DISTDIR=$2
					FILE=$3
					trap 'chmod a-w "${DISTDIR}"' EXIT
					chmod ug+w "${DISTDIR}"
					%s
					mv -f "${DISTDIR}/${FILE}.__download__" "${DISTDIR}/${FILE}"
				""" % orig_fetchcommand.replace("${FILE}", "${FILE}.__download__"))
            settings["FETCHCOMMAND"] = '"%s" "%s" "${URI}" "${DISTDIR}" "${FILE}"' % (
                BASH_BINARY, temp_fetchcommand)
            settings.features.add("skiprocheck")
            settings.features.remove("distlocks")
            os.chmod(settings["DISTDIR"], 0o555)
            try:
                self.assertTrue(
                    bool(
                        run_async(fetch,
                                  foo_uri,
                                  settings,
                                  try_mirrors=False,
                                  force=True)))
            finally:
                settings["FETCHCOMMAND"] = orig_fetchcommand
                os.chmod(settings["DISTDIR"], orig_distdir_mode)
                settings.features.remove("skiprocheck")
                settings.features.add("distlocks")
                os.unlink(temp_fetchcommand)

            with open(foo_path, "rb") as f:
                self.assertEqual(f.read(), distfiles["foo"])

            # Test emirrordist invocation.
            emirrordist_cmd = (
                portage._python_interpreter,
                "-b",
                "-Wd",
                os.path.join(self.bindir, "emirrordist"),
                "--distfiles",
                settings["DISTDIR"],
                "--config-root",
                settings["EPREFIX"],
                "--delete",
                "--repositories-configuration",
                settings.repositories.config_string(),
                "--repo",
                "test_repo",
                "--mirror",
            )

            if content_db is not None:
                emirrordist_cmd = emirrordist_cmd + (
                    "--content-db",
                    content_db,
                )

            env = settings.environ()
            env["PYTHONPATH"] = ":".join(
                filter(
                    None,
                    [PORTAGE_PYM_PATH] +
                    os.environ.get("PYTHONPATH", "").split(":"),
                ))

            for k in distfiles:
                try:
                    os.unlink(os.path.join(settings["DISTDIR"], k))
                except OSError:
                    pass

            proc = loop.run_until_complete(
                asyncio.create_subprocess_exec(*emirrordist_cmd, env=env))
            self.assertEqual(loop.run_until_complete(proc.wait()), 0)

            for k in distfiles:
                with open(
                        os.path.join(settings["DISTDIR"],
                                     layouts[0].get_path(k)), "rb") as f:
                    self.assertEqual(f.read(), distfiles[k])

            if content_db is not None:
                loop.run_until_complete(
                    self._test_content_db(
                        emirrordist_cmd,
                        env,
                        layouts,
                        content_db,
                        distfiles,
                        settings,
                        portdb,
                    ))

            # Tests only work with one ebuild at a time, so the config
            # pool only needs a single config instance.
            class config_pool:
                @staticmethod
                def allocate():
                    return settings

                @staticmethod
                def deallocate(settings):
                    pass

            def async_fetch(pkg, ebuild_path):
                fetcher = EbuildFetcher(
                    config_pool=config_pool,
                    ebuild_path=ebuild_path,
                    fetchonly=False,
                    fetchall=True,
                    pkg=pkg,
                    scheduler=loop,
                )
                fetcher.start()
                return fetcher.async_wait()

            for cpv in ebuilds:
                metadata = dict(
                    zip(
                        Package.metadata_keys,
                        portdb.aux_get(cpv, Package.metadata_keys),
                    ))

                pkg = Package(
                    built=False,
                    cpv=cpv,
                    installed=False,
                    metadata=metadata,
                    root_config=root_config,
                    type_name="ebuild",
                )

                settings.setcpv(pkg)
                ebuild_path = portdb.findname(pkg.cpv)
                portage.doebuild_environment(ebuild_path,
                                             "fetch",
                                             settings=settings,
                                             db=portdb)

                # Test good files in DISTDIR
                for k in settings["AA"].split():
                    os.stat(os.path.join(settings["DISTDIR"], k))
                self.assertEqual(
                    loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
                for k in settings["AA"].split():
                    with open(os.path.join(settings["DISTDIR"], k), "rb") as f:
                        self.assertEqual(f.read(), distfiles[k])

                # Test digestgen with fetch
                os.unlink(
                    os.path.join(os.path.dirname(ebuild_path), "Manifest"))
                for k in settings["AA"].split():
                    os.unlink(os.path.join(settings["DISTDIR"], k))
                with ForkExecutor(loop=loop) as executor:
                    self.assertTrue(
                        bool(
                            loop.run_until_complete(
                                loop.run_in_executor(
                                    executor,
                                    functools.partial(digestgen,
                                                      mysettings=settings,
                                                      myportdb=portdb),
                                ))))
                for k in settings["AA"].split():
                    with open(os.path.join(settings["DISTDIR"], k), "rb") as f:
                        self.assertEqual(f.read(), distfiles[k])

                # Test missing files in DISTDIR
                for k in settings["AA"].split():
                    os.unlink(os.path.join(settings["DISTDIR"], k))
                self.assertEqual(
                    loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
                for k in settings["AA"].split():
                    with open(os.path.join(settings["DISTDIR"], k), "rb") as f:
                        self.assertEqual(f.read(), distfiles[k])

                # Test empty files in DISTDIR
                for k in settings["AA"].split():
                    file_path = os.path.join(settings["DISTDIR"], k)
                    with open(file_path, "wb") as f:
                        pass
                    self.assertEqual(os.stat(file_path).st_size, 0)
                self.assertEqual(
                    loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
                for k in settings["AA"].split():
                    with open(os.path.join(settings["DISTDIR"], k), "rb") as f:
                        self.assertEqual(f.read(), distfiles[k])

                # Test non-empty files containing null bytes in DISTDIR
                for k in settings["AA"].split():
                    file_path = os.path.join(settings["DISTDIR"], k)
                    with open(file_path, "wb") as f:
                        f.write(len(distfiles[k]) * b"\0")
                    self.assertEqual(
                        os.stat(file_path).st_size, len(distfiles[k]))
                self.assertEqual(
                    loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
                for k in settings["AA"].split():
                    with open(os.path.join(settings["DISTDIR"], k), "rb") as f:
                        self.assertEqual(f.read(), distfiles[k])

                # Test PORTAGE_RO_DISTDIRS
                settings["PORTAGE_RO_DISTDIRS"] = '"{}"'.format(ro_distdir)
                orig_fetchcommand = settings["FETCHCOMMAND"]
                orig_resumecommand = settings["RESUMECOMMAND"]
                try:
                    settings["FETCHCOMMAND"] = settings["RESUMECOMMAND"] = ""
                    for k in settings["AA"].split():
                        file_path = os.path.join(settings["DISTDIR"], k)
                        os.rename(file_path, os.path.join(ro_distdir, k))
                    self.assertEqual(
                        loop.run_until_complete(async_fetch(pkg, ebuild_path)),
                        0)
                    for k in settings["AA"].split():
                        file_path = os.path.join(settings["DISTDIR"], k)
                        self.assertTrue(os.path.islink(file_path))
                        with open(file_path, "rb") as f:
                            self.assertEqual(f.read(), distfiles[k])
                        os.unlink(file_path)
                finally:
                    settings.pop("PORTAGE_RO_DISTDIRS")
                    settings["FETCHCOMMAND"] = orig_fetchcommand
                    settings["RESUMECOMMAND"] = orig_resumecommand

                # Test local filesystem in GENTOO_MIRRORS
                orig_mirrors = settings["GENTOO_MIRRORS"]
                orig_fetchcommand = settings["FETCHCOMMAND"]
                try:
                    settings["GENTOO_MIRRORS"] = ro_distdir
                    settings["FETCHCOMMAND"] = settings["RESUMECOMMAND"] = ""
                    self.assertEqual(
                        loop.run_until_complete(async_fetch(pkg, ebuild_path)),
                        0)
                    for k in settings["AA"].split():
                        with open(os.path.join(settings["DISTDIR"], k),
                                  "rb") as f:
                            self.assertEqual(f.read(), distfiles[k])
                finally:
                    settings["GENTOO_MIRRORS"] = orig_mirrors
                    settings["FETCHCOMMAND"] = orig_fetchcommand
                    settings["RESUMECOMMAND"] = orig_resumecommand

                # Test readonly DISTDIR
                orig_distdir_mode = os.stat(settings["DISTDIR"]).st_mode
                try:
                    os.chmod(settings["DISTDIR"], 0o555)
                    self.assertEqual(
                        loop.run_until_complete(async_fetch(pkg, ebuild_path)),
                        0)
                    for k in settings["AA"].split():
                        with open(os.path.join(settings["DISTDIR"], k),
                                  "rb") as f:
                            self.assertEqual(f.read(), distfiles[k])
                finally:
                    os.chmod(settings["DISTDIR"], orig_distdir_mode)

                # Test parallel-fetch mode
                settings["PORTAGE_PARALLEL_FETCHONLY"] = "1"
                try:
                    self.assertEqual(
                        loop.run_until_complete(async_fetch(pkg, ebuild_path)),
                        0)
                    for k in settings["AA"].split():
                        with open(os.path.join(settings["DISTDIR"], k),
                                  "rb") as f:
                            self.assertEqual(f.read(), distfiles[k])
                    for k in settings["AA"].split():
                        os.unlink(os.path.join(settings["DISTDIR"], k))
                    self.assertEqual(
                        loop.run_until_complete(async_fetch(pkg, ebuild_path)),
                        0)
                    for k in settings["AA"].split():
                        with open(os.path.join(settings["DISTDIR"], k),
                                  "rb") as f:
                            self.assertEqual(f.read(), distfiles[k])
                finally:
                    settings.pop("PORTAGE_PARALLEL_FETCHONLY")

                # Test RESUMECOMMAND
                orig_resume_min_size = settings["PORTAGE_FETCH_RESUME_MIN_SIZE"]
                try:
                    settings["PORTAGE_FETCH_RESUME_MIN_SIZE"] = "2"
                    for k in settings["AA"].split():
                        file_path = os.path.join(settings["DISTDIR"], k)
                        os.unlink(file_path)
                        with open(file_path + _download_suffix, "wb") as f:
                            f.write(distfiles[k][:2])
                    self.assertEqual(
                        loop.run_until_complete(async_fetch(pkg, ebuild_path)),
                        0)
                    for k in settings["AA"].split():
                        with open(os.path.join(settings["DISTDIR"], k),
                                  "rb") as f:
                            self.assertEqual(f.read(), distfiles[k])
                finally:
                    settings["PORTAGE_FETCH_RESUME_MIN_SIZE"] = orig_resume_min_size

                # Test readonly DISTDIR + skiprocheck, with FETCHCOMMAND set to temporarily chmod DISTDIR
                orig_fetchcommand = settings["FETCHCOMMAND"]
                orig_distdir_mode = os.stat(settings["DISTDIR"]).st_mode
                for k in settings["AA"].split():
                    os.unlink(os.path.join(settings["DISTDIR"], k))
                try:
                    os.chmod(settings["DISTDIR"], 0o555)
                    settings["FETCHCOMMAND"] = (
                        '"%s" -c "chmod ug+w \\"${DISTDIR}\\"; %s; status=\\$?; chmod a-w \\"${DISTDIR}\\"; exit \\$status"'
                        % (BASH_BINARY, orig_fetchcommand.replace('"', '\\"')))
                    settings.features.add("skiprocheck")
                    settings.features.remove("distlocks")
                    self.assertEqual(
                        loop.run_until_complete(async_fetch(pkg, ebuild_path)),
                        0)
                finally:
                    settings["FETCHCOMMAND"] = orig_fetchcommand
                    os.chmod(settings["DISTDIR"], orig_distdir_mode)
                    settings.features.remove("skiprocheck")
                    settings.features.add("distlocks")
Example #45
    async def _async_test_simple(self, playground, metadata_xml_files, loop):

        debug = playground.debug
        settings = playground.settings
        eprefix = settings["EPREFIX"]
        eroot = settings["EROOT"]
        trees = playground.trees
        portdb = trees[eroot]["porttree"].dbapi
        test_repo_location = settings.repositories["test_repo"].location
        var_cache_edb = os.path.join(eprefix, "var", "cache", "edb")
        cachedir = os.path.join(var_cache_edb, "dep")
        cachedir_pregen = os.path.join(test_repo_location, "metadata",
                                       "md5-cache")

        portage_python = portage._python_interpreter
        dispatch_conf_cmd = (
            portage_python,
            "-b",
            "-Wd",
            os.path.join(self.sbindir, "dispatch-conf"),
        )
        ebuild_cmd = (portage_python, "-b", "-Wd",
                      os.path.join(self.bindir, "ebuild"))
        egencache_cmd = (
            portage_python,
            "-b",
            "-Wd",
            os.path.join(self.bindir, "egencache"),
            "--repo",
            "test_repo",
            "--repositories-configuration",
            settings.repositories.config_string(),
        )
        emerge_cmd = (portage_python, "-b", "-Wd",
                      os.path.join(self.bindir, "emerge"))
        emaint_cmd = (portage_python, "-b", "-Wd",
                      os.path.join(self.sbindir, "emaint"))
        env_update_cmd = (
            portage_python,
            "-b",
            "-Wd",
            os.path.join(self.sbindir, "env-update"),
        )
        etc_update_cmd = (BASH_BINARY, os.path.join(self.sbindir,
                                                    "etc-update"))
        fixpackages_cmd = (
            portage_python,
            "-b",
            "-Wd",
            os.path.join(self.sbindir, "fixpackages"),
        )
        portageq_cmd = (
            portage_python,
            "-b",
            "-Wd",
            os.path.join(self.bindir, "portageq"),
        )
        quickpkg_cmd = (
            portage_python,
            "-b",
            "-Wd",
            os.path.join(self.bindir, "quickpkg"),
        )
        regenworld_cmd = (
            portage_python,
            "-b",
            "-Wd",
            os.path.join(self.sbindir, "regenworld"),
        )

        rm_binary = find_binary("rm")
        self.assertEqual(rm_binary is None, False, "rm command not found")
        rm_cmd = (rm_binary, )

        egencache_extra_args = []
        if self._have_python_xml():
            egencache_extra_args.append("--update-use-local-desc")

        test_ebuild = portdb.findname("dev-libs/A-1")
        self.assertFalse(test_ebuild is None)

        cross_prefix = os.path.join(eprefix, "cross_prefix")
        cross_root = os.path.join(eprefix, "cross_root")
        cross_eroot = os.path.join(cross_root, eprefix.lstrip(os.sep))

        binhost_dir = os.path.join(eprefix, "binhost")
        binhost_address = "127.0.0.1"
        binhost_remote_path = "/binhost"
        binhost_server = AsyncHTTPServer(
            binhost_address, BinhostContentMap(binhost_remote_path,
                                               binhost_dir), loop).__enter__()
        binhost_uri = "http://{address}:{port}{path}".format(
            address=binhost_address,
            port=binhost_server.server_port,
            path=binhost_remote_path,
        )

        binpkg_format = settings.get("BINPKG_FORMAT",
                                     SUPPORTED_GENTOO_BINPKG_FORMATS[0])
        self.assertIn(binpkg_format, ("xpak", "gpkg"))
        if binpkg_format == "xpak":
            foo_filename = "foo-0-1.xpak"
        elif binpkg_format == "gpkg":
            foo_filename = "foo-0-1.gpkg.tar"

        test_commands = ()

        if hasattr(argparse.ArgumentParser, "parse_intermixed_args"):
            test_commands += (
                emerge_cmd + ("--oneshot", "dev-libs/A", "-v", "dev-libs/A"), )

        test_commands += (
            emerge_cmd + (
                "--usepkgonly",
                "--root",
                cross_root,
                "--quickpkg-direct=y",
                "--quickpkg-direct-root",
                "/",
                "dev-libs/A",
            ),
            emerge_cmd + (
                "--usepkgonly",
                "--quickpkg-direct=y",
                "--quickpkg-direct-root",
                cross_root,
                "dev-libs/A",
            ),
            env_update_cmd,
            portageq_cmd + (
                "envvar",
                "-v",
                "CONFIG_PROTECT",
                "EROOT",
                "PORTAGE_CONFIGROOT",
                "PORTAGE_TMPDIR",
                "USERLAND",
            ),
            etc_update_cmd,
            dispatch_conf_cmd,
            emerge_cmd + ("--version", ),
            emerge_cmd + ("--info", ),
            emerge_cmd + ("--info", "--verbose"),
            emerge_cmd + ("--list-sets", ),
            emerge_cmd + ("--check-news", ),
            rm_cmd + ("-rf", cachedir),
            rm_cmd + ("-rf", cachedir_pregen),
            emerge_cmd + ("--regen", ),
            rm_cmd + ("-rf", cachedir),
            ({
                "FEATURES": "metadata-transfer"
            }, ) + emerge_cmd + ("--regen", ),
            rm_cmd + ("-rf", cachedir),
            ({
                "FEATURES": "metadata-transfer"
            }, ) + emerge_cmd + ("--regen", ),
            rm_cmd + ("-rf", cachedir),
            egencache_cmd + ("--update", ) + tuple(egencache_extra_args),
            ({
                "FEATURES": "metadata-transfer"
            }, ) + emerge_cmd + ("--metadata", ),
            rm_cmd + ("-rf", cachedir),
            ({
                "FEATURES": "metadata-transfer"
            }, ) + emerge_cmd + ("--metadata", ),
            emerge_cmd + ("--metadata", ),
            rm_cmd + ("-rf", cachedir),
            emerge_cmd + ("--oneshot", "virtual/foo"),
            lambda: self.assertFalse(
                os.path.exists(
                    os.path.join(pkgdir, "virtual", "foo", foo_filename))),
            ({
                "FEATURES": "unmerge-backup"
            }, ) + emerge_cmd + ("--unmerge", "virtual/foo"),
            lambda: self.assertTrue(
                os.path.exists(
                    os.path.join(pkgdir, "virtual", "foo", foo_filename))),
            emerge_cmd + ("--pretend", "dev-libs/A"),
            ebuild_cmd +
            (test_ebuild, "manifest", "clean", "package", "merge"),
            emerge_cmd +
            ("--pretend", "--tree", "--complete-graph", "dev-libs/A"),
            emerge_cmd + ("-p", "dev-libs/B"),
            emerge_cmd + ("-p", "--newrepo", "dev-libs/B"),
            emerge_cmd + (
                "-B",
                "dev-libs/B",
            ),
            emerge_cmd + (
                "--oneshot",
                "--usepkg",
                "dev-libs/B",
            ),
            # trigger clean prior to pkg_pretend as in bug #390711
            ebuild_cmd + (test_ebuild, "unpack"),
            emerge_cmd + (
                "--oneshot",
                "dev-libs/A",
            ),
            emerge_cmd + (
                "--noreplace",
                "dev-libs/A",
            ),
            emerge_cmd + (
                "--config",
                "dev-libs/A",
            ),
            emerge_cmd + ("--info", "dev-libs/A", "dev-libs/B"),
            emerge_cmd +
            ("--pretend", "--depclean", "--verbose", "dev-libs/B"),
            emerge_cmd + (
                "--pretend",
                "--depclean",
            ),
            emerge_cmd + ("--depclean", ),
            quickpkg_cmd + (
                "--include-config",
                "y",
                "dev-libs/A",
            ),
            # Test bug #523684, where a file renamed or removed by the
            # admin forces replacement files to be merged with config
            # protection.
            lambda: self.assertEqual(
                0,
                len(
                    list(
                        find_updated_config_files(
                            eroot, shlex_split(settings["CONFIG_PROTECT"])))),
            ),
            lambda: os.unlink(os.path.join(eprefix, "etc", "A-0")),
            emerge_cmd + ("--usepkgonly", "dev-libs/A"),
            lambda: self.assertEqual(
                1,
                len(
                    list(
                        find_updated_config_files(
                            eroot, shlex_split(settings["CONFIG_PROTECT"])))),
            ),
            emaint_cmd + ("--check", "all"),
            emaint_cmd + ("--fix", "all"),
            fixpackages_cmd,
            regenworld_cmd,
            portageq_cmd + ("match", eroot, "dev-libs/A"),
            portageq_cmd + ("best_visible", eroot, "dev-libs/A"),
            portageq_cmd + ("best_visible", eroot, "binary", "dev-libs/A"),
            portageq_cmd + ("contents", eroot, "dev-libs/A-1"),
            portageq_cmd + ("metadata", eroot, "ebuild", "dev-libs/A-1",
                            "EAPI", "IUSE", "RDEPEND"),
            portageq_cmd + ("metadata", eroot, "binary", "dev-libs/A-1",
                            "EAPI", "USE", "RDEPEND"),
            portageq_cmd + (
                "metadata",
                eroot,
                "installed",
                "dev-libs/A-1",
                "EAPI",
                "USE",
                "RDEPEND",
            ),
            portageq_cmd + ("owners", eroot, eroot + "usr"),
            emerge_cmd + ("-p", eroot + "usr"),
            emerge_cmd + ("-p", "--unmerge", "-q", eroot + "usr"),
            emerge_cmd + ("--unmerge", "--quiet", "dev-libs/A"),
            emerge_cmd + ("-C", "--quiet", "dev-libs/B"),
            # If EMERGE_DEFAULT_OPTS contains --autounmask=n, then --autounmask
            # must be specified with --autounmask-continue.
            (
                {
                    "EMERGE_DEFAULT_OPTS": "--autounmask=n"
                }, ) + emerge_cmd + (
                    "--autounmask",
                    "--autounmask-continue",
                    "dev-libs/C",
                ),
            # Verify that the above --autounmask-continue command caused
            # USE=flag to be applied correctly to dev-libs/D.
            portageq_cmd + ("match", eroot, "dev-libs/D[flag]"),
            # Test cross-prefix usage, including chpathtool for binpkgs.
            # EAPI 7
            (
                {
                    "EPREFIX": cross_prefix
                }, ) + emerge_cmd + ("dev-libs/C", ),
            ({
                "EPREFIX": cross_prefix
            }, ) + portageq_cmd + ("has_version", cross_prefix, "dev-libs/C"),
            ({
                "EPREFIX": cross_prefix
            }, ) + portageq_cmd + ("has_version", cross_prefix, "dev-libs/D"),
            ({
                "ROOT": cross_root
            }, ) + emerge_cmd + ("dev-libs/D", ),
            portageq_cmd + ("has_version", cross_eroot, "dev-libs/D"),
            # EAPI 5
            (
                {
                    "EPREFIX": cross_prefix
                }, ) + emerge_cmd + ("--usepkgonly", "dev-libs/A"),
            ({
                "EPREFIX": cross_prefix
            }, ) + portageq_cmd + ("has_version", cross_prefix, "dev-libs/A"),
            ({
                "EPREFIX": cross_prefix
            }, ) + portageq_cmd + ("has_version", cross_prefix, "dev-libs/B"),
            ({
                "EPREFIX": cross_prefix
            }, ) + emerge_cmd + ("-C", "--quiet", "dev-libs/B"),
            ({
                "EPREFIX": cross_prefix
            }, ) + emerge_cmd + ("-C", "--quiet", "dev-libs/A"),
            ({
                "EPREFIX": cross_prefix
            }, ) + emerge_cmd + ("dev-libs/A", ),
            ({
                "EPREFIX": cross_prefix
            }, ) + portageq_cmd + ("has_version", cross_prefix, "dev-libs/A"),
            ({
                "EPREFIX": cross_prefix
            }, ) + portageq_cmd + ("has_version", cross_prefix, "dev-libs/B"),
            # Test ROOT support
            (
                {
                    "ROOT": cross_root
                }, ) + emerge_cmd + ("dev-libs/B", ),
            portageq_cmd + ("has_version", cross_eroot, "dev-libs/B"),
        )

        # Test binhost support if FETCHCOMMAND is available.
        binrepos_conf_file = os.path.join(os.sep, eprefix, BINREPOS_CONF_FILE)
        with open(binrepos_conf_file, "wt") as f:
            f.write("[test-binhost]\n")
            f.write("sync-uri = {}\n".format(binhost_uri))
        fetchcommand = portage.util.shlex_split(
            playground.settings["FETCHCOMMAND"])
        fetch_bin = portage.process.find_binary(fetchcommand[0])
        if fetch_bin is not None:
            test_commands = test_commands + (
                lambda: os.rename(pkgdir, binhost_dir),
                emerge_cmd + ("-e", "--getbinpkgonly", "dev-libs/A"),
                lambda: shutil.rmtree(pkgdir),
                lambda: os.rename(binhost_dir, pkgdir),
                # Remove binrepos.conf and test PORTAGE_BINHOST.
                lambda: os.unlink(binrepos_conf_file),
                lambda: os.rename(pkgdir, binhost_dir),
                ({
                    "PORTAGE_BINHOST": binhost_uri
                }, ) + emerge_cmd + ("-fe", "--getbinpkgonly", "dev-libs/A"),
                lambda: shutil.rmtree(pkgdir),
                lambda: os.rename(binhost_dir, pkgdir),
            )

        distdir = playground.distdir
        pkgdir = playground.pkgdir
        fake_bin = os.path.join(eprefix, "bin")
        portage_tmpdir = os.path.join(eprefix, "var", "tmp", "portage")
        profile_path = settings.profile_path
        user_config_dir = os.path.join(os.sep, eprefix, USER_CONFIG_PATH)

        path = os.environ.get("PATH")
        if path is not None and not path.strip():
            path = None
        if path is None:
            path = ""
        else:
            path = ":" + path
        path = fake_bin + path

        pythonpath = os.environ.get("PYTHONPATH")
        if pythonpath is not None and not pythonpath.strip():
            pythonpath = None
        if pythonpath is not None and pythonpath.split(
                ":")[0] == PORTAGE_PYM_PATH:
            pass
        else:
            if pythonpath is None:
                pythonpath = ""
            else:
                pythonpath = ":" + pythonpath
            pythonpath = PORTAGE_PYM_PATH + pythonpath

        env = {
            "PORTAGE_OVERRIDE_EPREFIX": eprefix,
            "CLEAN_DELAY": "0",
            "DISTDIR": distdir,
            "EMERGE_WARNING_DELAY": "0",
            "INFODIR": "",
            "INFOPATH": "",
            "PATH": path,
            "PKGDIR": pkgdir,
            "PORTAGE_INST_GID": str(portage.data.portage_gid),
            "PORTAGE_INST_UID": str(portage.data.portage_uid),
            "PORTAGE_PYTHON": portage_python,
            "PORTAGE_REPOSITORIES": settings.repositories.config_string(),
            "PORTAGE_TMPDIR": portage_tmpdir,
            "PORTAGE_LOGDIR": portage_tmpdir,
            "PYTHONDONTWRITEBYTECODE": os.environ.get("PYTHONDONTWRITEBYTECODE", ""),
            "PYTHONPATH": pythonpath,
            "__PORTAGE_TEST_PATH_OVERRIDE": fake_bin,
        }

        if "__PORTAGE_TEST_HARDLINK_LOCKS" in os.environ:
            env["__PORTAGE_TEST_HARDLINK_LOCKS"] = os.environ[
                "__PORTAGE_TEST_HARDLINK_LOCKS"]

        updates_dir = os.path.join(test_repo_location, "profiles", "updates")
        dirs = [
            cachedir,
            cachedir_pregen,
            cross_eroot,
            cross_prefix,
            distdir,
            fake_bin,
            portage_tmpdir,
            updates_dir,
            user_config_dir,
            var_cache_edb,
        ]
        etc_symlinks = ("dispatch-conf.conf", "etc-update.conf")
        # Override things that may be unavailable, or may have portability
        # issues when running tests in exotic environments.
        #   prepstrip - bug #447810 (bash read builtin EINTR problem)
        true_symlinks = ["find", "prepstrip", "sed", "scanelf"]
        true_binary = find_binary("true")
        self.assertEqual(true_binary is None, False, "true command not found")
        try:
            for d in dirs:
                ensure_dirs(d)
            for x in true_symlinks:
                os.symlink(true_binary, os.path.join(fake_bin, x))
            for x in etc_symlinks:
                os.symlink(os.path.join(self.cnf_etc_path, x),
                           os.path.join(eprefix, "etc", x))
            with open(os.path.join(var_cache_edb, "counter"), "wb") as f:
                f.write(b"100")
            # non-empty system set keeps --depclean quiet
            with open(os.path.join(profile_path, "packages"), "w") as f:
                f.write("*dev-libs/token-system-pkg")
            for cp, xml_data in metadata_xml_files:
                with open(os.path.join(test_repo_location, cp, "metadata.xml"),
                          "w") as f:
                    f.write(playground.metadata_xml_template % xml_data)
            with open(os.path.join(updates_dir, "1Q-2010"), "w") as f:
                f.write("""
slotmove =app-doc/pms-3 2 3
move dev-util/git dev-vcs/git
""")

            if debug:
                # The subprocess inherits both stdout and stderr, for
                # debugging purposes.
                stdout = None
            else:
                # The subprocess inherits stderr so that any warnings
                # triggered by python -Wd will be visible.
                stdout = subprocess.PIPE

            for args in test_commands:

                if hasattr(args, "__call__"):
                    args()
                    continue

                if isinstance(args[0], dict):
                    local_env = env.copy()
                    local_env.update(args[0])
                    args = args[1:]
                else:
                    local_env = env

                proc = await asyncio.create_subprocess_exec(*args,
                                                            env=local_env,
                                                            stderr=None,
                                                            stdout=stdout)

                if debug:
                    await proc.wait()
                else:
                    output, _err = await proc.communicate()
                    await proc.wait()
                    if proc.returncode != os.EX_OK:
                        portage.writemsg(output)

                self.assertEqual(os.EX_OK, proc.returncode,
                                 "emerge failed with args %s" % (args, ))
        finally:
            binhost_server.__exit__(None, None, None)
            playground.cleanup()
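
The driver loop above accepts three shapes of entry in test_commands: a bare callable (run in-process), an argv tuple, or an argv tuple prefixed with a dict of environment overrides. A condensed synchronous sketch of that dispatch (the test itself uses asyncio subprocesses):

    import subprocess

    def run_commands(commands, base_env):
        for args in commands:
            if callable(args):
                args()  # in-process assertion or filesystem tweak
                continue
            env = base_env
            if isinstance(args[0], dict):
                env = dict(base_env, **args[0])  # per-command env overrides
                args = args[1:]
            subprocess.run(args, env=env, check=True)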
Example #47
def fetch_metadata_dtd(metadata_dtd, repoman_settings):
	"""
	Fetch metadata.dtd if it doesn't exist or the ctime is older than
	metadata_dtd_ctime_interval.
	@rtype: bool
	@return: True if successful, otherwise False
	"""

	must_fetch = True
	metadata_dtd_st = None
	current_time = int(time.time())
	try:
		metadata_dtd_st = os.stat(metadata_dtd)
	except EnvironmentError as e:
		if e.errno not in (errno.ENOENT, errno.ESTALE):
			raise
		del e
	else:
		# Trigger fetch if metadata.dtd mtime is old or clock is wrong.
		if abs(current_time - metadata_dtd_st.st_ctime) \
			< metadata_dtd_ctime_interval:
			must_fetch = False

	if must_fetch:
		print()
		print(
			"%s the local copy of metadata.dtd "
			"needs to be refetched, doing that now" % green("***"))
		print()
		parsed_url = urlparse(metadata_dtd_uri)
		setting = 'FETCHCOMMAND_' + parsed_url.scheme.upper()
		fcmd = repoman_settings.get(setting)
		if not fcmd:
			fcmd = repoman_settings.get('FETCHCOMMAND')
			if not fcmd:
				logging.error("FETCHCOMMAND is unset")
				return False

		destdir = repoman_settings["DISTDIR"]
		fd, metadata_dtd_tmp = tempfile.mkstemp(
			prefix='metadata.dtd.', dir=destdir)
		os.close(fd)

		try:
			if not portage.getbinpkg.file_get(
				metadata_dtd_uri, destdir, fcmd=fcmd,
				filename=os.path.basename(metadata_dtd_tmp)):
				logging.error(
					"failed to fetch metadata.dtd from '%s'" % metadata_dtd_uri)
				return False

			try:
				portage.util.apply_secpass_permissions(
					metadata_dtd_tmp,
					gid=portage.data.portage_gid, mode=0o664, mask=0o2)
			except portage.exception.PortageException:
				pass

			os.rename(metadata_dtd_tmp, metadata_dtd)
		finally:
			try:
				os.unlink(metadata_dtd_tmp)
			except OSError:
				pass

	return True
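
fetch_metadata_dtd publishes its download with the usual tempfile-plus-rename pattern: fetch into a mkstemp file created in the destination directory, rename it over the target on success, and unconditionally unlink the temporary name in a finally block (a harmless ENOENT once the rename has happened). A reduced sketch of the pattern, with fetch_func standing in for the FETCHCOMMAND invocation:

    import os
    import tempfile

    def fetch_atomically(fetch_func, dest):
        """Download via fetch_func(tmp_path) and publish dest atomically."""
        fd, tmp = tempfile.mkstemp(prefix=os.path.basename(dest) + '.',
                                   dir=os.path.dirname(dest))
        os.close(fd)
        try:
            if not fetch_func(tmp):
                return False  # the temp file is removed below
            os.rename(tmp, dest)  # same directory, so the rename is atomic
            return True
        finally:
            try:
                os.unlink(tmp)  # cleanup on failure; ENOENT after success
            except OSError:
                pass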
Example #48
def chk_updated_info_files(root, infodirs, prev_mtimes):

    if os.path.exists("/usr/bin/install-info"):
        out = portage.output.EOutput()
        regen_infodirs = []
        for z in infodirs:
            if z == '':
                continue
            inforoot = portage.util.normalize_path(root + z)
            if os.path.isdir(inforoot) and \
             not [x for x in os.listdir(inforoot) \
             if x.startswith('.keepinfodir')]:
                infomtime = os.stat(inforoot)[stat.ST_MTIME]
                if inforoot not in prev_mtimes or \
                 prev_mtimes[inforoot] != infomtime:
                    regen_infodirs.append(inforoot)

        if not regen_infodirs:
            portage.util.writemsg_stdout("\n")
            if portage.util.noiselimit >= 0:
                out.einfo("GNU info directory index is up-to-date.")
        else:
            portage.util.writemsg_stdout("\n")
            if portage.util.noiselimit >= 0:
                out.einfo("Regenerating GNU info directory index...")

            dir_extensions = ("", ".gz", ".bz2")
            icount = 0
            badcount = 0
            errmsg = ""
            for inforoot in regen_infodirs:
                if inforoot == '':
                    continue

                if not os.path.isdir(inforoot) or \
                 not os.access(inforoot, os.W_OK):
                    continue

                file_list = os.listdir(inforoot)
                file_list.sort()
                dir_file = os.path.join(inforoot, "dir")
                moved_old_dir = False
                processed_count = 0
                for x in file_list:
                    if x.startswith(".") or \
                     os.path.isdir(os.path.join(inforoot, x)):
                        continue
                    if x.startswith("dir"):
                        skip = False
                        for ext in dir_extensions:
                            if x == "dir" + ext or \
                             x == "dir" + ext + ".old":
                                skip = True
                                break
                        if skip:
                            continue
                    if processed_count == 0:
                        for ext in dir_extensions:
                            try:
                                os.rename(dir_file + ext,
                                          dir_file + ext + ".old")
                                moved_old_dir = True
                            except EnvironmentError as e:
                                if e.errno != errno.ENOENT:
                                    raise
                                del e
                    processed_count += 1
                    try:
                        proc = subprocess.Popen([
                            '/usr/bin/install-info',
                            '--dir-file=%s' % os.path.join(inforoot, "dir"),
                            os.path.join(inforoot, x)
                        ],
                                                env=dict(os.environ,
                                                         LANG="C",
                                                         LANGUAGE="C"),
                                                stdout=subprocess.PIPE,
                                                stderr=subprocess.STDOUT)
                    except OSError:
                        myso = None
                    else:
                        myso = portage._unicode_decode(
                            proc.communicate()[0]).rstrip("\n")
                        proc.wait()
                    existsstr = "already exists, for file `"
                    if myso:
                        if re.search(existsstr, myso):
                            # Already exists... Don't increment the count for this.
                            pass
                        elif myso[:44] == "install-info: warning: no info dir entry in ":
                            # This info file doesn't contain a DIR-header: install-info produces this
                            # (harmless) warning (the --quiet switch doesn't seem to work).
                            # Don't increment the count for this.
                            pass
                        else:
                            badcount = badcount + 1
                            errmsg += myso + "\n"
                    icount = icount + 1

                if moved_old_dir and not os.path.exists(dir_file):
                    # We didn't generate a new dir file, so put the old file
                    # back where it was originally found.
                    for ext in dir_extensions:
                        try:
                            os.rename(dir_file + ext + ".old", dir_file + ext)
                        except EnvironmentError as e:
                            if e.errno != errno.ENOENT:
                                raise
                            del e

                # Clean dir.old cruft so that they don't prevent
                # unmerge of otherwise empty directories.
                for ext in dir_extensions:
                    try:
                        os.unlink(dir_file + ext + ".old")
                    except EnvironmentError as e:
                        if e.errno != errno.ENOENT:
                            raise
                        del e

                #update mtime so we can potentially avoid regenerating.
                prev_mtimes[inforoot] = os.stat(inforoot)[stat.ST_MTIME]

            if badcount:
                out.eerror("Processed %d info files; %d errors." % \
                 (icount, badcount))
                portage.util.writemsg_level(errmsg,
                                            level=logging.ERROR,
                                            noiselevel=-1)
            else:
                if icount > 0 and portage.util.noiselimit >= 0:
                    out.einfo("Processed %d info files." % (icount, ))
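
The regeneration above stays restartable by rotating the existing index aside (dir, dir.gz and dir.bz2 each become .old) before the first install-info call, restoring the .old copies only if no new dir file was produced, and deleting them afterwards. A compact sketch of the rotate step (illustrative):

    import errno
    import os

    DIR_EXTENSIONS = ("", ".gz", ".bz2")

    def rotate_dir_files(dir_file):
        """Move dir, dir.gz and dir.bz2 aside; return True if anything moved."""
        moved = False
        for ext in DIR_EXTENSIONS:
            try:
                os.rename(dir_file + ext, dir_file + ext + ".old")
                moved = True
            except OSError as e:
                if e.errno != errno.ENOENT:
                    raise
        return moved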
Example #49
	def testSyncLocal(self):
		debug = False

		skip_reason = self._must_skip()
		if skip_reason:
			self.portage_skip = skip_reason
			self.assertFalse(True, skip_reason)
			return

		repos_conf = textwrap.dedent("""
			[DEFAULT]
			%(default_keys)s
			[test_repo]
			location = %(EPREFIX)s/var/repositories/test_repo
			sync-type = %(sync-type)s
			sync-uri = file://%(EPREFIX)s/var/repositories/test_repo_sync
			auto-sync = %(auto-sync)s
			%(repo_extra_keys)s
		""")

		profile = {
			"eapi": ("5",),
			"package.use.stable.mask": ("dev-libs/A flag",)
		}

		ebuilds = {
			"dev-libs/A-0": {}
		}

		user_config = {
			'make.conf': ('FEATURES="metadata-transfer"',)
		}

		playground = ResolverPlayground(ebuilds=ebuilds,
			profile=profile, user_config=user_config, debug=debug)
		settings = playground.settings
		eprefix = settings["EPREFIX"]
		eroot = settings["EROOT"]
		homedir = os.path.join(eroot, "home")
		distdir = os.path.join(eprefix, "distdir")
		repo = settings.repositories["test_repo"]
		metadata_dir = os.path.join(repo.location, "metadata")

		cmds = {}
		for cmd in ("emerge", "emaint"):
			path = os.path.join(self.bindir, cmd)
			assert os.path.exists(path)
			cmds[cmd] = (portage._python_interpreter,
				"-b", "-Wd", path)

		git_binary = find_binary("git")
		git_cmd = (git_binary,)

		committer_name = "Gentoo Dev"
		committer_email = "*****@*****.**"

		def repos_set_conf(sync_type, dflt_keys=None, xtra_keys=None,
			auto_sync="yes"):
			env["PORTAGE_REPOSITORIES"] = repos_conf % {\
				"EPREFIX": eprefix, "sync-type": sync_type,
				"auto-sync": auto_sync,
				"default_keys": "" if dflt_keys is None else dflt_keys,
				"repo_extra_keys": "" if xtra_keys is None else xtra_keys}

		def alter_ebuild():
			with open(os.path.join(repo.location + "_sync",
				"dev-libs", "A", "A-0.ebuild"), "a") as f:
				f.write("\n")
			os.unlink(os.path.join(metadata_dir, 'timestamp.chk'))

		sync_cmds = (
			(homedir, cmds["emerge"] + ("--sync",)),
			(homedir, lambda: self.assertTrue(os.path.exists(
				os.path.join(repo.location, "dev-libs", "A")
				), "dev-libs/A expected, but missing")),
			(homedir, cmds["emaint"] + ("sync", "-A")),
		)

		sync_cmds_auto_sync = (
			(homedir, lambda: repos_set_conf("rsync", auto_sync="no")),
			(homedir, cmds["emerge"] + ("--sync",)),
			(homedir, lambda: self.assertFalse(os.path.exists(
				os.path.join(repo.location, "dev-libs", "A")
				), "dev-libs/A found, expected missing")),
			(homedir, lambda: repos_set_conf("rsync", auto_sync="yes")),
		)

		rename_repo = (
			(homedir, lambda: os.rename(repo.location,
				repo.location + "_sync")),
		)

		rsync_opts_repos = (
			(homedir, alter_ebuild),
			(homedir, lambda: repos_set_conf("rsync", None,
				"sync-rsync-extra-opts = --backup --backup-dir=%s" %
				_shell_quote(repo.location + "_back"))),
			(homedir, cmds['emerge'] + ("--sync",)),
			(homedir, lambda: self.assertTrue(os.path.exists(
				repo.location + "_back"))),
			(homedir, lambda: shutil.rmtree(repo.location + "_back")),
			(homedir, lambda: repos_set_conf("rsync")),
		)

		rsync_opts_repos_default = (
			(homedir, alter_ebuild),
			(homedir, lambda: repos_set_conf("rsync",
					"sync-rsync-extra-opts = --backup --backup-dir=%s" %
					_shell_quote(repo.location+"_back"))),
			(homedir, cmds['emerge'] + ("--sync",)),
			(homedir, lambda: self.assertTrue(os.path.exists(repo.location + "_back"))),
			(homedir, lambda: shutil.rmtree(repo.location + "_back")),
			(homedir, lambda: repos_set_conf("rsync")),
		)

		rsync_opts_repos_default_ovr = (
			(homedir, alter_ebuild),
			(homedir, lambda: repos_set_conf("rsync",
				"sync-rsync-extra-opts = --backup --backup-dir=%s" %
				_shell_quote(repo.location + "_back_nowhere"),
				"sync-rsync-extra-opts = --backup --backup-dir=%s" %
				_shell_quote(repo.location + "_back"))),
			(homedir, cmds['emerge'] + ("--sync",)),
			(homedir, lambda: self.assertTrue(os.path.exists(repo.location + "_back"))),
			(homedir, lambda: shutil.rmtree(repo.location + "_back")),
			(homedir, lambda: repos_set_conf("rsync")),
		)

		rsync_opts_repos_default_cancel = (
			(homedir, alter_ebuild),
			(homedir, lambda: repos_set_conf("rsync",
				"sync-rsync-extra-opts = --backup --backup-dir=%s" %
				_shell_quote(repo.location + "_back_nowhere"),
				"sync-rsync-extra-opts = ")),
			(homedir, cmds['emerge'] + ("--sync",)),
			(homedir, lambda: self.assertFalse(os.path.exists(repo.location + "_back"))),
			(homedir, lambda: repos_set_conf("rsync")),
		)

		delete_sync_repo = (
			(homedir, lambda: shutil.rmtree(
				repo.location + "_sync")),
		)

		git_repo_create = (
			(repo.location, git_cmd +
				("config", "--global", "user.name", committer_name,)),
			(repo.location, git_cmd +
				("config", "--global", "user.email", committer_email,)),
			(repo.location, git_cmd + ("init-db",)),
			(repo.location, git_cmd + ("add", ".")),
			(repo.location, git_cmd +
				("commit", "-a", "-m", "add whole repo")),
		)

		sync_type_git = (
			(homedir, lambda: repos_set_conf("git")),
		)

		pythonpath = os.environ.get("PYTHONPATH")
		if pythonpath is not None and not pythonpath.strip():
			pythonpath = None
		if pythonpath is not None and \
			pythonpath.split(":")[0] == PORTAGE_PYM_PATH:
			pass
		else:
			if pythonpath is None:
				pythonpath = ""
			else:
				pythonpath = ":" + pythonpath
			pythonpath = PORTAGE_PYM_PATH + pythonpath

		env = {
			"PORTAGE_OVERRIDE_EPREFIX" : eprefix,
			"DISTDIR" : distdir,
			"GENTOO_COMMITTER_NAME" : committer_name,
			"GENTOO_COMMITTER_EMAIL" : committer_email,
			"HOME" : homedir,
			"PATH" : os.environ["PATH"],
			"PORTAGE_GRPNAME" : os.environ["PORTAGE_GRPNAME"],
			"PORTAGE_USERNAME" : os.environ["PORTAGE_USERNAME"],
			"PYTHONDONTWRITEBYTECODE" : os.environ.get("PYTHONDONTWRITEBYTECODE", ""),
			"PYTHONPATH" : pythonpath,
		}
		repos_set_conf("rsync")

		if os.environ.get("SANDBOX_ON") == "1":
			# avoid problems from nested sandbox instances
			env["FEATURES"] = "-sandbox -usersandbox"

		dirs = [homedir, metadata_dir]
		try:
			for d in dirs:
				ensure_dirs(d)

			timestamp_path = os.path.join(metadata_dir, 'timestamp.chk')
			with open(timestamp_path, 'w') as f:
				f.write(time.strftime('%s\n' % TIMESTAMP_FORMAT, time.gmtime()))

			if debug:
				# The subprocess inherits both stdout and stderr, for
				# debugging purposes.
				stdout = None
			else:
				# The subprocess inherits stderr so that any warnings
				# triggered by python -Wd will be visible.
				stdout = subprocess.PIPE

			for cwd, cmd in rename_repo + sync_cmds_auto_sync + sync_cmds + \
				rsync_opts_repos + rsync_opts_repos_default + \
				rsync_opts_repos_default_ovr + rsync_opts_repos_default_cancel + \
				delete_sync_repo + git_repo_create + sync_type_git + \
				rename_repo + sync_cmds:

				if hasattr(cmd, '__call__'):
					cmd()
					continue

				abs_cwd = os.path.join(repo.location, cwd)
				proc = subprocess.Popen(cmd,
					cwd=abs_cwd, env=env, stdout=stdout)

				if debug:
					proc.wait()
				else:
					output = proc.stdout.readlines()
					proc.wait()
					proc.stdout.close()
					if proc.returncode != os.EX_OK:
						for line in output:
							sys.stderr.write(_unicode_decode(line))

				self.assertEqual(os.EX_OK, proc.returncode,
					"%s failed in %s" % (cmd, cwd,))


		finally:
			playground.cleanup()
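
The main loop of this test treats each entry as a (cwd, action) pair, where action is either a callable run in-process (assertions, config rewrites) or an argv tuple run as a subprocess. A minimal standalone sketch of that dispatch pattern, with illustrative names that are not portage API:

# A minimal sketch of the (cwd, action) step-dispatch used by the test's
# main loop; `run_steps` and `steps` are illustrative names only.
import subprocess

def run_steps(steps, env=None):
    for cwd, action in steps:
        if callable(action):
            # In-process assertions and config tweaks run directly.
            action()
        else:
            # Command tuples run as subprocesses, failing fast on error.
            subprocess.run(action, cwd=cwd, env=env, check=True)

run_steps([
    ("/", ("true",)),                       # subprocess step (POSIX `true`)
    ("/", lambda: print("callable step")),  # in-process step
])
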
Example #50
def chk_updated_info_files(root, infodirs, prev_mtimes, retval):

	if os.path.exists("/usr/bin/install-info"):
		out = portage.output.EOutput()
		regen_infodirs=[]
		for z in infodirs:
			if z=='':
				continue
			inforoot=normpath(root+z)
			if os.path.isdir(inforoot):
				infomtime = os.stat(inforoot)[stat.ST_MTIME]
				if inforoot not in prev_mtimes or \
					prev_mtimes[inforoot] != infomtime:
						regen_infodirs.append(inforoot)

		if not regen_infodirs:
			portage.writemsg_stdout("\n")
			out.einfo("GNU info directory index is up-to-date.")
		else:
			portage.writemsg_stdout("\n")
			out.einfo("Regenerating GNU info directory index...")

			dir_extensions = ("", ".gz", ".bz2")
			icount=0
			badcount=0
			errmsg = ""
			for inforoot in regen_infodirs:
				if inforoot=='':
					continue

				if not os.path.isdir(inforoot) or \
					not os.access(inforoot, os.W_OK):
					continue

				file_list = os.listdir(inforoot)
				file_list.sort()
				dir_file = os.path.join(inforoot, "dir")
				moved_old_dir = False
				processed_count = 0
				for x in file_list:
					if x.startswith(".") or \
						os.path.isdir(os.path.join(inforoot, x)):
						continue
					if x.startswith("dir"):
						skip = False
						for ext in dir_extensions:
							if x == "dir" + ext or \
								x == "dir" + ext + ".old":
								skip = True
								break
						if skip:
							continue
					if processed_count == 0:
						for ext in dir_extensions:
							try:
								os.rename(dir_file + ext, dir_file + ext + ".old")
								moved_old_dir = True
							except EnvironmentError as e:
								if e.errno != errno.ENOENT:
									raise
								del e
					processed_count += 1
					myso = subprocess_getstatusoutput(
						"LANG=C LANGUAGE=C /usr/bin/install-info --dir-file=" +
						inforoot + "/dir " + inforoot + "/" + x)[1]
					existsstr = "already exists, for file `"
					if myso != "":
						if re.search(existsstr, myso):
							# Already exists... Don't increment the count for this.
							pass
						elif myso[:44] == "install-info: warning: no info dir entry in ":
							# This info file doesn't contain a DIR-header: install-info produces this
							# (harmless) warning (the --quiet switch doesn't seem to work).
							# Don't increment the count for this.
							pass
						else:
							badcount = badcount + 1
							errmsg += myso + "\n"
					icount = icount + 1

				if moved_old_dir and not os.path.exists(dir_file):
					# We didn't generate a new dir file, so put the old file
					# back where it was originally found.
					for ext in dir_extensions:
						try:
							os.rename(dir_file + ext + ".old", dir_file + ext)
						except EnvironmentError as e:
							if e.errno != errno.ENOENT:
								raise
							del e

				# Clean dir.old cruft so that it doesn't prevent
				# unmerge of otherwise empty directories.
				for ext in dir_extensions:
					try:
						os.unlink(dir_file + ext + ".old")
					except EnvironmentError as e:
						if e.errno != errno.ENOENT:
							raise
						del e

				# Update mtime so we can potentially avoid regenerating.
				prev_mtimes[inforoot] = os.stat(inforoot)[stat.ST_MTIME]

			if badcount:
				out.eerror("Processed %d info files; %d errors." % \
					(icount, badcount))
				writemsg_level(errmsg, level=logging.ERROR, noiselevel=-1)
			else:
				if icount > 0:
					out.einfo("Processed %d info files." % (icount,))
Example #51
	def testEbuildFetch(self):

		user_config = {
			"make.conf":
				(
					'GENTOO_MIRRORS="{scheme}://{host}:{port}"',
				),
		}

		distfiles = {
			'bar': b'bar\n',
			'foo': b'foo\n',
		}

		ebuilds = {
			'dev-libs/A-1': {
				'EAPI': '7',
				'SRC_URI': '''{scheme}://{host}:{port}/distfiles/bar.txt -> bar
					{scheme}://{host}:{port}/distfiles/foo.txt -> foo''',
			},
		}

		loop = SchedulerInterface(global_event_loop())

		def run_async(func, *args, **kwargs):
			with ForkExecutor(loop=loop) as executor:
				return loop.run_until_complete(loop.run_in_executor(executor,
					functools.partial(func, *args, **kwargs)))

		scheme = 'http'
		host = '127.0.0.1'
		content = {}

		content['/distfiles/layout.conf'] = b'[structure]\n0=flat\n'

		for k, v in distfiles.items():
			# mirror path
			content['/distfiles/{}'.format(k)] = v
			# upstream path
			content['/distfiles/{}.txt'.format(k)] = v

		with AsyncHTTPServer(host, content, loop) as server:
			ebuilds_subst = {}
			for cpv, metadata in ebuilds.items():
				metadata = metadata.copy()
				metadata['SRC_URI'] = metadata['SRC_URI'].format(
					scheme=scheme, host=host, port=server.server_port)
				ebuilds_subst[cpv] = metadata

			user_config_subst = user_config.copy()
			for configname, configdata in user_config.items():

				configdata_sub = []
				for line in configdata:
					configdata_sub.append(line.format(
						scheme=scheme, host=host, port=server.server_port))
				user_config_subst[configname] = tuple(configdata_sub)

			playground = ResolverPlayground(ebuilds=ebuilds_subst, distfiles=distfiles, user_config=user_config_subst)
			ro_distdir = tempfile.mkdtemp()
			eubin = os.path.join(playground.eprefix, "usr", "bin")
			try:
				fetchcommand = portage.util.shlex_split(playground.settings['FETCHCOMMAND'])
				fetch_bin = portage.process.find_binary(fetchcommand[0])
				if fetch_bin is None:
					self.skipTest('FETCHCOMMAND not found: {}'.format(playground.settings['FETCHCOMMAND']))
				os.symlink(fetch_bin, os.path.join(eubin, os.path.basename(fetch_bin)))
				resumecommand = portage.util.shlex_split(playground.settings['RESUMECOMMAND'])
				resume_bin = portage.process.find_binary(resumecommand[0])
				if resume_bin is None:
					self.skipTest('RESUMECOMMAND not found: {}'.format(playground.settings['RESUMECOMMAND']))
				if resume_bin != fetch_bin:
					os.symlink(resume_bin, os.path.join(eubin, os.path.basename(resume_bin)))
				root_config = playground.trees[playground.eroot]['root_config']
				portdb = root_config.trees["porttree"].dbapi
				settings = config(clone=playground.settings)

				# Demonstrate that fetch preserves a stale file in DISTDIR when no digests are given.
				foo_uri = {'foo': ('{scheme}://{host}:{port}/distfiles/foo'.format(scheme=scheme, host=host, port=server.server_port),)}
				foo_path = os.path.join(settings['DISTDIR'], 'foo')
				foo_stale_content = b'stale content\n'
				with open(foo_path, 'wb') as f:
					f.write(foo_stale_content)

				self.assertTrue(bool(run_async(fetch, foo_uri, settings, try_mirrors=False)))

				with open(foo_path, 'rb') as f:
					self.assertEqual(f.read(), foo_stale_content)
				with open(foo_path, 'rb') as f:
					self.assertNotEqual(f.read(), distfiles['foo'])

				# Use force=True to update the stale file.
				self.assertTrue(bool(run_async(fetch, foo_uri, settings, try_mirrors=False, force=True)))

				with open(foo_path, 'rb') as f:
					self.assertEqual(f.read(), distfiles['foo'])

				# Test force=True with FEATURES=skiprocheck, using read-only DISTDIR.
				# FETCHCOMMAND is set to temporarily chmod +w DISTDIR. Note that
				# FETCHCOMMAND must perform atomic rename itself due to read-only
				# DISTDIR.
				with open(foo_path, 'wb') as f:
					f.write(b'stale content\n')
				orig_fetchcommand = settings['FETCHCOMMAND']
				orig_distdir_mode = os.stat(settings['DISTDIR']).st_mode
				temp_fetchcommand = os.path.join(eubin, 'fetchcommand')
				with open(temp_fetchcommand, 'w') as f:
					f.write("""
					set -e
					URI=$1
					DISTDIR=$2
					FILE=$3
					trap 'chmod a-w "${DISTDIR}"' EXIT
					chmod ug+w "${DISTDIR}"
					%s
					mv -f "${DISTDIR}/${FILE}.__download__" "${DISTDIR}/${FILE}"
				""" % orig_fetchcommand.replace('${FILE}', '${FILE}.__download__'))
				settings['FETCHCOMMAND'] = '"%s" "%s" "${URI}" "${DISTDIR}" "${FILE}"' % (BASH_BINARY, temp_fetchcommand)
				settings.features.add('skiprocheck')
				settings.features.remove('distlocks')
				os.chmod(settings['DISTDIR'], 0o555)
				try:
					self.assertTrue(bool(run_async(fetch, foo_uri, settings, try_mirrors=False, force=True)))
				finally:
					settings['FETCHCOMMAND'] = orig_fetchcommand
					os.chmod(settings['DISTDIR'], orig_distdir_mode)
					settings.features.remove('skiprocheck')
					settings.features.add('distlocks')
					os.unlink(temp_fetchcommand)

				with open(foo_path, 'rb') as f:
					self.assertEqual(f.read(), distfiles['foo'])

				# Test emirrordist invocation.
				emirrordist_cmd = (portage._python_interpreter, '-b', '-Wd',
					os.path.join(self.bindir, 'emirrordist'),
					'--distfiles', settings['DISTDIR'],
					'--config-root', settings['EPREFIX'],
					'--repositories-configuration', settings.repositories.config_string(),
					'--repo', 'test_repo', '--mirror')

				env = settings.environ()
				env['PYTHONPATH'] = ':'.join(
					filter(None, [PORTAGE_PYM_PATH] + os.environ.get('PYTHONPATH', '').split(':')))

				for k in distfiles:
					os.unlink(os.path.join(settings['DISTDIR'], k))

				proc = loop.run_until_complete(asyncio.create_subprocess_exec(*emirrordist_cmd, env=env))
				self.assertEqual(loop.run_until_complete(proc.wait()), 0)

				for k in distfiles:
					with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
						self.assertEqual(f.read(), distfiles[k])

				# Tests only work with one ebuild at a time, so the config
				# pool only needs a single config instance.
				class config_pool:
					@staticmethod
					def allocate():
						return settings
					@staticmethod
					def deallocate(settings):
						pass

				def async_fetch(pkg, ebuild_path):
					fetcher = EbuildFetcher(config_pool=config_pool, ebuild_path=ebuild_path,
						fetchonly=False, fetchall=True, pkg=pkg, scheduler=loop)
					fetcher.start()
					return fetcher.async_wait()

				for cpv in ebuilds:
					metadata = dict(zip(Package.metadata_keys,
						portdb.aux_get(cpv, Package.metadata_keys)))

					pkg = Package(built=False, cpv=cpv, installed=False,
						metadata=metadata, root_config=root_config,
						type_name='ebuild')

					settings.setcpv(pkg)
					ebuild_path = portdb.findname(pkg.cpv)
					portage.doebuild_environment(ebuild_path, 'fetch', settings=settings, db=portdb)

					# Test good files in DISTDIR
					for k in settings['AA'].split():
						os.stat(os.path.join(settings['DISTDIR'], k))
					self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
					for k in settings['AA'].split():
						with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
							self.assertEqual(f.read(), distfiles[k])

					# Test digestgen with fetch
					os.unlink(os.path.join(os.path.dirname(ebuild_path), 'Manifest'))
					for k in settings['AA'].split():
						os.unlink(os.path.join(settings['DISTDIR'], k))
					with ForkExecutor(loop=loop) as executor:
						self.assertTrue(bool(loop.run_until_complete(
							loop.run_in_executor(executor, functools.partial(
								digestgen, mysettings=settings, myportdb=portdb)))))
					for k in settings['AA'].split():
						with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
							self.assertEqual(f.read(), distfiles[k])

					# Test missing files in DISTDIR
					for k in settings['AA'].split():
						os.unlink(os.path.join(settings['DISTDIR'], k))
					self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
					for k in settings['AA'].split():
						with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
							self.assertEqual(f.read(), distfiles[k])

					# Test empty files in DISTDIR
					for k in settings['AA'].split():
						file_path = os.path.join(settings['DISTDIR'], k)
						with open(file_path, 'wb') as f:
							pass
						self.assertEqual(os.stat(file_path).st_size, 0)
					self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
					for k in settings['AA'].split():
						with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
							self.assertEqual(f.read(), distfiles[k])

					# Test non-empty files containing null bytes in DISTDIR
					for k in settings['AA'].split():
						file_path = os.path.join(settings['DISTDIR'], k)
						with open(file_path, 'wb') as f:
							f.write(len(distfiles[k]) * b'\0')
						self.assertEqual(os.stat(file_path).st_size, len(distfiles[k]))
					self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
					for k in settings['AA'].split():
						with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
							self.assertEqual(f.read(), distfiles[k])

					# Test PORTAGE_RO_DISTDIRS
					settings['PORTAGE_RO_DISTDIRS'] = '"{}"'.format(ro_distdir)
					orig_fetchcommand = settings['FETCHCOMMAND']
					orig_resumecommand = settings['RESUMECOMMAND']
					try:
						settings['FETCHCOMMAND'] = settings['RESUMECOMMAND'] = ''
						for k in settings['AA'].split():
							file_path = os.path.join(settings['DISTDIR'], k)
							os.rename(file_path, os.path.join(ro_distdir, k))
						self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
						for k in settings['AA'].split():
							file_path = os.path.join(settings['DISTDIR'], k)
							self.assertTrue(os.path.islink(file_path))
							with open(file_path, 'rb') as f:
								self.assertEqual(f.read(), distfiles[k])
							os.unlink(file_path)
					finally:
						settings.pop('PORTAGE_RO_DISTDIRS')
						settings['FETCHCOMMAND'] = orig_fetchcommand
						settings['RESUMECOMMAND'] = orig_resumecommand

					# Test local filesystem in GENTOO_MIRRORS
					orig_mirrors = settings['GENTOO_MIRRORS']
					orig_fetchcommand = settings['FETCHCOMMAND']
					try:
						settings['GENTOO_MIRRORS'] = ro_distdir
						settings['FETCHCOMMAND'] = settings['RESUMECOMMAND'] = ''
						self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
						for k in settings['AA'].split():
							with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
								self.assertEqual(f.read(), distfiles[k])
					finally:
						settings['GENTOO_MIRRORS'] = orig_mirrors
						settings['FETCHCOMMAND'] = orig_fetchcommand
						settings['RESUMECOMMAND'] = orig_resumecommand

					# Test readonly DISTDIR
					orig_distdir_mode = os.stat(settings['DISTDIR']).st_mode
					try:
						os.chmod(settings['DISTDIR'], 0o555)
						self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
						for k in settings['AA'].split():
							with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
								self.assertEqual(f.read(), distfiles[k])
					finally:
						os.chmod(settings['DISTDIR'], orig_distdir_mode)

					# Test parallel-fetch mode
					settings['PORTAGE_PARALLEL_FETCHONLY'] = '1'
					try:
						self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
						for k in settings['AA'].split():
							with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
								self.assertEqual(f.read(), distfiles[k])
						for k in settings['AA'].split():
							os.unlink(os.path.join(settings['DISTDIR'], k))
						self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
						for k in settings['AA'].split():
							with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
								self.assertEqual(f.read(), distfiles[k])
					finally:
						settings.pop('PORTAGE_PARALLEL_FETCHONLY')

					# Test RESUMECOMMAND
					orig_resume_min_size = settings['PORTAGE_FETCH_RESUME_MIN_SIZE']
					try:
						settings['PORTAGE_FETCH_RESUME_MIN_SIZE'] = '2'
						for k in settings['AA'].split():
							file_path = os.path.join(settings['DISTDIR'], k)
							os.unlink(file_path)
							with open(file_path + _download_suffix, 'wb') as f:
								f.write(distfiles[k][:2])
						self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
						for k in settings['AA'].split():
							with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
								self.assertEqual(f.read(), distfiles[k])
					finally:
						settings['PORTAGE_FETCH_RESUME_MIN_SIZE'] = orig_resume_min_size

					# Test readonly DISTDIR + skiprocheck, with FETCHCOMMAND set to temporarily chmod DISTDIR
					orig_fetchcommand = settings['FETCHCOMMAND']
					orig_distdir_mode = os.stat(settings['DISTDIR']).st_mode
					for k in settings['AA'].split():
						os.unlink(os.path.join(settings['DISTDIR'], k))
					try:
						os.chmod(settings['DISTDIR'], 0o555)
						settings['FETCHCOMMAND'] = '"%s" -c "chmod ug+w \\"${DISTDIR}\\"; %s; status=\\$?; chmod a-w \\"${DISTDIR}\\"; exit \\$status"' % (BASH_BINARY, orig_fetchcommand.replace('"', '\\"'))
						settings.features.add('skiprocheck')
						settings.features.remove('distlocks')
						self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
					finally:
						settings['FETCHCOMMAND'] = orig_fetchcommand
						os.chmod(settings['DISTDIR'], orig_distdir_mode)
						settings.features.remove('skiprocheck')
						settings.features.add('distlocks')
			finally:
				shutil.rmtree(ro_distdir)
				playground.cleanup()
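
The distfiles here are served from an in-memory path-to-bytes mapping by the AsyncHTTPServer test helper, which binds an ephemeral port exposed as server.server_port. A rough stdlib-only analogue of that fixture (the helper's import path is not shown above, so this is only a sketch):

# A rough stdlib analogue of the in-memory HTTP fixture used above;
# AsyncHTTPServer itself is a portage test helper, so this is only a sketch.
import http.server
import threading

def serve_content(content, host="127.0.0.1"):
    class Handler(http.server.BaseHTTPRequestHandler):
        def do_GET(self):
            body = content.get(self.path)
            if body is None:
                self.send_error(404)
                return
            self.send_response(200)
            self.send_header("Content-Length", str(len(body)))
            self.end_headers()
            self.wfile.write(body)

        def log_message(self, *args):
            pass  # keep test output quiet

    server = http.server.HTTPServer((host, 0), Handler)  # port 0 = auto-pick
    threading.Thread(target=server.serve_forever, daemon=True).start()
    return server  # server.server_port holds the chosen port

server = serve_content({'/distfiles/foo.txt': b'foo\n'})
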
Example #52
def chk_updated_info_files(root, infodirs, prev_mtimes):

	if os.path.exists("/usr/bin/install-info"):
		out = portage.output.EOutput()
		regen_infodirs = []
		for z in infodirs:
			if z == '':
				continue
			inforoot = portage.util.normalize_path(root + z)
			if os.path.isdir(inforoot) and \
				not [x for x in os.listdir(inforoot) \
				if x.startswith('.keepinfodir')]:
					infomtime = os.stat(inforoot)[stat.ST_MTIME]
					if inforoot not in prev_mtimes or \
						prev_mtimes[inforoot] != infomtime:
							regen_infodirs.append(inforoot)

		if not regen_infodirs:
			portage.util.writemsg_stdout("\n")
			if portage.util.noiselimit >= 0:
				out.einfo("GNU info directory index is up-to-date.")
		else:
			portage.util.writemsg_stdout("\n")
			if portage.util.noiselimit >= 0:
				out.einfo("Regenerating GNU info directory index...")

			dir_extensions = ("", ".gz", ".bz2")
			icount = 0
			badcount = 0
			errmsg = ""
			for inforoot in regen_infodirs:
				if inforoot == '':
					continue

				if not os.path.isdir(inforoot) or \
					not os.access(inforoot, os.W_OK):
					continue

				file_list = os.listdir(inforoot)
				file_list.sort()
				dir_file = os.path.join(inforoot, "dir")
				moved_old_dir = False
				processed_count = 0
				for x in file_list:
					if x.startswith(".") or \
						os.path.isdir(os.path.join(inforoot, x)):
						continue
					if x.startswith("dir"):
						skip = False
						for ext in dir_extensions:
							if x == "dir" + ext or \
								x == "dir" + ext + ".old":
								skip = True
								break
						if skip:
							continue
					if processed_count == 0:
						for ext in dir_extensions:
							try:
								os.rename(dir_file + ext, dir_file + ext + ".old")
								moved_old_dir = True
							except EnvironmentError as e:
								if e.errno != errno.ENOENT:
									raise
								del e
					processed_count += 1
					try:
						proc = subprocess.Popen(
							['/usr/bin/install-info',
							'--dir-file=%s' % os.path.join(inforoot, "dir"),
							os.path.join(inforoot, x)],
							env=dict(os.environ, LANG="C", LANGUAGE="C"),
							stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
					except OSError:
						myso = None
					else:
						myso = portage._unicode_decode(
							proc.communicate()[0]).rstrip("\n")
						proc.wait()
					existsstr = "already exists, for file `"
					if myso:
						if re.search(existsstr, myso):
							# Already exists... Don't increment the count for this.
							pass
						elif myso[:44] == "install-info: warning: no info dir entry in ":
							# This info file doesn't contain a DIR-header: install-info produces this
							# (harmless) warning (the --quiet switch doesn't seem to work).
							# Don't increment the count for this.
							pass
						else:
							badcount += 1
							errmsg += myso + "\n"
					icount += 1

				if moved_old_dir and not os.path.exists(dir_file):
					# We didn't generate a new dir file, so put the old file
					# back where it was originally found.
					for ext in dir_extensions:
						try:
							os.rename(dir_file + ext + ".old", dir_file + ext)
						except EnvironmentError as e:
							if e.errno != errno.ENOENT:
								raise
							del e

				# Clean dir.old cruft so that it doesn't prevent
				# unmerge of otherwise empty directories.
				for ext in dir_extensions:
					try:
						os.unlink(dir_file + ext + ".old")
					except EnvironmentError as e:
						if e.errno != errno.ENOENT:
							raise
						del e

				# Update mtime so we can potentially avoid regenerating.
				prev_mtimes[inforoot] = os.stat(inforoot)[stat.ST_MTIME]

			if badcount:
				out.eerror("Processed %d info files; %d errors." % \
					(icount, badcount))
				portage.util.writemsg_level(errmsg,
					level=logging.ERROR, noiselevel=-1)
			else:
				if icount > 0 and portage.util.noiselimit >= 0:
					out.einfo("Processed %d info files." % (icount,))
Example #53
def file_archive(archive, curconf, newconf, mrgconf):
	"""Archive existing config to the archive-dir, bumping old versions
	out of the way into .# versions (log-rotate style). Then, if mrgconf
	was specified and there is a .dist version, merge the user's changes
	and the distributed changes and put the result into mrgconf.  Lastly,
	if newconf was specified, archive it as a .dist.new version (which
	gets moved to the .dist version at the end of the processing)."""

	try:
		os.makedirs(os.path.dirname(archive))
	except OSError:
		pass

	# Archive the current config file if it isn't already saved
	if (os.path.lexists(archive) and
		len(diffstatusoutput_mixed(
		"diff -aq '%s' '%s'", curconf, archive)[1]) != 0):
		suf = 1
		while suf < 9 and os.path.lexists(archive + '.' + str(suf)):
			suf += 1

		while suf > 1:
			os.rename(archive + '.' + str(suf-1), archive + '.' + str(suf))
			suf -= 1

		os.rename(archive, archive + '.1')

	try:
		curconf_st = os.lstat(curconf)
	except OSError:
		curconf_st = None

	if curconf_st is not None and \
		(stat.S_ISREG(curconf_st.st_mode) or
		stat.S_ISLNK(curconf_st.st_mode)):
		_archive_copy(curconf_st, curconf, archive)

	mystat = None
	if newconf:
		try:
			mystat = os.lstat(newconf)
		except OSError:
			pass

	if mystat is not None and \
		(stat.S_ISREG(mystat.st_mode) or
		stat.S_ISLNK(mystat.st_mode)):
		# Save off new config file in the archive dir with .dist.new suffix
		newconf_archive = archive + '.dist.new'
		_archive_copy(mystat, newconf, newconf_archive)

		ret = 0
		if mrgconf and os.path.isfile(curconf) and \
			os.path.isfile(newconf) and \
			os.path.isfile(archive + '.dist'):
			# This puts the results of the merge into mrgconf.
			ret = os.system(DIFF3_MERGE % (curconf, archive + '.dist', newconf, mrgconf))
			os.chmod(mrgconf, mystat.st_mode)
			os.chown(mrgconf, mystat.st_uid, mystat.st_gid)

		return ret
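
The "log-rotate style" bumping described in the docstring shifts archive.8 to archive.9, archive.7 to archive.8, and so on, freeing archive.1 for the newest copy and capping the history at nine backups. That rotation in isolation, as a standalone sketch:

# A standalone sketch of the .1 .. .9 rotation performed above before the
# current config is archived; the cap of nine backups matches the snippet.
import os

def rotate_numbered_backups(archive, limit=9):
    suf = 1
    while suf < limit and os.path.lexists("%s.%d" % (archive, suf)):
        suf += 1
    # Shift .8 -> .9, .7 -> .8, ... so that .1 is free for the newest copy.
    while suf > 1:
        os.rename("%s.%d" % (archive, suf - 1), "%s.%d" % (archive, suf))
        suf -= 1
    if os.path.lexists(archive):
        os.rename(archive, archive + ".1")
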
Example #54
def file_archive_post_process(archive):
	"""Rename the archive file with the .dist.new suffix to a .dist suffix"""
	if os.path.lexists(archive + '.dist.new'):
		os.rename(archive + '.dist.new', archive + '.dist')
Example #55
    def testSyncLocal(self):
        debug = False

        skip_reason = self._must_skip()
        if skip_reason:
            self.portage_skip = skip_reason
            self.assertFalse(True, skip_reason)
            return

        repos_conf = textwrap.dedent("""
			[DEFAULT]
			%(default_keys)s
			[test_repo]
			location = %(EPREFIX)s/var/repositories/test_repo
			sync-type = %(sync-type)s
			sync-depth = %(sync-depth)s
			sync-uri = file://%(EPREFIX)s/var/repositories/test_repo_sync
			sync-rcu = %(sync-rcu)s
			sync-rcu-store-dir = %(EPREFIX)s/var/repositories/test_repo_rcu_storedir
			auto-sync = %(auto-sync)s
			%(repo_extra_keys)s
		""")

        profile = {
            "eapi": ("5", ),
            "package.use.stable.mask": ("dev-libs/A flag", )
        }

        ebuilds = {"dev-libs/A-0": {}}

        user_config = {'make.conf': ('FEATURES="metadata-transfer"', )}

        playground = ResolverPlayground(ebuilds=ebuilds,
                                        profile=profile,
                                        user_config=user_config,
                                        debug=debug)
        settings = playground.settings
        eprefix = settings["EPREFIX"]
        eroot = settings["EROOT"]
        homedir = os.path.join(eroot, "home")
        distdir = os.path.join(eprefix, "distdir")
        repo = settings.repositories["test_repo"]
        metadata_dir = os.path.join(repo.location, "metadata")

        cmds = {}
        for cmd in ("emerge", "emaint"):
            for bindir in (self.bindir, self.sbindir):
                path = os.path.join(bindir, cmd)
                if os.path.exists(path):
                    cmds[cmd] = (portage._python_interpreter, "-b", "-Wd",
                                 path)
                    break
            else:
                raise AssertionError('%s binary not found in %s or %s' %
                                     (cmd, self.bindir, self.sbindir))

        git_binary = find_binary("git")
        git_cmd = (git_binary, )

        committer_name = "Gentoo Dev"
        committer_email = "*****@*****.**"

        def repos_set_conf(sync_type,
                           dflt_keys=None,
                           xtra_keys=None,
                           auto_sync="yes",
                           sync_rcu=False,
                           sync_depth=None):
            env["PORTAGE_REPOSITORIES"] = repos_conf % {\
             "EPREFIX": eprefix, "sync-type": sync_type,
             "sync-depth": 0 if sync_depth is None else sync_depth,
             "sync-rcu": "yes" if sync_rcu else "no",
             "auto-sync": auto_sync,
             "default_keys": "" if dflt_keys is None else dflt_keys,
             "repo_extra_keys": "" if xtra_keys is None else xtra_keys}

        def alter_ebuild():
            with open(
                    os.path.join(repo.location + "_sync", "dev-libs", "A",
                                 "A-0.ebuild"), "a") as f:
                f.write("\n")
            bump_timestamp()

        def bump_timestamp():
            bump_timestamp.timestamp += datetime.timedelta(seconds=1)
            with open(
                    os.path.join(repo.location + '_sync', 'metadata',
                                 'timestamp.chk'), 'w') as f:
                f.write(
                    bump_timestamp.timestamp.strftime(
                        '%s\n' % TIMESTAMP_FORMAT, ))

        bump_timestamp.timestamp = datetime.datetime.utcnow()

        bump_timestamp_cmds = ((homedir, bump_timestamp), )

        sync_cmds = (
            (homedir, cmds["emerge"] + ("--sync", )),
            (homedir, lambda: self.assertTrue(
                os.path.exists(os.path.join(repo.location, "dev-libs", "A")),
                "dev-libs/A expected, but missing")),
            (homedir, cmds["emaint"] + ("sync", "-A")),
        )

        sync_cmds_auto_sync = (
            (homedir, lambda: repos_set_conf("rsync", auto_sync="no")),
            (homedir, cmds["emerge"] + ("--sync", )),
            (homedir, lambda: self.assertFalse(
                os.path.exists(os.path.join(repo.location, "dev-libs", "A")),
                "dev-libs/A found, expected missing")),
            (homedir, lambda: repos_set_conf("rsync", auto_sync="yes")),
        )

        rename_repo = (
            (homedir,
             lambda: os.rename(repo.location, repo.location + "_sync")), )

        rsync_opts_repos = (
            (homedir, alter_ebuild),
            (homedir, lambda: repos_set_conf(
                "rsync", None,
                "sync-rsync-extra-opts = --backup --backup-dir=%s" %
                _shell_quote(repo.location + "_back"))),
            (homedir, cmds['emerge'] + ("--sync", )),
            (homedir,
             lambda: self.assertTrue(os.path.exists(repo.location + "_back"))),
            (homedir, lambda: shutil.rmtree(repo.location + "_back")),
            (homedir, lambda: repos_set_conf("rsync")),
        )

        rsync_opts_repos_default = (
            (homedir, alter_ebuild),
            (homedir, lambda: repos_set_conf(
                "rsync", "sync-rsync-extra-opts = --backup --backup-dir=%s" %
                _shell_quote(repo.location + "_back"))),
            (homedir, cmds['emerge'] + ("--sync", )),
            (homedir,
             lambda: self.assertTrue(os.path.exists(repo.location + "_back"))),
            (homedir, lambda: shutil.rmtree(repo.location + "_back")),
            (homedir, lambda: repos_set_conf("rsync")),
        )

        rsync_opts_repos_default_ovr = (
            (homedir, alter_ebuild),
            (homedir, lambda: repos_set_conf(
                "rsync", "sync-rsync-extra-opts = --backup --backup-dir=%s" %
                _shell_quote(repo.location + "_back_nowhere"),
                "sync-rsync-extra-opts = --backup --backup-dir=%s" %
                _shell_quote(repo.location + "_back"))),
            (homedir, cmds['emerge'] + ("--sync", )),
            (homedir,
             lambda: self.assertTrue(os.path.exists(repo.location + "_back"))),
            (homedir, lambda: shutil.rmtree(repo.location + "_back")),
            (homedir, lambda: repos_set_conf("rsync")),
        )

        rsync_opts_repos_default_cancel = (
            (homedir, alter_ebuild),
            (homedir, lambda: repos_set_conf(
                "rsync", "sync-rsync-extra-opts = --backup --backup-dir=%s" %
                _shell_quote(repo.location + "_back_nowhere"
                             ), "sync-rsync-extra-opts = ")),
            (homedir, cmds['emerge'] + ("--sync", )),
            (homedir,
             lambda: self.assertFalse(os.path.exists(repo.location + "_back"))
             ),
            (homedir, lambda: repos_set_conf("rsync")),
        )

        delete_repo_location = (
            (homedir, lambda: shutil.rmtree(repo.user_location)),
            (homedir, lambda: os.mkdir(repo.user_location)),
        )

        revert_rcu_layout = (
            (homedir,
             lambda: os.rename(repo.user_location, repo.user_location + '.bak')
             ),
            (homedir,
             lambda: os.rename(os.path.realpath(repo.user_location + '.bak'),
                               repo.user_location)),
            (homedir, lambda: os.unlink(repo.user_location + '.bak')),
            (homedir,
             lambda: shutil.rmtree(repo.user_location + '_rcu_storedir')),
        )

        upstream_git_commit = (
            (
                repo.location + "_sync",
                git_cmd +
                ('commit', '--allow-empty', '-m', 'test empty commit'),
            ),
            (
                repo.location + "_sync",
                git_cmd +
                ('commit', '--allow-empty', '-m', 'test empty commit 2'),
            ),
        )

        delete_sync_repo = ((homedir,
                             lambda: shutil.rmtree(repo.location + "_sync")), )

        git_repo_create = (
            (repo.location, git_cmd + (
                "config",
                "--global",
                "user.name",
                committer_name,
            )),
            (repo.location, git_cmd + (
                "config",
                "--global",
                "user.email",
                committer_email,
            )),
            (repo.location, git_cmd + ("init-db", )),
            (repo.location, git_cmd + ("add", ".")),
            (repo.location,
             git_cmd + ("commit", "-a", "-m", "add whole repo")),
        )

        sync_type_git = ((homedir, lambda: repos_set_conf("git")), )

        sync_type_git_shallow = ((
            homedir, lambda: repos_set_conf("git", sync_depth=1)), )

        sync_rsync_rcu = ((homedir,
                           lambda: repos_set_conf("rsync", sync_rcu=True)), )

        pythonpath = os.environ.get("PYTHONPATH")
        if pythonpath is not None and not pythonpath.strip():
            pythonpath = None
        if pythonpath is not None and \
                pythonpath.split(":")[0] == PORTAGE_PYM_PATH:
            pass
        else:
            if pythonpath is None:
                pythonpath = ""
            else:
                pythonpath = ":" + pythonpath
            pythonpath = PORTAGE_PYM_PATH + pythonpath

        env = {
            "PORTAGE_OVERRIDE_EPREFIX": eprefix,
            "DISTDIR": distdir,
            "GENTOO_COMMITTER_NAME": committer_name,
            "GENTOO_COMMITTER_EMAIL": committer_email,
            "HOME": homedir,
            "PATH": os.environ["PATH"],
            "PORTAGE_GRPNAME": os.environ["PORTAGE_GRPNAME"],
            "PORTAGE_USERNAME": os.environ["PORTAGE_USERNAME"],
            "PYTHONDONTWRITEBYTECODE": os.environ.get("PYTHONDONTWRITEBYTECODE", ""),
            "PYTHONPATH": pythonpath,
        }
        repos_set_conf("rsync")

        if os.environ.get("SANDBOX_ON") == "1":
            # avoid problems from nested sandbox instances
            env["FEATURES"] = "-sandbox -usersandbox"

        dirs = [homedir, metadata_dir]
        try:
            for d in dirs:
                ensure_dirs(d)

            timestamp_path = os.path.join(metadata_dir, 'timestamp.chk')
            with open(timestamp_path, 'w') as f:
                f.write(
                    bump_timestamp.timestamp.strftime(
                        '%s\n' % TIMESTAMP_FORMAT, ))

            if debug:
                # The subprocess inherits both stdout and stderr, for
                # debugging purposes.
                stdout = None
            else:
                # The subprocess inherits stderr so that any warnings
                # triggered by python -Wd will be visible.
                stdout = subprocess.PIPE

            for cwd, cmd in rename_repo + sync_cmds_auto_sync + sync_cmds + \
                    rsync_opts_repos + rsync_opts_repos_default + \
                    rsync_opts_repos_default_ovr + rsync_opts_repos_default_cancel + \
                    bump_timestamp_cmds + sync_rsync_rcu + sync_cmds + revert_rcu_layout + \
                    delete_repo_location + sync_cmds + sync_cmds + \
                    bump_timestamp_cmds + sync_cmds + revert_rcu_layout + \
                    delete_sync_repo + git_repo_create + sync_type_git + \
                    rename_repo + sync_cmds + upstream_git_commit + sync_cmds + \
                    sync_type_git_shallow + upstream_git_commit + sync_cmds:

                if hasattr(cmd, '__call__'):
                    cmd()
                    continue

                abs_cwd = os.path.join(repo.location, cwd)
                proc = subprocess.Popen(cmd,
                                        cwd=abs_cwd,
                                        env=env,
                                        stdout=stdout)

                if debug:
                    proc.wait()
                else:
                    output = proc.stdout.readlines()
                    proc.wait()
                    proc.stdout.close()
                    if proc.returncode != os.EX_OK:
                        for line in output:
                            sys.stderr.write(_unicode_decode(line))

                self.assertEqual(os.EX_OK, proc.returncode,
                                 "%s failed in %s" % (
                                     cmd,
                                     cwd,
                                 ))

        finally:
            playground.cleanup()
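
The revert_rcu_layout steps above (rename the location aside, move its realpath target back into place, unlink the leftover) suggest that sync-rcu keeps the live repository behind a symlink into sync-rcu-store-dir, so readers never observe a half-synced tree. A generic sketch of the atomic symlink swap such a layout relies on (names illustrative, POSIX rename semantics assumed):

# A generic sketch of the atomic symlink swap that an RCU-style sync layout
# relies on: build the new tree, then retarget the symlink in one rename.
import os

def swap_symlink(link_path, new_target):
    tmp = link_path + ".new"
    os.symlink(new_target, tmp)  # create the replacement link first
    os.rename(tmp, link_path)    # rename() atomically replaces the old link
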
Example #56
	def testEbuildFetch(self):

		distfiles = {
			'bar': b'bar\n',
			'foo': b'foo\n',
		}

		ebuilds = {
			'dev-libs/A-1': {
				'EAPI': '7',
				'RESTRICT': 'primaryuri',
				'SRC_URI': '''{scheme}://{host}:{port}/distfiles/bar.txt -> bar
					{scheme}://{host}:{port}/distfiles/foo.txt -> foo''',
			},
		}

		loop = SchedulerInterface(global_event_loop())
		scheme = 'http'
		host = '127.0.0.1'
		content = {}
		for k, v in distfiles.items():
			content['/distfiles/{}.txt'.format(k)] = v

		with AsyncHTTPServer(host, content, loop) as server:
			ebuilds_subst = {}
			for cpv, metadata in ebuilds.items():
				metadata = metadata.copy()
				metadata['SRC_URI'] = metadata['SRC_URI'].format(
					scheme=scheme, host=host, port=server.server_port)
				ebuilds_subst[cpv] = metadata

			playground = ResolverPlayground(ebuilds=ebuilds_subst, distfiles=distfiles)
			ro_distdir = tempfile.mkdtemp()
			try:
				fetchcommand = portage.util.shlex_split(playground.settings['FETCHCOMMAND'])
				fetch_bin = portage.process.find_binary(fetchcommand[0])
				if fetch_bin is None:
					self.skipTest('FETCHCOMMAND not found: {}'.format(playground.settings['FETCHCOMMAND']))
				resumecommand = portage.util.shlex_split(playground.settings['RESUMECOMMAND'])
				resume_bin = portage.process.find_binary(resumecommand[0])
				if resume_bin is None:
					self.skipTest('RESUMECOMMAND not found: {}'.format(playground.settings['RESUMECOMMAND']))
				root_config = playground.trees[playground.eroot]['root_config']
				portdb = root_config.trees["porttree"].dbapi
				settings = config(clone=playground.settings)

				# Tests only work with one ebuild at a time, so the config
				# pool only needs a single config instance.
				class config_pool:
					@staticmethod
					def allocate():
						return settings
					@staticmethod
					def deallocate(settings):
						pass

				def async_fetch(pkg, ebuild_path):
					fetcher = EbuildFetcher(config_pool=config_pool, ebuild_path=ebuild_path,
						fetchonly=False, fetchall=True, pkg=pkg, scheduler=loop)
					fetcher.start()
					return fetcher.async_wait()

				for cpv in ebuilds:
					metadata = dict(zip(Package.metadata_keys,
						portdb.aux_get(cpv, Package.metadata_keys)))

					pkg = Package(built=False, cpv=cpv, installed=False,
						metadata=metadata, root_config=root_config,
						type_name='ebuild')

					settings.setcpv(pkg)
					ebuild_path = portdb.findname(pkg.cpv)
					portage.doebuild_environment(ebuild_path, 'fetch', settings=settings, db=portdb)

					# Test good files in DISTDIR
					for k in settings['AA'].split():
						os.stat(os.path.join(settings['DISTDIR'], k))
					self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
					for k in settings['AA'].split():
						with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
							self.assertEqual(f.read(), distfiles[k])

					# Test digestgen with fetch
					os.unlink(os.path.join(os.path.dirname(ebuild_path), 'Manifest'))
					for k in settings['AA'].split():
						os.unlink(os.path.join(settings['DISTDIR'], k))
					with ForkExecutor(loop=loop) as executor:
						self.assertTrue(bool(loop.run_until_complete(
							loop.run_in_executor(executor, functools.partial(
								digestgen, mysettings=settings, myportdb=portdb)))))
					for k in settings['AA'].split():
						with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
							self.assertEqual(f.read(), distfiles[k])

					# Test missing files in DISTDIR
					for k in settings['AA'].split():
						os.unlink(os.path.join(settings['DISTDIR'], k))
					self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
					for k in settings['AA'].split():
						with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
							self.assertEqual(f.read(), distfiles[k])

					# Test empty files in DISTDIR
					for k in settings['AA'].split():
						file_path = os.path.join(settings['DISTDIR'], k)
						with open(file_path, 'wb') as f:
							pass
						self.assertEqual(os.stat(file_path).st_size, 0)
					self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
					for k in settings['AA'].split():
						with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
							self.assertEqual(f.read(), distfiles[k])

					# Test non-empty files containing null bytes in DISTDIR
					for k in settings['AA'].split():
						file_path = os.path.join(settings['DISTDIR'], k)
						with open(file_path, 'wb') as f:
							f.write(len(distfiles[k]) * b'\0')
						self.assertEqual(os.stat(file_path).st_size, len(distfiles[k]))
					self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
					for k in settings['AA'].split():
						with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
							self.assertEqual(f.read(), distfiles[k])

					# Test PORTAGE_RO_DISTDIRS
					settings['PORTAGE_RO_DISTDIRS'] = '"{}"'.format(ro_distdir)
					orig_fetchcommand = settings['FETCHCOMMAND']
					orig_resumecommand = settings['RESUMECOMMAND']
					try:
						settings['FETCHCOMMAND'] = settings['RESUMECOMMAND'] = ''
						for k in settings['AA'].split():
							file_path = os.path.join(settings['DISTDIR'], k)
							os.rename(file_path, os.path.join(ro_distdir, k))
						self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
						for k in settings['AA'].split():
							file_path = os.path.join(settings['DISTDIR'], k)
							self.assertTrue(os.path.islink(file_path))
							with open(file_path, 'rb') as f:
								self.assertEqual(f.read(), distfiles[k])
							os.unlink(file_path)
					finally:
						settings.pop('PORTAGE_RO_DISTDIRS')
						settings['FETCHCOMMAND'] = orig_fetchcommand
						settings['RESUMECOMMAND'] = orig_resumecommand

					# Test local filesystem in GENTOO_MIRRORS
					orig_mirrors = settings['GENTOO_MIRRORS']
					orig_fetchcommand = settings['FETCHCOMMAND']
					try:
						settings['GENTOO_MIRRORS'] = ro_distdir
						settings['FETCHCOMMAND'] = settings['RESUMECOMMAND'] = ''
						self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
						for k in settings['AA'].split():
							with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
								self.assertEqual(f.read(), distfiles[k])
					finally:
						settings['GENTOO_MIRRORS'] = orig_mirrors
						settings['FETCHCOMMAND'] = orig_fetchcommand
						settings['RESUMECOMMAND'] = orig_resumecommand

					# Test readonly DISTDIR
					orig_distdir_mode = os.stat(settings['DISTDIR']).st_mode
					try:
						os.chmod(settings['DISTDIR'], 0o555)
						self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
						for k in settings['AA'].split():
							with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
								self.assertEqual(f.read(), distfiles[k])
					finally:
						os.chmod(settings['DISTDIR'], orig_distdir_mode)

					# Test parallel-fetch mode
					settings['PORTAGE_PARALLEL_FETCHONLY'] = '1'
					try:
						self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
						for k in settings['AA'].split():
							with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
								self.assertEqual(f.read(), distfiles[k])
						for k in settings['AA'].split():
							os.unlink(os.path.join(settings['DISTDIR'], k))
						self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
						for k in settings['AA'].split():
							with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
								self.assertEqual(f.read(), distfiles[k])
					finally:
						settings.pop('PORTAGE_PARALLEL_FETCHONLY')

					# Test RESUMECOMMAND
					orig_resume_min_size = settings['PORTAGE_FETCH_RESUME_MIN_SIZE']
					try:
						settings['PORTAGE_FETCH_RESUME_MIN_SIZE'] = '2'
						for k in settings['AA'].split():
							file_path = os.path.join(settings['DISTDIR'], k)
							os.unlink(file_path)
							with open(file_path + _download_suffix, 'wb') as f:
								f.write(distfiles[k][:2])
						self.assertEqual(loop.run_until_complete(async_fetch(pkg, ebuild_path)), 0)
						for k in settings['AA'].split():
							with open(os.path.join(settings['DISTDIR'], k), 'rb') as f:
								self.assertEqual(f.read(), distfiles[k])
					finally:
						settings['PORTAGE_FETCH_RESUME_MIN_SIZE'] = orig_resume_min_size
			finally:
				shutil.rmtree(ro_distdir)
				playground.cleanup()
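
The RESUMECOMMAND test seeds each distfile with a 2-byte partial download (the file name plus the .__download__ suffix) and lowers PORTAGE_FETCH_RESUME_MIN_SIZE so that fetch resumes the partial file instead of restarting. A stdlib sketch of resume-from-partial using an HTTP Range request (URL and path are illustrative, and the server is assumed to honor Range with a 206 response):

# A stdlib sketch of resume-from-partial, the behavior RESUMECOMMAND is
# expected to provide: request only the bytes we do not have yet and append.
import os
import urllib.request

def resume_download(url, partial_path):
    offset = os.path.getsize(partial_path)
    req = urllib.request.Request(url, headers={"Range": "bytes=%d-" % offset})
    with urllib.request.urlopen(req) as resp, open(partial_path, "ab") as f:
        f.write(resp.read())
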