Example #1
    def _set_returncode(self, wait_retval):
        SpawnProcess._set_returncode(self, wait_retval)
        if not self.pretend and self.returncode == os.EX_OK:
            # If possible, update the mtime to match the remote package if
            # the fetcher didn't already do it automatically.
            bintree = self.pkg.root_config.trees["bintree"]
            if bintree._remote_has_index:
                remote_mtime = bintree._remotepkgs[self.pkg.cpv].get("MTIME")
                if remote_mtime is not None:
                    try:
                        remote_mtime = int(remote_mtime)
                    except ValueError:
                        pass
                    else:
                        try:
                            local_mtime = os.stat(self.pkg_path)[stat.ST_MTIME]
                        except OSError:
                            pass
                        else:
                            if remote_mtime != local_mtime:
                                try:
                                    os.utime(self.pkg_path,
                                             (remote_mtime, remote_mtime))
                                except OSError:
                                    pass

        if self.locked:
            self.unlock()
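Flattened out of the SpawnProcess machinery, the guarded pattern above reduces to a small helper. A sketch, where the function name and best-effort contract are illustrative rather than Portage API:

import os
import stat

def sync_mtime_from_remote(pkg_path, remote_mtime_value):
    """Best effort: make pkg_path's mtime match the remote index value.

    Every step is guarded, since the index entry may be missing or
    malformed and the local file may vanish or be unwritable.
    """
    if remote_mtime_value is None:
        return
    try:
        remote_mtime = int(remote_mtime_value)
    except ValueError:
        return  # malformed index entry; leave the local mtime alone
    try:
        local_mtime = os.stat(pkg_path)[stat.ST_MTIME]
    except OSError:
        return  # file is gone or unreadable
    if remote_mtime != local_mtime:
        try:
            os.utime(pkg_path, (remote_mtime, remote_mtime))
        except OSError:
            pass  # e.g. EPERM when the file is owned by another user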
Example #2
	def _apply_max_mtime(self, existing_st, entries):
		"""
		Set the Manifest mtime to the max mtime of all relevant files
		(the existing Manifest mtime is included in order to account for
		eclass modifications that change DIST entries). This results in a
		stable/predictable mtime, which is useful when converting thin
		manifests to thick manifests for distribution via rsync. For
		portability, the mtime is set with 1 second resolution.

		@param existing_st: stat result for existing Manifest
		@type existing_st: posix.stat_result
		@param entries: list of current Manifest2Entry instances
		@type entries: list
		"""
		# Use stat_result[stat.ST_MTIME] for 1 second resolution, since
		# it always rounds down. Note that stat_result.st_mtime will round
		# up from 0.999999999 to 1.0 when precision is lost during conversion
		# from nanosecond resolution to float.
		max_mtime = None if existing_st is None else existing_st[stat.ST_MTIME]
		for entry in entries:
			if entry.type == 'DIST':
				continue
			abs_path = (os.path.join(self.pkgdir, 'files', entry.name) if
				entry.type == 'AUX' else os.path.join(self.pkgdir, entry.name))
			mtime = os.stat(abs_path)[stat.ST_MTIME]
			if max_mtime is None or mtime > max_mtime:
				max_mtime = mtime

		if max_mtime is not None:
			os.utime(self.getFullname(), (max_mtime, max_mtime))
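The rounding caveat described in the comment above can be demonstrated directly. A small sketch, assuming the temp directory sits on a filesystem that stores nanosecond timestamps (ext4, xfs, tmpfs):

import os
import stat
import tempfile

# An mtime of 1600000000.999999999 seconds, expressed in nanoseconds.
ns = 1_600_000_000_999_999_999
with tempfile.NamedTemporaryFile() as f:
    os.utime(f.name, ns=(ns, ns))
    st = os.stat(f.name)
    print(st[stat.ST_MTIME])  # 1600000000 -- whole seconds, rounded down
    print(st.st_mtime)        # 1600000001.0 -- float conversion rounds up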
Example #3
	def _set_returncode(self, wait_retval):
		SpawnProcess._set_returncode(self, wait_retval)
		if self.returncode == os.EX_OK:
			# If possible, update the mtime to match the remote package if
			# the fetcher didn't already do it automatically.
			bintree = self.pkg.root_config.trees["bintree"]
			if bintree._remote_has_index:
				remote_mtime = bintree._remotepkgs[self.pkg.cpv].get("MTIME")
				if remote_mtime is not None:
					try:
						remote_mtime = int(remote_mtime)
					except ValueError:
						pass
					else:
						try:
							local_mtime = os.stat(self.pkg_path)[stat.ST_MTIME]
						except OSError:
							pass
						else:
							if remote_mtime != local_mtime:
								try:
									os.utime(self.pkg_path,
										(remote_mtime, remote_mtime))
								except OSError:
									pass

		if self.locked:
			self.unlock()
Example #4
	def sync_timestamp(self):
		# If possible, update the mtime to match the remote package if
		# the fetcher didn't already do it automatically.
		bintree = self.pkg.root_config.trees["bintree"]
		if bintree._remote_has_index:
			remote_mtime = bintree._remotepkgs[
				bintree.dbapi._instance_key(
				self.pkg.cpv)].get("_mtime_")
			if remote_mtime is not None:
				try:
					remote_mtime = int(remote_mtime)
				except ValueError:
					pass
				else:
					try:
						local_mtime = os.stat(self.pkg_path)[stat.ST_MTIME]
					except OSError:
						pass
					else:
						if remote_mtime != local_mtime:
							try:
								os.utime(self.pkg_path,
									(remote_mtime, remote_mtime))
							except OSError:
								pass
Example #6
    def _apply_max_mtime(self, preserved_stats, entries):
        """
		Set the Manifest mtime to the max mtime of all relevant files
		and directories. Directory mtimes account for file renames and
		removals. The existing Manifest mtime accounts for eclass
		modifications that change DIST entries. This results in a
		stable/predictable mtime, which is useful when converting thin
		manifests to thick manifests for distribution via rsync. For
		portability, the mtime is set with 1 second resolution.

		@param preserved_stats: maps paths to preserved stat results
			that should be used instead of os.stat() calls
		@type preserved_stats: dict
		@param entries: list of current Manifest2Entry instances
		@type entries: list
		"""
        # Use stat_result[stat.ST_MTIME] for 1 second resolution, since
        # it always rounds down. Note that stat_result.st_mtime will round
        # up from 0.999999999 to 1.0 when precision is lost during conversion
        # from nanosecond resolution to float.
        max_mtime = None
        _update_max = (lambda st: max_mtime if max_mtime is not None and
                       max_mtime > st[stat.ST_MTIME] else st[stat.ST_MTIME])
        _stat = (lambda path: preserved_stats[path]
                 if path in preserved_stats else os.stat(path))

        for stat_result in preserved_stats.values():
            max_mtime = _update_max(stat_result)

        for entry in entries:
            if entry.type == 'DIST':
                continue
            abs_path = (os.path.join(self.pkgdir, 'files', entry.name)
                        if entry.type == 'AUX' else os.path.join(
                            self.pkgdir, entry.name))
            max_mtime = _update_max(_stat(abs_path))

        if not self.thin:
            # Account for changes to all relevant nested directories.
            # This is not necessary for thin manifests because
            # self.pkgdir is already included via preserved_stats.
            for parent_dir, dirs, files in os.walk(self.pkgdir.rstrip(os.sep)):
                try:
                    parent_dir = _unicode_decode(parent_dir,
                                                 encoding=_encodings['fs'],
                                                 errors='strict')
                except UnicodeDecodeError:
                    # If an absolute path cannot be decoded, then it is
                    # always excluded from the manifest (repoman will
                    # report such problems).
                    pass
                else:
                    max_mtime = _update_max(_stat(parent_dir))

        if max_mtime is not None:
            for path in preserved_stats:
                os.utime(path, (max_mtime, max_mtime))
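Stripped of the Manifest-specific parts (preserved stats, AUX paths, encoding guards), the walk above amounts to taking a max over stat results. A sketch of that core, with a hypothetical helper name:

import os
import stat

def tree_max_mtime(top):
    """Max mtime, in whole seconds, over a directory tree.

    Directories are included so that renames and removals, which only
    touch the parent directory's mtime, are still accounted for.
    """
    max_mtime = os.stat(top)[stat.ST_MTIME]
    for parent, dirs, files in os.walk(top):
        for name in dirs + files:
            # A broken symlink would raise OSError here; real code
            # would need a guard, like the UnicodeDecodeError one above.
            mtime = os.stat(os.path.join(parent, name))[stat.ST_MTIME]
            max_mtime = max(max_mtime, mtime)
    return max_mtime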
Example #7
	def _apply_max_mtime(self, preserved_stats, entries):
		"""
		Set the Manifest mtime to the max mtime of all relevant files
		and directories. Directory mtimes account for file renames and
		removals. The existing Manifest mtime accounts for eclass
		modifications that change DIST entries. This results in a
		stable/predictable mtime, which is useful when converting thin
		manifests to thick manifests for distribution via rsync. For
		portability, the mtime is set with 1 second resolution.

		@param preserved_stats: maps paths to preserved stat results
			that should be used instead of os.stat() calls
		@type preserved_stats: dict
		@param entries: list of current Manifest2Entry instances
		@type entries: list
		"""
		# Use stat_result[stat.ST_MTIME] for 1 second resolution, since
		# it always rounds down. Note that stat_result.st_mtime will round
		# up from 0.999999999 to 1.0 when precision is lost during conversion
		# from nanosecond resolution to float.
		max_mtime = None
		_update_max = (lambda st: max_mtime if max_mtime is not None
			and max_mtime > st[stat.ST_MTIME] else st[stat.ST_MTIME])
		_stat = (lambda path: preserved_stats[path] if path in preserved_stats
			else os.stat(path))

		for stat_result in preserved_stats.values():
			max_mtime = _update_max(stat_result)

		for entry in entries:
			if entry.type == 'DIST':
				continue
			abs_path = (os.path.join(self.pkgdir, 'files', entry.name) if
				entry.type == 'AUX' else os.path.join(self.pkgdir, entry.name))
			max_mtime = _update_max(_stat(abs_path))

		if not self.thin:
			# Account for changes to all relevant nested directories.
			# This is not necessary for thin manifests because
			# self.pkgdir is already included via preserved_stats.
			for parent_dir, dirs, files in os.walk(self.pkgdir.rstrip(os.sep)):
				try:
					parent_dir = _unicode_decode(parent_dir,
						encoding=_encodings['fs'], errors='strict')
				except UnicodeDecodeError:
					# If an absolute path cannot be decoded, then it is
					# always excluded from the manifest (repoman will
					# report such problems).
					pass
				else:
					max_mtime = _update_max(_stat(parent_dir))

		if max_mtime is not None:
			for path in preserved_stats:
				os.utime(path, (max_mtime, max_mtime))
Example #8
	def _ensure_access(self, path, mtime=-1):
		"""returns true or false if it's able to ensure that path is properly chmod'd and chowned.
		if mtime is specified, attempts to ensure that's correct also"""
		try:
			apply_permissions(path, gid=self._gid, mode=self._perms)
			if mtime != -1:
				mtime = int(mtime)
				os.utime(path, (mtime, mtime))
		except (PortageException, EnvironmentError):
			return False
		return True
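apply_permissions and PortageException come from Portage's utilities; the same return-False-on-failure contract can be sketched with only the standard library (Unix-only, since it uses os.chown):

import os

def ensure_access(path, gid, mode, mtime=-1):
    """Return True if path could be chowned/chmod'd (and optionally
    given an explicit mtime), False on any OS-level failure."""
    try:
        os.chown(path, -1, gid)  # uid -1 leaves the owner unchanged
        os.chmod(path, mode)
        if mtime != -1:
            mtime = int(mtime)
            os.utime(path, (mtime, mtime))
    except OSError:
        return False
    return True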
Example #9
    def _fs_mirror_copier_exit(self, copier):

        self._assert_current(copier)
        if self._was_cancelled():
            self.wait()
            return

        current_mirror = self._current_mirror
        if copier.returncode != os.EX_OK:
            msg = "%s %s copy failed unexpectedly: %s" % (
                self.distfile,
                current_mirror.name,
                copier.future.exception(),
            )
            self.scheduler.output(msg + "\n",
                                  background=True,
                                  log_path=self._log_path)
            logging.error(msg)
        else:

            logging.debug("copy '%s' from %s to distfiles" %
                          (self.distfile, current_mirror.name))

            # Apply the timestamp from the source file, but
            # just rely on umask for permissions.
            try:
                os.utime(
                    copier.dest_path,
                    ns=(self._current_stat.st_mtime_ns,
                        self._current_stat.st_mtime_ns),
                )
            except OSError as e:
                msg = "%s %s utime failed unexpectedly: %s" % (
                    self.distfile,
                    current_mirror.name,
                    e,
                )
                self.scheduler.output(msg + "\n",
                                      background=True,
                                      log_path=self._log_path)
                logging.error(msg)

            self._success()
            self.returncode = os.EX_OK
            self.wait()
            return

        self._try_next_mirror()
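The ns= keyword matters here: going through the float st_mtime can silently shift a timestamp by up to a second, as the Manifest examples above note. A minimal copy helper along the same lines (permissions deliberately left to umask, as in the mirror code):

import os
import shutil

def copy_with_source_mtime(src, dst):
    shutil.copyfile(src, dst)  # contents only; mode comes from umask
    st = os.stat(src)
    # os.utime(..., ns=...) (Python 3.3+) preserves the timestamp to
    # full nanosecond resolution, with no float round-trip.
    os.utime(dst, ns=(st.st_atime_ns, st.st_mtime_ns))

shutil.copystat would also copy permission bits, which is exactly what this code path avoids.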
Example #10
	def _apply_max_mtime(self, preserved_stats, entries):
		"""
		Set the Manifest mtime to the max mtime of all relevant files
		and directories. Directory mtimes account for file renames and
		removals. The existing Manifest mtime accounts for eclass
		modifications that change DIST entries. This results in a
		stable/predictable mtime, which is useful when converting thin
		manifests to thick manifests for distribution via rsync. For
		portability, the mtime is set with 1 second resolution.

		@param preserved_stats: maps paths to preserved stat results
			that should be used instead of os.stat() calls
		@type preserved_stats: dict
		@param entries: list of current Manifest2Entry instances
		@type entries: list
		"""
		# Use stat_result[stat.ST_MTIME] for 1 second resolution, since
		# it always rounds down. Note that stat_result.st_mtime will round
		# up from 0.999999999 to 1.0 when precision is lost during conversion
		# from nanosecond resolution to float.
		max_mtime = None
		_update_max = (lambda st: max_mtime if max_mtime is not None
			and max_mtime > st[stat.ST_MTIME] else st[stat.ST_MTIME])
		_stat = (lambda path: preserved_stats[path] if path in preserved_stats
			else os.stat(path))

		for stat_result in preserved_stats.values():
			max_mtime = _update_max(stat_result)

		dirs = set()
		for entry in entries:
			if entry.type == 'DIST':
				continue
			abs_path = (os.path.join(self.pkgdir, 'files', entry.name) if
				entry.type == 'AUX' else os.path.join(self.pkgdir, entry.name))
			max_mtime = _update_max(_stat(abs_path))

			parent_dir = os.path.dirname(abs_path)
			if parent_dir not in dirs:
				dirs.add(parent_dir)
				max_mtime = _update_max(_stat(parent_dir))

		if max_mtime is not None:
			for path in preserved_stats:
				os.utime(path, (max_mtime, max_mtime))
Example #11
	def _fs_mirror_copier_exit(self, copier):

		self._assert_current(copier)
		if self._was_cancelled():
			self.wait()
			return

		current_mirror = self._current_mirror
		if copier.returncode != os.EX_OK:
			msg = "%s %s copy failed unexpectedly" % \
				(self.distfile, current_mirror.name)
			self.scheduler.output(msg + '\n', background=True,
				log_path=self._log_path)
			logging.error(msg)
		else:

			logging.debug("copy '%s' from %s to distfiles" %
				(self.distfile, current_mirror.name))

			# Apply the timestamp from the source file, but
			# just rely on umask for permissions.
			try:
				if sys.hexversion >= 0x3030000:
					os.utime(copier.dest_path,
						ns=(self._current_stat.st_mtime_ns,
						self._current_stat.st_mtime_ns))
				else:
					os.utime(copier.dest_path,
						(self._current_stat[stat.ST_MTIME],
						self._current_stat[stat.ST_MTIME]))
			except OSError as e:
				msg = "%s %s utime failed unexpectedly: %s" % \
					(self.distfile, current_mirror.name, e)
				self.scheduler.output(msg + '\n', background=True,
					log_path=self._log_path)
				logging.error(msg)

			self._success()
			self.returncode = os.EX_OK
			self.wait()
			return

		self._try_next_mirror()
Example #12
	def _pkgindex_write(self, pkgindex):
		contents = codecs.getwriter(_encodings['repo.content'])(io.BytesIO())
		pkgindex.write(contents)
		contents = contents.getvalue()
		atime = mtime = int(pkgindex.header["TIMESTAMP"])
		output_files = [(atomic_ofstream(self._pkgindex_file, mode="wb"),
			self._pkgindex_file, None)]

		if "compress-index" in self.settings.features:
			gz_fname = self._pkgindex_file + ".gz"
			fileobj = atomic_ofstream(gz_fname, mode="wb")
			output_files.append((GzipFile(filename='', mode="wb",
				fileobj=fileobj, mtime=mtime), gz_fname, fileobj))

		for f, fname, f_close in output_files:
			f.write(contents)
			f.close()
			if f_close is not None:
				f_close.close()
			# some seconds might have elapsed since TIMESTAMP
			os.utime(fname, (atime, mtime))
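Passing mtime= to GzipFile is what keeps the compressed index deterministic: gzip embeds a timestamp in its header, so without it two runs over identical contents produce different bytes. A reduced sketch of the same pattern (hypothetical function, plain open instead of atomic_ofstream):

import gzip
import os

def write_index(path, contents, timestamp):
    """Write contents plus a .gz twin whose bytes depend only on
    contents and timestamp, never on when the function runs."""
    with open(path, "wb") as f:
        f.write(contents)
    with open(path + ".gz", "wb") as raw:
        # mtime= pins the timestamp field in the gzip header.
        with gzip.GzipFile(filename="", mode="wb", fileobj=raw,
                           mtime=timestamp) as gz:
            gz.write(contents)
    for fname in (path, path + ".gz"):
        # The files' own mtimes may lag the timestamp by a few
        # seconds of processing, so set them explicitly too.
        os.utime(fname, (timestamp, timestamp))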
Example #14
    def _ebuild_exit(self, ebuild_process):

        if self._ebuild_lock is not None:
            self._ebuild_lock.unlock()
            self._ebuild_lock = None

        fail = False
        if self._default_exit(ebuild_process) != os.EX_OK:
            if self.phase == "test" and \
             "test-fail-continue" in self.settings.features:
                # mark test phase as complete (bug #452030)
                try:
                    open(
                        _unicode_encode(os.path.join(
                            self.settings["PORTAGE_BUILDDIR"], ".tested"),
                                        encoding=_encodings['fs'],
                                        errors='strict'), 'wb').close()
                except OSError:
                    pass
            else:
                fail = True

        if not fail:
            self.returncode = None

        logfile = self._get_log_path()

        if self.phase == "install":
            out = io.StringIO()
            _check_build_log(self.settings, out=out)
            msg = out.getvalue()
            self.scheduler.output(msg, log_path=logfile)

        if fail:
            self._die_hooks()
            return

        settings = self.settings
        _post_phase_userpriv_perms(settings)

        if self.phase == "unpack":
            # Bump WORKDIR timestamp, in case tar gave it a timestamp
            # that will interfere with distfiles / WORKDIR timestamp
            # comparisons as reported in bug #332217. Also, fix
            # ownership since tar can change that too.
            os.utime(settings["WORKDIR"], None)
            _prepare_workdir(settings)
        elif self.phase == "install":
            out = io.StringIO()
            _post_src_install_write_metadata(settings)
            _post_src_install_uid_fix(settings, out)
            msg = out.getvalue()
            if msg:
                self.scheduler.output(msg, log_path=logfile)
        elif self.phase == "preinst":
            _preinst_bsdflags(settings)
        elif self.phase == "postinst":
            _postinst_bsdflags(settings)

        post_phase_cmds = _post_phase_cmds.get(self.phase)
        if post_phase_cmds is not None:
            if logfile is not None and self.phase in ("install", ):
                # Log to a temporary file, since the code we are running
                # reads PORTAGE_LOG_FILE for QA checks, and we want to
                # avoid annoying "gzip: unexpected end of file" messages
                # when FEATURES=compress-build-logs is enabled.
                fd, logfile = tempfile.mkstemp()
                os.close(fd)
            post_phase = MiscFunctionsProcess(background=self.background,
                                              commands=post_phase_cmds,
                                              fd_pipes=self.fd_pipes,
                                              logfile=logfile,
                                              phase=self.phase,
                                              scheduler=self.scheduler,
                                              settings=settings)
            self._start_task(post_phase, self._post_phase_exit)
            return

        # this point is not reachable if there was a failure and
        # we returned for die_hooks above, so returncode must
        # indicate success (especially if ebuild_process.returncode
        # is unsuccessful and test-fail-continue came into play)
        self.returncode = os.EX_OK
        self._current_task = None
        self.wait()
Example #15
	def _ebuild_exit(self, ebuild_process):

		if self._ebuild_lock is not None:
			self._ebuild_lock.unlock()
			self._ebuild_lock = None

		fail = False
		if self._default_exit(ebuild_process) != os.EX_OK:
			if self.phase == "test" and \
				"test-fail-continue" in self.settings.features:
				# mark test phase as complete (bug #452030)
				try:
					open(_unicode_encode(os.path.join(
						self.settings["PORTAGE_BUILDDIR"], ".tested"),
						encoding=_encodings['fs'], errors='strict'),
						'wb').close()
				except OSError:
					pass
			else:
				fail = True

		if not fail:
			self.returncode = None

		logfile = self._get_log_path()

		if self.phase == "install":
			out = io.StringIO()
			_check_build_log(self.settings, out=out)
			msg = out.getvalue()
			self.scheduler.output(msg, log_path=logfile)

		if fail:
			self._die_hooks()
			return

		settings = self.settings
		_post_phase_userpriv_perms(settings)

		if self.phase == "unpack":
			# Bump WORKDIR timestamp, in case tar gave it a timestamp
			# that will interfere with distfiles / WORKDIR timestamp
			# comparisons as reported in bug #332217. Also, fix
			# ownership since tar can change that too.
			os.utime(settings["WORKDIR"], None)
			_prepare_workdir(settings)
		elif self.phase == "install":
			out = io.StringIO()
			_post_src_install_write_metadata(settings)
			_post_src_install_uid_fix(settings, out)
			msg = out.getvalue()
			if msg:
				self.scheduler.output(msg, log_path=logfile)
		elif self.phase == "preinst":
			_preinst_bsdflags(settings)
		elif self.phase == "postinst":
			_postinst_bsdflags(settings)

		post_phase_cmds = _post_phase_cmds.get(self.phase)
		if post_phase_cmds is not None:
			if logfile is not None and self.phase in ("install",):
				# Log to a temporary file, since the code we are running
				# reads PORTAGE_LOG_FILE for QA checks, and we want to
				# avoid annoying "gzip: unexpected end of file" messages
				# when FEATURES=compress-build-logs is enabled.
				fd, logfile = tempfile.mkstemp()
				os.close(fd)
			post_phase = MiscFunctionsProcess(background=self.background,
				commands=post_phase_cmds, fd_pipes=self.fd_pipes,
				logfile=logfile, phase=self.phase, scheduler=self.scheduler,
				settings=settings)
			self._start_task(post_phase, self._post_phase_exit)
			return

		# this point is not reachable if there was a failure and
		# we returned for die_hooks above, so returncode must
		# indicate success (especially if ebuild_process.returncode
		# is unsuccessful and test-fail-continue came into play)
		self.returncode = os.EX_OK
		self._current_task = None
		self.wait()
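The WORKDIR bump above uses the other calling convention of os.utime: passing None sets both timestamps to the current time, the Python equivalent of touch on an existing path. For contrast with the explicit-tuple calls elsewhere in these examples:

import os

def bump_mtime(path):
    # None means "now" for both atime and mtime; unlike an explicit
    # tuple, this only requires write permission, not ownership.
    os.utime(path, None)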
Example #16
def _env_update(makelinks, target_root, prev_mtimes, contents, env,
                writemsg_level):
    if writemsg_level is None:
        writemsg_level = portage.util.writemsg_level
    if target_root is None:
        target_root = portage.settings["ROOT"]
    if prev_mtimes is None:
        prev_mtimes = portage.mtimedb["ldpath"]
    if env is None:
        settings = portage.settings
    else:
        settings = env

    eprefix = settings.get("EPREFIX", "")
    eprefix_lstrip = eprefix.lstrip(os.sep)
    eroot = normalize_path(os.path.join(target_root, eprefix_lstrip)).rstrip(
        os.sep) + os.sep
    envd_dir = os.path.join(eroot, "etc", "env.d")
    ensure_dirs(envd_dir, mode=0o755)
    fns = listdir(envd_dir, EmptyOnError=1)
    fns.sort()
    templist = []
    for x in fns:
        if len(x) < 3:
            continue
        if not x[0].isdigit() or not x[1].isdigit():
            continue
        if x.startswith(".") or x.endswith("~") or x.endswith(".bak"):
            continue
        templist.append(x)
    fns = templist
    del templist

    space_separated = set(["CONFIG_PROTECT", "CONFIG_PROTECT_MASK"])
    colon_separated = set([
        "ADA_INCLUDE_PATH", "ADA_OBJECTS_PATH", "CLASSPATH", "INFODIR",
        "INFOPATH", "KDEDIRS", "LDPATH", "MANPATH", "PATH", "PKG_CONFIG_PATH",
        "PRELINK_PATH", "PRELINK_PATH_MASK", "PYTHONPATH", "ROOTPATH"
    ])

    config_list = []

    for x in fns:
        file_path = os.path.join(envd_dir, x)
        try:
            myconfig = getconfig(file_path, expand=False)
        except ParseError as e:
            writemsg("!!! '%s'\n" % str(e), noiselevel=-1)
            del e
            continue
        if myconfig is None:
            # broken symlink or file removed by a concurrent process
            writemsg("!!! File Not Found: '%s'\n" % file_path, noiselevel=-1)
            continue

        config_list.append(myconfig)
        if "SPACE_SEPARATED" in myconfig:
            space_separated.update(myconfig["SPACE_SEPARATED"].split())
            del myconfig["SPACE_SEPARATED"]
        if "COLON_SEPARATED" in myconfig:
            colon_separated.update(myconfig["COLON_SEPARATED"].split())
            del myconfig["COLON_SEPARATED"]

    env = {}
    specials = {}
    for var in space_separated:
        mylist = []
        for myconfig in config_list:
            if var in myconfig:
                for item in myconfig[var].split():
                    if item and item not in mylist:
                        mylist.append(item)
                del myconfig[var]  # prepare for env.update(myconfig)
        if mylist:
            env[var] = " ".join(mylist)
        specials[var] = mylist

    for var in colon_separated:
        mylist = []
        for myconfig in config_list:
            if var in myconfig:
                for item in myconfig[var].split(":"):
                    if item and item not in mylist:
                        mylist.append(item)
                del myconfig[var]  # prepare for env.update(myconfig)
        if mylist:
            env[var] = ":".join(mylist)
        specials[var] = mylist

    for myconfig in config_list:
        # Cumulative variables have already been deleted from myconfig so
        # that they won't be overwritten by this dict.update call.
        env.update(myconfig)

    ldsoconf_path = os.path.join(eroot, "etc", "ld.so.conf")
    try:
        myld = io.open(_unicode_encode(ldsoconf_path,
                                       encoding=_encodings['fs'],
                                       errors='strict'),
                       mode='r',
                       encoding=_encodings['content'],
                       errors='replace')
        myldlines = myld.readlines()
        myld.close()
        oldld = []
        for x in myldlines:
            #each line has at least one char (a newline)
            if x[:1] == "#":
                continue
            oldld.append(x[:-1])
    except (IOError, OSError) as e:
        if e.errno != errno.ENOENT:
            raise
        oldld = None

    newld = specials["LDPATH"]
    if oldld != newld:
        #ld.so.conf needs updating and ldconfig needs to be run
        myfd = atomic_ofstream(ldsoconf_path)
        myfd.write(
            "# ld.so.conf autogenerated by env-update; make all changes to\n")
        myfd.write("# contents of /etc/env.d directory\n")
        for x in specials["LDPATH"]:
            myfd.write(x + "\n")
        myfd.close()

    potential_lib_dirs = set()
    for lib_dir_glob in ('usr/lib*', 'lib*'):
        x = os.path.join(eroot, lib_dir_glob)
        for y in glob.glob(
                _unicode_encode(x, encoding=_encodings['fs'],
                                errors='strict')):
            try:
                y = _unicode_decode(y,
                                    encoding=_encodings['fs'],
                                    errors='strict')
            except UnicodeDecodeError:
                continue
            if os.path.basename(y) != 'libexec':
                potential_lib_dirs.add(y[len(eroot):])

    # Update prelink.conf if we are prelink-enabled
    if prelink_capable:
        prelink_d = os.path.join(eroot, 'etc', 'prelink.conf.d')
        ensure_dirs(prelink_d)
        newprelink = atomic_ofstream(os.path.join(prelink_d, 'portage.conf'))
        newprelink.write(
            "# prelink.conf autogenerated by env-update; make all changes to\n"
        )
        newprelink.write("# contents of /etc/env.d directory\n")

        for x in sorted(potential_lib_dirs) + ['bin', 'sbin']:
            newprelink.write('-l /%s\n' % (x, ))
        prelink_paths = set()
        prelink_paths |= set(specials.get('LDPATH', []))
        prelink_paths |= set(specials.get('PATH', []))
        prelink_paths |= set(specials.get('PRELINK_PATH', []))
        prelink_path_mask = specials.get('PRELINK_PATH_MASK', [])
        for x in prelink_paths:
            if not x:
                continue
            if x[-1:] != '/':
                x += "/"
            plmasked = 0
            for y in prelink_path_mask:
                if not y:
                    continue
                if y[-1] != '/':
                    y += "/"
                if y == x[0:len(y)]:
                    plmasked = 1
                    break
            if not plmasked:
                newprelink.write("-h %s\n" % (x, ))
        for x in prelink_path_mask:
            newprelink.write("-b %s\n" % (x, ))
        newprelink.close()

        # Migration code path.  If /etc/prelink.conf was generated by us, then
        # point it to the new stuff until the prelink package re-installs.
        prelink_conf = os.path.join(eroot, 'etc', 'prelink.conf')
        try:
            with open(_unicode_encode(prelink_conf,
                                      encoding=_encodings['fs'],
                                      errors='strict'), 'rb') as f:
                first_line = f.readline()
            if first_line == (b'# prelink.conf autogenerated by env-update;'
                              b' make all changes to\n'):
                out = atomic_ofstream(prelink_conf)
                out.write('-c /etc/prelink.conf.d/*.conf\n')
                out.close()
        except IOError as e:
            if e.errno != errno.ENOENT:
                raise

    current_time = int(time.time())
    mtime_changed = False

    lib_dirs = set()
    for lib_dir in set(specials['LDPATH']) | potential_lib_dirs:
        x = os.path.join(eroot, lib_dir.lstrip(os.sep))
        try:
            newldpathtime = os.stat(x)[stat.ST_MTIME]
            lib_dirs.add(normalize_path(x))
        except OSError as oe:
            if oe.errno == errno.ENOENT:
                try:
                    del prev_mtimes[x]
                except KeyError:
                    pass
                # ignore this path because it doesn't exist
                continue
            raise
        if newldpathtime == current_time:
            # Reset mtime to avoid the potential ambiguity of times that
            # differ by less than 1 second.
            newldpathtime -= 1
            os.utime(x, (newldpathtime, newldpathtime))
            prev_mtimes[x] = newldpathtime
            mtime_changed = True
        elif x in prev_mtimes:
            if prev_mtimes[x] == newldpathtime:
                pass
            else:
                prev_mtimes[x] = newldpathtime
                mtime_changed = True
        else:
            prev_mtimes[x] = newldpathtime
            mtime_changed = True

    if makelinks and \
     not mtime_changed and \
     contents is not None:
        libdir_contents_changed = False
        for mypath, mydata in contents.items():
            if mydata[0] not in ("obj", "sym"):
                continue
            head, tail = os.path.split(mypath)
            if head in lib_dirs:
                libdir_contents_changed = True
                break
        if not libdir_contents_changed:
            makelinks = False

    ldconfig = "/sbin/ldconfig"
    if "CHOST" in settings and "CBUILD" in settings and \
     settings["CHOST"] != settings["CBUILD"]:
        ldconfig = find_binary("%s-ldconfig" % settings["CHOST"])

    # Only run ldconfig as needed
    if makelinks and ldconfig and not eprefix:
        # ldconfig has very different behaviour between FreeBSD and Linux
        if ostype == "Linux" or ostype.lower().endswith("gnu"):
            # We can't update links if we haven't cleaned other versions first, as
            # an older package installed ON TOP of a newer version will cause ldconfig
            # to overwrite the symlinks we just made. -X means no links. After 'clean'
            # we can safely create links.
            writemsg_level(_(">>> Regenerating %setc/ld.so.cache...\n") % \
             (target_root,))
            os.system("cd / ; %s -X -r '%s'" % (ldconfig, target_root))
        elif ostype in ("FreeBSD", "DragonFly"):
            writemsg_level(_(">>> Regenerating %svar/run/ld-elf.so.hints...\n") % \
             target_root)
            os.system(("cd / ; %s -elf -i " + \
             "-f '%svar/run/ld-elf.so.hints' '%setc/ld.so.conf'") % \
             (ldconfig, target_root, target_root))

    del specials["LDPATH"]

    penvnotice = "# THIS FILE IS AUTOMATICALLY GENERATED BY env-update.\n"
    penvnotice += "# DO NOT EDIT THIS FILE. CHANGES TO STARTUP PROFILES\n"
    cenvnotice = penvnotice[:]
    penvnotice += "# GO INTO /etc/profile NOT /etc/profile.env\n\n"
    cenvnotice += "# GO INTO /etc/csh.cshrc NOT /etc/csh.env\n\n"

    #create /etc/profile.env for bash support
    outfile = atomic_ofstream(os.path.join(eroot, "etc", "profile.env"))
    outfile.write(penvnotice)

    env_keys = [x for x in env if x != "LDPATH"]
    env_keys.sort()
    for k in env_keys:
        v = env[k]
        if v.startswith('$') and not v.startswith('${'):
            outfile.write("export %s=$'%s'\n" % (k, v[1:]))
        else:
            outfile.write("export %s='%s'\n" % (k, v))
    outfile.close()

    #create /etc/csh.env for (t)csh support
    outfile = atomic_ofstream(os.path.join(eroot, "etc", "csh.env"))
    outfile.write(cenvnotice)
    for x in env_keys:
        outfile.write("setenv %s '%s'\n" % (x, env[x]))
    outfile.close()
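The newldpathtime -= 1 branch above encodes a subtle cache-invalidation guard: if a directory's recorded mtime equals the current second, a later write within that same second would leave the mtime unchanged and go unnoticed. Backdating by one second removes the ambiguity. A sketch with a hypothetical cache dict:

import os
import stat
import time

def record_dir_mtime(path, mtime_cache):
    """Record path's mtime so a later comparison can detect changes."""
    mtime = os.stat(path)[stat.ST_MTIME]
    if mtime == int(time.time()):
        # A write landing later in this same second would not change
        # the whole-second mtime; backdate so it will look newer.
        mtime -= 1
        os.utime(path, (mtime, mtime))
    mtime_cache[path] = mtime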
Example #17
def _env_update(makelinks, target_root, prev_mtimes, contents, env,
	writemsg_level):
	if writemsg_level is None:
		writemsg_level = portage.util.writemsg_level
	if target_root is None:
		target_root = portage.settings["ROOT"]
	if prev_mtimes is None:
		prev_mtimes = portage.mtimedb["ldpath"]
	if env is None:
		settings = portage.settings
	else:
		settings = env

	eprefix = settings.get("EPREFIX", "")
	eprefix_lstrip = eprefix.lstrip(os.sep)
	envd_dir = os.path.join(target_root, eprefix_lstrip, "etc", "env.d")
	ensure_dirs(envd_dir, mode=0o755)
	fns = listdir(envd_dir, EmptyOnError=1)
	fns.sort()
	templist = []
	for x in fns:
		if len(x) < 3:
			continue
		if not x[0].isdigit() or not x[1].isdigit():
			continue
		if x.startswith(".") or x.endswith("~") or x.endswith(".bak"):
			continue
		templist.append(x)
	fns = templist
	del templist

	space_separated = set(["CONFIG_PROTECT", "CONFIG_PROTECT_MASK"])
	colon_separated = set(["ADA_INCLUDE_PATH", "ADA_OBJECTS_PATH",
		"CLASSPATH", "INFODIR", "INFOPATH", "KDEDIRS", "LDPATH", "MANPATH",
		  "PATH", "PKG_CONFIG_PATH", "PRELINK_PATH", "PRELINK_PATH_MASK",
		  "PYTHONPATH", "ROOTPATH"])

	config_list = []

	for x in fns:
		file_path = os.path.join(envd_dir, x)
		try:
			myconfig = getconfig(file_path, expand=False)
		except ParseError as e:
			writemsg("!!! '%s'\n" % str(e), noiselevel=-1)
			del e
			continue
		if myconfig is None:
			# broken symlink or file removed by a concurrent process
			writemsg("!!! File Not Found: '%s'\n" % file_path, noiselevel=-1)
			continue

		config_list.append(myconfig)
		if "SPACE_SEPARATED" in myconfig:
			space_separated.update(myconfig["SPACE_SEPARATED"].split())
			del myconfig["SPACE_SEPARATED"]
		if "COLON_SEPARATED" in myconfig:
			colon_separated.update(myconfig["COLON_SEPARATED"].split())
			del myconfig["COLON_SEPARATED"]

	env = {}
	specials = {}
	for var in space_separated:
		mylist = []
		for myconfig in config_list:
			if var in myconfig:
				for item in myconfig[var].split():
					if item and item not in mylist:
						mylist.append(item)
				del myconfig[var] # prepare for env.update(myconfig)
		if mylist:
			env[var] = " ".join(mylist)
		specials[var] = mylist

	for var in colon_separated:
		mylist = []
		for myconfig in config_list:
			if var in myconfig:
				for item in myconfig[var].split(":"):
					if item and item not in mylist:
						mylist.append(item)
				del myconfig[var] # prepare for env.update(myconfig)
		if mylist:
			env[var] = ":".join(mylist)
		specials[var] = mylist

	for myconfig in config_list:
		# Cumulative variables have already been deleted from myconfig so
		# that they won't be overwritten by this dict.update call.
		env.update(myconfig)

	ldsoconf_path = os.path.join(
		target_root, eprefix_lstrip, "etc", "ld.so.conf")
	try:
		myld = io.open(_unicode_encode(ldsoconf_path,
			encoding=_encodings['fs'], errors='strict'),
			mode='r', encoding=_encodings['content'], errors='replace')
		myldlines = myld.readlines()
		myld.close()
		oldld = []
		for x in myldlines:
			#each line has at least one char (a newline)
			if x[:1] == "#":
				continue
			oldld.append(x[:-1])
	except (IOError, OSError) as e:
		if e.errno != errno.ENOENT:
			raise
		oldld = None

	newld = specials["LDPATH"]
	if oldld != newld:
		#ld.so.conf needs updating and ldconfig needs to be run
		myfd = atomic_ofstream(ldsoconf_path)
		myfd.write("# ld.so.conf autogenerated by env-update; make all changes to\n")
		myfd.write("# contents of /etc/env.d directory\n")
		for x in specials["LDPATH"]:
			myfd.write(x + "\n")
		myfd.close()

	# Update prelink.conf if we are prelink-enabled
	if prelink_capable:
		newprelink = atomic_ofstream(os.path.join(
			target_root, eprefix_lstrip, "etc", "prelink.conf"))
		newprelink.write("# prelink.conf autogenerated by env-update; make all changes to\n")
		newprelink.write("# contents of /etc/env.d directory\n")

		for x in ["/bin","/sbin","/usr/bin","/usr/sbin","/lib","/usr/lib"]:
			newprelink.write("-l %s\n" % (x,));
		prelink_paths = []
		prelink_paths += specials.get("LDPATH", [])
		prelink_paths += specials.get("PATH", [])
		prelink_paths += specials.get("PRELINK_PATH", [])
		prelink_path_mask = specials.get("PRELINK_PATH_MASK", [])
		for x in prelink_paths:
			if not x:
				continue
			if x[-1:] != '/':
				x += "/"
			plmasked = 0
			for y in prelink_path_mask:
				if not y:
					continue
				if y[-1] != '/':
					y += "/"
				if y == x[0:len(y)]:
					plmasked = 1
					break
			if not plmasked:
				newprelink.write("-h %s\n" % (x,))
		for x in prelink_path_mask:
			newprelink.write("-b %s\n" % (x,))
		newprelink.close()

	current_time = int(time.time())
	mtime_changed = False
	lib_dirs = set()
	for lib_dir in set(specials["LDPATH"] + \
		['usr/lib','usr/lib64','usr/lib32','lib','lib64','lib32']):
		x = os.path.join(target_root, eprefix_lstrip, lib_dir.lstrip(os.sep))
		try:
			newldpathtime = os.stat(x)[stat.ST_MTIME]
			lib_dirs.add(normalize_path(x))
		except OSError as oe:
			if oe.errno == errno.ENOENT:
				try:
					del prev_mtimes[x]
				except KeyError:
					pass
				# ignore this path because it doesn't exist
				continue
			raise
		if newldpathtime == current_time:
			# Reset mtime to avoid the potential ambiguity of times that
			# differ by less than 1 second.
			newldpathtime -= 1
			os.utime(x, (newldpathtime, newldpathtime))
			prev_mtimes[x] = newldpathtime
			mtime_changed = True
		elif x in prev_mtimes:
			if prev_mtimes[x] == newldpathtime:
				pass
			else:
				prev_mtimes[x] = newldpathtime
				mtime_changed = True
		else:
			prev_mtimes[x] = newldpathtime
			mtime_changed = True

	if makelinks and \
		not mtime_changed and \
		contents is not None:
		libdir_contents_changed = False
		for mypath, mydata in contents.items():
			if mydata[0] not in ("obj", "sym"):
				continue
			head, tail = os.path.split(mypath)
			if head in lib_dirs:
				libdir_contents_changed = True
				break
		if not libdir_contents_changed:
			makelinks = False

	ldconfig = "/sbin/ldconfig"
	if "CHOST" in settings and "CBUILD" in settings and \
		settings["CHOST"] != settings["CBUILD"]:
		ldconfig = find_binary("%s-ldconfig" % settings["CHOST"])

	# Only run ldconfig as needed
	if makelinks and ldconfig and not eprefix:
		# ldconfig has very different behaviour between FreeBSD and Linux
		if ostype == "Linux" or ostype.lower().endswith("gnu"):
			# We can't update links if we haven't cleaned other versions first, as
			# an older package installed ON TOP of a newer version will cause ldconfig
			# to overwrite the symlinks we just made. -X means no links. After 'clean'
			# we can safely create links.
			writemsg_level(_(">>> Regenerating %setc/ld.so.cache...\n") % \
				(target_root,))
			os.system("cd / ; %s -X -r '%s'" % (ldconfig, target_root))
		elif ostype in ("FreeBSD","DragonFly"):
			writemsg_level(_(">>> Regenerating %svar/run/ld-elf.so.hints...\n") % \
				target_root)
			os.system(("cd / ; %s -elf -i " + \
				"-f '%svar/run/ld-elf.so.hints' '%setc/ld.so.conf'") % \
				(ldconfig, target_root, target_root))

	del specials["LDPATH"]

	penvnotice  = "# THIS FILE IS AUTOMATICALLY GENERATED BY env-update.\n"
	penvnotice += "# DO NOT EDIT THIS FILE. CHANGES TO STARTUP PROFILES\n"
	cenvnotice  = penvnotice[:]
	penvnotice += "# GO INTO /etc/profile NOT /etc/profile.env\n\n"
	cenvnotice += "# GO INTO /etc/csh.cshrc NOT /etc/csh.env\n\n"

	#create /etc/profile.env for bash support
	outfile = atomic_ofstream(os.path.join(
		target_root, eprefix_lstrip, "etc", "profile.env"))
	outfile.write(penvnotice)

	env_keys = [x for x in env if x != "LDPATH"]
	env_keys.sort()
	for k in env_keys:
		v = env[k]
		if v.startswith('$') and not v.startswith('${'):
			outfile.write("export %s=$'%s'\n" % (k, v[1:]))
		else:
			outfile.write("export %s='%s'\n" % (k, v))
	outfile.close()

	#create /etc/csh.env for (t)csh support
	outfile = atomic_ofstream(os.path.join(
		target_root, eprefix_lstrip, "etc", "csh.env"))
	outfile.write(cenvnotice)
	for x in env_keys:
		outfile.write("setenv %s '%s'\n" % (x, env[x]))
	outfile.close()
Example #18
    def _apply_max_mtime(self, preserved_stats, entries):
        """
        Set the Manifest mtime to the max mtime of all relevant files
        and directories. Directory mtimes account for file renames and
        removals. The existing Manifest mtime accounts for eclass
        modifications that change DIST entries. This results in a
        stable/predictable mtime, which is useful when converting thin
        manifests to thick manifests for distribution via rsync. For
        portability, the mtime is set with 1 second resolution.

        @param preserved_stats: maps paths to preserved stat results
                that should be used instead of os.stat() calls
        @type preserved_stats: dict
        @param entries: list of current Manifest2Entry instances
        @type entries: list
        """

        # Use stat_result[stat.ST_MTIME] for 1 second resolution, since
        # it always rounds down. Note that stat_result.st_mtime will round
        # up from 0.999999999 to 1.0 when precision is lost during conversion
        # from nanosecond resolution to float.

        def _update_max(max_mtime, st):
            stat_mtime = st[stat.ST_MTIME]
            if max_mtime:
                return max(max_mtime, stat_mtime)
            return stat_mtime

        def _stat(path):
            if path in preserved_stats:
                return preserved_stats[path]
            else:
                return os.stat(path)

        max_mtime = None
        for stat_result in preserved_stats.values():
            max_mtime = _update_max(max_mtime, stat_result)

        for entry in entries:
            if entry.type == "DIST":
                continue
            files = ""
            if entry.type == "AUX":
                files = "files"
            abs_path = os.path.join(self.pkgdir, files, entry.name)
            max_mtime = _update_max(max_mtime, _stat(abs_path))

        if not self.thin:
            # Account for changes to all relevant nested directories.
            # This is not necessary for thin manifests because
            # self.pkgdir is already included via preserved_stats.
            for parent_dir, dirs, files in os.walk(self.pkgdir.rstrip(os.sep)):
                try:
                    parent_dir = _unicode_decode(parent_dir,
                                                 encoding=_encodings["fs"],
                                                 errors="strict")
                except UnicodeDecodeError:
                    # If an absolute path cannot be decoded, then it is
                    # always excluded from the manifest (repoman will
                    # report such problems).
                    pass
                else:
                    max_mtime = _update_max(max_mtime, _stat(parent_dir))

        if max_mtime is not None:
            for path in preserved_stats:
                try:
                    os.utime(path, (max_mtime, max_mtime))
                except OSError as e:
                    # Even though we have write permission, utime fails
                    # with EPERM if path is owned by a different user.
                    # Only warn in this case, since it's not a problem
                    # unless this repo is being prepared for distribution
                    # via rsync.
                    writemsg_level(
                        f"!!! utime('{path}', ({max_mtime}, {max_mtime})): {e}\n",
                        level=logging.WARNING,
                        noiselevel=-1,
                    )
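The EPERM caveat in that final except block reflects POSIX semantics: setting an explicit timestamp requires owning the file (or CAP_FOWNER), while os.utime(path, None) needs only write permission. A sketch isolating the distinction:

import errno
import os

def try_set_mtime(path, mtime):
    """Return True if the explicit mtime was applied, False if the
    file is writable but owned by someone else (EPERM)."""
    try:
        os.utime(path, (mtime, mtime))
    except OSError as e:
        if e.errno == errno.EPERM:
            return False
        raise
    return True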