Example #1
0
    def unlock(self):
        """
        Set GPG_TTY and run GPG unlock command.
        If gpg-keepalive is set, start keepalive thread.
        """
        if self.GPG_unlock_command and (
            self.settings.get("BINPKG_FORMAT", SUPPORTED_GENTOO_BINPKG_FORMATS[0])
            == "gpkg"
        ):
            try:
                os.environ["GPG_TTY"] = os.ttyname(sys.stdout.fileno())
            except OSError as e:
                # When run without an input/output tty, this will fail.
                # However, if the password is given by command,
                # GPG does not need to ask for the password, so the
                # failure can be ignored.
                writemsg(colorize("WARN", str(e)) + "\n")

            cmd = shlex_split(varexpand(self.GPG_unlock_command, mydict=self.settings))
            return_code = subprocess.Popen(cmd).wait()

            if return_code == os.EX_OK:
                writemsg_stdout(colorize("GOOD", "unlocked") + "\n")
                sys.stdout.flush()
            else:
                raise GPGException("GPG unlock failed")

            if self.keepalive:
                self.GPG_unlock_command = shlex_split(
                    varexpand(self.GPG_unlock_command, mydict=self.settings)
                )
                self.thread = threading.Thread(target=self.gpg_keepalive, daemon=True)
                self.thread.start()
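
A note on the GPG_TTY step above: it only matters when gpg has to prompt on a terminal. A minimal standalone sketch of the same idiom, assuming only the standard library:

import os
import sys

def export_gpg_tty():
    # Point GPG at our controlling terminal, if any. Returns False when
    # there is no tty (e.g. under cron); in that case gpg must obtain the
    # passphrase another way (gpg-agent, --passphrase-fd, ...), so the
    # failure is safe to ignore, as the comment above explains.
    try:
        os.environ["GPG_TTY"] = os.ttyname(sys.stdout.fileno())
        return True
    except OSError:
        return False
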
Example #2
0
	def new(self, **kwargs):
		'''Do the initial clone of the repository'''
		if kwargs:
			self._kwargs(kwargs)
		if not self.has_bin:
			return (1, False)
		try:
			if not os.path.exists(self.repo.location):
				os.makedirs(self.repo.location)
				self.logger(self.xterm_titles,
					'Created new directory %s' % self.repo.location)
		except IOError:
			return (1, False)

		sync_uri = self.repo.sync_uri
		if sync_uri.startswith("file://"):
			sync_uri = sync_uri[7:]

		git_cmd_opts = ""
		if self.repo.module_specific_options.get('sync-git-env'):
			shlexed_env = shlex_split(self.repo.module_specific_options['sync-git-env'])
			env = dict((k, v) for k, _, v in (assignment.partition('=') for assignment in shlexed_env) if k)
			self.spawn_kwargs['env'].update(env)

		if self.repo.module_specific_options.get('sync-git-clone-env'):
			shlexed_env = shlex_split(self.repo.module_specific_options['sync-git-clone-env'])
			clone_env = dict((k, v) for k, _, v in (assignment.partition('=') for assignment in shlexed_env) if k)
			self.spawn_kwargs['env'].update(clone_env)

		if self.settings.get("PORTAGE_QUIET") == "1":
			git_cmd_opts += " --quiet"
		if self.repo.clone_depth is not None:
			if self.repo.clone_depth != 0:
				git_cmd_opts += " --depth %d" % self.repo.clone_depth
		elif self.repo.sync_depth is not None:
			if self.repo.sync_depth != 0:
				git_cmd_opts += " --depth %d" % self.repo.sync_depth
		else:
			# default
			git_cmd_opts += " --depth 1"

		if self.repo.module_specific_options.get('sync-git-clone-extra-opts'):
			git_cmd_opts += " %s" % self.repo.module_specific_options['sync-git-clone-extra-opts']
		git_cmd = "%s clone%s %s ." % (self.bin_command, git_cmd_opts,
			portage._shell_quote(sync_uri))
		writemsg_level(git_cmd + "\n")

		exitcode = portage.process.spawn_bash("cd %s ; exec %s" % (
				portage._shell_quote(self.repo.location), git_cmd),
			**self.spawn_kwargs)
		if exitcode != os.EX_OK:
			msg = "!!! git clone error in %s" % self.repo.location
			self.logger(self.xterm_titles, msg)
			writemsg_level(msg + "\n", level=logging.ERROR, noiselevel=-1)
			return (exitcode, False)
		if not self.verify_head():
			return (1, False)
		return (os.EX_OK, True)
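
The sync-git-env blocks above repeat one idiom: shlex-split a string of KEY=VALUE assignments and fold them into an environment dict. A standalone sketch of that idiom, with stdlib shlex.split standing in for portage's shlex_split:

import shlex

def parse_env_assignments(s):
    # 'A=1 B="two words"' -> {'A': '1', 'B': 'two words'}. Tokens with an
    # empty key (e.g. '=x') are dropped, matching the generator expression
    # used in the sync modules; tokens without '=' get an empty value.
    return {k: v
            for k, _, v in (tok.partition("=") for tok in shlex.split(s))
            if k}

assert parse_env_assignments('GIT_SSL_NO_VERIFY=1 MSG="a b"') == {
    "GIT_SSL_NO_VERIFY": "1", "MSG": "a b"}
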
Example #3
0
	def new(self, **kwargs):
		'''Do the initial clone of the repository'''
		if kwargs:
			self._kwargs(kwargs)
		if not self.has_bin:
			return (1, False)
		try:
			if not os.path.exists(self.repo.location):
				os.makedirs(self.repo.location)
				self.logger(self.xterm_titles,
					'Created new directory %s' % self.repo.location)
		except IOError:
			return (1, False)

		sync_uri = self.repo.sync_uri
		if sync_uri.startswith("file://"):
			sync_uri = sync_uri[7:]

		git_cmd_opts = ""
		if self.repo.module_specific_options.get('sync-git-env'):
			shlexed_env = shlex_split(self.repo.module_specific_options['sync-git-env'])
			env = dict((k, v) for k, _, v in (assignment.partition('=') for assignment in shlexed_env) if k)
			self.spawn_kwargs['env'].update(env)

		if self.repo.module_specific_options.get('sync-git-clone-env'):
			shlexed_env = shlex_split(self.repo.module_specific_options['sync-git-clone-env'])
			clone_env = dict((k, v) for k, _, v in (assignment.partition('=') for assignment in shlexed_env) if k)
			self.spawn_kwargs['env'].update(clone_env)

		if self.settings.get("PORTAGE_QUIET") == "1":
			git_cmd_opts += " --quiet"
		if self.repo.clone_depth is not None:
			if self.repo.clone_depth != 0:
				git_cmd_opts += " --depth %d" % self.repo.clone_depth
		elif self.repo.sync_depth is not None:
			if self.repo.sync_depth != 0:
				git_cmd_opts += " --depth %d" % self.repo.sync_depth
		else:
			# default
			git_cmd_opts += " --depth 1"

		if self.repo.module_specific_options.get('sync-git-clone-extra-opts'):
			git_cmd_opts += " %s" % self.repo.module_specific_options['sync-git-clone-extra-opts']
		git_cmd = "%s clone%s %s ." % (self.bin_command, git_cmd_opts,
			portage._shell_quote(sync_uri))
		writemsg_level(git_cmd + "\n")

		exitcode = portage.process.spawn_bash("cd %s ; exec %s" % (
				portage._shell_quote(self.repo.location), git_cmd),
			**self.spawn_kwargs)
		if exitcode != os.EX_OK:
			msg = "!!! git clone error in %s" % self.repo.location
			self.logger(self.xterm_titles, msg)
			writemsg_level(msg + "\n", level=logging.ERROR, noiselevel=-1)
			return (exitcode, False)
		if not self.verify_head():
			return (1, False)
		return (os.EX_OK, True)
Example #4
0
    def new(self, **kwargs):
        """Do the initial clone of the repository"""
        if kwargs:
            self._kwargs(kwargs)
        try:
            if not os.path.exists(self.repo.location):
                os.makedirs(self.repo.location)
                self.logger(self.xterm_titles,
                            "Created new directory %s" % self.repo.location)
        except IOError:
            return (1, False)

        sync_uri = self.repo.sync_uri
        if sync_uri.startswith("file://"):
            sync_uri = sync_uri[7:]

        hg_cmd_opts = ""
        if self.repo.module_specific_options.get("sync-mercurial-env"):
            shlexed_env = shlex_split(
                self.repo.module_specific_options["sync-mercurial-env"])
            env = dict(
                (k, v) for k, _, v in (assignment.partition("=")
                                       for assignment in shlexed_env) if k)
            self.spawn_kwargs["env"].update(env)

        if self.repo.module_specific_options.get("sync-mercurial-clone-env"):
            shlexed_env = shlex_split(
                self.repo.module_specific_options["sync-mercurial-clone-env"])
            clone_env = dict(
                (k, v) for k, _, v in (assignment.partition("=")
                                       for assignment in shlexed_env) if k)
            self.spawn_kwargs["env"].update(clone_env)

        if self.settings.get("PORTAGE_QUIET") == "1":
            hg_cmd_opts += " --quiet"
        if self.repo.module_specific_options.get(
                "sync-mercurial-clone-extra-opts"):
            hg_cmd_opts += (
                " %s" % self.repo.
                module_specific_options["sync-mercurial-clone-extra-opts"])
        hg_cmd = "%s clone%s %s ." % (
            self.bin_command,
            hg_cmd_opts,
            portage._shell_quote(sync_uri),
        )
        writemsg_level(hg_cmd + "\n")

        exitcode = portage.process.spawn(shlex_split(hg_cmd),
                                         cwd=portage._unicode_encode(
                                             self.repo.location),
                                         **self.spawn_kwargs)
        if exitcode != os.EX_OK:
            msg = "!!! hg clone error in %s" % self.repo.location
            self.logger(self.xterm_titles, msg)
            writemsg_level(msg + "\n", level=logging.ERROR, noiselevel=-1)
            return (exitcode, False)
        return (os.EX_OK, True)
Example #5
0
    def update(self):
        ''' Update existing git repository, and ignore the syncuri. We are
        going to trust the user and assume that the user is in the branch
        that he/she wants updated. We'll let the user manage branches with
        git directly.
        '''
        if not self.has_bin:
            return (1, False)
        git_cmd_opts = ""
        if self.repo.module_specific_options.get('sync-git-env'):
            shlexed_env = shlex_split(
                self.repo.module_specific_options['sync-git-env'])
            env = dict(
                (k, v) for k, _, v in (assignment.partition('=')
                                       for assignment in shlexed_env) if k)
            self.spawn_kwargs['env'].update(env)

        if self.repo.module_specific_options.get('sync-git-pull-env'):
            shlexed_env = shlex_split(
                self.repo.module_specific_options['sync-git-pull-env'])
            pull_env = dict(
                (k, v) for k, _, v in (assignment.partition('=')
                                       for assignment in shlexed_env) if k)
            self.spawn_kwargs['env'].update(pull_env)

        if self.settings.get("PORTAGE_QUIET") == "1":
            git_cmd_opts += " --quiet"
        if self.repo.module_specific_options.get('sync-git-pull-extra-opts'):
            git_cmd_opts += " %s" % self.repo.module_specific_options[
                'sync-git-pull-extra-opts']
        git_cmd = "%s pull%s" % (self.bin_command, git_cmd_opts)
        writemsg_level(git_cmd + "\n")

        rev_cmd = [self.bin_command, "rev-list", "--max-count=1", "HEAD"]
        previous_rev = subprocess.check_output(rev_cmd,
                                               cwd=portage._unicode_encode(
                                                   self.repo.location))

        exitcode = portage.process.spawn_bash(
            "cd %s ; exec %s" %
            (portage._shell_quote(self.repo.location), git_cmd),
            **self.spawn_kwargs)
        if exitcode != os.EX_OK:
            msg = "!!! git pull error in %s" % self.repo.location
            self.logger(self.xterm_titles, msg)
            writemsg_level(msg + "\n", level=logging.ERROR, noiselevel=-1)
            return (exitcode, False)
        if not self.verify_head():
            return (1, False)

        current_rev = subprocess.check_output(rev_cmd,
                                              cwd=portage._unicode_encode(
                                                  self.repo.location))

        return (os.EX_OK, current_rev != previous_rev)
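
The update() methods report whether anything changed by recording `git rev-list --max-count=1 HEAD` before and after the pull. A standalone sketch of that idiom (the repository path is illustrative):

import subprocess

def head_rev(repo_location):
    # Current HEAD commit hash of a git checkout, as bytes.
    return subprocess.check_output(
        ["git", "rev-list", "--max-count=1", "HEAD"],
        cwd=repo_location).strip()

# previous = head_rev("/var/db/repos/gentoo")
# ... run the pull, or fetch + merge ...
# changed = head_rev("/var/db/repos/gentoo") != previous
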
Example #6
0
    def update(self):
        """Update existing mercurial repository, and ignore the syncuri. We are
		going to trust the user and assume that the user is in the branch
		that he/she wants updated. We'll let the user manage branches with
		hg directly.
		"""

        hg_cmd_opts = ""
        if self.repo.module_specific_options.get("sync-mercurial-env"):
            shlexed_env = shlex_split(
                self.repo.module_specific_options["sync-mercurial-env"])
            env = dict(
                (k, v) for k, _, v in (assignment.partition("=")
                                       for assignment in shlexed_env) if k)
            self.spawn_kwargs["env"].update(env)

        if self.repo.module_specific_options.get("sync-mercurial-pull-env"):
            shlexed_env = shlex_split(
                self.repo.module_specific_options["sync-mercurial-pull-env"])
            pull_env = dict(
                (k, v) for k, _, v in (assignment.partition("=")
                                       for assignment in shlexed_env) if k)
            self.spawn_kwargs["env"].update(pull_env)

        if self.settings.get("PORTAGE_QUIET") == "1":
            hg_cmd_opts += " --quiet"
        if self.repo.module_specific_options.get(
                "sync-mercurial-pull-extra-opts"):
            hg_cmd_opts += (
                " %s" % self.repo.
                module_specific_options["sync-mercurial-pull-extra-opts"])
        hg_cmd = "%s pull -u%s" % (self.bin_command, hg_cmd_opts)
        writemsg_level(hg_cmd + "\n")

        rev_cmd = [self.bin_command, "id", "--id", "--rev", "tip"]
        previous_rev = subprocess.check_output(rev_cmd,
                                               cwd=portage._unicode_encode(
                                                   self.repo.location))

        exitcode = portage.process.spawn(shlex_split(hg_cmd),
                                         cwd=portage._unicode_encode(
                                             self.repo.location),
                                         **self.spawn_kwargs)
        if exitcode != os.EX_OK:
            msg = "!!! hg pull error in %s" % self.repo.location
            self.logger(self.xterm_titles, msg)
            writemsg_level(msg + "\n", level=logging.ERROR, noiselevel=-1)
            return (exitcode, False)

        current_rev = subprocess.check_output(rev_cmd,
                                              cwd=portage._unicode_encode(
                                                  self.repo.location))

        return (os.EX_OK, current_rev != previous_rev)
Example #7
0
	def set_port_dirs(self, portdir, portdir_overlay):
		self.portdir = portdir
		self.portdir_overlay = portdir_overlay
		if self.portdir_overlay is None:
			self.portdir_overlay = ""

		self.overlay_profiles = []
		for ov in shlex_split(self.portdir_overlay):
			ov = normalize_path(ov)
			profiles_dir = os.path.join(ov, "profiles")
			if os.path.isdir(profiles_dir):
				self.overlay_profiles.append(profiles_dir)

		self.profile_locations = [os.path.join(portdir, "profiles")] + self.overlay_profiles
		self.profile_and_user_locations = self.profile_locations[:]
		if self._user_config:
			self.profile_and_user_locations.append(self.abs_user_config)

		self.profile_locations = tuple(self.profile_locations)
		self.profile_and_user_locations = tuple(self.profile_and_user_locations)

		self.pmask_locations = [os.path.join(portdir, "profiles")]
		self.pmask_locations.extend(self.profiles)
		self.pmask_locations.extend(self.overlay_profiles)
		self.pmask_locations = tuple(self.pmask_locations)
Example #8
0
def diffstatusoutput(cmd, file1, file2):
	"""
	Execute the string cmd in a shell with getstatusoutput() and return a
	2-tuple (status, output).
	"""
	# Use Popen to emulate getstatusoutput(), since getstatusoutput() may
	# raise a UnicodeDecodeError which makes the output inaccessible.
	args = shlex_split(cmd % (file1, file2))

	if sys.hexversion < 0x3020000 and sys.hexversion >= 0x3000000 and \
		not os.path.isabs(args[0]):
		# Python 3.1 _execvp throws TypeError for non-absolute executable
		# path passed as bytes (see http://bugs.python.org/issue8513).
		fullname = portage.process.find_binary(args[0])
		if fullname is None:
			raise portage.exception.CommandNotFound(args[0])
		args[0] = fullname

	args = [portage._unicode_encode(x, errors='strict') for x in args]
	proc = subprocess.Popen(args,
		stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
	output = portage._unicode_decode(proc.communicate()[0])
	if output and output[-1] == "\n":
		# getstatusoutput strips one newline
		output = output[:-1]
	return (proc.wait(), output)
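
Callers hand diffstatusoutput() a printf-style command with two %s slots, for example as below (assuming a POSIX diff on PATH; note that the result is shlex-split, so paths containing whitespace would need quoting):

# identical files: diff exits 0 and prints nothing
status, output = diffstatusoutput(
    "diff -u %s %s", "/etc/hostname", "/etc/hostname")
assert status == 0 and output == ""
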
Example #9
0
def diffstatusoutput(cmd, file1, file2):
    """
	Execute the string cmd in a shell with getstatusoutput() and return a
	2-tuple (status, output).
	"""
    # Use Popen to emulate getstatusoutput(), since getstatusoutput() may
    # raise a UnicodeDecodeError which makes the output inaccessible.
    args = shlex_split(cmd % (file1, file2))

    if sys.hexversion < 0x3020000 and sys.hexversion >= 0x3000000 and \
            not os.path.isabs(args[0]):
        # Python 3.1 _execvp throws TypeError for non-absolute executable
        # path passed as bytes (see http://bugs.python.org/issue8513).
        fullname = portage.process.find_binary(args[0])
        if fullname is None:
            raise portage.exception.CommandNotFound(args[0])
        args[0] = fullname

    args = [portage._unicode_encode(x, errors='strict') for x in args]
    proc = subprocess.Popen(args,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT)
    output = portage._unicode_decode(proc.communicate()[0])
    if output and output[-1] == "\n":
        # getstatusoutput strips one newline
        output = output[:-1]
    return (proc.wait(), output)
Example #10
0
 def updated_config_files(count):
     self.assertEqual(
         count,
         sum(
             len(x[1]) for x in find_updated_config_files(
                 eroot, shlex_split(config_protect))),
     )
Example #11
0
 def _exclude_pattern(s):
     # shlex_split enables quoted whitespace inside patterns
     if s:
         pat = re.compile("|".join(
             fnmatch.translate(x.lstrip(os.sep)) for x in shlex_split(s)))
     else:
         pat = None
     return pat
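
Compiling one alternation of fnmatch-translated patterns lets a single match() call test every exclusion at once. A self-contained sketch, with stdlib shlex.split standing in for shlex_split:

import fnmatch
import os
import re
import shlex

def exclude_pattern(s):
    # Same shape as _exclude_pattern above: None when no patterns given.
    if not s:
        return None
    return re.compile("|".join(
        fnmatch.translate(x.lstrip(os.sep)) for x in shlex.split(s)))

pat = exclude_pattern('tmp/* "cache dir/*"')
assert pat.match("tmp/foo")
assert pat.match("cache dir/x")      # quoted whitespace kept in the pattern
assert pat.match("src/main.py") is None
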
Example #12
0
	def update(self):
		''' Update existing git repository, and ignore the syncuri. We are
		going to trust the user and assume that the user is in the branch
		that he/she wants updated. We'll let the user manage branches with
		git directly.
		'''
		if not self.has_bin:
			return (1, False)
		git_cmd_opts = ""
		if self.repo.module_specific_options.get('sync-git-env'):
			shlexed_env = shlex_split(self.repo.module_specific_options['sync-git-env'])
			env = dict((k, v) for k, _, v in (assignment.partition('=') for assignment in shlexed_env) if k)
			self.spawn_kwargs['env'].update(env)

		if self.repo.module_specific_options.get('sync-git-pull-env'):
			shlexed_env = shlex_split(self.repo.module_specific_options['sync-git-pull-env'])
			pull_env = dict((k, v) for k, _, v in (assignment.partition('=') for assignment in shlexed_env) if k)
			self.spawn_kwargs['env'].update(pull_env)

		if self.settings.get("PORTAGE_QUIET") == "1":
			git_cmd_opts += " --quiet"
		if self.repo.module_specific_options.get('sync-git-pull-extra-opts'):
			git_cmd_opts += " %s" % self.repo.module_specific_options['sync-git-pull-extra-opts']
		git_cmd = "%s pull%s" % (self.bin_command, git_cmd_opts)
		writemsg_level(git_cmd + "\n")

		rev_cmd = [self.bin_command, "rev-list", "--max-count=1", "HEAD"]
		previous_rev = subprocess.check_output(rev_cmd,
			cwd=portage._unicode_encode(self.repo.location))

		exitcode = portage.process.spawn_bash("cd %s ; exec %s" % (
				portage._shell_quote(self.repo.location), git_cmd),
			**self.spawn_kwargs)
		if exitcode != os.EX_OK:
			msg = "!!! git pull error in %s" % self.repo.location
			self.logger(self.xterm_titles, msg)
			writemsg_level(msg + "\n", level=logging.ERROR, noiselevel=-1)
			return (exitcode, False)
		if not self.verify_head():
			return (1, False)

		current_rev = subprocess.check_output(rev_cmd,
			cwd=portage._unicode_encode(self.repo.location))

		return (os.EX_OK, current_rev != previous_rev)
Example #13
0
	def _exclude_pattern(s):
		# shlex_split enables quoted whitespace inside patterns
		if s:
			pat = re.compile("|".join(
				fnmatch.translate(x.lstrip(os.sep))
				for x in shlex_split(s)))
		else:
			pat = None
		return pat
Example #14
0
    def _async_start(self):
        pipe_logger = None
        filter_proc = None
        try:
            log_input = None
            if self.log_path is not None:
                log_filter_file = self.log_filter_file
                if log_filter_file is not None:
                    split_value = shlex_split(log_filter_file)
                    log_filter_file = split_value if split_value else None
                if log_filter_file:
                    filter_input, stdin = os.pipe()
                    log_input, filter_output = os.pipe()
                    try:
                        filter_proc = yield asyncio.create_subprocess_exec(
                            *log_filter_file,
                            env=self.env,
                            stdin=filter_input,
                            stdout=filter_output,
                            stderr=filter_output,
                            loop=self.scheduler)
                    except EnvironmentError:
                        # Maybe the command is missing or broken somehow...
                        os.close(filter_input)
                        os.close(stdin)
                        os.close(log_input)
                        os.close(filter_output)
                    else:
                        self._stdin = os.fdopen(stdin, 'wb', 0)
                        os.close(filter_input)
                        os.close(filter_output)

            if self._stdin is None:
                # Since log_filter_file is unspecified or refers to a file
                # that is missing or broken somehow, create a pipe that
                # logs directly to pipe_logger.
                log_input, stdin = os.pipe()
                self._stdin = os.fdopen(stdin, 'wb', 0)

            # Set background=True so that pipe_logger does not log to stdout.
            pipe_logger = PipeLogger(background=True,
                                     scheduler=self.scheduler,
                                     input_fd=log_input,
                                     log_file_path=self.log_path)

            yield pipe_logger.async_start()
        except asyncio.CancelledError:
            if pipe_logger is not None and pipe_logger.poll() is None:
                pipe_logger.cancel()
            if filter_proc is not None and filter_proc.returncode is None:
                filter_proc.terminate()
            raise

        self._main_task = asyncio.ensure_future(self._main(
            pipe_logger, filter_proc=filter_proc),
                                                loop=self.scheduler)
        self._main_task.add_done_callback(self._main_exit)
Example #15
0
    def _start(self):
        filter_proc = None
        log_input = None
        if self.log_path is not None:
            log_filter_file = self.log_filter_file
            if log_filter_file is not None:
                split_value = shlex_split(log_filter_file)
                log_filter_file = split_value if split_value else None
            if log_filter_file:
                filter_input, stdin = os.pipe()
                log_input, filter_output = os.pipe()
                try:
                    filter_proc = PopenProcess(
                        proc=subprocess.Popen(
                            log_filter_file,
                            env=self.env,
                            stdin=filter_input,
                            stdout=filter_output,
                            stderr=filter_output,
                        ),
                        scheduler=self.scheduler,
                    )
                    filter_proc.start()
                except EnvironmentError:
                    # Maybe the command is missing or broken somehow...
                    os.close(filter_input)
                    os.close(stdin)
                    os.close(log_input)
                    os.close(filter_output)
                else:
                    self._stdin = os.fdopen(stdin, "wb", 0)
                    os.close(filter_input)
                    os.close(filter_output)

        if self._stdin is None:
            # Since log_filter_file is unspecified or refers to a file
            # that is missing or broken somehow, create a pipe that
            # logs directly to pipe_logger.
            log_input, stdin = os.pipe()
            self._stdin = os.fdopen(stdin, "wb", 0)

        # Set background=True so that pipe_logger does not log to stdout.
        pipe_logger = PipeLogger(
            background=True,
            scheduler=self.scheduler,
            input_fd=log_input,
            log_file_path=self.log_path,
        )
        pipe_logger.start()

        self._main_task_cancel = functools.partial(self._main_cancel,
                                                   filter_proc, pipe_logger)
        self._main_task = asyncio.ensure_future(self._main(
            filter_proc, pipe_logger),
                                                loop=self.scheduler)
        self._main_task.add_done_callback(self._main_exit)
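
The two os.pipe() calls above splice a filter process between the writer and the logger: data written to self._stdin enters the filter's stdin, and the filter's output arrives on log_input for PipeLogger. A minimal standalone sketch of the same plumbing, with an illustrative tr filter:

import os
import subprocess

filter_input, stdin = os.pipe()       # writer -> filter
log_input, filter_output = os.pipe()  # filter -> logger

proc = subprocess.Popen(["tr", "a-z", "A-Z"],
                        stdin=filter_input, stdout=filter_output)
# The child holds its own copies; close ours so EOF propagates.
os.close(filter_input)
os.close(filter_output)

with os.fdopen(stdin, "wb", 0) as writer:
    writer.write(b"build log line\n")

with os.fdopen(log_input, "rb") as logger_side:
    assert logger_side.readline() == b"BUILD LOG LINE\n"
proc.wait()
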
Example #16
0
	def _start_gpg_proc(self):
		gpg_vars = self.gpg_vars
		if gpg_vars is None:
			gpg_vars = {}
		else:
			gpg_vars = gpg_vars.copy()
		gpg_vars["FILE"] = self._manifest_path
		gpg_cmd = varexpand(self.gpg_cmd, mydict=gpg_vars)
		gpg_cmd = shlex_split(gpg_cmd)
		gpg_proc = PopenProcess(proc=subprocess.Popen(gpg_cmd))
		self._start_task(gpg_proc, self._gpg_proc_exit)
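
varexpand() substitutes ${FILE}-style references from gpg_vars into the configured command string. A rough standalone stand-in using string.Template (portage's varexpand also handles backslash escapes; the template below is illustrative, not the actual default signing command):

import shlex
from string import Template

gpg_vars = {"PORTAGE_GPG_KEY": "0xDEADBEEF", "FILE": "/tmp/Manifest"}
template = "gpg --clearsign --default-key ${PORTAGE_GPG_KEY} --yes ${FILE}"
gpg_cmd = shlex.split(Template(template).substitute(gpg_vars))
assert gpg_cmd[-1] == "/tmp/Manifest"
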
Example #17
0
 def _start_gpg_proc(self):
     gpg_vars = self.gpg_vars
     if gpg_vars is None:
         gpg_vars = {}
     else:
         gpg_vars = gpg_vars.copy()
     gpg_vars["FILE"] = self._manifest_path
     gpg_cmd = varexpand(self.gpg_cmd, mydict=gpg_vars)
     gpg_cmd = shlex_split(gpg_cmd)
     gpg_proc = PopenProcess(proc=subprocess.Popen(gpg_cmd))
     self._start_task(gpg_proc, self._gpg_proc_exit)
Example #18
0
def validate_cmd_var(v):
	"""
	Validate an environment variable value to see if it
	contains an executable command as the first token.
	Returns (valid, token_list) where 'valid' is boolean and 'token_list'
	is the (possibly empty) list of tokens split by shlex.
	"""
	invalid = False
	v_split = shlex_split(v)
	if not v_split:
		invalid = True
	elif os.path.isabs(v_split[0]):
		invalid = not os.access(v_split[0], os.X_OK)
	elif find_binary(v_split[0]) is None:
		invalid = True
	return (not invalid, v_split)
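
validate_cmd_var() accepts an absolute path or a bare command resolved via PATH. A self-contained sketch of the same checks, with shutil.which standing in for portage's find_binary (assumes a POSIX system where sh is on PATH):

import os
import shlex
import shutil

def validate_cmd(v):
    v_split = shlex.split(v)
    if not v_split:
        return (False, v_split)
    if os.path.isabs(v_split[0]):
        return (os.access(v_split[0], os.X_OK), v_split)
    return (shutil.which(v_split[0]) is not None, v_split)

assert validate_cmd("sh -c true") == (True, ["sh", "-c", "true"])
assert validate_cmd("") == (False, [])
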
Example #19
0
	def _start_gpg_proc(self):
		gpg_vars = self.gpg_vars
		if gpg_vars is None:
			gpg_vars = {}
		else:
			gpg_vars = gpg_vars.copy()
		gpg_vars["FILE"] = self._manifest_path
		gpg_cmd = varexpand(self.gpg_cmd, mydict=gpg_vars)
		gpg_cmd = shlex_split(gpg_cmd)
		gpg_proc = PopenProcess(proc=subprocess.Popen(gpg_cmd,
			stdout=subprocess.PIPE, stderr=subprocess.STDOUT))
		# PipeLogger echos output and efficiently monitors for process
		# exit by listening for the stdout EOF event.
		gpg_proc.pipe_reader = PipeLogger(background=self.background,
			input_fd=gpg_proc.proc.stdout, scheduler=self.scheduler)
		self._start_task(gpg_proc, self._gpg_proc_exit)
Example #20
0
def validate_cmd_var(v):
    """
    Validate an environment variable value to see if it
    contains an executable command as the first token.
    Returns (valid, token_list) where 'valid' is boolean and 'token_list'
    is the (possibly empty) list of tokens split by shlex.
    """
    invalid = False
    v_split = shlex_split(v)
    if not v_split:
        invalid = True
    elif os.path.isabs(v_split[0]):
        invalid = not os.access(v_split[0], os.X_OK)
    elif find_binary(v_split[0]) is None:
        invalid = True
    return (not invalid, v_split)
Example #21
0
	def _start_gpg_proc(self):
		gpg_vars = self.gpg_vars
		if gpg_vars is None:
			gpg_vars = {}
		else:
			gpg_vars = gpg_vars.copy()
		gpg_vars["FILE"] = self._manifest_path
		gpg_cmd = varexpand(self.gpg_cmd, mydict=gpg_vars)
		gpg_cmd = shlex_split(gpg_cmd)
		gpg_proc = PopenProcess(proc=subprocess.Popen(gpg_cmd,
			stdout=subprocess.PIPE, stderr=subprocess.STDOUT))
		# PipeLogger echos output and efficiently monitors for process
		# exit by listening for the stdout EOF event.
		gpg_proc.pipe_reader = PipeLogger(background=self.background,
			input_fd=gpg_proc.proc.stdout, scheduler=self.scheduler)
		self._start_task(gpg_proc, self._gpg_proc_exit)
Example #22
0
	def clean(self, **kwargs):
		"""Log directory cleaning function

		@param **kwargs: optional dictionary; the values used in this function are:
			settings: portage settings instance: defaults to portage.settings
				"PORT_LOGDIR": directory to clean
				"PORT_LOGDIR_CLEAN": command for cleaning the logs.
			options: dict:
				'NUM': int: number of days
				'pretend': boolean
		"""
		num_of_days = None
		pretend = False
		settings = portage.settings
		if kwargs:
			# convoluted, I know, but portage.settings does not exist in
			# kwargs.get() when called from _emerge.main.clean_logs()
			settings = kwargs.get('settings', None)
			if not settings:
				settings = portage.settings
			options = kwargs.get('options', None)
			if options:
				num_of_days = options.get('NUM', None)
				pretend = options.get('pretend', False)

		clean_cmd = settings.get("PORT_LOGDIR_CLEAN")
		if clean_cmd:
			clean_cmd = shlex_split(clean_cmd)
			if '-mtime' in clean_cmd and num_of_days is not None:
				if num_of_days == 0:
					i = clean_cmd.index('-mtime')
					clean_cmd.remove('-mtime')
					clean_cmd.pop(i)
				else:
					clean_cmd[clean_cmd.index('-mtime') +1] = \
						'+%s' % str(num_of_days)
			if pretend:
				if "-delete" in clean_cmd:
					clean_cmd.remove("-delete")

		if not clean_cmd:
			return (True, None)
		rval = self._clean_logs(clean_cmd, settings)
		errors = self._convert_errors(rval)
		if errors:
			return (False, errors)
		return (True, None)
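
The -mtime handling above edits a find(1)-style token list in place: drop the test entirely for NUM=0, otherwise rewrite its argument, and strip -delete when pretending. A standalone sketch of that transformation (the command string is illustrative):

import shlex

def adjust_clean_cmd(clean_cmd, num_of_days=None, pretend=False):
    cmd = shlex.split(clean_cmd)
    if "-mtime" in cmd and num_of_days is not None:
        i = cmd.index("-mtime")
        if num_of_days == 0:
            del cmd[i:i + 2]          # remove '-mtime' and its argument
        else:
            cmd[i + 1] = "+%s" % num_of_days
    if pretend and "-delete" in cmd:
        cmd.remove("-delete")
    return cmd

assert adjust_clean_cmd("find /var/log/portage -mtime +7 -delete",
                        num_of_days=30, pretend=True) == [
    "find", "/var/log/portage", "-mtime", "+30"]
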
Example #23
0
def diffstatusoutput(cmd, file1, file2):
	"""
	Execute the string cmd in a shell with getstatusoutput() and return a
	2-tuple (status, output).
	"""
	# Use Popen to emulate getstatusoutput(), since getstatusoutput() may
	# raise a UnicodeDecodeError which makes the output inaccessible.
	args = shlex_split(cmd % (file1, file2))

	args = [portage._unicode_encode(x, errors='strict') for x in args]
	proc = subprocess.Popen(args,
		stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
	output = portage._unicode_decode(proc.communicate()[0])
	if output and output[-1] == "\n":
		# getstatusoutput strips one newline
		output = output[:-1]
	return (proc.wait(), output)
Example #24
0
	def clean(self, **kwargs):
		"""Log directory cleaning function

		@param **kwargs: optional dictionary; the values used in this function are:
			settings: portage settings instance: defaults to portage.settings
				"PORT_LOGDIR": directory to clean
				"PORT_LOGDIR_CLEAN": command for cleaning the logs.
			options: dict:
				'NUM': int: number of days
				'pretend': boolean
		"""
		num_of_days = None
		pretend = False

		# convoluted, I know, but portage.settings does not exist in
		# kwargs.get() when called from _emerge.main.clean_logs()
		settings = kwargs.get('settings', getattr(portage, 'settings', {}))

		options = kwargs.get('options', None)
		if options:
			num_of_days = options.get('NUM', None)
			pretend = options.get('pretend', False)

		clean_cmd = settings.get("PORT_LOGDIR_CLEAN")
		if clean_cmd:
			clean_cmd = shlex_split(clean_cmd)
			if '-mtime' in clean_cmd and num_of_days is not None:
				if num_of_days == 0:
					i = clean_cmd.index('-mtime')
					clean_cmd.remove('-mtime')
					clean_cmd.pop(i)
				else:
					clean_cmd[clean_cmd.index('-mtime') +1] = \
						'+%s' % str(num_of_days)
			if pretend:
				if "-delete" in clean_cmd:
					clean_cmd.remove("-delete")

		if not clean_cmd:
			return (True, None)
		rval = self._clean_logs(clean_cmd, settings)
		errors = self._convert_errors(rval)
		if errors:
			return (False, errors)
		return (True, None)
Example #25
0
def editor_is_executable(editor):
	"""
	Given an EDITOR string, validate that it refers to
	an executable. This uses shlex_split() to split the
	first component and do a PATH lookup if necessary.

	@param editor: An EDITOR value from the environment.
	@type: string
	@rtype: bool
	@return: True if an executable is found, False otherwise.
	"""
	editor_split = util.shlex_split(editor)
	if not editor_split:
		return False
	filename = editor_split[0]
	if not os.path.isabs(filename):
		return find_binary(filename) is not None
	return os.access(filename, os.X_OK) and os.path.isfile(filename)
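
A self-contained equivalent for experimentation, with shutil.which standing in for find_binary (assumes /bin/sh exists):

import os
import shlex
import shutil

def editor_ok(editor):
    parts = shlex.split(editor)
    if not parts:
        return False
    filename = parts[0]
    if not os.path.isabs(filename):
        return shutil.which(filename) is not None
    return os.access(filename, os.X_OK) and os.path.isfile(filename)

assert editor_ok("/bin/sh")
assert editor_ok("/bin/sh -c")   # arguments after the executable are fine
assert not editor_ok("")
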
Example #26
0
def editor_is_executable(editor):
	"""
	Given an EDITOR string, validate that it refers to
	an executable. This uses shlex_split() to split the
	first component and do a PATH lookup if necessary.

	@param editor: An EDITOR value from the environment.
	@type: string
	@rtype: bool
	@return: True if an executable is found, False otherwise.
	"""
	editor_split = util.shlex_split(editor)
	if not editor_split:
		return False
	filename = editor_split[0]
	if not os.path.isabs(filename):
		return find_binary(filename) is not None
	return os.access(filename, os.X_OK) and os.path.isfile(filename)
Example #27
0
def diffstatusoutput(cmd, file1, file2):
    """
    Execute the string cmd in a shell with getstatusoutput() and return a
    2-tuple (status, output).
    """
    # Use Popen to emulate getstatusoutput(), since getstatusoutput() may
    # raise a UnicodeDecodeError which makes the output inaccessible.
    args = shlex_split(cmd % (file1, file2))
    if sys.hexversion < 0x3000000 or sys.hexversion >= 0x3020000:
        # Python 3.0 and 3.1 do not support bytes in Popen args.
        args = [portage._unicode_encode(x, errors='strict') for x in args]
    proc = subprocess.Popen(args,
        stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    output = portage._unicode_decode(proc.communicate()[0])
    if output and output[-1] == "\n":
        # getstatusoutput strips one newline
        output = output[:-1]
    return (proc.wait(), output)
Example #28
0
	def set_port_dirs(self, portdir, portdir_overlay):
		self.portdir = portdir
		self.portdir_overlay = portdir_overlay
		if self.portdir_overlay is None:
			self.portdir_overlay = ""

		self.overlay_profiles = []
		for ov in shlex_split(self.portdir_overlay):
			ov = normalize_path(ov)
			profiles_dir = os.path.join(ov, "profiles")
			if os.path.isdir(profiles_dir):
				self.overlay_profiles.append(profiles_dir)

		self.profile_locations = [os.path.join(portdir, "profiles")] + self.overlay_profiles
		self.profile_and_user_locations = self.profile_locations[:]
		if self._user_config:
			self.profile_and_user_locations.append(self.abs_user_config)

		self.profile_locations = tuple(self.profile_locations)
		self.profile_and_user_locations = tuple(self.profile_and_user_locations)
Example #29
0
    def __init__(self, roots):
        self._shown_repos = {}
        self._unknown_repo = False
        repo_paths = set()
        for root_config in roots.values():
            portdir = root_config.settings.get("PORTDIR")
            if portdir:
                repo_paths.add(portdir)
            overlays = root_config.settings.get("PORTDIR_OVERLAY")
            if overlays:
                repo_paths.update(shlex_split(overlays))
        repo_paths = list(repo_paths)
        self._repo_paths = repo_paths
        self._repo_paths_real = [os.path.realpath(repo_path) for repo_path in repo_paths]

        # pre-allocate index for PORTDIR so that it always has index 0.
        for root_config in roots.values():
            portdb = root_config.trees["porttree"].dbapi
            portdir = portdb.porttree_root
            if portdir:
                self.repoStr(portdir)
Example #30
0
    def __init__(self, roots):
        self._shown_repos = {}
        self._unknown_repo = False
        repo_paths = set()
        for root_config in roots.values():
            portdir = root_config.settings.get("PORTDIR")
            if portdir:
                repo_paths.add(portdir)
            overlays = root_config.settings.get("PORTDIR_OVERLAY")
            if overlays:
                repo_paths.update(shlex_split(overlays))
        repo_paths = list(repo_paths)
        self._repo_paths = repo_paths
        self._repo_paths_real = [os.path.realpath(repo_path)
                                 for repo_path in repo_paths]

        # pre-allocate index for PORTDIR so that it always has index 0.
        for root_config in roots.values():
            portdb = root_config.trees["porttree"].dbapi
            portdir = portdb.porttree_root
            if portdir:
                self.repoStr(portdir)
Example #31
0
	def run(self):
		config_protect = shlex_split(settings.get("CONFIG_PROTECT", ""))
		self.chk_updated_cfg_files(settings["EROOT"], config_protect)
Example #32
0
def ExtractKernelVersion(base_dir):
    """
	Try to figure out what kernel version we are running
	@param base_dir: Path to sources (usually /usr/src/linux)
	@type base_dir: string
	@rtype: tuple( version[string], error[string])
	@return:
	1. tuple( version[string], error[string])
	Either version or error is populated (but never both)

	"""
    lines = []
    pathname = os.path.join(base_dir, 'Makefile')
    try:
        f = io.open(_unicode_encode(pathname,
                                    encoding=_encodings['fs'],
                                    errors='strict'),
                    mode='r',
                    encoding=_encodings['content'],
                    errors='replace')
    except OSError as details:
        return (None, str(details))
    except IOError as details:
        return (None, str(details))

    try:
        for i in range(4):
            lines.append(f.readline())
    except OSError as details:
        return (None, str(details))
    except IOError as details:
        return (None, str(details))
    finally:
        f.close()

    lines = [l.strip() for l in lines]

    version = ''

    #XXX: The following code relies on the ordering of vars within the Makefile
    for line in lines:
        # split on the '=' then remove annoying whitespace
        items = line.split("=")
        items = [i.strip() for i in items]
        if items[0] == 'VERSION' or \
         items[0] == 'PATCHLEVEL':
            version += items[1]
            version += "."
        elif items[0] == 'SUBLEVEL':
            version += items[1]
        elif items[0] == 'EXTRAVERSION' and \
         items[-1] != items[0]:
            version += items[1]

    # Grab a list of files named localversion* and sort them
    localversions = os.listdir(base_dir)
    for x in range(len(localversions) - 1, -1, -1):
        if localversions[x][:12] != "localversion":
            del localversions[x]
    localversions.sort()

    # Append the contents of each to the version string, stripping ALL whitespace
    for lv in localversions:
        version += "".join(" ".join(grabfile(base_dir + "/" + lv)).split())

    # Check the .config for a CONFIG_LOCALVERSION and append that too, also stripping whitespace
    loader = KeyValuePairFileLoader(os.path.join(base_dir, ".config"), None)
    kernelconfig, loader_errors = loader.load()
    if loader_errors:
        for file_path, file_errors in loader_errors.items():
            for error_str in file_errors:
                writemsg_level("%s: %s\n" % (file_path, error_str),
                               level=logging.ERROR,
                               noiselevel=-1)

    if kernelconfig and "CONFIG_LOCALVERSION" in kernelconfig:
        version += "".join(shlex_split(kernelconfig["CONFIG_LOCALVERSION"]))

    return (version, None)
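
A condensed, standalone sketch of the Makefile-parsing step above, handy for a quick check outside portage (the function name is illustrative, and the localversion/.config handling is omitted):

import io

def kernel_version_from_makefile(path):
    # Read VERSION/PATCHLEVEL/SUBLEVEL/EXTRAVERSION from the first four
    # lines of a kernel Makefile, yielding e.g. '6.1.57-gentoo'.
    fields = {}
    with io.open(path, encoding="utf-8", errors="replace") as f:
        for _ in range(4):
            key, sep, value = f.readline().partition("=")
            if sep:
                fields[key.strip()] = value.strip()
    version = "%s.%s.%s" % (fields.get("VERSION", "0"),
                            fields.get("PATCHLEVEL", "0"),
                            fields.get("SUBLEVEL", "0"))
    return version + fields.get("EXTRAVERSION", "")

# kernel_version_from_makefile("/usr/src/linux/Makefile")
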
Example #33
0
def _do_global_updates(trees, prev_mtimes, quiet=False, if_mtime_changed=True):
	root = trees._running_eroot
	mysettings = trees[root]["vartree"].settings
	portdb = trees[root]["porttree"].dbapi
	vardb = trees[root]["vartree"].dbapi
	bindb = trees[root]["bintree"].dbapi

	world_file = os.path.join(mysettings['EROOT'], WORLD_FILE)
	world_list = grabfile(world_file)
	world_modified = False
	world_warnings = set()
	updpath_map = {}
	# Maps repo_name to list of updates. If a given repo has no updates
	# directory, it will be omitted. If a repo has an updates directory
	# but none need to be applied (according to timestamp logic), the
	# value in the dict will be an empty list.
	repo_map = {}
	timestamps = {}

	retupd = False
	update_notice_printed = False
	for repo_name in portdb.getRepositories():
		repo = portdb.getRepositoryPath(repo_name)
		updpath = os.path.join(repo, "profiles", "updates")
		if not os.path.isdir(updpath):
			continue

		if updpath in updpath_map:
			repo_map[repo_name] = updpath_map[updpath]
			continue

		try:
			if if_mtime_changed:
				update_data = grab_updates(updpath, prev_mtimes=prev_mtimes)
			else:
				update_data = grab_updates(updpath)
		except DirectoryNotFound:
			continue
		myupd = []
		updpath_map[updpath] = myupd
		repo_map[repo_name] = myupd
		if len(update_data) > 0:
			for mykey, mystat, mycontent in update_data:
				if not update_notice_printed:
					update_notice_printed = True
					writemsg_stdout("\n")
					if quiet:
						writemsg_stdout(colorize("GOOD",
							_("Performing Global Updates\n")))
						writemsg_stdout(_("(Could take a couple of minutes if you have a lot of binary packages.)\n"))
					else:
						writemsg_stdout(colorize("GOOD",
							_("Performing Global Updates:\n")))
						writemsg_stdout(_("(Could take a couple of minutes if you have a lot of binary packages.)\n"))
						writemsg_stdout(_("  %s='update pass'  %s='binary update'  "
							"%s='/var/db update'  %s='/var/db move'\n"
							"  %s='/var/db SLOT move'  %s='binary move'  "
							"%s='binary SLOT move'\n  %s='update /etc/portage/package.*'\n") % \
							(bold("."), bold("*"), bold("#"), bold("@"), bold("s"), bold("%"), bold("S"), bold("p")))
				valid_updates, errors = parse_updates(mycontent)
				myupd.extend(valid_updates)
				if not quiet:
					writemsg_stdout(bold(mykey))
					writemsg_stdout(len(valid_updates) * "." + "\n")
				if len(errors) == 0:
					# Update our internal mtime since we
					# processed all of our directives.
					timestamps[mykey] = mystat[stat.ST_MTIME]
				else:
					for msg in errors:
						writemsg("%s\n" % msg, noiselevel=-1)
			if myupd:
				retupd = True

	if retupd:
		if os.access(bindb.bintree.pkgdir, os.W_OK):
			# Call binarytree.populate(), since we want to make sure it's
			# only populated with local packages here (getbinpkgs=0).
			bindb.bintree.populate()
		else:
			bindb = None

	master_repo = portdb.getRepositoryName(portdb.porttree_root)
	if master_repo in repo_map:
		repo_map['DEFAULT'] = repo_map[master_repo]

	for repo_name, myupd in repo_map.items():
			if repo_name == 'DEFAULT':
				continue
			if not myupd:
				continue

			def repo_match(repository):
				return repository == repo_name or \
					(repo_name == master_repo and repository not in repo_map)

			def _world_repo_match(atoma, atomb):
				"""
				Check whether to perform a world change from atoma to atomb.
				If best vardb match for atoma comes from the same repository
				as the update file, allow that. Additionally, if portdb still
				can find a match for old atom name, warn about that.
				"""
				matches = vardb.match(atoma)
				if not matches:
					matches = vardb.match(atomb)
				if matches and \
					repo_match(vardb.aux_get(best(matches), ['repository'])[0]):
					if portdb.match(atoma):
						world_warnings.add((atoma, atomb))
					return True
				else:
					return False

			for update_cmd in myupd:
				for pos, atom in enumerate(world_list):
					new_atom = update_dbentry(update_cmd, atom)
					if atom != new_atom:
						if _world_repo_match(atom, new_atom):
							world_list[pos] = new_atom
							world_modified = True

			for update_cmd in myupd:
				if update_cmd[0] == "move":
					moves = vardb.move_ent(update_cmd, repo_match=repo_match)
					if moves:
						writemsg_stdout(moves * "@")
					if bindb:
						moves = bindb.move_ent(update_cmd, repo_match=repo_match)
						if moves:
							writemsg_stdout(moves * "%")
				elif update_cmd[0] == "slotmove":
					moves = vardb.move_slot_ent(update_cmd, repo_match=repo_match)
					if moves:
						writemsg_stdout(moves * "s")
					if bindb:
						moves = bindb.move_slot_ent(update_cmd, repo_match=repo_match)
						if moves:
							writemsg_stdout(moves * "S")

	if world_modified:
		world_list.sort()
		write_atomic(world_file,
			"".join("%s\n" % (x,) for x in world_list))
		if world_warnings:
			# XXX: print warning that we've updated world entries
			# and the old name still matches something (from an overlay)?
			pass

	if retupd:

			def _config_repo_match(repo_name, atoma, atomb):
				"""
				Check whether to perform a world change from atoma to atomb.
				If best vardb match for atoma comes from the same repository
				as the update file, allow that. Additionally, if portdb still
				can find a match for old atom name, warn about that.
				"""
				matches = vardb.match(atoma)
				if not matches:
					matches = vardb.match(atomb)
					if not matches:
						return False
				repository = vardb.aux_get(best(matches), ['repository'])[0]
				return repository == repo_name or \
					(repo_name == master_repo and repository not in repo_map)

			update_config_files(root,
				shlex_split(mysettings.get("CONFIG_PROTECT", "")),
				shlex_split(mysettings.get("CONFIG_PROTECT_MASK", "")),
				repo_map, match_callback=_config_repo_match)

			# The above global updates proceed quickly, so they
			# are considered a single mtimedb transaction.
			if timestamps:
				# We do not update the mtime in the mtimedb
				# until after _all_ of the above updates have
				# been processed because the mtimedb will
				# automatically commit when killed by ctrl C.
				for mykey, mtime in timestamps.items():
					prev_mtimes[mykey] = mtime

			do_upgrade_packagesmessage = False
			# We gotta do the brute force updates for these now.
			if True:
				def onUpdate(maxval, curval):
					if curval > 0:
						writemsg_stdout("#")
				if quiet:
					onUpdate = None
				vardb.update_ents(repo_map, onUpdate=onUpdate)
				if bindb:
					def onUpdate(maxval, curval):
						if curval > 0:
							writemsg_stdout("*")
					if quiet:
						onUpdate = None
					bindb.update_ents(repo_map, onUpdate=onUpdate)
			else:
				do_upgrade_packagesmessage = 1

			# Update progress above is indicated by characters written to stdout so
			# we print a couple new lines here to separate the progress output from
			# what follows.
			writemsg_stdout("\n\n")

			if do_upgrade_packagesmessage and bindb and \
				bindb.cpv_all():
				writemsg_stdout(_(" ** Skipping packages. Run 'fixpackages' or set it in FEATURES to fix the tbz2's in the packages directory.\n"))
				writemsg_stdout(bold(_("Note: This can take a very long time.")))
				writemsg_stdout("\n")

	return retupd
Example #34
0
	def update(self):
		''' Update existing git repository, and ignore the syncuri. We are
		going to trust the user and assume that the user is in the branch
		that he/she wants updated. We'll let the user manage branches with
		git directly.
		'''
		if not self.has_bin:
			return (1, False)
		git_cmd_opts = ""
		quiet = self.settings.get("PORTAGE_QUIET") == "1"
		if self.repo.module_specific_options.get('sync-git-env'):
			shlexed_env = shlex_split(self.repo.module_specific_options['sync-git-env'])
			env = dict((k, v) for k, _, v in (assignment.partition('=') for assignment in shlexed_env) if k)
			self.spawn_kwargs['env'].update(env)

		if self.repo.module_specific_options.get('sync-git-pull-env'):
			shlexed_env = shlex_split(self.repo.module_specific_options['sync-git-pull-env'])
			pull_env = dict((k, v) for k, _, v in (assignment.partition('=') for assignment in shlexed_env) if k)
			self.spawn_kwargs['env'].update(pull_env)

		if self.settings.get("PORTAGE_QUIET") == "1":
			git_cmd_opts += " --quiet"
		if self.repo.module_specific_options.get('sync-git-pull-extra-opts'):
			git_cmd_opts += " %s" % self.repo.module_specific_options['sync-git-pull-extra-opts']

		try:
			remote_branch = portage._unicode_decode(
				subprocess.check_output([self.bin_command, 'rev-parse',
				'--abbrev-ref', '--symbolic-full-name', '@{upstream}'],
				cwd=portage._unicode_encode(self.repo.location))).rstrip('\n')
		except subprocess.CalledProcessError as e:
			msg = "!!! git rev-parse error in %s" % self.repo.location
			self.logger(self.xterm_titles, msg)
			writemsg_level(msg + "\n", level=logging.ERROR, noiselevel=-1)
			return (e.returncode, False)

		shallow = self.repo.sync_depth is not None and self.repo.sync_depth != 0
		if shallow:
			git_cmd_opts += " --depth %d" % self.repo.sync_depth

			# For shallow fetch, unreachable objects may need to be pruned
			# manually, in order to prevent automatic git gc calls from
			# eventually failing (see bug 599008).
			gc_cmd = ['git', '-c', 'gc.autodetach=false', 'gc', '--auto']
			if quiet:
				gc_cmd.append('--quiet')
			exitcode = subprocess.call(gc_cmd,
				cwd=portage._unicode_encode(self.repo.location))
			if exitcode != os.EX_OK:
				msg = "!!! git gc error in %s" % self.repo.location
				self.logger(self.xterm_titles, msg)
				writemsg_level(msg + "\n", level=logging.ERROR, noiselevel=-1)
				return (exitcode, False)

		git_cmd = "%s fetch %s%s" % (self.bin_command,
			remote_branch.partition('/')[0], git_cmd_opts)

		writemsg_level(git_cmd + "\n")

		rev_cmd = [self.bin_command, "rev-list", "--max-count=1", "HEAD"]
		previous_rev = subprocess.check_output(rev_cmd,
			cwd=portage._unicode_encode(self.repo.location))

		exitcode = portage.process.spawn_bash("cd %s ; exec %s" % (
				portage._shell_quote(self.repo.location), git_cmd),
			**self.spawn_kwargs)

		if exitcode != os.EX_OK:
			msg = "!!! git fetch error in %s" % self.repo.location
			self.logger(self.xterm_titles, msg)
			writemsg_level(msg + "\n", level=logging.ERROR, noiselevel=-1)
			return (exitcode, False)

		if not self.verify_head(revision='refs/remotes/%s' % remote_branch):
			return (1, False)

		if shallow:
			# Since the default merge strategy typically fails when
			# the depth is not unlimited, use `git reset --merge` instead.
			merge_cmd = [self.bin_command, 'reset', '--merge']
		else:
			merge_cmd = [self.bin_command, 'merge']
		merge_cmd.append('refs/remotes/%s' % remote_branch)
		if quiet:
			merge_cmd.append('--quiet')
		exitcode = subprocess.call(merge_cmd,
			cwd=portage._unicode_encode(self.repo.location))

		if exitcode != os.EX_OK:
			msg = "!!! git merge error in %s" % self.repo.location
			self.logger(self.xterm_titles, msg)
			writemsg_level(msg + "\n", level=logging.ERROR, noiselevel=-1)
			return (exitcode, False)

		current_rev = subprocess.check_output(rev_cmd,
			cwd=portage._unicode_encode(self.repo.location))

		return (os.EX_OK, current_rev != previous_rev)
Example #35
0
def fetch(myuris, mysettings, listonly=0, fetchonly=0,
	locks_in_subdir=".locks", use_locks=1, try_mirrors=1, digests=None,
	allow_missing_digests=True, force=False):
	"""
	Fetch files to DISTDIR and also verify digests if they are available.

	@param myuris: Maps each file name to a tuple of available fetch URIs.
	@type myuris: dict
	@param mysettings: Portage config instance.
	@type mysettings: portage.config
	@param listonly: Only print URIs and do not actually fetch them.
	@type listonly: bool
	@param fetchonly: Do not block for files that are locked by a
		concurrent fetcher process. This means that the function can
		return successfully *before* all files have been successfully
		fetched!
	@type fetchonly: bool
	@param use_locks: Enable locks. This parameter is ineffective if
		FEATURES=distlocks is disabled in the portage config!
	@type use_locks: bool
	@param digests: Maps each file name to a dict of digest types and values.
	@type digests: dict
	@param allow_missing_digests: Enable fetch even if there are no digests
		available for verification.
	@type allow_missing_digests: bool
	@param force: Force download, even when a file already exists in
		DISTDIR. This is most useful when there are no digests available,
		since otherwise download will be automatically forced if the
		existing file does not match the available digests. Also, this
		avoids the need to remove the existing file in advance, which
		makes it possible to atomically replace the file and avoid
		interference with concurrent processes.
	@type force: bool
	@rtype: int
	@return: 1 if successful, 0 otherwise.
	"""

	if force and digests:
		# Since the force parameter can trigger unnecessary fetch when the
		# digests match, do not allow force=True when digests are provided.
		raise PortageException(_('fetch: force=True is not allowed when digests are provided'))

	if not myuris:
		return 1

	features = mysettings.features
	restrict = mysettings.get("PORTAGE_RESTRICT","").split()
	userfetch = portage.data.secpass >= 2 and "userfetch" in features

	# 'nomirror' is bad/negative logic. You restrict mirroring, not no-mirroring.
	restrict_mirror = "mirror" in restrict or "nomirror" in restrict
	if restrict_mirror:
		if ("mirror" in features) and ("lmirror" not in features):
			# lmirror should allow you to bypass mirror restrictions.
			# XXX: This is not a good thing, and is temporary at best.
			print(_(">>> \"mirror\" mode desired and \"mirror\" restriction found; skipping fetch."))
			return 1

	# Generally, downloading the same file repeatedly from
	# every single available mirror is a waste of bandwidth
	# and time, so there needs to be a cap.
	checksum_failure_max_tries = 5
	v = checksum_failure_max_tries
	try:
		v = int(mysettings.get("PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS",
			checksum_failure_max_tries))
	except (ValueError, OverflowError):
		writemsg(_("!!! Variable PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS"
			" contains non-integer value: '%s'\n") % \
			mysettings["PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS"], noiselevel=-1)
		writemsg(_("!!! Using PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS "
			"default value: %s\n") % checksum_failure_max_tries,
			noiselevel=-1)
		v = checksum_failure_max_tries
	if v < 1:
		writemsg(_("!!! Variable PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS"
			" contains value less than 1: '%s'\n") % v, noiselevel=-1)
		writemsg(_("!!! Using PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS "
			"default value: %s\n") % checksum_failure_max_tries,
			noiselevel=-1)
		v = checksum_failure_max_tries
	checksum_failure_max_tries = v
	del v

	fetch_resume_size_default = "350K"
	fetch_resume_size = mysettings.get("PORTAGE_FETCH_RESUME_MIN_SIZE")
	if fetch_resume_size is not None:
		fetch_resume_size = "".join(fetch_resume_size.split())
		if not fetch_resume_size:
			# If it's undefined or empty, silently use the default.
			fetch_resume_size = fetch_resume_size_default
		match = _fetch_resume_size_re.match(fetch_resume_size)
		if match is None or \
			(match.group(2).upper() not in _size_suffix_map):
			writemsg(_("!!! Variable PORTAGE_FETCH_RESUME_MIN_SIZE"
				" contains an unrecognized format: '%s'\n") % \
				mysettings["PORTAGE_FETCH_RESUME_MIN_SIZE"], noiselevel=-1)
			writemsg(_("!!! Using PORTAGE_FETCH_RESUME_MIN_SIZE "
				"default value: %s\n") % fetch_resume_size_default,
				noiselevel=-1)
			fetch_resume_size = None
	if fetch_resume_size is None:
		fetch_resume_size = fetch_resume_size_default
		match = _fetch_resume_size_re.match(fetch_resume_size)
	fetch_resume_size = int(match.group(1)) * \
		2 ** _size_suffix_map[match.group(2).upper()]

	# Behave like the package has RESTRICT="primaryuri" after a
	# couple of checksum failures, to increase the probability
	# of success before checksum_failure_max_tries is reached.
	checksum_failure_primaryuri = 2
	thirdpartymirrors = mysettings.thirdpartymirrors()

	# In the background parallel-fetch process, it's safe to skip checksum
	# verification of pre-existing files in $DISTDIR that have the correct
	# file size. The parent process will verify their checksums prior to
	# the unpack phase.

	parallel_fetchonly = "PORTAGE_PARALLEL_FETCHONLY" in mysettings
	if parallel_fetchonly:
		fetchonly = 1

	check_config_instance(mysettings)

	custommirrors = grabdict(os.path.join(mysettings["PORTAGE_CONFIGROOT"],
		CUSTOM_MIRRORS_FILE), recursive=1)

	mymirrors=[]

	if listonly or ("distlocks" not in features):
		use_locks = 0

	distdir_writable = os.access(mysettings["DISTDIR"], os.W_OK)
	fetch_to_ro = 0
	if "skiprocheck" in features:
		fetch_to_ro = 1

	if not distdir_writable and fetch_to_ro:
		if use_locks:
			writemsg(colorize("BAD",
				_("!!! For fetching to a read-only filesystem, "
				"locking should be turned off.\n")), noiselevel=-1)
			writemsg(_("!!! This can be done by adding -distlocks to "
				"FEATURES in /etc/portage/make.conf\n"), noiselevel=-1)
#			use_locks = 0

	# local mirrors are always added
	if try_mirrors and "local" in custommirrors:
		mymirrors += custommirrors["local"]

	if restrict_mirror:
		# We don't add any mirrors.
		pass
	else:
		if try_mirrors:
			mymirrors += [x.rstrip("/") for x in mysettings["GENTOO_MIRRORS"].split() if x]

	hash_filter = _hash_filter(mysettings.get("PORTAGE_CHECKSUM_FILTER", ""))
	if hash_filter.transparent:
		hash_filter = None
	skip_manifest = mysettings.get("EBUILD_SKIP_MANIFEST") == "1"
	if skip_manifest:
		allow_missing_digests = True
	pkgdir = mysettings.get("O")
	if digests is None and not (pkgdir is None or skip_manifest):
		mydigests = mysettings.repositories.get_repo_for_location(
			os.path.dirname(os.path.dirname(pkgdir))).load_manifest(
			pkgdir, mysettings["DISTDIR"]).getTypeDigests("DIST")
	elif digests is None or skip_manifest:
		# no digests because fetch was not called for a specific package
		mydigests = {}
	else:
		mydigests = digests

	ro_distdirs = [x for x in \
		shlex_split(mysettings.get("PORTAGE_RO_DISTDIRS", "")) \
		if os.path.isdir(x)]

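	# Mirrors given as absolute filesystem paths (leading '/') are split out
	# into fsmirrors below; files are copied from those directories directly.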
	fsmirrors = []
	for x in range(len(mymirrors)-1,-1,-1):
		if mymirrors[x] and mymirrors[x][0]=='/':
			fsmirrors += [mymirrors[x]]
			del mymirrors[x]

	restrict_fetch = "fetch" in restrict
	force_mirror = "force-mirror" in features and not restrict_mirror
	custom_local_mirrors = custommirrors.get("local", [])
	if restrict_fetch:
		# With fetch restriction, a normal uri may only be fetched from
		# custom local mirrors (if available).  A mirror:// uri may also
		# be fetched from specific mirrors (effectively overriding fetch
		# restriction, but only for specific mirrors).
		locations = custom_local_mirrors
	else:
		locations = mymirrors

	file_uri_tuples = []
	# Check for an 'items' attribute so that any mapping type is
	# supported, not just plain dict.
	if hasattr(myuris, 'items'):
		for myfile, uri_set in myuris.items():
			for myuri in uri_set:
				file_uri_tuples.append((myfile, myuri))
			if not uri_set:
				file_uri_tuples.append((myfile, None))
	else:
		for myuri in myuris:
			if urlparse(myuri).scheme:
				file_uri_tuples.append((os.path.basename(myuri), myuri))
			else:
				file_uri_tuples.append((os.path.basename(myuri), None))

	filedict = OrderedDict()
	primaryuri_dict = {}
	thirdpartymirror_uris = {}
	for myfile, myuri in file_uri_tuples:
		if myfile not in filedict:
			filedict[myfile]=[]
			if distdir_writable:
				mirror_cache = os.path.join(mysettings["DISTDIR"],
						".mirror-cache.json")
			else:
				mirror_cache = None
			for l in locations:
				filedict[myfile].append(functools.partial(
					get_mirror_url, l, myfile, mysettings, mirror_cache))
		if myuri is None:
			continue
		if myuri[:9]=="mirror://":
			eidx = myuri.find("/", 9)
			if eidx != -1:
				mirrorname = myuri[9:eidx]
				path = myuri[eidx+1:]

				# Try user-defined mirrors first
				if mirrorname in custommirrors:
					for cmirr in custommirrors[mirrorname]:
						filedict[myfile].append(
							cmirr.rstrip("/") + "/" + path)

				# now try the official mirrors
				if mirrorname in thirdpartymirrors:
					uris = [locmirr.rstrip("/") + "/" + path \
						for locmirr in thirdpartymirrors[mirrorname]]
					random.shuffle(uris)
					filedict[myfile].extend(uris)
					thirdpartymirror_uris.setdefault(myfile, []).extend(uris)

				if mirrorname not in custommirrors and \
					mirrorname not in thirdpartymirrors:
					writemsg(_("!!! No known mirror by the name: %s\n") % (mirrorname))
			else:
				writemsg(_("Invalid mirror definition in SRC_URI:\n"), noiselevel=-1)
				writemsg("  %s\n" % (myuri), noiselevel=-1)
		else:
			if restrict_fetch or force_mirror:
				# Only fetching from specific mirrors is allowed.
				continue
			primaryuris = primaryuri_dict.get(myfile)
			if primaryuris is None:
				primaryuris = []
				primaryuri_dict[myfile] = primaryuris
			primaryuris.append(myuri)

	# Order primaryuri_dict values to match that in SRC_URI.
	for uris in primaryuri_dict.values():
		uris.reverse()

	# Prefer thirdpartymirrors over normal mirrors in cases when
	# the file does not yet exist on the normal mirrors.
	for myfile, uris in thirdpartymirror_uris.items():
		primaryuri_dict.setdefault(myfile, []).extend(uris)

	# Now merge primaryuri values into filedict (includes mirrors
	# explicitly referenced in SRC_URI).
	if "primaryuri" in restrict:
		for myfile, uris in filedict.items():
			filedict[myfile] = primaryuri_dict.get(myfile, []) + uris
	else:
		for myfile in filedict:
			filedict[myfile] += primaryuri_dict.get(myfile, [])

	can_fetch=True

	if listonly:
		can_fetch = False

	if can_fetch and not fetch_to_ro:
		try:
			_ensure_distdir(mysettings, mysettings["DISTDIR"])
		except PortageException as e:
			if not os.path.isdir(mysettings["DISTDIR"]):
				writemsg("!!! %s\n" % str(e), noiselevel=-1)
				writemsg(_("!!! Directory Not Found: DISTDIR='%s'\n") % mysettings["DISTDIR"], noiselevel=-1)
				writemsg(_("!!! Fetching will fail!\n"), noiselevel=-1)

	if can_fetch and \
		not fetch_to_ro and \
		not os.access(mysettings["DISTDIR"], os.W_OK):
		writemsg(_("!!! No write access to '%s'\n") % mysettings["DISTDIR"],
			noiselevel=-1)
		can_fetch = False

	distdir_writable = can_fetch and not fetch_to_ro
	failed_files = set()
	restrict_fetch_msg = False
	valid_hashes = set(get_valid_checksum_keys())
	valid_hashes.discard("size")

	for myfile in filedict:
		"""
		fetched  status
		0        nonexistent
		1        partially downloaded
		2        completely downloaded
		"""
		fetched = 0

		orig_digests = mydigests.get(myfile, {})

		if not (allow_missing_digests or listonly):
			verifiable_hash_types = set(orig_digests).intersection(valid_hashes)
			if not verifiable_hash_types:
				expected = " ".join(sorted(valid_hashes))
				got = set(orig_digests)
				got.discard("size")
				got = " ".join(sorted(got))
				reason = (_("Insufficient data for checksum verification"),
					got, expected)
				writemsg(_("!!! Fetched file: %s VERIFY FAILED!\n") % myfile,
					noiselevel=-1)
				writemsg(_("!!! Reason: %s\n") % reason[0],
					noiselevel=-1)
				writemsg(_("!!! Got:      %s\n!!! Expected: %s\n") % \
					(reason[1], reason[2]), noiselevel=-1)

				if fetchonly:
					failed_files.add(myfile)
					continue
				else:
					return 0

		size = orig_digests.get("size")
		if size == 0:
			# Zero-byte distfiles are always invalid, so discard their digests.
			del mydigests[myfile]
			orig_digests.clear()
			size = None
		pruned_digests = orig_digests
		if parallel_fetchonly:
			pruned_digests = {}
			if size is not None:
				pruned_digests["size"] = size

		myfile_path = os.path.join(mysettings["DISTDIR"], myfile)
		download_path = myfile_path if fetch_to_ro else myfile_path + _download_suffix
		has_space = True
		has_space_superuser = True
		file_lock = None
		if listonly:
			writemsg_stdout("\n", noiselevel=-1)
		else:
			# check if there is enough space in DISTDIR to completely store myfile
			# overestimate the filesize so we aren't bitten by FS overhead
			vfs_stat = None
			if size is not None and hasattr(os, "statvfs"):
				try:
					vfs_stat = os.statvfs(mysettings["DISTDIR"])
				except OSError as e:
					writemsg_level("!!! statvfs('%s'): %s\n" %
						(mysettings["DISTDIR"], e),
						noiselevel=-1, level=logging.ERROR)
					del e

			if vfs_stat is not None:
				try:
					mysize = os.stat(myfile_path).st_size
				except OSError as e:
					if e.errno not in (errno.ENOENT, errno.ESTALE):
						raise
					del e
					mysize = 0
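				# Compare the bytes still needed (padded by one block) against
				# free space: f_bavail is what unprivileged users may use,
				# while f_bfree below includes the superuser reserve.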
				if (size - mysize + vfs_stat.f_bsize) >= \
					(vfs_stat.f_bsize * vfs_stat.f_bavail):

					if (size - mysize + vfs_stat.f_bsize) >= \
						(vfs_stat.f_bsize * vfs_stat.f_bfree):
						has_space_superuser = False

					if not has_space_superuser:
						has_space = False
					elif portage.data.secpass < 2:
						has_space = False
					elif userfetch:
						has_space = False

			if distdir_writable and use_locks:

				lock_kwargs = {}
				if fetchonly:
					lock_kwargs["flags"] = os.O_NONBLOCK

				try:
					file_lock = lockfile(myfile_path,
						wantnewlockfile=1, **lock_kwargs)
				except TryAgain:
					writemsg(_(">>> File '%s' is already locked by "
						"another fetcher. Continuing...\n") % myfile,
						noiselevel=-1)
					continue
		try:
			if not listonly:

				eout = EOutput()
				eout.quiet = mysettings.get("PORTAGE_QUIET") == "1"
				match, mystat = _check_distfile(
					myfile_path, pruned_digests, eout, hash_filter=hash_filter)
				if match and not force:
					# Skip permission adjustment for symlinks, since we don't
					# want to modify anything outside of the primary DISTDIR,
					# and symlinks typically point to PORTAGE_RO_DISTDIRS.
					if distdir_writable and not os.path.islink(myfile_path):
						try:
							apply_secpass_permissions(myfile_path,
								gid=portage_gid, mode=0o664, mask=0o2,
								stat_cached=mystat)
						except PortageException as e:
							if not os.access(myfile_path, os.R_OK):
								writemsg(_("!!! Failed to adjust permissions:"
									" %s\n") % str(e), noiselevel=-1)
							del e
					continue

				# Remove broken symlinks or symlinks to files which
				# _check_distfile did not match above.
				if distdir_writable and (mystat is None or os.path.islink(myfile_path)):
					try:
						os.unlink(myfile_path)
					except OSError as e:
						if e.errno not in (errno.ENOENT, errno.ESTALE):
							raise
					mystat = None

				if mystat is not None:
					if stat.S_ISDIR(mystat.st_mode):
						writemsg_level(
							_("!!! Unable to fetch file since "
							"a directory is in the way: \n"
							"!!!   %s\n") % myfile_path,
							level=logging.ERROR, noiselevel=-1)
						return 0

					if distdir_writable and not force:
						# Since _check_distfile did not match above, the file
						# is either corrupt or its identity has changed since
						# the last time it was fetched, so rename it.
						temp_filename = _checksum_failure_temp_file(
							mysettings, mysettings["DISTDIR"], myfile)
						writemsg_stdout(_("Refetching... "
							"File renamed to '%s'\n\n") % \
							temp_filename, noiselevel=-1)

				# Stat the temporary download file for comparison with
				# fetch_resume_size.
				try:
					mystat = os.stat(download_path)
				except OSError as e:
					if e.errno not in (errno.ENOENT, errno.ESTALE):
						raise
					mystat = None

				if mystat is not None:
					if mystat.st_size == 0:
						if distdir_writable:
							try:
								os.unlink(download_path)
							except OSError:
								pass
					elif distdir_writable and size is not None:
						if mystat.st_size < fetch_resume_size and \
							mystat.st_size < size:
							# If the file already exists and the size does not
							# match the existing digests, it may be that the
							# user is attempting to update the digest. In this
							# case, the digestgen() function will advise the
							# user to use `ebuild --force foo.ebuild manifest`
							# in order to force the old digests to be replaced.
							# Since the user may want to keep this file, rename
							# it instead of deleting it.
							writemsg(_(">>> Renaming distfile with size "
								"%d (smaller than " "PORTAGE_FETCH_RESU"
								"ME_MIN_SIZE)\n") % mystat.st_size)
							temp_filename = \
								_checksum_failure_temp_file(
									mysettings, mysettings["DISTDIR"],
									os.path.basename(download_path))
							writemsg_stdout(_("Refetching... "
								"File renamed to '%s'\n\n") % \
								temp_filename, noiselevel=-1)
						elif mystat.st_size >= size:
							temp_filename = \
								_checksum_failure_temp_file(
									mysettings, mysettings["DISTDIR"],
									os.path.basename(download_path))
							writemsg_stdout(_("Refetching... "
								"File renamed to '%s'\n\n") % \
								temp_filename, noiselevel=-1)

				if distdir_writable and ro_distdirs:
					readonly_file = None
					for x in ro_distdirs:
						filename = os.path.join(x, myfile)
						match, mystat = _check_distfile(
							filename, pruned_digests, eout, hash_filter=hash_filter)
						if match:
							readonly_file = filename
							break
					if readonly_file is not None:
						try:
							os.unlink(myfile_path)
						except OSError as e:
							if e.errno not in (errno.ENOENT, errno.ESTALE):
								raise
							del e
						os.symlink(readonly_file, myfile_path)
						continue

				# this message is shown only after we know that
				# the file is not already fetched
				if not has_space:
					writemsg(_("!!! Insufficient space to store %s in %s\n") % \
						(myfile, mysettings["DISTDIR"]), noiselevel=-1)

					if has_space_superuser:
						writemsg(_("!!! Insufficient privileges to use "
							"remaining space.\n"), noiselevel=-1)
						if userfetch:
							writemsg(_("!!! You may set FEATURES=\"-userfetch\""
								" in /etc/portage/make.conf in order to fetch with\n"
								"!!! superuser privileges.\n"), noiselevel=-1)

				if fsmirrors and not os.path.exists(myfile_path) and has_space:
					for mydir in fsmirrors:
						mirror_file = os.path.join(mydir, myfile)
						try:
							shutil.copyfile(mirror_file, download_path)
							writemsg(_("Local mirror has file: %s\n") % myfile)
							break
						except (IOError, OSError) as e:
							if e.errno not in (errno.ENOENT, errno.ESTALE):
								raise
							del e

				try:
					mystat = os.stat(download_path)
				except OSError as e:
					if e.errno not in (errno.ENOENT, errno.ESTALE):
						raise
					del e
				else:
					# Skip permission adjustment for symlinks, since we don't
					# want to modify anything outside of the primary DISTDIR,
					# and symlinks typically point to PORTAGE_RO_DISTDIRS.
					if not os.path.islink(download_path):
						try:
							apply_secpass_permissions(download_path,
								gid=portage_gid, mode=0o664, mask=0o2,
								stat_cached=mystat)
						except PortageException as e:
							if not os.access(download_path, os.R_OK):
								writemsg(_("!!! Failed to adjust permissions:"
									" %s\n") % (e,), noiselevel=-1)

					# If the file is empty then it's obviously invalid. Remove
					# the empty file and try to download if possible.
					if mystat.st_size == 0:
						if distdir_writable:
							try:
								os.unlink(download_path)
							except EnvironmentError:
								pass
					elif not orig_digests:
						# We don't have a digest, but the file exists.  We must
						# assume that it is fully downloaded.
						if not force:
							continue
					else:
						if (mydigests[myfile].get("size") is not None
								and mystat.st_size < mydigests[myfile]["size"]
								and not restrict_fetch):
							fetched = 1 # Try to resume this download.
						elif parallel_fetchonly and \
							mystat.st_size == mydigests[myfile]["size"]:
							eout = EOutput()
							eout.quiet = \
								mysettings.get("PORTAGE_QUIET") == "1"
							eout.ebegin(
								"%s size ;-)" % (myfile, ))
							eout.eend(0)
							continue
						else:
							digests = _filter_unaccelarated_hashes(mydigests[myfile])
							if hash_filter is not None:
								digests = _apply_hash_filter(digests, hash_filter)
							verified_ok, reason = verify_all(download_path, digests)
							if not verified_ok:
								writemsg(_("!!! Previously fetched"
									" file: '%s'\n") % myfile, noiselevel=-1)
								writemsg(_("!!! Reason: %s\n") % reason[0],
									noiselevel=-1)
								writemsg(_("!!! Got:      %s\n"
									"!!! Expected: %s\n") % \
									(reason[1], reason[2]), noiselevel=-1)
								if reason[0] == _("Insufficient data for checksum verification"):
									return 0
								if distdir_writable:
									temp_filename = \
										_checksum_failure_temp_file(
											mysettings, mysettings["DISTDIR"],
											os.path.basename(download_path))
									writemsg_stdout(_("Refetching... "
										"File renamed to '%s'\n\n") % \
										temp_filename, noiselevel=-1)
							else:
								if not fetch_to_ro:
									_movefile(download_path, myfile_path, mysettings=mysettings)
								eout = EOutput()
								eout.quiet = \
									mysettings.get("PORTAGE_QUIET", None) == "1"
								if digests:
									digests = list(digests)
									digests.sort()
									eout.ebegin(
										"%s %s ;-)" % (myfile, " ".join(digests)))
									eout.eend(0)
								continue # fetch any remaining files

			# Create a reversed list since that is optimal for list.pop().
			uri_list = filedict[myfile][:]
			uri_list.reverse()
			checksum_failure_count = 0
			tried_locations = set()
			while uri_list:
				loc = uri_list.pop()
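				# Mirror candidates were appended above as functools.partial
				# wrappers around get_mirror_url; resolve them lazily, only
				# once the location is actually about to be tried.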
				if isinstance(loc, functools.partial):
					loc = loc()
				# Eliminate duplicates here in case we've switched to
				# "primaryuri" mode on the fly due to a checksum failure.
				if loc in tried_locations:
					continue
				tried_locations.add(loc)
				if listonly:
					writemsg_stdout(loc+" ", noiselevel=-1)
					continue
				# allow different fetchcommands per protocol
				protocol = loc[0:loc.find("://")]

				global_config_path = GLOBAL_CONFIG_PATH
				if portage.const.EPREFIX:
					global_config_path = os.path.join(portage.const.EPREFIX,
							GLOBAL_CONFIG_PATH.lstrip(os.sep))

				missing_file_param = False
				fetchcommand_var = "FETCHCOMMAND_" + protocol.upper()
				fetchcommand = mysettings.get(fetchcommand_var)
				if fetchcommand is None:
					fetchcommand_var = "FETCHCOMMAND"
					fetchcommand = mysettings.get(fetchcommand_var)
					if fetchcommand is None:
						writemsg_level(
							_("!!! %s is unset. It should "
							"have been defined in\n!!! %s/make.globals.\n") \
							% (fetchcommand_var, global_config_path),
							level=logging.ERROR, noiselevel=-1)
						return 0
				if "${FILE}" not in fetchcommand:
					writemsg_level(
						_("!!! %s does not contain the required ${FILE}"
						" parameter.\n") % fetchcommand_var,
						level=logging.ERROR, noiselevel=-1)
					missing_file_param = True

				resumecommand_var = "RESUMECOMMAND_" + protocol.upper()
				resumecommand = mysettings.get(resumecommand_var)
				if resumecommand is None:
					resumecommand_var = "RESUMECOMMAND"
					resumecommand = mysettings.get(resumecommand_var)
					if resumecommand is None:
						writemsg_level(
							_("!!! %s is unset. It should "
							"have been defined in\n!!! %s/make.globals.\n") \
							% (resumecommand_var, global_config_path),
							level=logging.ERROR, noiselevel=-1)
						return 0
				if "${FILE}" not in resumecommand:
					writemsg_level(
						_("!!! %s does not contain the required ${FILE}"
						" parameter.\n") % resumecommand_var,
						level=logging.ERROR, noiselevel=-1)
					missing_file_param = True

				if missing_file_param:
					writemsg_level(
						_("!!! Refer to the make.conf(5) man page for "
						"information about how to\n!!! correctly specify "
						"FETCHCOMMAND and RESUMECOMMAND.\n"),
						level=logging.ERROR, noiselevel=-1)
					if myfile != os.path.basename(loc):
						return 0

				if not can_fetch:
					if fetched != 2:
						try:
							mysize = os.stat(download_path).st_size
						except OSError as e:
							if e.errno not in (errno.ENOENT, errno.ESTALE):
								raise
							del e
							mysize = 0

						if mysize == 0:
							writemsg(_("!!! File %s isn't fetched but unable to get it.\n") % myfile,
								noiselevel=-1)
						elif size is None or size > mysize:
							writemsg(_("!!! File %s isn't fully fetched, but unable to complete it\n") % myfile,
								noiselevel=-1)
						else:
							writemsg(_("!!! File %s is incorrect size, "
								"but unable to retry.\n") % myfile, noiselevel=-1)
						return 0
					else:
						continue

				if fetched != 2 and has_space:
					#we either need to resume or start the download
					if fetched == 1:
						try:
							mystat = os.stat(download_path)
						except OSError as e:
							if e.errno not in (errno.ENOENT, errno.ESTALE):
								raise
							del e
							fetched = 0
						else:
							if distdir_writable and mystat.st_size < fetch_resume_size:
								writemsg(_(">>> Deleting distfile with size "
									"%d (smaller than " "PORTAGE_FETCH_RESU"
									"ME_MIN_SIZE)\n") % mystat.st_size)
								try:
									os.unlink(download_path)
								except OSError as e:
									if e.errno not in \
										(errno.ENOENT, errno.ESTALE):
										raise
									del e
								fetched = 0
					if fetched == 1:
						#resume mode:
						writemsg(_(">>> Resuming download...\n"))
						locfetch=resumecommand
						command_var = resumecommand_var
					else:
						#normal mode:
						locfetch=fetchcommand
						command_var = fetchcommand_var
					writemsg_stdout(_(">>> Downloading '%s'\n") % \
						_hide_url_passwd(loc))
					variables = {
						"URI":     loc,
						"FILE":    os.path.basename(download_path)
					}

					for k in ("DISTDIR", "PORTAGE_SSH_OPTS"):
						v = mysettings.get(k)
						if v is not None:
							variables[k] = v

					myfetch = shlex_split(locfetch)
					myfetch = [varexpand(x, mydict=variables) for x in myfetch]
					myret = -1
					try:

						myret = _spawn_fetch(mysettings, myfetch)

					finally:
						try:
							apply_secpass_permissions(download_path,
								gid=portage_gid, mode=0o664, mask=0o2)
						except FileNotFound:
							pass
						except PortageException as e:
							if not os.access(download_path, os.R_OK):
								writemsg(_("!!! Failed to adjust permissions:"
									" %s\n") % str(e), noiselevel=-1)
							del e

					# If the file is empty then it's obviously invalid.  Don't
					# trust the return value from the fetcher.  Remove the
					# empty file and try to download again.
					try:
						mystat = os.lstat(download_path)
						if mystat.st_size == 0 or (stat.S_ISLNK(mystat.st_mode) and not os.path.exists(download_path)):
							os.unlink(download_path)
							fetched = 0
							continue
					except EnvironmentError:
						pass

					if mydigests is not None and myfile in mydigests:
						try:
							mystat = os.stat(download_path)
						except OSError as e:
							if e.errno not in (errno.ENOENT, errno.ESTALE):
								raise
							del e
							fetched = 0
						else:

							if stat.S_ISDIR(mystat.st_mode):
								# This can happen if FETCHCOMMAND erroneously
								# contains wget's -P option where it should
								# instead have -O.
								writemsg_level(
									_("!!! The command specified in the "
									"%s variable appears to have\n!!! "
									"created a directory instead of a "
									"normal file.\n") % command_var,
									level=logging.ERROR, noiselevel=-1)
								writemsg_level(
									_("!!! Refer to the make.conf(5) "
									"man page for information about how "
									"to\n!!! correctly specify "
									"FETCHCOMMAND and RESUMECOMMAND.\n"),
									level=logging.ERROR, noiselevel=-1)
								return 0

							# No exception, so the file exists. Let digestcheck()
							# report appropriately for size or checksum errors.

							# If the fetcher reported success and the file is
							# too small, it's probably because the digest is
							# bad (upstream changed the distfile).  In this
							# case we don't want to attempt to resume. Show a
							# digest verification failure so that the user gets
							# a clue about what just happened.
							if myret != os.EX_OK and \
								mystat.st_size < mydigests[myfile]["size"]:
								# Fetch failed... Try the next one... Kill 404 files though.
								if (mystat[stat.ST_SIZE]<100000) and (len(myfile)>4) and not ((myfile[-5:]==".html") or (myfile[-4:]==".htm")):
									html404=re.compile("<title>.*(not found|404).*</title>",re.I|re.M)
									with io.open(
										_unicode_encode(download_path,
										encoding=_encodings['fs'], errors='strict'),
										mode='r', encoding=_encodings['content'], errors='replace'
										) as f:
										if html404.search(f.read()):
											try:
												os.unlink(download_path)
												writemsg(_(">>> Deleting invalid distfile. (Improper 404 redirect from server.)\n"))
												fetched = 0
												continue
											except (IOError, OSError):
												pass
								fetched = 1
								continue
							if True:
								# File is the correct size--check the checksums for the fetched
								# file NOW, for those users who don't have a stable/continuous
								# net connection. This way we have a chance to try to download
								# from another mirror...
								digests = _filter_unaccelarated_hashes(mydigests[myfile])
								if hash_filter is not None:
									digests = _apply_hash_filter(digests, hash_filter)
								verified_ok, reason = verify_all(download_path, digests)
								if not verified_ok:
									writemsg(_("!!! Fetched file: %s VERIFY FAILED!\n") % myfile,
										noiselevel=-1)
									writemsg(_("!!! Reason: %s\n") % reason[0],
										noiselevel=-1)
									writemsg(_("!!! Got:      %s\n!!! Expected: %s\n") % \
										(reason[1], reason[2]), noiselevel=-1)
									if reason[0] == _("Insufficient data for checksum verification"):
										return 0
									if distdir_writable:
										temp_filename = \
											_checksum_failure_temp_file(
												mysettings, mysettings["DISTDIR"],
												os.path.basename(download_path))
										writemsg_stdout(_("Refetching... "
											"File renamed to '%s'\n\n") % \
											temp_filename, noiselevel=-1)
									fetched=0
									checksum_failure_count += 1
									if checksum_failure_count == \
										checksum_failure_primaryuri:
										# Switch to "primaryuri" mode in order
										# to increase the probability
										# of success.
										primaryuris = \
											primaryuri_dict.get(myfile)
										if primaryuris:
											uri_list.extend(
												reversed(primaryuris))
									if checksum_failure_count >= \
										checksum_failure_max_tries:
										break
								else:
									if not fetch_to_ro:
										_movefile(download_path, myfile_path, mysettings=mysettings)
									eout = EOutput()
									eout.quiet = mysettings.get("PORTAGE_QUIET", None) == "1"
									if digests:
										eout.ebegin("%s %s ;-)" % \
											(myfile, " ".join(sorted(digests))))
										eout.eend(0)
									fetched=2
									break
					else: # no digests available
						if not myret:
							if not fetch_to_ro:
								_movefile(download_path, myfile_path, mysettings=mysettings)
							fetched=2
							break
						elif mydigests is not None:
							writemsg(_("No digest file available and download failed.\n\n"),
								noiselevel=-1)
		finally:
			if use_locks and file_lock:
				unlockfile(file_lock)
				file_lock = None

		if listonly:
			writemsg_stdout("\n", noiselevel=-1)
		if fetched != 2:
			if restrict_fetch and not restrict_fetch_msg:
				restrict_fetch_msg = True
				msg = _("\n!!! %s/%s"
					" has fetch restriction turned on.\n"
					"!!! This probably means that this "
					"ebuild's files must be downloaded\n"
					"!!! manually.  See the comments in"
					" the ebuild for more information.\n\n") % \
					(mysettings["CATEGORY"], mysettings["PF"])
				writemsg_level(msg,
					level=logging.ERROR, noiselevel=-1)
			elif restrict_fetch:
				pass
			elif listonly:
				pass
			elif not filedict[myfile]:
				writemsg(_("Warning: No mirrors available for file"
					" '%s'\n") % (myfile), noiselevel=-1)
			else:
				writemsg(_("!!! Couldn't download '%s'. Aborting.\n") % myfile,
					noiselevel=-1)

			if listonly:
				failed_files.add(myfile)
				continue
			elif fetchonly:
				failed_files.add(myfile)
				continue
			return 0
	if failed_files:
		return 0
	return 1
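
A minimal usage sketch (not one of the collected examples): how a fetch() like the one above might be invoked with a SRC_URI-style mapping. The module path, settings construction, distfile name, and URI are assumptions for illustration, not taken from the listing.

import portage
from portage.package.ebuild.fetch import fetch

# Hypothetical distfile mapping; fetch() accepts {filename: [uris, ...]}.
settings = portage.config(clone=portage.settings)
myuris = {"foo-1.0.tar.gz": ["https://example.org/distfiles/foo-1.0.tar.gz"]}

# fetch() returns 1 on success (or when the file is already present)
# and 0 on failure, as seen in the return statements above.
if fetch(myuris, settings, fetchonly=1):
	print("fetched into %s" % settings["DISTDIR"])
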
Example #36
0
def fetch(myuris, mysettings, listonly=0, fetchonly=0,
	locks_in_subdir=".locks", use_locks=1, try_mirrors=1, digests=None,
	allow_missing_digests=True):
	"fetch files.  Will use digest file if available."

	if not myuris:
		return 1

	features = mysettings.features
	restrict = mysettings.get("PORTAGE_RESTRICT","").split()

	userfetch = secpass >= 2 and "userfetch" in features
	userpriv = secpass >= 2 and "userpriv" in features

	# 'nomirror' is bad/negative logic. You restrict mirroring, not no-mirroring.
	restrict_mirror = "mirror" in restrict or "nomirror" in restrict
	if restrict_mirror:
		if ("mirror" in features) and ("lmirror" not in features):
			# lmirror should allow you to bypass mirror restrictions.
			# XXX: This is not a good thing, and is temporary at best.
			print(_(">>> \"mirror\" mode desired and \"mirror\" restriction found; skipping fetch."))
			return 1

	# Generally, downloading the same file repeatedly from
	# every single available mirror is a waste of bandwidth
	# and time, so there needs to be a cap.
	checksum_failure_max_tries = 5
	v = checksum_failure_max_tries
	try:
		v = int(mysettings.get("PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS",
			checksum_failure_max_tries))
	except (ValueError, OverflowError):
		writemsg(_("!!! Variable PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS"
			" contains non-integer value: '%s'\n") % \
			mysettings["PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS"], noiselevel=-1)
		writemsg(_("!!! Using PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS "
			"default value: %s\n") % checksum_failure_max_tries,
			noiselevel=-1)
		v = checksum_failure_max_tries
	if v < 1:
		writemsg(_("!!! Variable PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS"
			" contains value less than 1: '%s'\n") % v, noiselevel=-1)
		writemsg(_("!!! Using PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS "
			"default value: %s\n") % checksum_failure_max_tries,
			noiselevel=-1)
		v = checksum_failure_max_tries
	checksum_failure_max_tries = v
	del v

	fetch_resume_size_default = "350K"
	fetch_resume_size = mysettings.get("PORTAGE_FETCH_RESUME_MIN_SIZE")
	if fetch_resume_size is not None:
		fetch_resume_size = "".join(fetch_resume_size.split())
		if not fetch_resume_size:
			# If it's empty after stripping whitespace, silently use the default.
			fetch_resume_size = fetch_resume_size_default
		match = _fetch_resume_size_re.match(fetch_resume_size)
		if match is None or \
			(match.group(2).upper() not in _size_suffix_map):
			writemsg(_("!!! Variable PORTAGE_FETCH_RESUME_MIN_SIZE"
				" contains an unrecognized format: '%s'\n") % \
				mysettings["PORTAGE_FETCH_RESUME_MIN_SIZE"], noiselevel=-1)
			writemsg(_("!!! Using PORTAGE_FETCH_RESUME_MIN_SIZE "
				"default value: %s\n") % fetch_resume_size_default,
				noiselevel=-1)
			fetch_resume_size = None
	if fetch_resume_size is None:
		fetch_resume_size = fetch_resume_size_default
		match = _fetch_resume_size_re.match(fetch_resume_size)
	fetch_resume_size = int(match.group(1)) * \
		2 ** _size_suffix_map[match.group(2).upper()]

	# Behave like the package has RESTRICT="primaryuri" after a
	# couple of checksum failures, to increase the probability
	# of success before checksum_failure_max_tries is reached.
	checksum_failure_primaryuri = 2
	thirdpartymirrors = mysettings.thirdpartymirrors()

	# In the background parallel-fetch process, it's safe to skip checksum
	# verification of pre-existing files in $DISTDIR that have the correct
	# file size. The parent process will verify their checksums prior to
	# the unpack phase.

	parallel_fetchonly = "PORTAGE_PARALLEL_FETCHONLY" in mysettings
	if parallel_fetchonly:
		fetchonly = 1

	check_config_instance(mysettings)

	custommirrors = grabdict(os.path.join(mysettings["PORTAGE_CONFIGROOT"],
		CUSTOM_MIRRORS_FILE), recursive=1)

	mymirrors=[]

	if listonly or ("distlocks" not in features):
		use_locks = 0

	fetch_to_ro = 0
	if "skiprocheck" in features:
		fetch_to_ro = 1

	if not os.access(mysettings["DISTDIR"],os.W_OK) and fetch_to_ro:
		if use_locks:
			writemsg(colorize("BAD",
				_("!!! For fetching to a read-only filesystem, "
				"locking should be turned off.\n")), noiselevel=-1)
			writemsg(_("!!! This can be done by adding -distlocks to "
				"FEATURES in /etc/make.conf\n"), noiselevel=-1)
#			use_locks = 0

	# local mirrors are always added
	if "local" in custommirrors:
		mymirrors += custommirrors["local"]

	if restrict_mirror:
		# We don't add any mirrors.
		pass
	else:
		if try_mirrors:
			mymirrors += [x.rstrip("/") for x in mysettings["GENTOO_MIRRORS"].split() if x]

	hash_filter = _hash_filter(mysettings.get("PORTAGE_CHECKSUM_FILTER", ""))
	if hash_filter.transparent:
		hash_filter = None
	skip_manifest = mysettings.get("EBUILD_SKIP_MANIFEST") == "1"
	if skip_manifest:
		allow_missing_digests = True
	pkgdir = mysettings.get("O")
	if digests is None and not (pkgdir is None or skip_manifest):
		mydigests = mysettings.repositories.get_repo_for_location(
			os.path.dirname(os.path.dirname(pkgdir))).load_manifest(
			pkgdir, mysettings["DISTDIR"]).getTypeDigests("DIST")
	elif digests is None or skip_manifest:
		# no digests because fetch was not called for a specific package
		mydigests = {}
	else:
		mydigests = digests

	ro_distdirs = [x for x in \
		shlex_split(mysettings.get("PORTAGE_RO_DISTDIRS", "")) \
		if os.path.isdir(x)]

	fsmirrors = []
	for x in range(len(mymirrors)-1,-1,-1):
		if mymirrors[x] and mymirrors[x][0]=='/':
			fsmirrors += [mymirrors[x]]
			del mymirrors[x]

	restrict_fetch = "fetch" in restrict
	force_mirror = "force-mirror" in features and not restrict_mirror
	custom_local_mirrors = custommirrors.get("local", [])
	if restrict_fetch:
		# With fetch restriction, a normal uri may only be fetched from
		# custom local mirrors (if available).  A mirror:// uri may also
		# be fetched from specific mirrors (effectively overriding fetch
		# restriction, but only for specific mirrors).
		locations = custom_local_mirrors
	else:
		locations = mymirrors

	file_uri_tuples = []
	# Check for an 'items' attribute so that any mapping type is
	# supported, not just plain dict.
	if hasattr(myuris, 'items'):
		for myfile, uri_set in myuris.items():
			for myuri in uri_set:
				file_uri_tuples.append((myfile, myuri))
	else:
		for myuri in myuris:
			file_uri_tuples.append((os.path.basename(myuri), myuri))

	filedict = OrderedDict()
	primaryuri_dict = {}
	thirdpartymirror_uris = {}
	for myfile, myuri in file_uri_tuples:
		if myfile not in filedict:
			filedict[myfile]=[]
			for y in range(0,len(locations)):
				filedict[myfile].append(locations[y]+"/distfiles/"+myfile)
		if myuri[:9]=="mirror://":
			eidx = myuri.find("/", 9)
			if eidx != -1:
				mirrorname = myuri[9:eidx]
				path = myuri[eidx+1:]

				# Try user-defined mirrors first
				if mirrorname in custommirrors:
					for cmirr in custommirrors[mirrorname]:
						filedict[myfile].append(
							cmirr.rstrip("/") + "/" + path)

				# now try the official mirrors
				if mirrorname in thirdpartymirrors:
					uris = [locmirr.rstrip("/") + "/" + path \
						for locmirr in thirdpartymirrors[mirrorname]]
					random.shuffle(uris)
					filedict[myfile].extend(uris)
					thirdpartymirror_uris.setdefault(myfile, []).extend(uris)

				if mirrorname not in custommirrors and \
					mirrorname not in thirdpartymirrors:
					writemsg(_("No known mirror by the name: %s\n") % (mirrorname))
			else:
				writemsg(_("Invalid mirror definition in SRC_URI:\n"), noiselevel=-1)
				writemsg("  %s\n" % (myuri), noiselevel=-1)
		else:
			if restrict_fetch or force_mirror:
				# Only fetching from specific mirrors is allowed.
				continue
			primaryuris = primaryuri_dict.get(myfile)
			if primaryuris is None:
				primaryuris = []
				primaryuri_dict[myfile] = primaryuris
			primaryuris.append(myuri)

	# Order primaryuri_dict values to match that in SRC_URI.
	for uris in primaryuri_dict.values():
		uris.reverse()

	# Prefer thirdpartymirrors over normal mirrors in cases when
	# the file does not yet exist on the normal mirrors.
	for myfile, uris in thirdpartymirror_uris.items():
		primaryuri_dict.setdefault(myfile, []).extend(uris)

	# Now merge primaryuri values into filedict (includes mirrors
	# explicitly referenced in SRC_URI).
	if "primaryuri" in restrict:
		for myfile, uris in filedict.items():
			filedict[myfile] = primaryuri_dict.get(myfile, []) + uris
	else:
		for myfile in filedict:
			filedict[myfile] += primaryuri_dict.get(myfile, [])

	can_fetch=True

	if listonly:
		can_fetch = False

	if can_fetch and not fetch_to_ro:
		global _userpriv_test_write_file_cache
		dirmode  = 0o070
		filemode =   0o60
		modemask =    0o2
		dir_gid = portage_gid
		if "FAKED_MODE" in mysettings:
			# When inside fakeroot, directories with portage's gid appear
			# to have root's gid. Therefore, use root's gid instead of
			# portage's gid to avoid spurrious permissions adjustments
			# when inside fakeroot.
			dir_gid = 0
		distdir_dirs = [""]
		try:
			for x in distdir_dirs:
				mydir = os.path.join(mysettings["DISTDIR"], x)
				write_test_file = os.path.join(
					mydir, ".__portage_test_write__")

				try:
					st = os.stat(mydir)
				except OSError:
					st = None

				if st is not None and stat.S_ISDIR(st.st_mode):
					if not (userfetch or userpriv):
						continue
					if _userpriv_test_write_file(mysettings, write_test_file):
						continue

				_userpriv_test_write_file_cache.pop(write_test_file, None)
				if ensure_dirs(mydir, gid=dir_gid, mode=dirmode, mask=modemask):
					if st is None:
						# The directory has just been created
						# and therefore it must be empty.
						continue
					writemsg(_("Adjusting permissions recursively: '%s'\n") % mydir,
						noiselevel=-1)
					def onerror(e):
						raise # bail out on the first error that occurs during recursion
					if not apply_recursive_permissions(mydir,
						gid=dir_gid, dirmode=dirmode, dirmask=modemask,
						filemode=filemode, filemask=modemask, onerror=onerror):
						raise OperationNotPermitted(
							_("Failed to apply recursive permissions for the portage group."))
		except PortageException as e:
			if not os.path.isdir(mysettings["DISTDIR"]):
				writemsg("!!! %s\n" % str(e), noiselevel=-1)
				writemsg(_("!!! Directory Not Found: DISTDIR='%s'\n") % mysettings["DISTDIR"], noiselevel=-1)
				writemsg(_("!!! Fetching will fail!\n"), noiselevel=-1)

	if can_fetch and \
		not fetch_to_ro and \
		not os.access(mysettings["DISTDIR"], os.W_OK):
		writemsg(_("!!! No write access to '%s'\n") % mysettings["DISTDIR"],
			noiselevel=-1)
		can_fetch = False

	distdir_writable = can_fetch and not fetch_to_ro
	failed_files = set()
	restrict_fetch_msg = False

	for myfile in filedict:
		"""
		fetched  status
		0        nonexistent
		1        partially downloaded
		2        completely downloaded
		"""
		fetched = 0

		orig_digests = mydigests.get(myfile, {})

		if not (allow_missing_digests or listonly):
			verifiable_hash_types = set(orig_digests).intersection(hashfunc_map)
			verifiable_hash_types.discard("size")
			if not verifiable_hash_types:
				expected = set(hashfunc_map)
				expected.discard("size")
				expected = " ".join(sorted(expected))
				got = set(orig_digests)
				got.discard("size")
				got = " ".join(sorted(got))
				reason = (_("Insufficient data for checksum verification"),
					got, expected)
				writemsg(_("!!! Fetched file: %s VERIFY FAILED!\n") % myfile,
					noiselevel=-1)
				writemsg(_("!!! Reason: %s\n") % reason[0],
					noiselevel=-1)
				writemsg(_("!!! Got:      %s\n!!! Expected: %s\n") % \
					(reason[1], reason[2]), noiselevel=-1)

				if fetchonly:
					failed_files.add(myfile)
					continue
				else:
					return 0

		size = orig_digests.get("size")
		if size == 0:
			# Zero-byte distfiles are always invalid, so discard their digests.
			del mydigests[myfile]
			orig_digests.clear()
			size = None
		pruned_digests = orig_digests
		if parallel_fetchonly:
			pruned_digests = {}
			if size is not None:
				pruned_digests["size"] = size

		myfile_path = os.path.join(mysettings["DISTDIR"], myfile)
		has_space = True
		has_space_superuser = True
		file_lock = None
		if listonly:
			writemsg_stdout("\n", noiselevel=-1)
		else:
			# check if there is enough space in DISTDIR to completely store myfile
			# overestimate the filesize so we aren't bitten by FS overhead
			vfs_stat = None
			if size is not None and hasattr(os, "statvfs"):
				try:
					vfs_stat = os.statvfs(mysettings["DISTDIR"])
				except OSError as e:
					writemsg_level("!!! statvfs('%s'): %s\n" %
						(mysettings["DISTDIR"], e),
						noiselevel=-1, level=logging.ERROR)
					del e

			if vfs_stat is not None:
				try:
					mysize = os.stat(myfile_path).st_size
				except OSError as e:
					if e.errno not in (errno.ENOENT, errno.ESTALE):
						raise
					del e
					mysize = 0
				if (size - mysize + vfs_stat.f_bsize) >= \
					(vfs_stat.f_bsize * vfs_stat.f_bavail):

					if (size - mysize + vfs_stat.f_bsize) >= \
						(vfs_stat.f_bsize * vfs_stat.f_bfree):
						has_space_superuser = False

					if not has_space_superuser:
						has_space = False
					elif secpass < 2:
						has_space = False
					elif userfetch:
						has_space = False

			if distdir_writable and use_locks:

				lock_kwargs = {}
				if fetchonly:
					lock_kwargs["flags"] = os.O_NONBLOCK

				try:
					file_lock = lockfile(myfile_path,
						wantnewlockfile=1, **lock_kwargs)
				except TryAgain:
					writemsg(_(">>> File '%s' is already locked by "
						"another fetcher. Continuing...\n") % myfile,
						noiselevel=-1)
					continue
		try:
			if not listonly:

				eout = EOutput()
				eout.quiet = mysettings.get("PORTAGE_QUIET") == "1"
				match, mystat = _check_distfile(
					myfile_path, pruned_digests, eout, hash_filter=hash_filter)
				if match:
					# Skip permission adjustment for symlinks, since we don't
					# want to modify anything outside of the primary DISTDIR,
					# and symlinks typically point to PORTAGE_RO_DISTDIRS.
					if distdir_writable and not os.path.islink(myfile_path):
						try:
							apply_secpass_permissions(myfile_path,
								gid=portage_gid, mode=0o664, mask=0o2,
								stat_cached=mystat)
						except PortageException as e:
							if not os.access(myfile_path, os.R_OK):
								writemsg(_("!!! Failed to adjust permissions:"
									" %s\n") % str(e), noiselevel=-1)
							del e
					continue

				if distdir_writable and mystat is None:
					# Remove broken symlinks if necessary.
					try:
						os.unlink(myfile_path)
					except OSError:
						pass

				if mystat is not None:
					if stat.S_ISDIR(mystat.st_mode):
						writemsg_level(
							_("!!! Unable to fetch file since "
							"a directory is in the way: \n"
							"!!!   %s\n") % myfile_path,
							level=logging.ERROR, noiselevel=-1)
						return 0

					if mystat.st_size == 0:
						if distdir_writable:
							try:
								os.unlink(myfile_path)
							except OSError:
								pass
					elif distdir_writable and size is not None:
						if mystat.st_size < fetch_resume_size and \
							mystat.st_size < size:
							# If the file already exists and the size does not
							# match the existing digests, it may be that the
							# user is attempting to update the digest. In this
							# case, the digestgen() function will advise the
							# user to use `ebuild --force foo.ebuild manifest`
							# in order to force the old digests to be replaced.
							# Since the user may want to keep this file, rename
							# it instead of deleting it.
							writemsg(_(">>> Renaming distfile with size "
								"%d (smaller than " "PORTAGE_FETCH_RESU"
								"ME_MIN_SIZE)\n") % mystat.st_size)
							temp_filename = \
								_checksum_failure_temp_file(
								mysettings["DISTDIR"], myfile)
							writemsg_stdout(_("Refetching... "
								"File renamed to '%s'\n\n") % \
								temp_filename, noiselevel=-1)
						elif mystat.st_size >= size:
							temp_filename = \
								_checksum_failure_temp_file(
								mysettings["DISTDIR"], myfile)
							writemsg_stdout(_("Refetching... "
								"File renamed to '%s'\n\n") % \
								temp_filename, noiselevel=-1)

				if distdir_writable and ro_distdirs:
					readonly_file = None
					for x in ro_distdirs:
						filename = os.path.join(x, myfile)
						match, mystat = _check_distfile(
							filename, pruned_digests, eout, hash_filter=hash_filter)
						if match:
							readonly_file = filename
							break
					if readonly_file is not None:
						try:
							os.unlink(myfile_path)
						except OSError as e:
							if e.errno not in (errno.ENOENT, errno.ESTALE):
								raise
							del e
						os.symlink(readonly_file, myfile_path)
						continue

				# this message is shown only after we know that
				# the file is not already fetched
				if not has_space:
					writemsg(_("!!! Insufficient space to store %s in %s\n") % \
						(myfile, mysettings["DISTDIR"]), noiselevel=-1)

					if has_space_superuser:
						writemsg(_("!!! Insufficient privileges to use "
							"remaining space.\n"), noiselevel=-1)
						if userfetch:
							writemsg(_("!!! You may set FEATURES=\"-userfetch\""
								" in /etc/make.conf in order to fetch with\n"
								"!!! superuser privileges.\n"), noiselevel=-1)

				if fsmirrors and not os.path.exists(myfile_path) and has_space:
					for mydir in fsmirrors:
						mirror_file = os.path.join(mydir, myfile)
						try:
							shutil.copyfile(mirror_file, myfile_path)
							writemsg(_("Local mirror has file: %s\n") % myfile)
							break
						except (IOError, OSError) as e:
							if e.errno not in (errno.ENOENT, errno.ESTALE):
								raise
							del e

				try:
					mystat = os.stat(myfile_path)
				except OSError as e:
					if e.errno not in (errno.ENOENT, errno.ESTALE):
						raise
					del e
				else:
					# Skip permission adjustment for symlinks, since we don't
					# want to modify anything outside of the primary DISTDIR,
					# and symlinks typically point to PORTAGE_RO_DISTDIRS.
					if not os.path.islink(myfile_path):
						try:
							apply_secpass_permissions(myfile_path,
								gid=portage_gid, mode=0o664, mask=0o2,
								stat_cached=mystat)
						except PortageException as e:
							if not os.access(myfile_path, os.R_OK):
								writemsg(_("!!! Failed to adjust permissions:"
									" %s\n") % (e,), noiselevel=-1)

					# If the file is empty then it's obviously invalid. Remove
					# the empty file and try to download if possible.
					if mystat.st_size == 0:
						if distdir_writable:
							try:
								os.unlink(myfile_path)
							except EnvironmentError:
								pass
					elif myfile not in mydigests:
						# We don't have a digest, but the file exists.  We must
						# assume that it is fully downloaded.
						continue
					else:
						if mystat.st_size < mydigests[myfile]["size"] and \
							not restrict_fetch:
							fetched = 1 # Try to resume this download.
						elif parallel_fetchonly and \
							mystat.st_size == mydigests[myfile]["size"]:
							eout = EOutput()
							eout.quiet = \
								mysettings.get("PORTAGE_QUIET") == "1"
							eout.ebegin(
								"%s size ;-)" % (myfile, ))
							eout.eend(0)
							continue
						else:
							digests = _filter_unaccelarated_hashes(mydigests[myfile])
							if hash_filter is not None:
								digests = _apply_hash_filter(digests, hash_filter)
							verified_ok, reason = verify_all(myfile_path, digests)
							if not verified_ok:
								writemsg(_("!!! Previously fetched"
									" file: '%s'\n") % myfile, noiselevel=-1)
								writemsg(_("!!! Reason: %s\n") % reason[0],
									noiselevel=-1)
								writemsg(_("!!! Got:      %s\n"
									"!!! Expected: %s\n") % \
									(reason[1], reason[2]), noiselevel=-1)
								if reason[0] == _("Insufficient data for checksum verification"):
									return 0
								if distdir_writable:
									temp_filename = \
										_checksum_failure_temp_file(
										mysettings["DISTDIR"], myfile)
									writemsg_stdout(_("Refetching... "
										"File renamed to '%s'\n\n") % \
										temp_filename, noiselevel=-1)
							else:
								eout = EOutput()
								eout.quiet = \
									mysettings.get("PORTAGE_QUIET", None) == "1"
								if digests:
									digests = list(digests)
									digests.sort()
									eout.ebegin(
										"%s %s ;-)" % (myfile, " ".join(digests)))
									eout.eend(0)
								continue # fetch any remaining files

			# Create a reversed list since that is optimal for list.pop().
			uri_list = filedict[myfile][:]
			uri_list.reverse()
			checksum_failure_count = 0
			tried_locations = set()
			while uri_list:
				loc = uri_list.pop()
				# Eliminate duplicates here in case we've switched to
				# "primaryuri" mode on the fly due to a checksum failure.
				if loc in tried_locations:
					continue
				tried_locations.add(loc)
				if listonly:
					writemsg_stdout(loc+" ", noiselevel=-1)
					continue
				# allow different fetchcommands per protocol
				protocol = loc[0:loc.find("://")]

				global_config_path = GLOBAL_CONFIG_PATH
				if mysettings['EPREFIX']:
					global_config_path = os.path.join(mysettings['EPREFIX'],
							GLOBAL_CONFIG_PATH.lstrip(os.sep))

				missing_file_param = False
				fetchcommand_var = "FETCHCOMMAND_" + protocol.upper()
				fetchcommand = mysettings.get(fetchcommand_var)
				if fetchcommand is None:
					fetchcommand_var = "FETCHCOMMAND"
					fetchcommand = mysettings.get(fetchcommand_var)
					if fetchcommand is None:
						writemsg_level(
							_("!!! %s is unset. It should "
							"have been defined in\n!!! %s/make.globals.\n") \
							% (fetchcommand_var, global_config_path),
							level=logging.ERROR, noiselevel=-1)
						return 0
				if "${FILE}" not in fetchcommand:
					writemsg_level(
						_("!!! %s does not contain the required ${FILE}"
						" parameter.\n") % fetchcommand_var,
						level=logging.ERROR, noiselevel=-1)
					missing_file_param = True

				resumecommand_var = "RESUMECOMMAND_" + protocol.upper()
				resumecommand = mysettings.get(resumecommand_var)
				if resumecommand is None:
					resumecommand_var = "RESUMECOMMAND"
					resumecommand = mysettings.get(resumecommand_var)
					if resumecommand is None:
						writemsg_level(
							_("!!! %s is unset. It should "
							"have been defined in\n!!! %s/make.globals.\n") \
							% (resumecommand_var, global_config_path),
							level=logging.ERROR, noiselevel=-1)
						return 0
				if "${FILE}" not in resumecommand:
					writemsg_level(
						_("!!! %s does not contain the required ${FILE}"
						" parameter.\n") % resumecommand_var,
						level=logging.ERROR, noiselevel=-1)
					missing_file_param = True

				if missing_file_param:
					writemsg_level(
						_("!!! Refer to the make.conf(5) man page for "
						"information about how to\n!!! correctly specify "
						"FETCHCOMMAND and RESUMECOMMAND.\n"),
						level=logging.ERROR, noiselevel=-1)
					if myfile != os.path.basename(loc):
						return 0

				if not can_fetch:
					if fetched != 2:
						try:
							mysize = os.stat(myfile_path).st_size
						except OSError as e:
							if e.errno not in (errno.ENOENT, errno.ESTALE):
								raise
							del e
							mysize = 0

						if mysize == 0:
							writemsg(_("!!! File %s isn't fetched but unable to get it.\n") % myfile,
								noiselevel=-1)
						elif size is None or size > mysize:
							writemsg(_("!!! File %s isn't fully fetched, but unable to complete it\n") % myfile,
								noiselevel=-1)
						else:
							writemsg(_("!!! File %s is incorrect size, "
								"but unable to retry.\n") % myfile, noiselevel=-1)
						return 0
					else:
						continue

				if fetched != 2 and has_space:
					#we either need to resume or start the download
					if fetched == 1:
						try:
							mystat = os.stat(myfile_path)
						except OSError as e:
							if e.errno not in (errno.ENOENT, errno.ESTALE):
								raise
							del e
							fetched = 0
						else:
							if mystat.st_size < fetch_resume_size:
								writemsg(_(">>> Deleting distfile with size "
									"%d (smaller than " "PORTAGE_FETCH_RESU"
									"ME_MIN_SIZE)\n") % mystat.st_size)
								try:
									os.unlink(myfile_path)
								except OSError as e:
									if e.errno not in \
										(errno.ENOENT, errno.ESTALE):
										raise
									del e
								fetched = 0
					if fetched == 1:
						#resume mode:
						writemsg(_(">>> Resuming download...\n"))
						locfetch=resumecommand
						command_var = resumecommand_var
					else:
						#normal mode:
						locfetch=fetchcommand
						command_var = fetchcommand_var
					writemsg_stdout(_(">>> Downloading '%s'\n") % \
						_hide_url_passwd(loc))
					variables = {
						"DISTDIR": mysettings["DISTDIR"],
						"URI":     loc,
						"FILE":    myfile
					}

					myfetch = shlex_split(locfetch)
					myfetch = [varexpand(x, mydict=variables) for x in myfetch]
					myret = -1
					try:

						myret = _spawn_fetch(mysettings, myfetch)

					finally:
						try:
							apply_secpass_permissions(myfile_path,
								gid=portage_gid, mode=0o664, mask=0o2)
						except FileNotFound:
							pass
						except PortageException as e:
							if not os.access(myfile_path, os.R_OK):
								writemsg(_("!!! Failed to adjust permissions:"
									" %s\n") % str(e), noiselevel=-1)
							del e

					# If the file is empty then it's obviously invalid.  Don't
					# trust the return value from the fetcher.  Remove the
					# empty file and try to download again.
					try:
						if os.stat(myfile_path).st_size == 0:
							os.unlink(myfile_path)
							fetched = 0
							continue
					except EnvironmentError:
						pass

					if mydigests is not None and myfile in mydigests:
						try:
							mystat = os.stat(myfile_path)
						except OSError as e:
							if e.errno not in (errno.ENOENT, errno.ESTALE):
								raise
							del e
							fetched = 0
						else:

							if stat.S_ISDIR(mystat.st_mode):
								# This can happen if FETCHCOMMAND erroneously
								# contains wget's -P option where it should
								# instead have -O.
								writemsg_level(
									_("!!! The command specified in the "
									"%s variable appears to have\n!!! "
									"created a directory instead of a "
									"normal file.\n") % command_var,
									level=logging.ERROR, noiselevel=-1)
								writemsg_level(
									_("!!! Refer to the make.conf(5) "
									"man page for information about how "
									"to\n!!! correctly specify "
									"FETCHCOMMAND and RESUMECOMMAND.\n"),
									level=logging.ERROR, noiselevel=-1)
								return 0

							# No exception, so the file exists. Let digestcheck()
							# report appropriately for size or checksum errors.

							# If the fetcher reported success and the file is
							# too small, it's probably because the digest is
							# bad (upstream changed the distfile).  In this
							# case we don't want to attempt to resume. Show a
							# digest verification failure so that the user gets
							# a clue about what just happened.
							if myret != os.EX_OK and \
								mystat.st_size < mydigests[myfile]["size"]:
								# Fetch failed... Try the next one... Kill 404 files though.
								if (mystat[stat.ST_SIZE]<100000) and (len(myfile)>4) and not ((myfile[-5:]==".html") or (myfile[-4:]==".htm")):
									html404=re.compile("<title>.*(not found|404).*</title>",re.I|re.M)
									with io.open(
										_unicode_encode(myfile_path,
										encoding=_encodings['fs'], errors='strict'),
										mode='r', encoding=_encodings['content'], errors='replace'
										) as f:
										if html404.search(f.read()):
											try:
												os.unlink(mysettings["DISTDIR"]+"/"+myfile)
												writemsg(_(">>> Deleting invalid distfile. (Improper 404 redirect from server.)\n"))
												fetched = 0
												continue
											except (IOError, OSError):
												pass
								fetched = 1
								continue
							if True:
								# File is the correct size--check the checksums for the fetched
								# file NOW, for those users who don't have a stable/continuous
								# net connection. This way we have a chance to try to download
								# from another mirror...
								digests = _filter_unaccelarated_hashes(mydigests[myfile])
								if hash_filter is not None:
									digests = _apply_hash_filter(digests, hash_filter)
								verified_ok, reason = verify_all(myfile_path, digests)
								if not verified_ok:
									writemsg(_("!!! Fetched file: %s VERIFY FAILED!\n") % myfile,
										noiselevel=-1)
									writemsg(_("!!! Reason: %s\n") % reason[0],
										noiselevel=-1)
									writemsg(_("!!! Got:      %s\n!!! Expected: %s\n") % \
										(reason[1], reason[2]), noiselevel=-1)
									if reason[0] == _("Insufficient data for checksum verification"):
										return 0
									temp_filename = \
										_checksum_failure_temp_file(
										mysettings["DISTDIR"], myfile)
									writemsg_stdout(_("Refetching... "
										"File renamed to '%s'\n\n") % \
										temp_filename, noiselevel=-1)
									fetched=0
									checksum_failure_count += 1
									if checksum_failure_count == \
										checksum_failure_primaryuri:
										# Switch to "primaryuri" mode in order
										# to increase the probability
										# of success.
										primaryuris = \
											primaryuri_dict.get(myfile)
										if primaryuris:
											uri_list.extend(
												reversed(primaryuris))
									if checksum_failure_count >= \
										checksum_failure_max_tries:
										break
								else:
									eout = EOutput()
									eout.quiet = mysettings.get("PORTAGE_QUIET", None) == "1"
									if digests:
										eout.ebegin("%s %s ;-)" % \
											(myfile, " ".join(sorted(digests))))
										eout.eend(0)
									fetched=2
									break
					else:
						if not myret:
							fetched=2
							break
						elif mydigests is not None:
							writemsg(_("No digest file available and download failed.\n\n"),
								noiselevel=-1)
		finally:
			if use_locks and file_lock:
				unlockfile(file_lock)
				file_lock = None

		if listonly:
			writemsg_stdout("\n", noiselevel=-1)
		if fetched != 2:
			if restrict_fetch and not restrict_fetch_msg:
				restrict_fetch_msg = True
				msg = _("\n!!! %s/%s"
					" has fetch restriction turned on.\n"
					"!!! This probably means that this "
					"ebuild's files must be downloaded\n"
					"!!! manually.  See the comments in"
					" the ebuild for more information.\n\n") % \
					(mysettings["CATEGORY"], mysettings["PF"])
				writemsg_level(msg,
					level=logging.ERROR, noiselevel=-1)
			elif restrict_fetch:
				pass
			elif listonly:
				pass
			elif not filedict[myfile]:
				writemsg(_("Warning: No mirrors available for file"
					" '%s'\n") % (myfile), noiselevel=-1)
			else:
				writemsg(_("!!! Couldn't download '%s'. Aborting.\n") % myfile,
					noiselevel=-1)

			if listonly or fetchonly:
				failed_files.add(myfile)
				continue
			return 0
	if failed_files:
		return 0
	return 1
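
This fragment is the tail of portage's fetch() helper, which returns 1 on success and 0 on failure. A minimal usage sketch, assuming the conventional myuris shape (each distfile basename mapped to its candidate URIs; the URI and distfile name below are illustrative):

import portage
from portage.package.ebuild.fetch import fetch

settings = portage.config(clone=portage.settings)
# Map each distfile basename to the URIs it may be downloaded from.
myuris = {"foo-1.0.tar.gz": ["https://example.org/foo-1.0.tar.gz"]}
if not fetch(myuris, settings):
    print("fetch failed for one or more distfiles")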
Example #37
def ExtractKernelVersion(base_dir):
	"""
	Try to figure out what kernel version we are running
	@param base_dir: Path to sources (usually /usr/src/linux)
	@type base_dir: string
	@rtype: tuple( version[string], error[string])
	@return: tuple( version[string], error[string])
		Either version or error is populated (but never both)
	"""
	lines = []
	pathname = os.path.join(base_dir, 'Makefile')
	try:
		f = io.open(_unicode_encode(pathname,
			encoding=_encodings['fs'], errors='strict'), mode='r',
			encoding=_encodings['content'], errors='replace')
	except (IOError, OSError) as details:
		return (None, str(details))

	try:
		for i in range(4):
			lines.append(f.readline())
	except (IOError, OSError) as details:
		return (None, str(details))
	finally:
		f.close()

	lines = [l.strip() for l in lines]

	version = ''

	#XXX: The following code relies on the ordering of vars within the Makefile
	for line in lines:
		# split on the '=' then remove annoying whitespace
		items = line.split("=")
		items = [i.strip() for i in items]
		if items[0] == 'VERSION' or \
			items[0] == 'PATCHLEVEL':
			version += items[1]
			version += "."
		elif items[0] == 'SUBLEVEL':
			version += items[1]
		elif items[0] == 'EXTRAVERSION' and \
			items[-1] != items[0]:
			# items[-1] == items[0] means the line had no '=' at all,
			# so there would be no items[1] to append
			version += items[1]

	# Grab a sorted list of files named localversion*
	localversions = sorted(
		x for x in os.listdir(base_dir) if x.startswith("localversion"))

	# Append the contents of each to the version string, stripping ALL whitespace
	for lv in localversions:
		version += "".join(" ".join(grabfile(base_dir + "/" + lv)).split())

	# Check the .config for a CONFIG_LOCALVERSION and append that too, also stripping whitespace
	loader = KeyValuePairFileLoader(os.path.join(base_dir, ".config"), None)
	kernelconfig, loader_errors = loader.load()
	if loader_errors:
		for file_path, file_errors in loader_errors.items():
			for error_str in file_errors:
				writemsg_level("%s: %s\n" % (file_path, error_str),
					level=logging.ERROR, noiselevel=-1)

	if kernelconfig and "CONFIG_LOCALVERSION" in kernelconfig:
		version += "".join(shlex_split(kernelconfig["CONFIG_LOCALVERSION"]))

	return (version, None)
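
A usage sketch based on the docstring contract (the kernel source path is illustrative):

version, error = ExtractKernelVersion("/usr/src/linux")
if error is not None:
	print("could not determine kernel version: %s" % error)
else:
	print("kernel sources are version %s" % version)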
Example #38
    def _add_repositories(portdir, portdir_overlay, prepos, ignored_map,
                          local_config, default_portdir):
        """Add overlays in PORTDIR_OVERLAY as repositories"""
        overlays = []
        portdir_orig = None
        if portdir:
            portdir = normalize_path(portdir)
            portdir_orig = portdir
            overlays.append(portdir)
        try:
            port_ov = [normalize_path(i) for i in shlex_split(portdir_overlay)]
        except ValueError as e:
            #File "/usr/lib/python3.2/shlex.py", line 168, in read_token
            #	raise ValueError("No closing quotation")
            writemsg(_("!!! Invalid PORTDIR_OVERLAY:"
                       " %s: %s\n") % (e, portdir_overlay),
                     noiselevel=-1)
            port_ov = []
        overlays.extend(port_ov)
        default_repo_opts = {}
        if prepos['DEFAULT'].aliases is not None:
            default_repo_opts['aliases'] = ' '.join(prepos['DEFAULT'].aliases)
        if prepos['DEFAULT'].eclass_overrides is not None:
            default_repo_opts['eclass-overrides'] = \
                ' '.join(prepos['DEFAULT'].eclass_overrides)
        if prepos['DEFAULT'].masters is not None:
            default_repo_opts['masters'] = ' '.join(prepos['DEFAULT'].masters)

        if overlays:
            # We need a copy of the original repos.conf data, since we're
            # going to modify the prepos dict and some of the RepoConfig
            # objects that we put in prepos may have to be discarded if
            # they get overridden by a repository with the same name but
            # a different location. This is common with repoman, for example,
            # when temporarily overriding an rsync repo with another copy
            # of the same repo from CVS.
            repos_conf = prepos.copy()
            # overlay priority is negative because we want them to be consulted before any other repo
            base_priority = 0
            for ov in overlays:
                # Ignore missing directory for 'gentoo' so that
                # first sync with emerge-webrsync is possible.
                if isdir_raise_eaccess(ov) or \
                        (base_priority == 0 and ov is portdir):
                    repo_opts = default_repo_opts.copy()
                    repo_opts['location'] = ov
                    repo = RepoConfig(None,
                                      repo_opts,
                                      local_config=local_config)
                    # repos_conf_opts contains options from repos.conf
                    repos_conf_opts = repos_conf.get(repo.name)
                    if repos_conf_opts is not None:
                        # Selectively copy only the attributes which
                        # repos.conf is allowed to override.
                        for k in ('aliases', 'auto_sync', 'eclass_overrides',
                                  'force', 'masters', 'priority', 'sync_depth',
                                  'sync_hooks_only_on_change', 'sync_type',
                                  'sync_umask', 'sync_uri', 'sync_user',
                                  'module_specific_options'):
                            v = getattr(repos_conf_opts, k, None)
                            if v is not None:
                                setattr(repo, k, v)

                    if repo.name in prepos:
                        # Silently ignore when PORTDIR overrides the location
                        # setting from the default repos.conf (bug #478544).
                        old_location = prepos[repo.name].location
                        if old_location is not None and \
                                old_location != repo.location and \
                                not (base_priority == 0 and
                                        old_location == default_portdir):
                            ignored_map.setdefault(repo.name,
                                                   []).append(old_location)
                            if old_location == portdir:
                                portdir = repo.location

                    if repo.priority is None:
                        if base_priority == 0 and ov == portdir_orig:
                            # If it's the original PORTDIR setting and it's not
                            # in PORTDIR_OVERLAY, then it will be assigned a
                            # special priority setting later.
                            pass
                        else:
                            repo.priority = base_priority
                            base_priority += 1

                    prepos[repo.name] = repo
                else:
                    if not portage._sync_mode:
                        writemsg(
                            _("!!! Invalid PORTDIR_OVERLAY (not a dir): '%s'\n"
                              ) % ov,
                            noiselevel=-1)

        return portdir
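
The shlex_split call above is what lets PORTDIR_OVERLAY carry quoted paths containing spaces. A small illustration using the stdlib shlex.split, which behaves equivalently for this purpose (the paths are illustrative, and os.path.normpath stands in for portage's normalize_path):

import shlex
from os.path import normpath  # stand-in for portage's normalize_path

portdir_overlay = "/usr/local/overlay '/var/lib/my overlay'"
overlays = [normpath(i) for i in shlex.split(portdir_overlay)]
print(overlays)  # ['/usr/local/overlay', '/var/lib/my overlay']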
Example #39
def parse_args(argv, repoman_default_opts):
    """Use a customized optionParser to parse command line arguments for repoman
    Args:
            argv - a sequence of command line arguments
    Returns:
            (opts, args), just like a call to parser.parse_args()
    """

    argv = portage._decode_argv(argv)

    modes = {
        "commit": "Run a scan then commit changes",
        "ci": "Run a scan then commit changes",
        "fix": "Fix simple QA issues (stray digests, missing digests)",
        "full": "Scan directory tree and print all issues (not a summary)",
        "help": "Show this screen",
        "manifest": "Generate a Manifest (fetches files if necessary)",
        "manifest-check": "Check Manifests for missing or incorrect digests",
        "scan": "Scan directory tree for QA issues",
    }

    output_choices = {
        "default": "The normal output format",
        "column": "Columnar output suitable for use with grep",
    }

    mode_keys = list(modes)
    mode_keys.sort()

    output_keys = sorted(output_choices)

    parser = argparse.ArgumentParser(
        usage="repoman [options] [mode]",
        description="Modes: %s" % " | ".join(mode_keys),
        epilog="For more help consult the man page.",
    )

    parser.add_argument(
        "-a",
        "--ask",
        dest="ask",
        action="store_true",
        default=False,
        help="Request a confirmation before commiting",
    )

    parser.add_argument(
        "-b",
        "--bug",
        dest="bug",
        action="append",
        metavar="<BUG-NO|BUG-URL>",
        default=[],
        help=(
            "Mention a Gentoo or upstream bug in the commit footer; "
            "takes either Gentoo bug number or full bug URL"
        ),
    )

    parser.add_argument(
        "-c",
        "--closes",
        dest="closes",
        action="append",
        metavar="<PR-NO|PR-URL>",
        default=[],
        help=(
            "Adds a Closes footer to close GitHub pull request (or compatible); "
            "takes either GitHub PR number or full PR URL"
        ),
    )

    parser.add_argument(
        "-m",
        "--commitmsg",
        dest="commitmsg",
        help="specify a commit message on the command line",
    )

    parser.add_argument(
        "-M",
        "--commitmsgfile",
        dest="commitmsgfile",
        help="specify a path to a file that contains a commit message",
    )

    parser.add_argument(
        "--digest",
        choices=("y", "n"),
        metavar="<y|n>",
        help="Automatically update Manifest digests for modified files",
    )

    parser.add_argument(
        "-p",
        "--pretend",
        dest="pretend",
        default=False,
        action="store_true",
        help="don't commit or fix anything; just show what would be done",
    )

    parser.add_argument(
        "-q",
        "--quiet",
        dest="quiet",
        action="count",
        default=0,
        help="do not print unnecessary messages",
    )

    parser.add_argument(
        "--echangelog",
        choices=("y", "n", "force"),
        metavar="<y|n|force>",
        help=(
            "for commit mode, call echangelog if ChangeLog is unmodified (or "
            "regardless of modification if 'force' is specified)"
        ),
    )

    parser.add_argument(
        "--experimental-inherit",
        choices=("y", "n"),
        metavar="<y|n>",
        default="n",
        help=(
            "Enable experimental inherit.missing checks which may misbehave"
            " when the internal eclass database becomes outdated"
        ),
    )

    parser.add_argument(
        "--experimental-repository-modules",
        choices=("y", "n"),
        metavar="<y|n>",
        default="n",
        help="Enable experimental repository modules",
    )

    parser.add_argument(
        "-f",
        "--force",
        dest="force",
        action="store_true",
        default=False,
        help="Commit with QA violations",
    )

    parser.add_argument(
        "-S",
        "--straight-to-stable",
        dest="straight_to_stable",
        default=False,
        action="store_true",
        help="Allow committing straight to stable",
    )

    parser.add_argument(
        "--vcs", dest="vcs", help="Force using specific VCS instead of autodetection"
    )

    parser.add_argument(
        "-v",
        "--verbose",
        dest="verbosity",
        action="count",
        help="be very verbose in output",
        default=0,
    )

    parser.add_argument(
        "-V", "--version", dest="version", action="store_true", help="show version info"
    )

    parser.add_argument(
        "-x",
        "--xmlparse",
        dest="xml_parse",
        action="store_true",
        default=False,
        help="forces the metadata.xml parse check to be carried out",
    )

    parser.add_argument(
        "--if-modified",
        choices=("y", "n"),
        default="n",
        metavar="<y|n>",
        help="only check packages that have uncommitted modifications",
    )

    parser.add_argument(
        "-i",
        "--ignore-arches",
        dest="ignore_arches",
        action="store_true",
        default=False,
        help="ignore arch-specific failures (where arch != host)",
    )

    parser.add_argument(
        "--ignore-default-opts",
        action="store_true",
        help="do not use the REPOMAN_DEFAULT_OPTS environment variable",
    )

    parser.add_argument(
        "-I",
        "--ignore-masked",
        dest="ignore_masked",
        action="store_true",
        default=False,
        help="ignore masked packages (not allowed with commit mode)",
    )

    parser.add_argument(
        "--include-arches",
        dest="include_arches",
        metavar="ARCHES",
        action="append",
        help=(
            "A space separated list of arches used to "
            "filter the selection of profiles for dependency checks"
        ),
    )

    parser.add_argument(
        "--include-profiles",
        dest="include_profiles",
        metavar="PROFILES",
        action="append",
        help=(
            "A space separated list of profiles used to "
            "define the selection of profiles for dependency checks"
        ),
    )

    parser.add_argument(
        "-d",
        "--include-dev",
        dest="include_dev",
        action="store_true",
        default=False,
        help="include dev profiles in dependency checks",
    )

    parser.add_argument(
        "-e",
        "--include-exp-profiles",
        choices=("y", "n"),
        metavar="<y|n>",
        default=False,
        help="include exp profiles in dependency checks",
    )

    parser.add_argument(
        "--unmatched-removal",
        dest="unmatched_removal",
        action="store_true",
        default=False,
        help=(
            "enable strict checking of package.mask and package.unmask files"
            " for unmatched removal atoms"
        ),
    )

    parser.add_argument(
        "--without-mask",
        dest="without_mask",
        action="store_true",
        default=False,
        help=(
            "behave as if no package.mask entries exist"
            " (not allowed with commit mode)"
        ),
    )

    parser.add_argument(
        "--output-style",
        dest="output_style",
        choices=output_keys,
        help="select output type",
        default="default",
    )

    parser.add_argument(
        "-j",
        "--jobs",
        dest="jobs",
        action="store",
        type=int,
        default=1,
        help="Specifies the number of jobs (processes) to run simultaneously.",
    )

    parser.add_argument(
        "-l",
        "--load-average",
        dest="load_average",
        action="store",
        type=float,
        default=None,
        help="Specifies that no new jobs (processes) should be started if there are others "
        "jobs running and the load average is at least load (a floating-point number).",
    )

    parser.add_argument(
        "--mode",
        dest="mode",
        choices=mode_keys,
        help="specify which mode repoman will run in (default=full)",
    )

    # Modes help is included earlier, in the parser description.
    parser.add_argument(
        "mode_positional",
        nargs="?",
        metavar="mode",
        choices=mode_keys,
        help=argparse.SUPPRESS,
    )

    opts = parser.parse_args(argv[1:])

    if not opts.ignore_default_opts:
        default_opts = util.shlex_split(repoman_default_opts)
        if default_opts:
            opts = parser.parse_args(default_opts + argv[1:])

    args = []
    if opts.mode is not None:
        args.append(opts.mode)
    if opts.mode_positional is not None:
        args.append(opts.mode_positional)

    if len(set(args)) > 1:
        parser.error("multiple modes specified: %s" % " ".join(args))

    opts.mode = args[0] if args else None

    if opts.mode == "help":
        parser.print_help()
        parser.exit()

    if not opts.mode:
        opts.mode = "full"

    if opts.mode == "ci":
        opts.mode = "commit"  # backwards compat shortcut

    # Use verbosity and quiet options to appropriately fiddle with the loglevel
    for val in range(opts.verbosity):
        logger = logging.getLogger()
        logger.setLevel(logger.getEffectiveLevel() - 10)

    for val in range(opts.quiet):
        logger = logging.getLogger()
        logger.setLevel(logger.getEffectiveLevel() + 10)

    if opts.mode == "commit" and opts.commitmsg:
        opts.commitmsg = _unicode_decode(opts.commitmsg)

    if opts.mode == "commit" and not (opts.force or opts.pretend):
        if opts.ignore_masked:
            opts.ignore_masked = False
            logging.warn("Commit mode automatically disables --ignore-masked")
        if opts.without_mask:
            opts.without_mask = False
            logging.warn("Commit mode automatically disables --without-mask")

    return (opts, args)
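
A hedged usage sketch for this parser; defaults are re-parsed in front of the explicit arguments unless --ignore-default-opts is given, mirroring the code above (the environment lookup is illustrative):

import os
import sys

opts, args = parse_args(sys.argv, os.environ.get("REPOMAN_DEFAULT_OPTS", ""))
print(opts.mode, opts.verbosity)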
Example #40
    def _xpak_start(self):
        tar_options = ""
        if "xattr" in self.features:
            process = subprocess.Popen(["tar", "--help"],
                                       stdout=subprocess.PIPE,
                                       stderr=subprocess.PIPE)
            output = process.communicate()[0]
            if b"--xattrs" in output:
                tar_options = ["--xattrs", "--xattrs-include='*'"]
                for x in portage.util.shlex_split(
                        self.env.get("PORTAGE_XATTR_EXCLUDE", "")):
                    tar_options.append(
                        portage._shell_quote("--xattrs-exclude=%s" % x))
                tar_options = " ".join(tar_options)

        decomp = _compressors.get(compression_probe(self.pkg_path))
        if decomp is not None:
            decomp_cmd = decomp.get("decompress")
        elif tarfile.is_tarfile(
                portage._unicode_encode(self.pkg_path,
                                        encoding=portage._encodings["fs"],
                                        errors="strict")):
            decomp_cmd = "cat"
            decomp = {
                "compress": "cat",
                "package": "sys-apps/coreutils",
            }
        else:
            decomp_cmd = None
        if decomp_cmd is None:
            self.scheduler.output(
                "!!! %s\n" % _("File compression header unrecognized: %s") %
                self.pkg_path,
                log_path=self.logfile,
                background=self.background,
                level=logging.ERROR,
            )
            self.returncode = 1
            self._async_wait()
            return

        try:
            decompression_binary = shlex_split(
                varexpand(decomp_cmd, mydict=self.env))[0]
        except IndexError:
            decompression_binary = ""

        if find_binary(decompression_binary) is None:
            # Try alternative command if it exists
            if decomp.get("decompress_alt"):
                decomp_cmd = decomp.get("decompress_alt")
            try:
                decompression_binary = shlex_split(
                    varexpand(decomp_cmd, mydict=self.env))[0]
            except IndexError:
                decompression_binary = ""

            if find_binary(decompression_binary) is None:
                missing_package = decomp.get("package")
                self.scheduler.output(
                    "!!! %s\n" %
                    _("File compression unsupported %s.\n Command was: %s.\n Maybe missing package: %s"
                      ) % (
                          self.pkg_path,
                          varexpand(decomp_cmd, mydict=self.env),
                          missing_package,
                      ),
                    log_path=self.logfile,
                    background=self.background,
                    level=logging.ERROR,
                )
                self.returncode = 1
                self._async_wait()
                return

        pkg_xpak = portage.xpak.tbz2(self.pkg_path)
        pkg_xpak.scan()

        # SIGPIPE handling (128 + SIGPIPE) should be compatible with
        # assert_sigpipe_ok() that's used by the ebuild unpack() helper.
        self.args = [
            self._shell_binary,
            "-c",
            ("cmd0=(head -c %d -- %s) cmd1=(%s) cmd2=(tar -xp %s -C %s -f -); "
             + '"${cmd0[@]}" | "${cmd1[@]}" | "${cmd2[@]}"; ' +
             "p=(${PIPESTATUS[@]}) ; for i in {0..2}; do " +
             "if [[ ${p[$i]} != 0 && ${p[$i]} != %d ]] ; then " +
             'echo command $(eval "echo \\"\'\\${cmd$i[*]}\'\\"") ' +
             "failed with status ${p[$i]} ; exit ${p[$i]} ; fi ; done; " +
             "if [ ${p[$i]} != 0 ] ; then " +
             'echo command $(eval "echo \\"\'\\${cmd$i[*]}\'\\"") ' +
             "failed with status ${p[$i]} ; exit ${p[$i]} ; fi ; " +
             "exit 0 ;") % (
                 pkg_xpak.filestat.st_size - pkg_xpak.xpaksize,
                 portage._shell_quote(self.pkg_path),
                 decomp_cmd,
                 tar_options,
                 portage._shell_quote(self.image_dir),
                 128 + signal.SIGPIPE,
             ),
        ]

        SpawnProcess._start(self)
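
The head -c byte count works because a Gentoo binary package is a compressed tar archive with an xpak metadata segment appended at the end. A rough sketch of that arithmetic, reusing the same xpak accessors the method calls (the package path is illustrative):

import portage.xpak

pkg_xpak = portage.xpak.tbz2("/var/cache/binpkgs/dev-libs/foo-1.0.tbz2")
pkg_xpak.scan()  # locates the trailing xpak segment
tar_bytes = pkg_xpak.filestat.st_size - pkg_xpak.xpaksize
# 'head -c tar_bytes' passes only the archive part to the decompressor,
# leaving the xpak metadata trailer behind.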
Example #41
def parse_args(argv, qahelp, repoman_default_opts):
    """Use a customized optionParser to parse command line arguments for repoman
	Args:
		argv - a sequence of command line arguments
		qahelp - a dict of qa warning to help message
	Returns:
		(opts, args), just like a call to parser.parse_args()
	"""

    argv = portage._decode_argv(argv)

    modes = {
        "commit": "Run a scan then commit changes",
        "ci": "Run a scan then commit changes",
        "fix": "Fix simple QA issues (stray digests, missing digests)",
        "full": "Scan directory tree and print all issues (not a summary)",
        "help": "Show this screen",
        "manifest": "Generate a Manifest (fetches files if necessary)",
        "manifest-check": "Check Manifests for missing or incorrect digests",
        "scan": "Scan directory tree for QA issues",
    }

    output_choices = {"default": "The normal output format", "column": "Columnar output suitable for use with grep"}

    mode_keys = list(modes)
    mode_keys.sort()

    output_keys = sorted(output_choices)

    parser = ArgumentParser(
        usage="repoman [options] [mode]",
        description="Modes: %s" % " | ".join(mode_keys),
        epilog="For more help consult the man page.",
    )

    parser.add_argument(
        "-a", "--ask", dest="ask", action="store_true", default=False, help="Request a confirmation before commiting"
    )

    parser.add_argument("-m", "--commitmsg", dest="commitmsg", help="specify a commit message on the command line")

    parser.add_argument(
        "-M", "--commitmsgfile", dest="commitmsgfile", help="specify a path to a file that contains a commit message"
    )

    parser.add_argument(
        "--digest", choices=("y", "n"), metavar="<y|n>", help="Automatically update Manifest digests for modified files"
    )

    parser.add_argument(
        "-p",
        "--pretend",
        dest="pretend",
        default=False,
        action="store_true",
        help="don't commit or fix anything; just show what would be done",
    )

    parser.add_argument(
        "-q", "--quiet", dest="quiet", action="count", default=0, help="do not print unnecessary messages"
    )

    parser.add_argument(
        "--echangelog",
        choices=("y", "n", "force"),
        metavar="<y|n|force>",
        help=(
            "for commit mode, call echangelog if ChangeLog is unmodified (or "
            "regardless of modification if 'force' is specified)"
        ),
    )

    parser.add_argument(
        "--experimental-inherit",
        choices=("y", "n"),
        metavar="<y|n>",
        default="n",
        help=(
            "Enable experimental inherit.missing checks which may misbehave"
            " when the internal eclass database becomes outdated"
        ),
    )

    parser.add_argument(
        "-f", "--force", dest="force", action="store_true", default=False, help="Commit with QA violations"
    )

    parser.add_argument(
        "-S",
        "--straight-to-stable",
        dest="straight_to_stable",
        default=False,
        action="store_true",
        help="Allow committing straight to stable",
    )

    parser.add_argument("--vcs", dest="vcs", help="Force using specific VCS instead of autodetection")

    parser.add_argument(
        "-v", "--verbose", dest="verbosity", action="count", help="be very verbose in output", default=0
    )

    parser.add_argument("-V", "--version", dest="version", action="store_true", help="show version info")

    parser.add_argument(
        "-x",
        "--xmlparse",
        dest="xml_parse",
        action="store_true",
        default=False,
        help="forces the metadata.xml parse check to be carried out",
    )

    parser.add_argument(
        "--if-modified",
        choices=("y", "n"),
        default="n",
        metavar="<y|n>",
        help="only check packages that have uncommitted modifications",
    )

    parser.add_argument(
        "-i",
        "--ignore-arches",
        dest="ignore_arches",
        action="store_true",
        default=False,
        help="ignore arch-specific failures (where arch != host)",
    )

    parser.add_argument(
        "--ignore-default-opts", action="store_true", help="do not use the REPOMAN_DEFAULT_OPTS environment variable"
    )

    parser.add_argument(
        "-I",
        "--ignore-masked",
        dest="ignore_masked",
        action="store_true",
        default=False,
        help="ignore masked packages (not allowed with commit mode)",
    )

    parser.add_argument(
        "--include-arches",
        dest="include_arches",
        metavar="ARCHES",
        action="append",
        help=("A space separated list of arches used to " "filter the selection of profiles for dependency checks"),
    )

    parser.add_argument(
        "-d",
        "--include-dev",
        dest="include_dev",
        action="store_true",
        default=False,
        help="include dev profiles in dependency checks",
    )

    parser.add_argument(
        "-e",
        "--include-exp-profiles",
        choices=("y", "n"),
        metavar="<y|n>",
        default=False,
        help="include exp profiles in dependency checks",
    )

    parser.add_argument(
        "--unmatched-removal",
        dest="unmatched_removal",
        action="store_true",
        default=False,
        help=("enable strict checking of package.mask and package.unmask files" " for unmatched removal atoms"),
    )

    parser.add_argument(
        "--without-mask",
        dest="without_mask",
        action="store_true",
        default=False,
        help=("behave as if no package.mask entries exist" " (not allowed with commit mode)"),
    )

    parser.add_argument(
        "--output-style", dest="output_style", choices=output_keys, help="select output type", default="default"
    )

    parser.add_argument(
        "--mode", dest="mode", choices=mode_keys, help="specify which mode repoman will run in (default=full)"
    )

    opts, args = parser.parse_known_args(argv[1:])

    if not opts.ignore_default_opts:
        default_opts = util.shlex_split(repoman_default_opts)
        if default_opts:
            opts, args = parser.parse_known_args(default_opts + argv[1:])

    if opts.mode == "help":
        parser.print_help(short=False)

    for arg in args:
        if arg in modes:
            if not opts.mode:
                opts.mode = arg
                break
        else:
            parser.error("invalid mode: %s" % arg)

    if not opts.mode:
        opts.mode = "full"

    if opts.mode == "ci":
        opts.mode = "commit"  # backwards compat shortcut

        # Use verbosity and quiet options to appropriately fiddle with the loglevel
    for val in range(opts.verbosity):
        logger = logging.getLogger()
        logger.setLevel(logger.getEffectiveLevel() - 10)

    for val in range(opts.quiet):
        logger = logging.getLogger()
        logger.setLevel(logger.getEffectiveLevel() + 10)

    if opts.mode == "commit" and opts.commitmsg:
        opts.commitmsg = _unicode_decode(opts.commitmsg)

    if opts.mode == "commit" and not (opts.force or opts.pretend):
        if opts.ignore_masked:
            opts.ignore_masked = False
            logging.warn("Commit mode automatically disables --ignore-masked")
        if opts.without_mask:
            opts.without_mask = False
            logging.warn("Commit mode automatically disables --without-mask")

    return (opts, args)
Example #42
	def _add_repositories(portdir, portdir_overlay, prepos, ignored_map, ignored_location_map):
		"""Add overlays in PORTDIR_OVERLAY as repositories"""
		overlays = []
		if portdir:
			portdir = normalize_path(portdir)
			overlays.append(portdir)
		try:
			port_ov = [normalize_path(i) for i in shlex_split(portdir_overlay)]
		except ValueError as e:
			#File "/usr/lib/python3.2/shlex.py", line 168, in read_token
			#	raise ValueError("No closing quotation")
			writemsg(_("!!! Invalid PORTDIR_OVERLAY:"
				" %s: %s\n") % (e, portdir_overlay), noiselevel=-1)
			port_ov = []
		overlays.extend(port_ov)
		default_repo_opts = {}
		if prepos['DEFAULT'].aliases is not None:
			default_repo_opts['aliases'] = \
				' '.join(prepos['DEFAULT'].aliases)
		if prepos['DEFAULT'].eclass_overrides is not None:
			default_repo_opts['eclass-overrides'] = \
				' '.join(prepos['DEFAULT'].eclass_overrides)
		if prepos['DEFAULT'].masters is not None:
			default_repo_opts['masters'] = \
				' '.join(prepos['DEFAULT'].masters)

		if overlays:
			# We need a copy of the original repos.conf data, since we're
			# going to modify the prepos dict and some of the RepoConfig
			# objects that we put in prepos may have to be discarded if
			# they get overridden by a repository with the same name but
			# a different location. This is common with repoman, for example,
			# when temporarily overriding an rsync repo with another copy
			# of the same repo from CVS.
			repos_conf = prepos.copy()
			# overlay priority is negative because we want them to be consulted before any other repo
			base_priority = 0
			for ov in overlays:
				if os.path.isdir(ov):
					repo_opts = default_repo_opts.copy()
					repo_opts['location'] = ov
					repo = RepoConfig(None, repo_opts)
					# repos_conf_opts contains options from repos.conf
					repos_conf_opts = repos_conf.get(repo.name)
					if repos_conf_opts is not None:
						# Selectively copy only the attributes which
						# repos.conf is allowed to override.
						for k in ('aliases', 'eclass_overrides', 'masters', 'priority'):
							v = getattr(repos_conf_opts, k, None)
							if v is not None:
								setattr(repo, k, v)

					if repo.name in prepos:
						old_location = prepos[repo.name].location
						if old_location is not None and old_location != repo.location:
							ignored_map.setdefault(repo.name, []).append(old_location)
							ignored_location_map[old_location] = repo.name
							if old_location == portdir:
								portdir = repo.user_location

					if ov == portdir and portdir not in port_ov:
						repo.priority = -1000
					elif repo.priority is None:
						repo.priority = base_priority
						base_priority += 1

					prepos[repo.name] = repo
				else:
					if not portage._sync_disabled_warnings:
						writemsg(_("!!! Invalid PORTDIR_OVERLAY (not a dir): '%s'\n") % ov, noiselevel=-1)

		return portdir
Example #43
	def _add_repositories(portdir, portdir_overlay, prepos, ignored_map, ignored_location_map):
		"""Add overlays in PORTDIR_OVERLAY as repositories"""
		overlays = []
		if portdir:
			portdir = normalize_path(portdir)
			overlays.append(portdir)
		try:
			port_ov = [normalize_path(i) for i in shlex_split(portdir_overlay)]
		except ValueError as e:
			#File "/usr/lib/python3.2/shlex.py", line 168, in read_token
			#	raise ValueError("No closing quotation")
			writemsg(_("!!! Invalid PORTDIR_OVERLAY:"
				" %s: %s\n") % (e, portdir_overlay), noiselevel=-1)
			port_ov = []
		overlays.extend(port_ov)
		default_repo_opts = {}
		if prepos['DEFAULT'].aliases is not None:
			default_repo_opts['aliases'] = \
				' '.join(prepos['DEFAULT'].aliases)
		if prepos['DEFAULT'].eclass_overrides is not None:
			default_repo_opts['eclass-overrides'] = \
				' '.join(prepos['DEFAULT'].eclass_overrides)
		if prepos['DEFAULT'].masters is not None:
			default_repo_opts['masters'] = \
				' '.join(prepos['DEFAULT'].masters)

		if overlays:
			# We need a copy of the original repos.conf data, since we're
			# going to modify the prepos dict and some of the RepoConfig
			# objects that we put in prepos may have to be discarded if
			# they get overridden by a repository with the same name but
			# a different location. This is common with repoman, for example,
			# when temporarily overriding an rsync repo with another copy
			# of the same repo from CVS.
			repos_conf = prepos.copy()
			# overlay priority is negative because we want them to be consulted before any other repo
			base_priority = 0
			for ov in overlays:
				if os.path.isdir(ov):
					repo_opts = default_repo_opts.copy()
					repo_opts['location'] = ov
					repo = RepoConfig(None, repo_opts)
					# repos_conf_opts contains options from repos.conf
					repos_conf_opts = repos_conf.get(repo.name)
					if repos_conf_opts is not None:
						# Selectively copy only the attributes which
						# repos.conf is allowed to override.
						for k in ('aliases', 'eclass_overrides', 'masters', 'priority'):
							v = getattr(repos_conf_opts, k, None)
							if v is not None:
								setattr(repo, k, v)

					if repo.name in prepos:
						old_location = prepos[repo.name].location
						if old_location is not None and old_location != repo.location:
							ignored_map.setdefault(repo.name, []).append(old_location)
							ignored_location_map[old_location] = repo.name
							if old_location == portdir:
								portdir = repo.user_location

					if ov == portdir and portdir not in port_ov:
						repo.priority = -1000
					elif repo.priority is None:
						repo.priority = base_priority
						base_priority += 1

					prepos[repo.name] = repo
				else:
					writemsg(_("!!! Invalid PORTDIR_OVERLAY"
						" (not a dir): '%s'\n") % ov, noiselevel=-1)

		return portdir
Example #44
def parse_args(argv, repoman_default_opts):
    """Use a customized optionParser to parse command line arguments for repoman
	Args:
		argv - a sequence of command line arguments
	Returns:
		(opts, args), just like a call to parser.parse_args()
	"""

    argv = portage._decode_argv(argv)

    modes = {
        'commit': 'Run a scan then commit changes',
        'ci': 'Run a scan then commit changes',
        'fix': 'Fix simple QA issues (stray digests, missing digests)',
        'full': 'Scan directory tree and print all issues (not a summary)',
        'help': 'Show this screen',
        'manifest': 'Generate a Manifest (fetches files if necessary)',
        'manifest-check': 'Check Manifests for missing or incorrect digests',
        'scan': 'Scan directory tree for QA issues'
    }

    output_choices = {
        'default': 'The normal output format',
        'column': 'Columnar output suitable for use with grep'
    }

    mode_keys = list(modes)
    mode_keys.sort()

    output_keys = sorted(output_choices)

    parser = argparse.ArgumentParser(
        usage="repoman [options] [mode]",
        description="Modes: %s" % " | ".join(mode_keys),
        epilog="For more help consult the man page.")

    parser.add_argument('-a',
                        '--ask',
                        dest='ask',
                        action='store_true',
                        default=False,
                        help='Request a confirmation before committing')

    parser.add_argument(
        '-b',
        '--bug',
        dest='bug',
        action='append',
        metavar='<BUG-NO|BUG-URL>',
        default=[],
        help=('Mention a Gentoo or upstream bug in the commit footer; '
              'takes either Gentoo bug number or full bug URL'))

    parser.add_argument(
        '-c',
        '--closes',
        dest='closes',
        action='append',
        metavar='<PR-NO|PR-URL>',
        default=[],
        help=
        ('Adds a Closes footer to close GitHub pull request (or compatible); '
         'takes either GitHub PR number or full PR URL'))

    parser.add_argument(
        '--copyright',
        dest='copyright',
        default=False,
        action='store_true',
        help=
        ('This option will update the copyright header to include the make.conf'
         ' COPYRIGHT_OWNER value if defined or the default "Gentoo Authors"'))

    parser.add_argument('-m',
                        '--commitmsg',
                        dest='commitmsg',
                        help='specify a commit message on the command line')

    parser.add_argument(
        '-M',
        '--commitmsgfile',
        dest='commitmsgfile',
        help='specify a path to a file that contains a commit message')

    parser.add_argument(
        '--digest',
        choices=('y', 'n'),
        metavar='<y|n>',
        help='Automatically update Manifest digests for modified files')

    parser.add_argument(
        '-p',
        '--pretend',
        dest='pretend',
        default=False,
        action='store_true',
        help='don\'t commit or fix anything; just show what would be done')

    parser.add_argument('-q',
                        '--quiet',
                        dest="quiet",
                        action="count",
                        default=0,
                        help='do not print unnecessary messages')

    parser.add_argument(
        '--echangelog',
        choices=('y', 'n', 'force'),
        metavar="<y|n|force>",
        help=(
            'for commit mode, call echangelog if ChangeLog is unmodified (or '
            'regardless of modification if \'force\' is specified)'))

    parser.add_argument(
        '--experimental-inherit',
        choices=('y', 'n'),
        metavar="<y|n>",
        default='n',
        help=('Enable experimental inherit.missing checks which may misbehave'
              ' when the internal eclass database becomes outdated'))

    parser.add_argument('--experimental-repository-modules',
                        choices=('y', 'n'),
                        metavar="<y|n>",
                        default='n',
                        help='Enable experimental repository modules')

    parser.add_argument('-f',
                        '--force',
                        dest='force',
                        action='store_true',
                        default=False,
                        help='Commit with QA violations')

    parser.add_argument('-S',
                        '--straight-to-stable',
                        dest='straight_to_stable',
                        default=False,
                        action='store_true',
                        help='Allow committing straight to stable')

    parser.add_argument(
        '--vcs',
        dest='vcs',
        help='Force using specific VCS instead of autodetection')

    parser.add_argument('-v',
                        '--verbose',
                        dest="verbosity",
                        action='count',
                        help='be very verbose in output',
                        default=0)

    parser.add_argument('-V',
                        '--version',
                        dest='version',
                        action='store_true',
                        help='show version info')

    parser.add_argument(
        '-x',
        '--xmlparse',
        dest='xml_parse',
        action='store_true',
        default=False,
        help='forces the metadata.xml parse check to be carried out')

    parser.add_argument(
        '--if-modified',
        choices=('y', 'n'),
        default='n',
        metavar="<y|n>",
        help='only check packages that have uncommitted modifications')

    parser.add_argument(
        '-i',
        '--ignore-arches',
        dest='ignore_arches',
        action='store_true',
        default=False,
        help='ignore arch-specific failures (where arch != host)')

    parser.add_argument(
        "--ignore-default-opts",
        action="store_true",
        help="do not use the REPOMAN_DEFAULT_OPTS environment variable")

    parser.add_argument(
        '-I',
        '--ignore-masked',
        dest='ignore_masked',
        action='store_true',
        default=False,
        help='ignore masked packages (not allowed with commit mode)')

    parser.add_argument(
        '--include-arches',
        dest='include_arches',
        metavar='ARCHES',
        action='append',
        help=('A space separated list of arches used to '
              'filter the selection of profiles for dependency checks'))

    parser.add_argument('-d',
                        '--include-dev',
                        dest='include_dev',
                        action='store_true',
                        default=False,
                        help='include dev profiles in dependency checks')

    parser.add_argument('-e',
                        '--include-exp-profiles',
                        choices=('y', 'n'),
                        metavar='<y|n>',
                        default=False,
                        help='include exp profiles in dependency checks')

    parser.add_argument(
        '--unmatched-removal',
        dest='unmatched_removal',
        action='store_true',
        default=False,
        help=('enable strict checking of package.mask and package.unmask files'
              ' for unmatched removal atoms'))

    parser.add_argument('--without-mask',
                        dest='without_mask',
                        action='store_true',
                        default=False,
                        help=('behave as if no package.mask entries exist'
                              ' (not allowed with commit mode)'))

    parser.add_argument('--output-style',
                        dest='output_style',
                        choices=output_keys,
                        help='select output type',
                        default='default')

    parser.add_argument(
        '--mode',
        dest='mode',
        choices=mode_keys,
        help='specify which mode repoman will run in (default=full)')

    opts, args = parser.parse_known_args(argv[1:])

    if not opts.ignore_default_opts:
        default_opts = util.shlex_split(repoman_default_opts)
        if default_opts:
            opts, args = parser.parse_known_args(default_opts + argv[1:])

    if opts.mode == 'help':
        parser.print_help()
        parser.exit()

    for arg in args:
        if arg in modes:
            if not opts.mode:
                opts.mode = arg
                break
        else:
            parser.error("invalid mode: %s" % arg)

    if not opts.mode:
        opts.mode = 'full'

    if opts.mode == 'ci':
        opts.mode = 'commit'  # backwards compat shortcut

    # Use verbosity and quiet options to appropriately fiddle with the loglevel
    for val in range(opts.verbosity):
        logger = logging.getLogger()
        logger.setLevel(logger.getEffectiveLevel() - 10)

    for val in range(opts.quiet):
        logger = logging.getLogger()
        logger.setLevel(logger.getEffectiveLevel() + 10)

    if opts.mode == 'commit' and opts.commitmsg:
        opts.commitmsg = _unicode_decode(opts.commitmsg)

    if opts.mode == 'commit' and not (opts.force or opts.pretend):
        if opts.ignore_masked:
            opts.ignore_masked = False
            logging.warning('Commit mode automatically disables --ignore-masked')
        if opts.without_mask:
            opts.without_mask = False
            logging.warning('Commit mode automatically disables --without-mask')

    return (opts, args)
Example #45
	def _add_repositories(portdir, portdir_overlay, prepos,
		ignored_map, local_config, default_portdir):
		"""Add overlays in PORTDIR_OVERLAY as repositories"""
		overlays = []
		portdir_orig = None
		if portdir:
			portdir = normalize_path(portdir)
			portdir_orig = portdir
			overlays.append(portdir)
		try:
			port_ov = [normalize_path(i) for i in shlex_split(portdir_overlay)]
		except ValueError as e:
			#File "/usr/lib/python3.2/shlex.py", line 168, in read_token
			#	raise ValueError("No closing quotation")
			writemsg(_("!!! Invalid PORTDIR_OVERLAY:"
				" %s: %s\n") % (e, portdir_overlay), noiselevel=-1)
			port_ov = []
		overlays.extend(port_ov)
		default_repo_opts = {}
		if prepos['DEFAULT'].aliases is not None:
			default_repo_opts['aliases'] = \
				' '.join(prepos['DEFAULT'].aliases)
		if prepos['DEFAULT'].eclass_overrides is not None:
			default_repo_opts['eclass-overrides'] = \
				' '.join(prepos['DEFAULT'].eclass_overrides)
		if prepos['DEFAULT'].masters is not None:
			default_repo_opts['masters'] = \
				' '.join(prepos['DEFAULT'].masters)

		if overlays:
			# We need a copy of the original repos.conf data, since we're
			# going to modify the prepos dict and some of the RepoConfig
			# objects that we put in prepos may have to be discarded if
			# they get overridden by a repository with the same name but
			# a different location. This is common with repoman, for example,
			# when temporarily overriding an rsync repo with another copy
			# of the same repo from CVS.
			repos_conf = prepos.copy()
			# overlay priority is negative because we want them to be consulted before any other repo
			base_priority = 0
			for ov in overlays:
				# Ignore missing directory for 'gentoo' so that
				# first sync with emerge-webrsync is possible.
				if isdir_raise_eaccess(ov) or \
					(base_priority == 0 and ov is portdir):
					repo_opts = default_repo_opts.copy()
					repo_opts['location'] = ov
					repo = RepoConfig(None, repo_opts, local_config=local_config)
					# repos_conf_opts contains options from repos.conf
					repos_conf_opts = repos_conf.get(repo.name)
					if repos_conf_opts is not None:
						# Selectively copy only the attributes which
						# repos.conf is allowed to override.
						for k in ('aliases', 'auto_sync',
							'clone_depth', 'eclass_overrides',
							'force', 'masters', 'priority', 'strict_misc_digests',
							'sync_depth', 'sync_hooks_only_on_change',
							'sync_openpgp_key_path',
							'sync_openpgp_key_refresh_retry_count',
							'sync_openpgp_key_refresh_retry_delay_max',
							'sync_openpgp_key_refresh_retry_delay_exp_base',
							'sync_openpgp_key_refresh_retry_delay_mult',
							'sync_openpgp_key_refresh_retry_overall_timeout',
							'sync_type', 'sync_umask', 'sync_uri', 'sync_user',
							'module_specific_options'):
							v = getattr(repos_conf_opts, k, None)
							if v is not None:
								setattr(repo, k, v)

					if repo.name in prepos:
						# Silently ignore when PORTDIR overrides the location
						# setting from the default repos.conf (bug #478544).
						old_location = prepos[repo.name].location
						if old_location is not None and \
							old_location != repo.location and \
							not (base_priority == 0 and
							old_location == default_portdir):
							ignored_map.setdefault(repo.name, []).append(old_location)
							if old_location == portdir:
								portdir = repo.location

					if repo.priority is None:
						if base_priority == 0 and ov == portdir_orig:
							# If it's the original PORTDIR setting and it's not
							# in PORTDIR_OVERLAY, then it will be assigned a
							# special priority setting later.
							pass
						else:
							repo.priority = base_priority
							base_priority += 1

					prepos[repo.name] = repo
				else:

					if not portage._sync_mode:
						writemsg(_("!!! Invalid PORTDIR_OVERLAY (not a dir): '%s'\n") % ov, noiselevel=-1)

		return portdir
Example #46
    def update(self):
        ''' Update existing git repository, and ignore the syncuri. We are
        going to trust the user and assume that the user is in the branch
        that he/she wants updated. We'll let the user manage branches with
        git directly.
        '''
        if not self.has_bin:
            return (1, False)
        git_cmd_opts = ""
        quiet = self.settings.get("PORTAGE_QUIET") == "1"
        if self.repo.module_specific_options.get('sync-git-env'):
            shlexed_env = shlex_split(
                self.repo.module_specific_options['sync-git-env'])
            env = dict(
                (k, v) for k, _, v in (assignment.partition('=')
                                       for assignment in shlexed_env) if k)
            self.spawn_kwargs['env'].update(env)

        if self.repo.module_specific_options.get('sync-git-pull-env'):
            shlexed_env = shlex_split(
                self.repo.module_specific_options['sync-git-pull-env'])
            pull_env = dict(
                (k, v) for k, _, v in (assignment.partition('=')
                                       for assignment in shlexed_env) if k)
            self.spawn_kwargs['env'].update(pull_env)

        if self.settings.get("PORTAGE_QUIET") == "1":
            git_cmd_opts += " --quiet"
        if self.repo.module_specific_options.get('sync-git-pull-extra-opts'):
            git_cmd_opts += " %s" % self.repo.module_specific_options[
                'sync-git-pull-extra-opts']

        try:
            remote_branch = portage._unicode_decode(
                subprocess.check_output([
                    self.bin_command, 'rev-parse', '--abbrev-ref',
                    '--symbolic-full-name', '@{upstream}'
                ],
                                        cwd=portage._unicode_encode(
                                            self.repo.location))).rstrip('\n')
        except subprocess.CalledProcessError as e:
            msg = "!!! git rev-parse error in %s" % self.repo.location
            self.logger(self.xterm_titles, msg)
            writemsg_level(msg + "\n", level=logging.ERROR, noiselevel=-1)
            return (e.returncode, False)

        shallow = self.repo.sync_depth is not None and self.repo.sync_depth != 0
        if shallow:
            git_cmd_opts += " --depth %d" % self.repo.sync_depth

            # For shallow fetch, unreachable objects may need to be pruned
            # manually, in order to prevent automatic git gc calls from
            # eventually failing (see bug 599008).
            gc_cmd = ['git', '-c', 'gc.autodetach=false', 'gc', '--auto']
            if quiet:
                gc_cmd.append('--quiet')
            exitcode = subprocess.call(gc_cmd,
                                       cwd=portage._unicode_encode(
                                           self.repo.location))
            if exitcode != os.EX_OK:
                msg = "!!! git gc error in %s" % self.repo.location
                self.logger(self.xterm_titles, msg)
                writemsg_level(msg + "\n", level=logging.ERROR, noiselevel=-1)
                return (exitcode, False)

        git_cmd = "%s fetch %s%s" % (
            self.bin_command, remote_branch.partition('/')[0], git_cmd_opts)

        writemsg_level(git_cmd + "\n")

        rev_cmd = [self.bin_command, "rev-list", "--max-count=1", "HEAD"]
        previous_rev = subprocess.check_output(rev_cmd,
                                               cwd=portage._unicode_encode(
                                                   self.repo.location))

        exitcode = portage.process.spawn_bash(
            "cd %s ; exec %s" %
            (portage._shell_quote(self.repo.location), git_cmd),
            **self.spawn_kwargs)

        if exitcode != os.EX_OK:
            msg = "!!! git fetch error in %s" % self.repo.location
            self.logger(self.xterm_titles, msg)
            writemsg_level(msg + "\n", level=logging.ERROR, noiselevel=-1)
            return (exitcode, False)

        if not self.verify_head(revision='refs/remotes/%s' % remote_branch):
            return (1, False)

        if shallow:
            # Since the default merge strategy typically fails when the depth
            # is not unlimited, use `git reset --merge` instead.
            merge_cmd = [self.bin_command, 'reset', '--merge']
        else:
            merge_cmd = [self.bin_command, 'merge']
        merge_cmd.append('refs/remotes/%s' % remote_branch)
        if quiet:
            merge_cmd.append('--quiet')
        exitcode = subprocess.call(merge_cmd,
                                   cwd=portage._unicode_encode(
                                       self.repo.location))

        if exitcode != os.EX_OK:
            msg = "!!! git merge error in %s" % self.repo.location
            self.logger(self.xterm_titles, msg)
            writemsg_level(msg + "\n", level=logging.ERROR, noiselevel=-1)
            return (exitcode, False)

        current_rev = subprocess.check_output(rev_cmd,
                                              cwd=portage._unicode_encode(
                                                  self.repo.location))

        return (os.EX_OK, current_rev != previous_rev)
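
A standalone sketch of the upstream-branch detection this method starts with, using the same git invocation (the repository path is illustrative):

import subprocess

# 'git rev-parse --abbrev-ref --symbolic-full-name @{upstream}' prints the
# remote-tracking branch of HEAD, e.g. 'origin/master'; the text before the
# first '/' is the remote name that 'git fetch' is then pointed at.
remote_branch = subprocess.check_output(
    ['git', 'rev-parse', '--abbrev-ref', '--symbolic-full-name', '@{upstream}'],
    cwd='/var/db/repos/gentoo').decode().rstrip('\n')
print(remote_branch, remote_branch.partition('/')[0])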
Example #47
		def updated_config_files(count):
			self.assertEqual(count,
				sum(len(x[1]) for x in find_updated_config_files(eroot,
				shlex_split(config_protect))))
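
A hedged illustration of what this helper iterates over, assuming the (path, candidates) pair shape implied by x[1] above (eroot and config_protect come from the surrounding test harness):

for path, candidates in find_updated_config_files(
		eroot, shlex_split(config_protect)):
	# each pair names a CONFIG_PROTECT-ed path and its pending ._cfg*
	# update candidates, which the assertion above counts via len(x[1])
	print(path, candidates)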
Example #48
    async def _async_test_simple(self, playground, metadata_xml_files, loop):

        debug = playground.debug
        settings = playground.settings
        eprefix = settings["EPREFIX"]
        eroot = settings["EROOT"]
        trees = playground.trees
        portdb = trees[eroot]["porttree"].dbapi
        test_repo_location = settings.repositories["test_repo"].location
        var_cache_edb = os.path.join(eprefix, "var", "cache", "edb")
        cachedir = os.path.join(var_cache_edb, "dep")
        cachedir_pregen = os.path.join(test_repo_location, "metadata",
                                       "md5-cache")

        portage_python = portage._python_interpreter
        dispatch_conf_cmd = (
            portage_python,
            "-b",
            "-Wd",
            os.path.join(self.sbindir, "dispatch-conf"),
        )
        ebuild_cmd = (portage_python, "-b", "-Wd",
                      os.path.join(self.bindir, "ebuild"))
        egencache_cmd = (
            portage_python,
            "-b",
            "-Wd",
            os.path.join(self.bindir, "egencache"),
            "--repo",
            "test_repo",
            "--repositories-configuration",
            settings.repositories.config_string(),
        )
        emerge_cmd = (portage_python, "-b", "-Wd",
                      os.path.join(self.bindir, "emerge"))
        emaint_cmd = (portage_python, "-b", "-Wd",
                      os.path.join(self.sbindir, "emaint"))
        env_update_cmd = (
            portage_python,
            "-b",
            "-Wd",
            os.path.join(self.sbindir, "env-update"),
        )
        etc_update_cmd = (BASH_BINARY, os.path.join(self.sbindir,
                                                    "etc-update"))
        fixpackages_cmd = (
            portage_python,
            "-b",
            "-Wd",
            os.path.join(self.sbindir, "fixpackages"),
        )
        portageq_cmd = (
            portage_python,
            "-b",
            "-Wd",
            os.path.join(self.bindir, "portageq"),
        )
        quickpkg_cmd = (
            portage_python,
            "-b",
            "-Wd",
            os.path.join(self.bindir, "quickpkg"),
        )
        regenworld_cmd = (
            portage_python,
            "-b",
            "-Wd",
            os.path.join(self.sbindir, "regenworld"),
        )

        rm_binary = find_binary("rm")
        self.assertIsNotNone(rm_binary, "rm command not found")
        rm_cmd = (rm_binary, )

        egencache_extra_args = []
        if self._have_python_xml():
            egencache_extra_args.append("--update-use-local-desc")

        test_ebuild = portdb.findname("dev-libs/A-1")
        self.assertFalse(test_ebuild is None)

        cross_prefix = os.path.join(eprefix, "cross_prefix")
        cross_root = os.path.join(eprefix, "cross_root")
        cross_eroot = os.path.join(cross_root, eprefix.lstrip(os.sep))

        binhost_dir = os.path.join(eprefix, "binhost")
        binhost_address = "127.0.0.1"
        binhost_remote_path = "/binhost"
        binhost_server = AsyncHTTPServer(
            binhost_address, BinhostContentMap(binhost_remote_path,
                                               binhost_dir), loop).__enter__()
        binhost_uri = "http://{address}:{port}{path}".format(
            address=binhost_address,
            port=binhost_server.server_port,
            path=binhost_remote_path,
        )

        binpkg_format = settings.get("BINPKG_FORMAT",
                                     SUPPORTED_GENTOO_BINPKG_FORMATS[0])
        self.assertIn(binpkg_format, ("xpak", "gpkg"))
        if binpkg_format == "xpak":
            foo_filename = "foo-0-1.xpak"
        elif binpkg_format == "gpkg":
            foo_filename = "foo-0-1.gpkg.tar"

        test_commands = ()

        if hasattr(argparse.ArgumentParser, "parse_intermixed_args"):
            test_commands += (
                emerge_cmd + ("--oneshot", "dev-libs/A", "-v", "dev-libs/A"), )

        test_commands += (
            emerge_cmd + (
                "--usepkgonly",
                "--root",
                cross_root,
                "--quickpkg-direct=y",
                "--quickpkg-direct-root",
                "/",
                "dev-libs/A",
            ),
            emerge_cmd + (
                "--usepkgonly",
                "--quickpkg-direct=y",
                "--quickpkg-direct-root",
                cross_root,
                "dev-libs/A",
            ),
            env_update_cmd,
            portageq_cmd + (
                "envvar",
                "-v",
                "CONFIG_PROTECT",
                "EROOT",
                "PORTAGE_CONFIGROOT",
                "PORTAGE_TMPDIR",
                "USERLAND",
            ),
            etc_update_cmd,
            dispatch_conf_cmd,
            emerge_cmd + ("--version", ),
            emerge_cmd + ("--info", ),
            emerge_cmd + ("--info", "--verbose"),
            emerge_cmd + ("--list-sets", ),
            emerge_cmd + ("--check-news", ),
            rm_cmd + ("-rf", cachedir),
            rm_cmd + ("-rf", cachedir_pregen),
            emerge_cmd + ("--regen", ),
            rm_cmd + ("-rf", cachedir),
            ({
                "FEATURES": "metadata-transfer"
            }, ) + emerge_cmd + ("--regen", ),
            rm_cmd + ("-rf", cachedir),
            ({
                "FEATURES": "metadata-transfer"
            }, ) + emerge_cmd + ("--regen", ),
            rm_cmd + ("-rf", cachedir),
            egencache_cmd + ("--update", ) + tuple(egencache_extra_args),
            ({
                "FEATURES": "metadata-transfer"
            }, ) + emerge_cmd + ("--metadata", ),
            rm_cmd + ("-rf", cachedir),
            ({
                "FEATURES": "metadata-transfer"
            }, ) + emerge_cmd + ("--metadata", ),
            emerge_cmd + ("--metadata", ),
            rm_cmd + ("-rf", cachedir),
            emerge_cmd + ("--oneshot", "virtual/foo"),
            lambda: self.assertFalse(
                os.path.exists(
                    os.path.join(pkgdir, "virtual", "foo", foo_filename))),
            ({
                "FEATURES": "unmerge-backup"
            }, ) + emerge_cmd + ("--unmerge", "virtual/foo"),
            lambda: self.assertTrue(
                os.path.exists(
                    os.path.join(pkgdir, "virtual", "foo", foo_filename))),
            emerge_cmd + ("--pretend", "dev-libs/A"),
            ebuild_cmd +
            (test_ebuild, "manifest", "clean", "package", "merge"),
            emerge_cmd +
            ("--pretend", "--tree", "--complete-graph", "dev-libs/A"),
            emerge_cmd + ("-p", "dev-libs/B"),
            emerge_cmd + ("-p", "--newrepo", "dev-libs/B"),
            emerge_cmd + (
                "-B",
                "dev-libs/B",
            ),
            emerge_cmd + (
                "--oneshot",
                "--usepkg",
                "dev-libs/B",
            ),
            # trigger clean prior to pkg_pretend as in bug #390711
            ebuild_cmd + (test_ebuild, "unpack"),
            emerge_cmd + (
                "--oneshot",
                "dev-libs/A",
            ),
            emerge_cmd + (
                "--noreplace",
                "dev-libs/A",
            ),
            emerge_cmd + (
                "--config",
                "dev-libs/A",
            ),
            emerge_cmd + ("--info", "dev-libs/A", "dev-libs/B"),
            emerge_cmd +
            ("--pretend", "--depclean", "--verbose", "dev-libs/B"),
            emerge_cmd + (
                "--pretend",
                "--depclean",
            ),
            emerge_cmd + ("--depclean", ),
            quickpkg_cmd + (
                "--include-config",
                "y",
                "dev-libs/A",
            ),
            # Test bug #523684, where a file renamed or removed by the
            # admin forces replacement files to be merged with config
            # protection.
            lambda: self.assertEqual(
                0,
                len(
                    list(
                        find_updated_config_files(
                            eroot, shlex_split(settings["CONFIG_PROTECT"])))),
            ),
            lambda: os.unlink(os.path.join(eprefix, "etc", "A-0")),
            emerge_cmd + ("--usepkgonly", "dev-libs/A"),
            lambda: self.assertEqual(
                1,
                len(
                    list(
                        find_updated_config_files(
                            eroot, shlex_split(settings["CONFIG_PROTECT"])))),
            ),
            emaint_cmd + ("--check", "all"),
            emaint_cmd + ("--fix", "all"),
            fixpackages_cmd,
            regenworld_cmd,
            portageq_cmd + ("match", eroot, "dev-libs/A"),
            portageq_cmd + ("best_visible", eroot, "dev-libs/A"),
            portageq_cmd + ("best_visible", eroot, "binary", "dev-libs/A"),
            portageq_cmd + ("contents", eroot, "dev-libs/A-1"),
            portageq_cmd + ("metadata", eroot, "ebuild", "dev-libs/A-1",
                            "EAPI", "IUSE", "RDEPEND"),
            portageq_cmd + ("metadata", eroot, "binary", "dev-libs/A-1",
                            "EAPI", "USE", "RDEPEND"),
            portageq_cmd + (
                "metadata",
                eroot,
                "installed",
                "dev-libs/A-1",
                "EAPI",
                "USE",
                "RDEPEND",
            ),
            portageq_cmd + ("owners", eroot, eroot + "usr"),
            emerge_cmd + ("-p", eroot + "usr"),
            emerge_cmd + ("-p", "--unmerge", "-q", eroot + "usr"),
            emerge_cmd + ("--unmerge", "--quiet", "dev-libs/A"),
            emerge_cmd + ("-C", "--quiet", "dev-libs/B"),
            # If EMERGE_DEFAULT_OPTS contains --autounmask=n, then --autounmask
            # must be specified with --autounmask-continue.
            (
                {
                    "EMERGE_DEFAULT_OPTS": "--autounmask=n"
                }, ) + emerge_cmd + (
                    "--autounmask",
                    "--autounmask-continue",
                    "dev-libs/C",
                ),
            # Verify that the above --autounmask-continue command caused
            # USE=flag to be applied correctly to dev-libs/D.
            portageq_cmd + ("match", eroot, "dev-libs/D[flag]"),
            # Test cross-prefix usage, including chpathtool for binpkgs.
            # EAPI 7
            (
                {
                    "EPREFIX": cross_prefix
                }, ) + emerge_cmd + ("dev-libs/C", ),
            ({
                "EPREFIX": cross_prefix
            }, ) + portageq_cmd + ("has_version", cross_prefix, "dev-libs/C"),
            ({
                "EPREFIX": cross_prefix
            }, ) + portageq_cmd + ("has_version", cross_prefix, "dev-libs/D"),
            ({
                "ROOT": cross_root
            }, ) + emerge_cmd + ("dev-libs/D", ),
            portageq_cmd + ("has_version", cross_eroot, "dev-libs/D"),
            # EAPI 5
            (
                {
                    "EPREFIX": cross_prefix
                }, ) + emerge_cmd + ("--usepkgonly", "dev-libs/A"),
            ({
                "EPREFIX": cross_prefix
            }, ) + portageq_cmd + ("has_version", cross_prefix, "dev-libs/A"),
            ({
                "EPREFIX": cross_prefix
            }, ) + portageq_cmd + ("has_version", cross_prefix, "dev-libs/B"),
            ({
                "EPREFIX": cross_prefix
            }, ) + emerge_cmd + ("-C", "--quiet", "dev-libs/B"),
            ({
                "EPREFIX": cross_prefix
            }, ) + emerge_cmd + ("-C", "--quiet", "dev-libs/A"),
            ({
                "EPREFIX": cross_prefix
            }, ) + emerge_cmd + ("dev-libs/A", ),
            ({
                "EPREFIX": cross_prefix
            }, ) + portageq_cmd + ("has_version", cross_prefix, "dev-libs/A"),
            ({
                "EPREFIX": cross_prefix
            }, ) + portageq_cmd + ("has_version", cross_prefix, "dev-libs/B"),
            # Test ROOT support
            (
                {
                    "ROOT": cross_root
                }, ) + emerge_cmd + ("dev-libs/B", ),
            portageq_cmd + ("has_version", cross_eroot, "dev-libs/B"),
        )

        # Test binhost support if FETCHCOMMAND is available.
        binrepos_conf_file = os.path.join(os.sep, eprefix, BINREPOS_CONF_FILE)
        with open(binrepos_conf_file, "wt") as f:
            f.write("[test-binhost]\n")
            f.write("sync-uri = {}\n".format(binhost_uri))
        fetchcommand = portage.util.shlex_split(
            playground.settings["FETCHCOMMAND"])
        fetch_bin = portage.process.find_binary(fetchcommand[0])
        if fetch_bin is not None:
            test_commands = test_commands + (
                lambda: os.rename(pkgdir, binhost_dir),
                emerge_cmd + ("-e", "--getbinpkgonly", "dev-libs/A"),
                lambda: shutil.rmtree(pkgdir),
                lambda: os.rename(binhost_dir, pkgdir),
                # Remove binrepos.conf and test PORTAGE_BINHOST.
                lambda: os.unlink(binrepos_conf_file),
                lambda: os.rename(pkgdir, binhost_dir),
                ({
                    "PORTAGE_BINHOST": binhost_uri
                }, ) + emerge_cmd + ("-fe", "--getbinpkgonly", "dev-libs/A"),
                lambda: shutil.rmtree(pkgdir),
                lambda: os.rename(binhost_dir, pkgdir),
            )

        distdir = playground.distdir
        pkgdir = playground.pkgdir
        fake_bin = os.path.join(eprefix, "bin")
        portage_tmpdir = os.path.join(eprefix, "var", "tmp", "portage")
        profile_path = settings.profile_path
        user_config_dir = os.path.join(os.sep, eprefix, USER_CONFIG_PATH)

        path = os.environ.get("PATH")
        if path is not None and not path.strip():
            path = None
        if path is None:
            path = ""
        else:
            path = ":" + path
        path = fake_bin + path

        pythonpath = os.environ.get("PYTHONPATH")
        if pythonpath is not None and not pythonpath.strip():
            pythonpath = None
        if pythonpath is not None and pythonpath.split(
                ":")[0] == PORTAGE_PYM_PATH:
            pass
        else:
            if pythonpath is None:
                pythonpath = ""
            else:
                pythonpath = ":" + pythonpath
            pythonpath = PORTAGE_PYM_PATH + pythonpath

        env = {
            "PORTAGE_OVERRIDE_EPREFIX": eprefix,
            "CLEAN_DELAY": "0",
            "DISTDIR": distdir,
            "EMERGE_WARNING_DELAY": "0",
            "INFODIR": "",
            "INFOPATH": "",
            "PATH": path,
            "PKGDIR": pkgdir,
            "PORTAGE_INST_GID": str(portage.data.portage_gid),
            "PORTAGE_INST_UID": str(portage.data.portage_uid),
            "PORTAGE_PYTHON": portage_python,
            "PORTAGE_REPOSITORIES": settings.repositories.config_string(),
            "PORTAGE_TMPDIR": portage_tmpdir,
            "PORTAGE_LOGDIR": portage_tmpdir,
            "PYTHONDONTWRITEBYTECODE": os.environ.get("PYTHONDONTWRITEBYTECODE", ""),
            "PYTHONPATH": pythonpath,
            "__PORTAGE_TEST_PATH_OVERRIDE": fake_bin,
        }

        if "__PORTAGE_TEST_HARDLINK_LOCKS" in os.environ:
            env["__PORTAGE_TEST_HARDLINK_LOCKS"] = os.environ[
                "__PORTAGE_TEST_HARDLINK_LOCKS"]

        updates_dir = os.path.join(test_repo_location, "profiles", "updates")
        dirs = [
            cachedir,
            cachedir_pregen,
            cross_eroot,
            cross_prefix,
            distdir,
            fake_bin,
            portage_tmpdir,
            updates_dir,
            user_config_dir,
            var_cache_edb,
        ]
        etc_symlinks = ("dispatch-conf.conf", "etc-update.conf")
        # Override things that may be unavailable, or may have portability
        # issues when running tests in exotic environments.
        #   prepstrip - bug #447810 (bash read builtin EINTR problem)
        true_symlinks = ["find", "prepstrip", "sed", "scanelf"]
        true_binary = find_binary("true")
        self.assertEqual(true_binary is None, False, "true command not found")
        try:
            for d in dirs:
                ensure_dirs(d)
            for x in true_symlinks:
                os.symlink(true_binary, os.path.join(fake_bin, x))
            for x in etc_symlinks:
                os.symlink(os.path.join(self.cnf_etc_path, x),
                           os.path.join(eprefix, "etc", x))
            with open(os.path.join(var_cache_edb, "counter"), "wb") as f:
                f.write(b"100")
            # non-empty system set keeps --depclean quiet
            with open(os.path.join(profile_path, "packages"), "w") as f:
                f.write("*dev-libs/token-system-pkg")
            for cp, xml_data in metadata_xml_files:
                with open(os.path.join(test_repo_location, cp, "metadata.xml"),
                          "w") as f:
                    f.write(playground.metadata_xml_template % xml_data)
            with open(os.path.join(updates_dir, "1Q-2010"), "w") as f:
                f.write("""
slotmove =app-doc/pms-3 2 3
move dev-util/git dev-vcs/git
""")

            if debug:
                # The subprocess inherits both stdout and stderr, for
                # debugging purposes.
                stdout = None
            else:
                # The subprocess inherits stderr so that any warnings
                # triggered by python -Wd will be visible.
                stdout = subprocess.PIPE

            for args in test_commands:

                if hasattr(args, "__call__"):
                    args()
                    continue

                if isinstance(args[0], dict):
                    local_env = env.copy()
                    local_env.update(args[0])
                    args = args[1:]
                else:
                    local_env = env

                proc = await asyncio.create_subprocess_exec(*args,
                                                            env=local_env,
                                                            stderr=None,
                                                            stdout=stdout)

                if debug:
                    await proc.wait()
                else:
                    output, _err = await proc.communicate()
                    await proc.wait()
                    if proc.returncode != os.EX_OK:
                        portage.writemsg(output)

                self.assertEqual(os.EX_OK, proc.returncode,
                                 "emerge failed with args %s" % (args, ))
        finally:
            binhost_server.__exit__(None, None, None)
            playground.cleanup()
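
The test_commands tuple above mixes three entry shapes: plain argument tuples, tuples whose first element is a dict of per-command environment overrides, and bare callables for in-process assertions. A minimal sketch of that dispatch convention, simplified from the loop above (output capture and assertions omitted):

import asyncio
import os

async def run_commands(test_commands, base_env):
    for args in test_commands:
        if callable(args):
            # Bare callables run in-process, e.g. filesystem assertions.
            args()
            continue
        local_env = base_env
        if isinstance(args[0], dict):
            # A leading dict supplies per-command environment overrides.
            local_env = dict(base_env, **args[0])
            args = args[1:]
        proc = await asyncio.create_subprocess_exec(*args, env=local_env)
        if await proc.wait() != os.EX_OK:
            raise RuntimeError("command failed: %s" % (args,))
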
Example #49
0
	def _add_overlays(portdir, portdir_overlay, prepos, ignored_map, ignored_location_map):
		"""Add overlays in PORTDIR_OVERLAY as repositories"""
		overlays = []
		if portdir:
			portdir = normalize_path(portdir)
			overlays.append(portdir)
		try:
			port_ov = [normalize_path(i) for i in shlex_split(portdir_overlay)]
		except ValueError as e:
			#File "/usr/lib/python3.2/shlex.py", line 168, in read_token
			#	raise ValueError("No closing quotation")
			writemsg(_("!!! Invalid PORTDIR_OVERLAY:"
				" %s: %s\n") % (e, portdir_overlay), noiselevel=-1)
			port_ov = []
		overlays.extend(port_ov)
		default_repo_opts = {}
		if prepos['DEFAULT'].aliases is not None:
			default_repo_opts['aliases'] = \
				' '.join(prepos['DEFAULT'].aliases)
		if prepos['DEFAULT'].eclass_overrides is not None:
			default_repo_opts['eclass-overrides'] = \
				' '.join(prepos['DEFAULT'].eclass_overrides)
		if prepos['DEFAULT'].masters is not None:
			default_repo_opts['masters'] = \
				' '.join(prepos['DEFAULT'].masters)

		if overlays:
			# overlay priority is negative because we want overlays to be looked at before any other repo
			base_priority = 0
			for ov in overlays:
				if os.path.isdir(ov):
					repo_opts = default_repo_opts.copy()
					repo_opts['location'] = ov
					repo = RepoConfig(None, repo_opts)
					repo_conf_opts = prepos.get(repo.name)
					if repo_conf_opts is not None:
						if repo_conf_opts.aliases is not None:
							repo_opts['aliases'] = \
								' '.join(repo_conf_opts.aliases)
						if repo_conf_opts.eclass_overrides is not None:
							repo_opts['eclass-overrides'] = \
								' '.join(repo_conf_opts.eclass_overrides)
						if repo_conf_opts.masters is not None:
							repo_opts['masters'] = \
								' '.join(repo_conf_opts.masters)

					repo = RepoConfig(repo.name, repo_opts)
					if repo.name in prepos:
						old_location = prepos[repo.name].location
						if old_location is not None and old_location != repo.location:
							ignored_map.setdefault(repo.name, []).append(old_location)
							ignored_location_map[old_location] = repo.name
							if old_location == portdir:
								portdir = repo.user_location
						prepos[repo.name].update(repo)
						repo = prepos[repo.name]
					else:
						prepos[repo.name] = repo

					if ov == portdir and portdir not in port_ov:
						repo.priority = -1000
					else:
						repo.priority = base_priority
						base_priority += 1

				else:
					writemsg(_("!!! Invalid PORTDIR_OVERLAY"
						" (not a dir): '%s'\n") % ov, noiselevel=-1)

		return portdir
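
The try/except around shlex_split above guards against malformed PORTDIR_OVERLAY values such as an unclosed quote. The same guard in isolation, using the stdlib tokenizer (equivalent behavior for this case):

import shlex

def split_overlays(portdir_overlay):
    try:
        return shlex.split(portdir_overlay)
    except ValueError:
        # e.g. "No closing quotation" for a value like: /a "/b
        return []
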
Example #50
0
	def _start(self):
		tar_options = ""
		if "xattr" in self.features:
			process = subprocess.Popen(["tar", "--help"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
			output = process.communicate()[0]
			if b"--xattrs" in output:
				tar_options = ["--xattrs", "--xattrs-include='*'"]
				for x in portage.util.shlex_split(self.env.get("PORTAGE_XATTR_EXCLUDE", "")):
					tar_options.append(portage._shell_quote("--xattrs-exclude=%s" % x))
				tar_options = " ".join(tar_options)

		decomp = _compressors.get(compression_probe(self.pkg_path))
		if decomp is not None:
			decomp_cmd = decomp.get("decompress")
		else:
			decomp_cmd = None
		if decomp_cmd is None:
			self.scheduler.output("!!! %s\n" %
				_("File compression header unrecognized: %s") %
				self.pkg_path, log_path=self.logfile,
				background=self.background, level=logging.ERROR)
			self.returncode = 1
			self._async_wait()
			return

		try:
			decompression_binary = shlex_split(varexpand(decomp_cmd, mydict=self.env))[0]
		except IndexError:
			decompression_binary = ""

		if find_binary(decompression_binary) is None:
			# Try alternative command if it exists
			if _compressors.get(compression_probe(self.pkg_path)).get("decompress_alt"):
				decomp_cmd = _compressors.get(
					compression_probe(self.pkg_path)).get("decompress_alt")
			try:
				decompression_binary = shlex_split(varexpand(decomp_cmd, mydict=self.env))[0]
			except IndexError:
				decompression_binary = ""

			if find_binary(decompression_binary) is None:
				missing_package = _compressors.get(compression_probe(self.pkg_path)).get("package")
				self.scheduler.output("!!! %s\n" %
					_("File compression unsupported %s.\n Command was: %s.\n Maybe missing package: %s") %
					(self.pkg_path, varexpand(decomp_cmd, mydict=self.env), missing_package), log_path=self.logfile,
					background=self.background, level=logging.ERROR)
				self.returncode = 1
				self._async_wait()
				return

		pkg_xpak = portage.xpak.tbz2(self.pkg_path)
		pkg_xpak.scan()

		# SIGPIPE handling (128 + SIGPIPE) should be compatible with
		# assert_sigpipe_ok() that's used by the ebuild unpack() helper.
		self.args = [self._shell_binary, "-c",
			("cmd0=(head -c %d -- %s) cmd1=(%s) cmd2=(tar -xp %s -C %s -f -); " + \
			'"${cmd0[@]}" | "${cmd1[@]}" | "${cmd2[@]}"; ' + \
			"p=(${PIPESTATUS[@]}) ; for i in {0..2}; do " + \
			"if [[ ${p[$i]} != 0 && ${p[$i]} != %d ]] ; then " + \
			"echo command $(eval \"echo \\\"'\\${cmd$i[*]}'\\\"\") " + \
			"failed with status ${p[$i]} ; exit ${p[$i]} ; fi ; done; " + \
			"if [ ${p[$i]} != 0 ] ; then " + \
			"echo command $(eval \"echo \\\"'\\${cmd$i[*]}'\\\"\") " + \
			"failed with status ${p[$i]} ; exit ${p[$i]} ; fi ; " + \
			"exit 0 ;") % \
			(pkg_xpak.filestat.st_size - pkg_xpak.xpaksize,
			portage._shell_quote(self.pkg_path),
			decomp_cmd,
			tar_options,
			portage._shell_quote(self.image_dir),
			128 + signal.SIGPIPE)]

		SpawnProcess._start(self)
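
The xattr branch above feature-probes tar instead of parsing a version string: if `tar --help` mentions --xattrs, the flags are assumed safe to pass. The probe on its own (a sketch; assumes a tar binary on PATH):

import subprocess

def tar_supports_xattrs():
    proc = subprocess.Popen(["tar", "--help"],
        stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    output = proc.communicate()[0]
    return b"--xattrs" in output
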
Example #51
0
	def testSimple(self):

		debug = False

		install_something = """
S="${WORKDIR}"

pkg_pretend() {
	einfo "called pkg_pretend for $CATEGORY/$PF"
}

src_install() {
	einfo "installing something..."
	insinto /usr/lib/${P}
	echo "blah blah blah" > "${T}"/regular-file
	doins "${T}"/regular-file
	dosym regular-file /usr/lib/${P}/symlink || die

	# Test CONFIG_PROTECT
	insinto /etc
	newins "${T}"/regular-file ${PN}-${SLOT%/*}

	# Test code for bug #381629, using a copyright symbol encoded with latin-1.
	# We use $(printf "\\xa9") rather than $'\\xa9', since printf apparently
	# works in any case, while $'\\xa9' transforms to \\xef\\xbf\\xbd under
	# some conditions. TODO: Find out why it transforms to \\xef\\xbf\\xbd when
	# running tests for Python 3.2 (even though it's bash that is ultimately
	# responsible for performing the transformation).
	local latin_1_dir=/usr/lib/${P}/latin-1-$(printf "\\xa9")-directory
	insinto "${latin_1_dir}"
	echo "blah blah blah" > "${T}"/latin-1-$(printf "\\xa9")-regular-file || die
	doins "${T}"/latin-1-$(printf "\\xa9")-regular-file
	dosym latin-1-$(printf "\\xa9")-regular-file ${latin_1_dir}/latin-1-$(printf "\\xa9")-symlink || die
}

pkg_config() {
	einfo "called pkg_config for $CATEGORY/$PF"
}

pkg_info() {
	einfo "called pkg_info for $CATEGORY/$PF"
}

pkg_preinst() {
	einfo "called pkg_preinst for $CATEGORY/$PF"

	# Test that has_version and best_version work correctly with
	# prefix (involves internal ROOT -> EROOT calculation in order
	# to support ROOT override via the environment with EAPIs 3
	# and later which support prefix).
	if has_version $CATEGORY/$PN:$SLOT ; then
		einfo "has_version detects an installed instance of $CATEGORY/$PN:$SLOT"
		einfo "best_version reports that the installed instance is $(best_version $CATEGORY/$PN:$SLOT)"
	else
		einfo "has_version does not detect an installed instance of $CATEGORY/$PN:$SLOT"
	fi
	if [[ ${EPREFIX} != ${PORTAGE_OVERRIDE_EPREFIX} ]] ; then
		if has_version --host-root $CATEGORY/$PN:$SLOT ; then
			einfo "has_version --host-root detects an installed instance of $CATEGORY/$PN:$SLOT"
			einfo "best_version --host-root reports that the installed instance is $(best_version $CATEGORY/$PN:$SLOT)"
		else
			einfo "has_version --host-root does not detect an installed instance of $CATEGORY/$PN:$SLOT"
		fi
	fi
}

"""

		ebuilds = {
			"dev-libs/A-1": {
				"EAPI" : "5",
				"IUSE" : "+flag",
				"KEYWORDS": "x86",
				"LICENSE": "GPL-2",
				"MISC_CONTENT": install_something,
				"RDEPEND": "flag? ( dev-libs/B[flag] )",
			},
			"dev-libs/B-1": {
				"EAPI" : "5",
				"IUSE" : "+flag",
				"KEYWORDS": "x86",
				"LICENSE": "GPL-2",
				"MISC_CONTENT": install_something,
			},
			"dev-libs/C-1": {
				"EAPI" : "6",
				"KEYWORDS": "~x86",
				"RDEPEND": "dev-libs/D[flag]",
			},
			"dev-libs/D-1": {
				"EAPI" : "6",
				"KEYWORDS": "~x86",
				"IUSE" : "flag",
			},
			"virtual/foo-0": {
				"EAPI" : "5",
				"KEYWORDS": "x86",
				"LICENSE": "GPL-2",
			},
		}

		installed = {
			"dev-libs/A-1": {
				"EAPI" : "5",
				"IUSE" : "+flag",
				"KEYWORDS": "x86",
				"LICENSE": "GPL-2",
				"RDEPEND": "flag? ( dev-libs/B[flag] )",
				"USE": "flag",
			},
			"dev-libs/B-1": {
				"EAPI" : "5",
				"IUSE" : "+flag",
				"KEYWORDS": "x86",
				"LICENSE": "GPL-2",
				"USE": "flag",
			},
			"dev-libs/depclean-me-1": {
				"EAPI" : "5",
				"IUSE" : "",
				"KEYWORDS": "x86",
				"LICENSE": "GPL-2",
				"USE": "",
			},
			"app-misc/depclean-me-1": {
				"EAPI" : "5",
				"IUSE" : "",
				"KEYWORDS": "x86",
				"LICENSE": "GPL-2",
				"RDEPEND": "dev-libs/depclean-me",
				"USE": "",
			},
		}

		metadata_xml_files = (
			(
				"dev-libs/A",
				{
					"flags" : "<flag name='flag'>Description of how USE='flag' affects this package</flag>",
				},
			),
			(
				"dev-libs/B",
				{
					"flags" : "<flag name='flag'>Description of how USE='flag' affects this package</flag>",
				},
			),
		)

		playground = ResolverPlayground(
			ebuilds=ebuilds, installed=installed, debug=debug)
		settings = playground.settings
		eprefix = settings["EPREFIX"]
		eroot = settings["EROOT"]
		trees = playground.trees
		portdb = trees[eroot]["porttree"].dbapi
		test_repo_location = settings.repositories["test_repo"].location
		var_cache_edb = os.path.join(eprefix, "var", "cache", "edb")
		cachedir = os.path.join(var_cache_edb, "dep")
		cachedir_pregen = os.path.join(test_repo_location, "metadata", "md5-cache")

		portage_python = portage._python_interpreter
		dispatch_conf_cmd = (portage_python, "-b", "-Wd",
			os.path.join(self.sbindir, "dispatch-conf"))
		ebuild_cmd = (portage_python, "-b", "-Wd",
			os.path.join(self.bindir, "ebuild"))
		egencache_cmd = (portage_python, "-b", "-Wd",
			os.path.join(self.bindir, "egencache"),
			"--repo", "test_repo",
			"--repositories-configuration", settings.repositories.config_string())
		emerge_cmd = (portage_python, "-b", "-Wd",
			os.path.join(self.bindir, "emerge"))
		emaint_cmd = (portage_python, "-b", "-Wd",
			os.path.join(self.sbindir, "emaint"))
		env_update_cmd = (portage_python, "-b", "-Wd",
			os.path.join(self.sbindir, "env-update"))
		etc_update_cmd = (BASH_BINARY,
			os.path.join(self.sbindir, "etc-update"))
		fixpackages_cmd = (portage_python, "-b", "-Wd",
			os.path.join(self.sbindir, "fixpackages"))
		portageq_cmd = (portage_python, "-b", "-Wd",
			os.path.join(self.bindir, "portageq"))
		quickpkg_cmd = (portage_python, "-b", "-Wd",
			os.path.join(self.bindir, "quickpkg"))
		regenworld_cmd = (portage_python, "-b", "-Wd",
			os.path.join(self.sbindir, "regenworld"))

		rm_binary = find_binary("rm")
		self.assertEqual(rm_binary is None, False,
			"rm command not found")
		rm_cmd = (rm_binary,)

		egencache_extra_args = []
		if self._have_python_xml():
			egencache_extra_args.append("--update-use-local-desc")

		test_ebuild = portdb.findname("dev-libs/A-1")
		self.assertFalse(test_ebuild is None)

		cross_prefix = os.path.join(eprefix, "cross_prefix")
		cross_root = os.path.join(eprefix, "cross_root")
		cross_eroot = os.path.join(cross_root, eprefix.lstrip(os.sep))

		test_commands = (
			env_update_cmd,
			portageq_cmd + ("envvar", "-v", "CONFIG_PROTECT", "EROOT",
				"PORTAGE_CONFIGROOT", "PORTAGE_TMPDIR", "USERLAND"),
			etc_update_cmd,
			dispatch_conf_cmd,
			emerge_cmd + ("--version",),
			emerge_cmd + ("--info",),
			emerge_cmd + ("--info", "--verbose"),
			emerge_cmd + ("--list-sets",),
			emerge_cmd + ("--check-news",),
			rm_cmd + ("-rf", cachedir),
			rm_cmd + ("-rf", cachedir_pregen),
			emerge_cmd + ("--regen",),
			rm_cmd + ("-rf", cachedir),
			({"FEATURES" : "metadata-transfer"},) + \
				emerge_cmd + ("--regen",),
			rm_cmd + ("-rf", cachedir),
			({"FEATURES" : "metadata-transfer"},) + \
				emerge_cmd + ("--regen",),
			rm_cmd + ("-rf", cachedir),
			egencache_cmd + ("--update",) + tuple(egencache_extra_args),
			({"FEATURES" : "metadata-transfer"},) + \
				emerge_cmd + ("--metadata",),
			rm_cmd + ("-rf", cachedir),
			({"FEATURES" : "metadata-transfer"},) + \
				emerge_cmd + ("--metadata",),
			emerge_cmd + ("--metadata",),
			rm_cmd + ("-rf", cachedir),
			emerge_cmd + ("--oneshot", "virtual/foo"),
			lambda: self.assertFalse(os.path.exists(
				os.path.join(pkgdir, "virtual", "foo-0.tbz2"))),
			({"FEATURES" : "unmerge-backup"},) + \
				emerge_cmd + ("--unmerge", "virtual/foo"),
			lambda: self.assertTrue(os.path.exists(
				os.path.join(pkgdir, "virtual", "foo-0.tbz2"))),
			emerge_cmd + ("--pretend", "dev-libs/A"),
			ebuild_cmd + (test_ebuild, "manifest", "clean", "package", "merge"),
			emerge_cmd + ("--pretend", "--tree", "--complete-graph", "dev-libs/A"),
			emerge_cmd + ("-p", "dev-libs/B"),
			emerge_cmd + ("-p", "--newrepo", "dev-libs/B"),
			emerge_cmd + ("-B", "dev-libs/B",),
			emerge_cmd + ("--oneshot", "--usepkg", "dev-libs/B",),

			# trigger clean prior to pkg_pretend as in bug #390711
			ebuild_cmd + (test_ebuild, "unpack"), 
			emerge_cmd + ("--oneshot", "dev-libs/A",),

			emerge_cmd + ("--noreplace", "dev-libs/A",),
			emerge_cmd + ("--config", "dev-libs/A",),
			emerge_cmd + ("--info", "dev-libs/A", "dev-libs/B"),
			emerge_cmd + ("--pretend", "--depclean", "--verbose", "dev-libs/B"),
			emerge_cmd + ("--pretend", "--depclean",),
			emerge_cmd + ("--depclean",),
			quickpkg_cmd + ("--include-config", "y", "dev-libs/A",),
			# Test bug #523684, where a file renamed or removed by the
			# admin forces replacement files to be merged with config
			# protection.
			lambda: self.assertEqual(0,
				len(list(find_updated_config_files(eroot,
				shlex_split(settings["CONFIG_PROTECT"]))))),
			lambda: os.unlink(os.path.join(eprefix, "etc", "A-0")),
			emerge_cmd + ("--usepkgonly", "dev-libs/A"),
			lambda: self.assertEqual(1,
				len(list(find_updated_config_files(eroot,
				shlex_split(settings["CONFIG_PROTECT"]))))),
			emaint_cmd + ("--check", "all"),
			emaint_cmd + ("--fix", "all"),
			fixpackages_cmd,
			regenworld_cmd,
			portageq_cmd + ("match", eroot, "dev-libs/A"),
			portageq_cmd + ("best_visible", eroot, "dev-libs/A"),
			portageq_cmd + ("best_visible", eroot, "binary", "dev-libs/A"),
			portageq_cmd + ("contents", eroot, "dev-libs/A-1"),
			portageq_cmd + ("metadata", eroot, "ebuild", "dev-libs/A-1", "EAPI", "IUSE", "RDEPEND"),
			portageq_cmd + ("metadata", eroot, "binary", "dev-libs/A-1", "EAPI", "USE", "RDEPEND"),
			portageq_cmd + ("metadata", eroot, "installed", "dev-libs/A-1", "EAPI", "USE", "RDEPEND"),
			portageq_cmd + ("owners", eroot, eroot + "usr"),
			emerge_cmd + ("-p", eroot + "usr"),
			emerge_cmd + ("-p", "--unmerge", "-q", eroot + "usr"),
			emerge_cmd + ("--unmerge", "--quiet", "dev-libs/A"),
			emerge_cmd + ("-C", "--quiet", "dev-libs/B"),

			emerge_cmd + ("--autounmask-continue", "dev-libs/C",),
			# Verify that the above --autounmask-continue command caused
			# USE=flag to be applied correctly to dev-libs/D.
			portageq_cmd + ("match", eroot, "dev-libs/D[flag]"),

			# Test cross-prefix usage, including chpathtool for binpkgs.
			({"EPREFIX" : cross_prefix},) + \
				emerge_cmd + ("--usepkgonly", "dev-libs/A"),
			({"EPREFIX" : cross_prefix},) + \
				portageq_cmd + ("has_version", cross_prefix, "dev-libs/A"),
			({"EPREFIX" : cross_prefix},) + \
				portageq_cmd + ("has_version", cross_prefix, "dev-libs/B"),
			({"EPREFIX" : cross_prefix},) + \
				emerge_cmd + ("-C", "--quiet", "dev-libs/B"),
			({"EPREFIX" : cross_prefix},) + \
				emerge_cmd + ("-C", "--quiet", "dev-libs/A"),
			({"EPREFIX" : cross_prefix},) + \
				emerge_cmd + ("dev-libs/A",),
			({"EPREFIX" : cross_prefix},) + \
				portageq_cmd + ("has_version", cross_prefix, "dev-libs/A"),
			({"EPREFIX" : cross_prefix},) + \
				portageq_cmd + ("has_version", cross_prefix, "dev-libs/B"),

			# Test ROOT support
			({"ROOT": cross_root},) + emerge_cmd + ("dev-libs/B",),
			portageq_cmd + ("has_version", cross_eroot, "dev-libs/B"),
		)

		distdir = playground.distdir
		pkgdir = playground.pkgdir
		fake_bin = os.path.join(eprefix, "bin")
		portage_tmpdir = os.path.join(eprefix, "var", "tmp", "portage")
		profile_path = settings.profile_path
		user_config_dir = os.path.join(os.sep, eprefix, USER_CONFIG_PATH)

		path = os.environ.get("PATH")
		if path is not None and not path.strip():
			path = None
		if path is None:
			path = ""
		else:
			path = ":" + path
		path = fake_bin + path

		pythonpath = os.environ.get("PYTHONPATH")
		if pythonpath is not None and not pythonpath.strip():
			pythonpath = None
		if pythonpath is not None and \
			pythonpath.split(":")[0] == PORTAGE_PYM_PATH:
			pass
		else:
			if pythonpath is None:
				pythonpath = ""
			else:
				pythonpath = ":" + pythonpath
			pythonpath = PORTAGE_PYM_PATH + pythonpath

		env = {
			"PORTAGE_OVERRIDE_EPREFIX" : eprefix,
			"CLEAN_DELAY" : "0",
			"DISTDIR" : distdir,
			"EMERGE_WARNING_DELAY" : "0",
			"INFODIR" : "",
			"INFOPATH" : "",
			"PATH" : path,
			"PKGDIR" : pkgdir,
			"PORTAGE_INST_GID" : str(portage.data.portage_gid),
			"PORTAGE_INST_UID" : str(portage.data.portage_uid),
			"PORTAGE_PYTHON" : portage_python,
			"PORTAGE_REPOSITORIES" : settings.repositories.config_string(),
			"PORTAGE_TMPDIR" : portage_tmpdir,
			"PYTHONDONTWRITEBYTECODE" : os.environ.get("PYTHONDONTWRITEBYTECODE", ""),
			"PYTHONPATH" : pythonpath,
			"__PORTAGE_TEST_PATH_OVERRIDE" : fake_bin,
		}

		if "__PORTAGE_TEST_HARDLINK_LOCKS" in os.environ:
			env["__PORTAGE_TEST_HARDLINK_LOCKS"] = \
				os.environ["__PORTAGE_TEST_HARDLINK_LOCKS"]

		updates_dir = os.path.join(test_repo_location, "profiles", "updates")
		dirs = [cachedir, cachedir_pregen, cross_eroot, cross_prefix,
			distdir, fake_bin, portage_tmpdir, updates_dir,
			user_config_dir, var_cache_edb]
		etc_symlinks = ("dispatch-conf.conf", "etc-update.conf")
		# Override things that may be unavailable, or may have portability
		# issues when running tests in exotic environments.
		#   prepstrip - bug #447810 (bash read builtin EINTR problem)
		true_symlinks = ["find", "prepstrip", "sed", "scanelf"]
		true_binary = find_binary("true")
		self.assertEqual(true_binary is None, False,
			"true command not found")
		try:
			for d in dirs:
				ensure_dirs(d)
			for x in true_symlinks:
				os.symlink(true_binary, os.path.join(fake_bin, x))
			for x in etc_symlinks:
				os.symlink(os.path.join(self.cnf_etc_path, x),
					os.path.join(eprefix, "etc", x))
			with open(os.path.join(var_cache_edb, "counter"), 'wb') as f:
				f.write(b"100")
			# non-empty system set keeps --depclean quiet
			with open(os.path.join(profile_path, "packages"), 'w') as f:
				f.write("*dev-libs/token-system-pkg")
			for cp, xml_data in metadata_xml_files:
				with open(os.path.join(test_repo_location, cp, "metadata.xml"), 'w') as f:
					f.write(playground.metadata_xml_template % xml_data)
			with open(os.path.join(updates_dir, "1Q-2010"), 'w') as f:
				f.write("""
slotmove =app-doc/pms-3 2 3
move dev-util/git dev-vcs/git
""")

			if debug:
				# The subprocess inherits both stdout and stderr, for
				# debugging purposes.
				stdout = None
			else:
				# The subprocess inherits stderr so that any warnings
				# triggered by python -Wd will be visible.
				stdout = subprocess.PIPE

			for args in test_commands:

				if hasattr(args, '__call__'):
					args()
					continue

				if isinstance(args[0], dict):
					local_env = env.copy()
					local_env.update(args[0])
					args = args[1:]
				else:
					local_env = env

				proc = subprocess.Popen(args,
					env=local_env, stdout=stdout)

				if debug:
					proc.wait()
				else:
					output = proc.stdout.readlines()
					proc.wait()
					proc.stdout.close()
					if proc.returncode != os.EX_OK:
						for line in output:
							sys.stderr.write(_unicode_decode(line))

				self.assertEqual(os.EX_OK, proc.returncode,
					"emerge failed with args %s" % (args,))
		finally:
			playground.cleanup()
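
The PYTHONPATH stanza in both test variants normalizes a possibly-blank value and prepends PORTAGE_PYM_PATH only when it is not already the first component. The same idiom as a compact sketch (the function name is invented for illustration):

import os

def prepend_env_path(name, entry):
    current = os.environ.get(name, "")
    if not current.strip():
        current = ""
    if current.split(":")[0] == entry:
        # Already first; leave the value untouched.
        return current
    return entry + (":" + current if current else "")
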
Example #52
0
def parse_args(argv, repoman_default_opts):
	"""Use a customized optionParser to parse command line arguments for repoman
	Args:
		argv - a sequence of command line arguments
	Returns:
		(opts, args), just like a call to parser.parse_args()
	"""

	argv = portage._decode_argv(argv)

	modes = {
		'commit': 'Run a scan then commit changes',
		'ci': 'Run a scan then commit changes',
		'fix': 'Fix simple QA issues (stray digests, missing digests)',
		'full': 'Scan directory tree and print all issues (not a summary)',
		'help': 'Show this screen',
		'manifest': 'Generate a Manifest (fetches files if necessary)',
		'manifest-check': 'Check Manifests for missing or incorrect digests',
		'scan': 'Scan directory tree for QA issues'
	}

	output_choices = {
		'default': 'The normal output format',
		'column': 'Columnar output suitable for use with grep'
	}

	mode_keys = list(modes)
	mode_keys.sort()

	output_keys = sorted(output_choices)

	parser = argparse.ArgumentParser(
		usage="repoman [options] [mode]",
		description="Modes: %s" % " | ".join(mode_keys),
		epilog="For more help consult the man page.")

	parser.add_argument(
		'-a', '--ask', dest='ask', action='store_true',
		default=False,
		help='Request a confirmation before committing')

	parser.add_argument(
		'-b', '--bug', dest='bug', action='append', metavar='<BUG-NO|BUG-URL>',
		default=[],
		help=(
			'Mention a Gentoo or upstream bug in the commit footer; '
			'takes either Gentoo bug number or full bug URL'))

	parser.add_argument(
		'-c', '--closes', dest='closes', action='append', metavar='<PR-NO|PR-URL>',
		default=[],
		help=(
			'Adds a Closes footer to close GitHub pull request (or compatible); '
			'takes either GitHub PR number or full PR URL'))

	parser.add_argument(
		'-m', '--commitmsg', dest='commitmsg',
		help='specify a commit message on the command line')

	parser.add_argument(
		'-M', '--commitmsgfile', dest='commitmsgfile',
		help='specify a path to a file that contains a commit message')

	parser.add_argument(
		'--digest', choices=('y', 'n'), metavar='<y|n>',
		help='Automatically update Manifest digests for modified files')

	parser.add_argument(
		'-p', '--pretend', dest='pretend', default=False,
		action='store_true',
		help='don\'t commit or fix anything; just show what would be done')

	parser.add_argument(
		'-q', '--quiet', dest="quiet", action="count",
		default=0,
		help='do not print unnecessary messages')

	parser.add_argument(
		'--echangelog', choices=('y', 'n', 'force'), metavar="<y|n|force>",
		help=(
			'for commit mode, call echangelog if ChangeLog is unmodified (or '
			'regardless of modification if \'force\' is specified)'))

	parser.add_argument(
		'--experimental-inherit', choices=('y', 'n'), metavar="<y|n>",
		default='n',
		help=(
			'Enable experimental inherit.missing checks which may misbehave'
			' when the internal eclass database becomes outdated'))

	parser.add_argument(
		'--experimental-repository-modules', choices=('y', 'n'), metavar="<y|n>",
		default='n',
		help='Enable experimental repository modules')

	parser.add_argument(
		'-f', '--force', dest='force', action='store_true',
		default=False,
		help='Commit with QA violations')

	parser.add_argument(
		'-S', '--straight-to-stable', dest='straight_to_stable',
		default=False, action='store_true',
		help='Allow committing straight to stable')

	parser.add_argument(
		'--vcs', dest='vcs',
		help='Force using specific VCS instead of autodetection')

	parser.add_argument(
		'-v', '--verbose', dest="verbosity", action='count',
		help='be very verbose in output', default=0)

	parser.add_argument(
		'-V', '--version', dest='version', action='store_true',
		help='show version info')

	parser.add_argument(
		'-x', '--xmlparse', dest='xml_parse', action='store_true',
		default=False,
		help='forces the metadata.xml parse check to be carried out')

	parser.add_argument(
		'--if-modified', choices=('y', 'n'), default='n',
		metavar="<y|n>",
		help='only check packages that have uncommitted modifications')

	parser.add_argument(
		'-i', '--ignore-arches', dest='ignore_arches', action='store_true',
		default=False,
		help='ignore arch-specific failures (where arch != host)')

	parser.add_argument(
		"--ignore-default-opts",
		action="store_true",
		help="do not use the REPOMAN_DEFAULT_OPTS environment variable")

	parser.add_argument(
		'-I', '--ignore-masked', dest='ignore_masked', action='store_true',
		default=False,
		help='ignore masked packages (not allowed with commit mode)')

	parser.add_argument(
		'--include-arches',
		dest='include_arches', metavar='ARCHES', action='append',
		help=(
			'A space separated list of arches used to '
			'filter the selection of profiles for dependency checks'))

	parser.add_argument(
		'-d', '--include-dev', dest='include_dev', action='store_true',
		default=False,
		help='include dev profiles in dependency checks')

	parser.add_argument(
		'-e', '--include-exp-profiles', choices=('y', 'n'), metavar='<y|n>',
		default=False,
		help='include exp profiles in dependency checks')

	parser.add_argument(
		'--unmatched-removal', dest='unmatched_removal', action='store_true',
		default=False,
		help=(
			'enable strict checking of package.mask and package.unmask files'
			' for unmatched removal atoms'))

	parser.add_argument(
		'--without-mask', dest='without_mask', action='store_true',
		default=False,
		help=(
			'behave as if no package.mask entries exist'
			' (not allowed with commit mode)'))

	parser.add_argument(
		'--output-style', dest='output_style', choices=output_keys,
		help='select output type', default='default')

	parser.add_argument(
		'--mode', dest='mode', choices=mode_keys,
		help='specify which mode repoman will run in (default=full)')

	opts, args = parser.parse_known_args(argv[1:])

	if not opts.ignore_default_opts:
		default_opts = util.shlex_split(repoman_default_opts)
		if default_opts:
			opts, args = parser.parse_known_args(default_opts + argv[1:])

	if opts.mode == 'help':
		parser.print_help()

	for arg in args:
		if arg in modes:
			if not opts.mode:
				opts.mode = arg
				break
		else:
			parser.error("invalid mode: %s" % arg)

	if not opts.mode:
		opts.mode = 'full'

	if opts.mode == 'ci':
		opts.mode = 'commit'  # backwards compat shortcut

	# Use verbosity and quiet options to appropriately fiddle with the loglevel
	for val in range(opts.verbosity):
		logger = logging.getLogger()
		logger.setLevel(logger.getEffectiveLevel() - 10)

	for val in range(opts.quiet):
		logger = logging.getLogger()
		logger.setLevel(logger.getEffectiveLevel() + 10)

	if opts.mode == 'commit' and opts.commitmsg:
		opts.commitmsg = _unicode_decode(opts.commitmsg)

	if opts.mode == 'commit' and not (opts.force or opts.pretend):
		if opts.ignore_masked:
			opts.ignore_masked = False
			logging.warning('Commit mode automatically disables --ignore-masked')
		if opts.without_mask:
			opts.without_mask = False
			logging.warning('Commit mode automatically disables --without-mask')

	return (opts, args)
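
The verbosity/quiet handling above walks the root logger up or down in 10-point steps, one per counted flag. The same mapping collapsed into a single adjustment (a sketch using only the two relevant options):

import argparse
import logging

parser = argparse.ArgumentParser()
parser.add_argument('-v', '--verbose', dest='verbosity', action='count', default=0)
parser.add_argument('-q', '--quiet', dest='quiet', action='count', default=0)
opts = parser.parse_args(['-vv', '-q'])

logger = logging.getLogger()
# Each -v lowers the effective level by 10, each -q raises it by 10.
logger.setLevel(logger.getEffectiveLevel() - 10 * (opts.verbosity - opts.quiet))
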
Example #53
0
def _do_global_updates(trees, prev_mtimes, quiet=False, if_mtime_changed=True):
    root = trees._running_eroot
    mysettings = trees[root]["vartree"].settings
    portdb = trees[root]["porttree"].dbapi
    vardb = trees[root]["vartree"].dbapi
    bindb = trees[root]["bintree"].dbapi

    world_file = os.path.join(mysettings['EROOT'], WORLD_FILE)
    world_list = grabfile(world_file)
    world_modified = False
    world_warnings = set()
    updpath_map = {}
    # Maps repo_name to list of updates. If a given repo has no updates
    # directory, it will be omitted. If a repo has an updates directory
    # but none need to be applied (according to timestamp logic), the
    # value in the dict will be an empty list.
    repo_map = {}
    timestamps = {}

    retupd = False
    update_notice_printed = False
    for repo_name in portdb.getRepositories():
        repo = portdb.getRepositoryPath(repo_name)
        updpath = os.path.join(repo, "profiles", "updates")
        if not os.path.isdir(updpath):
            continue

        if updpath in updpath_map:
            repo_map[repo_name] = updpath_map[updpath]
            continue

        try:
            if if_mtime_changed:
                update_data = grab_updates(updpath, prev_mtimes=prev_mtimes)
            else:
                update_data = grab_updates(updpath)
        except DirectoryNotFound:
            continue
        myupd = []
        updpath_map[updpath] = myupd
        repo_map[repo_name] = myupd
        if len(update_data) > 0:
            for mykey, mystat, mycontent in update_data:
                if not update_notice_printed:
                    update_notice_printed = True
                    writemsg_stdout("\n")
                    writemsg_stdout(
                        colorize("GOOD", _("Performing Global Updates\n")))
                    writemsg_stdout(
                        _("(Could take a couple of minutes if you have a lot of binary packages.)\n"
                          ))
                    if not quiet:
                        writemsg_stdout(_("  %s='update pass'  %s='binary update'  "
                         "%s='/var/db update'  %s='/var/db move'\n"
                         "  %s='/var/db SLOT move'  %s='binary move'  "
                         "%s='binary SLOT move'\n  %s='update /etc/portage/package.*'\n") % \
                         (bold("."), bold("*"), bold("#"), bold("@"), bold("s"), bold("%"), bold("S"), bold("p")))
                valid_updates, errors = parse_updates(mycontent)
                myupd.extend(valid_updates)
                if not quiet:
                    writemsg_stdout(bold(mykey))
                    writemsg_stdout(len(valid_updates) * "." + "\n")
                if len(errors) == 0:
                    # Update our internal mtime since we
                    # processed all of our directives.
                    timestamps[mykey] = mystat[stat.ST_MTIME]
                else:
                    for msg in errors:
                        writemsg("%s\n" % msg, noiselevel=-1)
            if myupd:
                retupd = True

    if retupd:
        if os.access(bindb.bintree.pkgdir, os.W_OK):
            # Call binarytree.populate(), since we want to make sure it's
            # only populated with local packages here (getbinpkgs=0).
            bindb.bintree.populate()
        else:
            bindb = None

    master_repo = portdb.repositories.mainRepo()
    if master_repo is not None:
        master_repo = master_repo.name
    if master_repo in repo_map:
        repo_map['DEFAULT'] = repo_map[master_repo]

    for repo_name, myupd in repo_map.items():
        if repo_name == 'DEFAULT':
            continue
        if not myupd:
            continue

        def repo_match(repository):
            return repository == repo_name or \
             (repo_name == master_repo and repository not in repo_map)

        def _world_repo_match(atoma, atomb):
            """
            Check whether to perform a world change from atoma to atomb.
            If the best vardb match for atoma comes from the same
            repository as the update file, allow that. Additionally, if
            portdb can still find a match for the old atom name, warn
            about that.
            """
            matches = vardb.match(atoma)
            if not matches:
                matches = vardb.match(atomb)
            if matches and \
             repo_match(vardb.aux_get(best(matches), ['repository'])[0]):
                if portdb.match(atoma):
                    world_warnings.add((atoma, atomb))
                return True
            else:
                return False

        for update_cmd in myupd:
            for pos, atom in enumerate(world_list):
                new_atom = update_dbentry(update_cmd, atom)
                if atom != new_atom:
                    if _world_repo_match(atom, new_atom):
                        world_list[pos] = new_atom
                        world_modified = True

        for update_cmd in myupd:
            if update_cmd[0] == "move":
                moves = vardb.move_ent(update_cmd, repo_match=repo_match)
                if moves:
                    writemsg_stdout(moves * "@")
                if bindb:
                    moves = bindb.move_ent(update_cmd, repo_match=repo_match)
                    if moves:
                        writemsg_stdout(moves * "%")
            elif update_cmd[0] == "slotmove":
                moves = vardb.move_slot_ent(update_cmd, repo_match=repo_match)
                if moves:
                    writemsg_stdout(moves * "s")
                if bindb:
                    moves = bindb.move_slot_ent(update_cmd,
                                                repo_match=repo_match)
                    if moves:
                        writemsg_stdout(moves * "S")

    if world_modified:
        world_list.sort()
        write_atomic(world_file, "".join("%s\n" % (x, ) for x in world_list))
        if world_warnings:
            # XXX: print warning that we've updated world entries
            # and the old name still matches something (from an overlay)?
            pass

    if retupd:

        def _config_repo_match(repo_name, atoma, atomb):
            """
            Check whether to perform a config file change from atoma to
            atomb. If the best vardb match for atoma (or, failing that,
            atomb) comes from the same repository as the update file,
            allow that.
            """
            matches = vardb.match(atoma)
            if not matches:
                matches = vardb.match(atomb)
                if not matches:
                    return False
            repository = vardb.aux_get(best(matches), ['repository'])[0]
            return repository == repo_name or \
             (repo_name == master_repo and repository not in repo_map)

        update_config_files(root,
                            shlex_split(mysettings.get("CONFIG_PROTECT", "")),
                            shlex_split(
                                mysettings.get("CONFIG_PROTECT_MASK", "")),
                            repo_map,
                            match_callback=_config_repo_match,
                            case_insensitive="case-insensitive-fs"
                            in mysettings.features)

        # The above global updates proceed quickly, so they
        # are considered a single mtimedb transaction.
        if timestamps:
            # We do not update the mtime in the mtimedb
            # until after _all_ of the above updates have
            # been processed because the mtimedb will
            # automatically commit when killed by ctrl C.
            for mykey, mtime in timestamps.items():
                prev_mtimes[mykey] = mtime

        do_upgrade_packagesmessage = False
        # We gotta do the brute force updates for these now.
        if True:

            def onUpdate(_maxval, curval):
                if curval > 0:
                    writemsg_stdout("#")

            if quiet:
                onUpdate = None
            vardb.update_ents(repo_map, onUpdate=onUpdate)
            if bindb:

                def onUpdate(_maxval, curval):
                    if curval > 0:
                        writemsg_stdout("*")

                if quiet:
                    onUpdate = None
                bindb.update_ents(repo_map, onUpdate=onUpdate)
        else:
            do_upgrade_packagesmessage = 1

        # Update progress above is indicated by characters written to stdout so
        # we print a couple new lines here to separate the progress output from
        # what follows.
        writemsg_stdout("\n\n")

        if do_upgrade_packagesmessage and bindb and \
         bindb.cpv_all():
            writemsg_stdout(
                _(" ** Skipping packages. Run 'fixpackages' or set it in FEATURES to fix the tbz2's in the packages directory.\n"
                  ))
            writemsg_stdout(bold(_("Note: This can take a very long time.")))
            writemsg_stdout("\n")

    return retupd
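
The repo_match closure above encodes the scoping rule for update files: an update applies to packages from its own repository, and updates from the master repository also cover packages whose repository ships no updates of its own. The rule in isolation (a sketch; names mirror the example):

def make_repo_match(repo_name, master_repo, repo_map):
    def repo_match(repository):
        # Own repo always matches; the master repo also covers any
        # repository that has no updates directory of its own.
        return repository == repo_name or (
            repo_name == master_repo and repository not in repo_map)
    return repo_match
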
Example #54
0
	def _start(self):
		tar_options = ""
		if "xattr" in self.features:
			process = subprocess.Popen(["tar", "--help"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
			output = process.communicate()[0]
			if b"--xattrs" in output:
				tar_options = ["--xattrs", "--xattrs-include='*'"]
				for x in portage.util.shlex_split(self.env.get("PORTAGE_XATTR_EXCLUDE", "")):
					tar_options.append(portage._shell_quote("--xattrs-exclude=%s" % x))
				tar_options = " ".join(tar_options)

		decomp = _compressors.get(compression_probe(self.pkg_path))
		if decomp is not None:
			decomp_cmd = decomp.get("decompress")
		else:
			decomp_cmd = None
		if decomp_cmd is None:
			self.scheduler.output("!!! %s\n" %
				_("File compression header unrecognized: %s") %
				self.pkg_path, log_path=self.logfile,
				background=self.background, level=logging.ERROR)
			self.returncode = 1
			self._async_wait()
			return

		try:
			decompression_binary = shlex_split(varexpand(decomp_cmd, mydict=self.env))[0]
		except IndexError:
			decompression_binary = ""

		if find_binary(decompression_binary) is None:
			# Try alternative command if it exists
			if _compressors.get(compression_probe(self.pkg_path)).get("decompress_alt"):
				decomp_cmd = _compressors.get(
					compression_probe(self.pkg_path)).get("decompress_alt")
			try:
				decompression_binary = shlex_split(varexpand(decomp_cmd, mydict=self.env))[0]
			except IndexError:
				decompression_binary = ""

			if find_binary(decompression_binary) is None:
				missing_package = _compressors.get(compression_probe(self.pkg_path)).get("package")
				self.scheduler.output("!!! %s\n" %
					_("File compression unsupported %s.\n Command was: %s.\n Maybe missing package: %s") %
					(self.pkg_path, varexpand(decomp_cmd, mydict=self.env), missing_package), log_path=self.logfile,
					background=self.background, level=logging.ERROR)
				self.returncode = 1
				self._async_wait()
				return

		# Add -q to decomp_cmd opts, in order to avoid "trailing garbage
		# after EOF ignored" warning messages due to xpak trailer.
		# SIGPIPE handling (128 + SIGPIPE) should be compatible with
		# assert_sigpipe_ok() that's used by the ebuild unpack() helper.
		self.args = [self._shell_binary, "-c",
			("%s -cq -- %s | tar -xp %s -C %s -f - ; " + \
			"p=(${PIPESTATUS[@]}) ; " + \
			"if [[ ${p[0]} != 0 && ${p[0]} != %d ]] ; then " % (128 + signal.SIGPIPE) + \
			"echo bzip2 failed with status ${p[0]} ; exit ${p[0]} ; fi ; " + \
			"if [ ${p[1]} != 0 ] ; then " + \
			"echo tar failed with status ${p[1]} ; exit ${p[1]} ; fi ; " + \
			"exit 0 ;") % \
			(decomp_cmd,
			portage._shell_quote(self.pkg_path),
			tar_options,
			portage._shell_quote(self.image_dir))]

		SpawnProcess._start(self)
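
Both extractor variants lean on bash's PIPESTATUS array so a decompressor failure is not masked by tar's exit status (a bare pipeline only reports the last stage). The mechanism by itself (a sketch):

import subprocess

script = 'false | true ; p=(${PIPESTATUS[@]}) ; echo "${p[0]} ${p[1]}"'
# Prints "1 0": the first stage failed even though the pipeline's own
# exit status (from the last stage) is 0.
print(subprocess.check_output(["bash", "-c", script]).decode().strip())
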