Example #1
    def _testAsynchronousLockWaitKill(self):
        scheduler = global_event_loop()
        tempdir = tempfile.mkdtemp()
        try:
            path = os.path.join(tempdir, 'lock_me')
            lock1 = AsynchronousLock(path=path, scheduler=scheduler)
            lock1.start()
            self.assertEqual(lock1.wait(), os.EX_OK)
            self.assertEqual(lock1.returncode, os.EX_OK)
            lock2 = AsynchronousLock(path=path,
                                     scheduler=scheduler,
                                     _force_async=True,
                                     _force_process=True)
            lock2.start()
            # lock2 should be waiting for lock1 to release
            self.assertEqual(lock2.poll(), None)
            self.assertEqual(lock2.returncode, None)

            # Kill lock2's process and then check wait() and
            # returncode results. This is intended to simulate
            # a SIGINT sent via the controlling tty.
            self.assertEqual(lock2._imp is not None, True)
            self.assertEqual(lock2._imp._proc is not None, True)
            self.assertEqual(lock2._imp._proc.pid is not None, True)
            lock2._imp._kill_test = True
            os.kill(lock2._imp._proc.pid, signal.SIGTERM)
            self.assertEqual(lock2.wait() == os.EX_OK, False)
            self.assertEqual(lock2.returncode == os.EX_OK, False)
            self.assertEqual(lock2.returncode is None, False)
            lock1.unlock()
        finally:
            shutil.rmtree(tempdir)
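
This test kills the lock's background process and then asserts that wait() and returncode report failure rather than os.EX_OK. A minimal sketch of the same observation with the standard library only (the sleep command is an assumption for illustration, not part of the test above):

import signal
import subprocess

# Start a long-running child, terminate it, and confirm the status
# reflects the signal rather than a clean exit (cf. the assertions above).
proc = subprocess.Popen(["sleep", "60"])
proc.send_signal(signal.SIGTERM)
# Popen.wait() reports a killed child as the negated signal number.
assert proc.wait() == -signal.SIGTERM
assert proc.returncode != 0
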
Example #2
 def _cancel(self):
     if self.isAlive():
         try:
             os.kill(self.pid, signal.SIGTERM)
         except OSError as e:
             if e.errno != errno.ESRCH:
                 raise
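
The _cancel() hook above uses a common idiom: send SIGTERM and treat ESRCH ("no such process") as a benign race with normal exit. A self-contained sketch of that idiom with the imports the excerpt assumes:

import errno
import os
import signal

def terminate_quietly(pid):
    """Send SIGTERM; ignore the race where the process already exited."""
    try:
        os.kill(pid, signal.SIGTERM)
    except OSError as e:
        if e.errno != errno.ESRCH:
            raise
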
Example #3
    def _wait(self):

        if self.returncode is not None:
            return self.returncode

        if self._registered:
            if self.cancelled:
                self._wait_loop(timeout=self._cancel_timeout)
                if self._registered:
                    try:
                        os.kill(self.pid, signal.SIGKILL)
                    except OSError as e:
                        if e.errno != errno.ESRCH:
                            raise
                        del e
                    self._wait_loop(timeout=self._cancel_timeout)
                    if self._registered:
                        self._orphan_process_warn()
            else:
                self._wait_loop()

            if self.returncode is not None:
                return self.returncode

        if not isinstance(self.pid, int):
            # Get debug info for bug #403697.
            raise AssertionError("%s: pid is non-integer: %s" % (self.__class__.__name__, repr(self.pid)))

        self._waitpid_loop()

        return self.returncode
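
The cancelled branch above escalates in stages: wait up to _cancel_timeout, then SIGKILL, then wait again, and finally warn about an orphaned process. A rough equivalent of that escalation using subprocess (a sketch under those assumptions, not Portage's _wait):

import subprocess

def reap_with_escalation(proc, timeout=5.0):
    """Bounded wait first, then SIGKILL, mirroring the cancelled path above."""
    try:
        return proc.wait(timeout=timeout)
    except subprocess.TimeoutExpired:
        proc.kill()               # SIGKILL fallback
        return proc.wait()
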
Example #4
 def _cancel(self):
     if self.isAlive():
         try:
             os.kill(self.pid, signal.SIGTERM)
         except OSError as e:
             if e.errno != errno.ESRCH:
                 raise
Example #5
    def _wait(self):

        if self.returncode is not None:
            return self.returncode

        if self._registered:
            if self.cancelled:
                self._wait_loop(timeout=self._cancel_timeout)
                if self._registered:
                    try:
                        os.kill(self.pid, signal.SIGKILL)
                    except OSError as e:
                        if e.errno != errno.ESRCH:
                            raise
                        del e
                    self._wait_loop(timeout=self._cancel_timeout)
                    if self._registered:
                        self._orphan_process_warn()
            else:
                self._wait_loop()

            if self.returncode is not None:
                return self.returncode

        if not isinstance(self.pid, int):
            # Get debug info for bug #403697.
            raise AssertionError("%s: pid is non-integer: %s" %
                                 (self.__class__.__name__, repr(self.pid)))

        self._waitpid_loop()

        return self.returncode
Example #6
	def _testAsynchronousLockWaitKill(self):
		scheduler = global_event_loop()
		tempdir = tempfile.mkdtemp()
		try:
			path = os.path.join(tempdir, 'lock_me')
			lock1 = AsynchronousLock(path=path, scheduler=scheduler)
			lock1.start()
			self.assertEqual(lock1.wait(), os.EX_OK)
			self.assertEqual(lock1.returncode, os.EX_OK)
			lock2 = AsynchronousLock(path=path, scheduler=scheduler,
				_force_async=True, _force_process=True)
			lock2.start()
			# lock2 should be waiting for lock1 to release
			self.assertEqual(lock2.poll(), None)
			self.assertEqual(lock2.returncode, None)

			# Kill lock2's process and then check wait() and
			# returncode results. This is intended to simulate
			# a SIGINT sent via the controlling tty.
			self.assertEqual(lock2._imp is not None, True)
			self.assertEqual(lock2._imp._proc is not None, True)
			self.assertEqual(lock2._imp._proc.pid is not None, True)
			lock2._imp._kill_test = True
			os.kill(lock2._imp._proc.pid, signal.SIGTERM)
			self.assertEqual(lock2.wait() == os.EX_OK, False)
			self.assertEqual(lock2.returncode == os.EX_OK, False)
			self.assertEqual(lock2.returncode is None, False)
			lock1.unlock()
		finally:
			shutil.rmtree(tempdir)
Example #7
def cleanup():
	while spawned_pids:
		pid = spawned_pids.pop()
		try:
			if os.waitpid(pid, os.WNOHANG) == (0, 0):
				os.kill(pid, signal.SIGTERM)
				os.waitpid(pid, 0)
		except OSError:
			# This pid has been cleaned up outside
			# of spawn().
			pass
Example #8
def cleanup():
    while spawned_pids:
        pid = spawned_pids.pop()
        try:
            if os.waitpid(pid, os.WNOHANG) == (0, 0):
                os.kill(pid, signal.SIGTERM)
                os.waitpid(pid, 0)
        except OSError:
            # This pid has been cleaned up outside
            # of spawn().
            pass
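
Both cleanup() variants above probe the child with os.waitpid(pid, os.WNOHANG) and compare the result against (0, 0). Later Portage code (see Example #15 below) compares only the first element, because the second element of the tuple may vary (bug #337465). A short sketch of the corrected probe:

import os
import signal

def reap_or_kill(pid):
    try:
        # A pid of 0 from WNOHANG means the child is still running.
        if os.waitpid(pid, os.WNOHANG)[0] == 0:
            os.kill(pid, signal.SIGTERM)
            os.waitpid(pid, 0)
    except OSError:
        # Already reaped elsewhere.
        pass
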
Example #9
    def cancel(self):
        if self.isAlive():
            try:
                os.kill(self.pid, signal.SIGTERM)
            except OSError as e:
                if e.errno != errno.ESRCH:
                    raise
                del e

        self.cancelled = True
        if self.pid is not None:
            self.wait()
        return self.returncode
Example #10
			def kill_all(pids, sig):
				for p in pids:
					try:
						os.kill(p, sig)
					except OSError as e:
						if e.errno == errno.EPERM:
							# Reported with hardened kernel (bug #358211).
							writemsg_level(
								"!!! kill: (%i) - Operation not permitted\n" %
								(p,), level=logging.ERROR,
								noiselevel=-1)
						elif e.errno != errno.ESRCH:
							raise
Example #11
			def kill_all(pids, sig):
				for p in pids:
					try:
						os.kill(p, sig)
					except OSError as e:
						if e.errno == errno.EPERM:
							# Reported with hardened kernel (bug #358211).
							writemsg_level(
								"!!! kill: (%i) - Operation not permitted\n" %
								(p,), level=logging.ERROR,
								noiselevel=-1)
						elif e.errno != errno.ESRCH:
							raise
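
kill_all() tolerates two expected failures when signalling a group of pids: EPERM is reported but not fatal (observed with a hardened kernel, bug #358211), and ESRCH is ignored outright. A standalone sketch of the same policy, using plain stderr output in place of Portage's writemsg_level:

import errno
import os
import sys

def kill_all(pids, sig):
    for p in pids:
        try:
            os.kill(p, sig)
        except OSError as e:
            if e.errno == errno.EPERM:
                # Not permitted: report and keep signalling the rest.
                sys.stderr.write("!!! kill: (%i) - Operation not permitted\n" % p)
            elif e.errno != errno.ESRCH:
                raise
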
Example #12
	def _cancel(self):
		if self.isAlive():
			try:
				os.kill(self.pid, signal.SIGTERM)
			except OSError as e:
				if e.errno == errno.EPERM:
					# Reported with hardened kernel (bug #358211).
					writemsg_level(
						"!!! kill: (%i) - Operation not permitted\n" %
						(self.pid,), level=logging.ERROR,
						noiselevel=-1)
				elif e.errno != errno.ESRCH:
					raise
Example #13
	def cancel(self):
		if self.isAlive():
			try:
				os.kill(self.pid, signal.SIGTERM)
			except OSError as e:
				if e.errno != errno.ESRCH:
					raise
				del e

		self.cancelled = True
		if self.pid is not None:
			self.wait()
		return self.returncode
Example #14
	def _wait(self):

		if self.returncode is not None:
			return self.returncode

		if self._registered:
			if self.cancelled:
				timeout = 1000
				self.scheduler.schedule(self._reg_id, timeout=timeout)
				if self._registered:
					try:
						os.kill(self.pid, signal.SIGKILL)
					except OSError as e:
						if e.errno != errno.ESRCH:
							raise
						del e
					self.scheduler.schedule(self._reg_id, timeout=timeout)
					if self._registered:
						self._orphan_process_warn()
			else:
				self.scheduler.schedule(self._reg_id)
			self._unregister()
			if self.returncode is not None:
				return self.returncode

		try:
			# With waitpid and WNOHANG, only check the
			# first element of the tuple since the second
			# element may vary (bug #337465).
			wait_retval = os.waitpid(self.pid, os.WNOHANG)
		except OSError as e:
			if e.errno != errno.ECHILD:
				raise
			del e
			self._set_returncode((self.pid, 1 << 8))
		else:
			if wait_retval[0] != 0:
				self._set_returncode(wait_retval)
			else:
				try:
					wait_retval = os.waitpid(self.pid, 0)
				except OSError as e:
					if e.errno != errno.ECHILD:
						raise
					del e
					self._set_returncode((self.pid, 1 << 8))
				else:
					self._set_returncode(wait_retval)

		return self.returncode
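
When waitpid raises ECHILD, the _wait() above synthesizes a status of 1 << 8, which in the encoding returned by os.waitpid means "exited normally with code 1" (exit code in the high byte, terminating signal in the low byte). A small check of that assumption with the standard decoding helpers:

import os

status = 1 << 8   # the synthetic status used above
assert os.WIFEXITED(status)
assert os.WEXITSTATUS(status) == 1
assert not os.WIFSIGNALED(status)
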
Example #15
def cleanup():
	while spawned_pids:
		pid = spawned_pids.pop()
		try:
			# With waitpid and WNOHANG, only check the
			# first element of the tuple since the second
			# element may vary (bug #337465).
			if os.waitpid(pid, os.WNOHANG)[0] == 0:
				os.kill(pid, signal.SIGTERM)
				os.waitpid(pid, 0)
		except OSError:
			# This pid has been cleaned up outside
			# of spawn().
			pass
Example #16
 def _cancel(self):
     if self.isAlive():
         try:
             os.kill(self.pid, signal.SIGTERM)
         except OSError as e:
             if e.errno == errno.EPERM:
                 # Reported with hardened kernel (bug #358211).
                 writemsg_level(
                     "!!! kill: (%i) - Operation not permitted\n" %
                     (self.pid, ),
                     level=logging.ERROR,
                     noiselevel=-1)
             elif e.errno != errno.ESRCH:
                 raise
Example #17
	def _wait(self):

		if self.returncode is not None:
			return self.returncode

		if self._registered:
			if self.cancelled:
				timeout = 1000
				self.scheduler.schedule(self._reg_id, timeout=timeout)
				if self._registered:
					try:
						os.kill(self.pid, signal.SIGKILL)
					except OSError as e:
						if e.errno != errno.ESRCH:
							raise
						del e
					self.scheduler.schedule(self._reg_id, timeout=timeout)
					if self._registered:
						self._orphan_process_warn()
			else:
				self.scheduler.schedule(self._reg_id)
			self._unregister()
			if self.returncode is not None:
				return self.returncode

		try:
			wait_retval = os.waitpid(self.pid, os.WNOHANG)
		except OSError as e:
			if e.errno != errno.ECHILD:
				raise
			del e
			self._set_returncode((self.pid, 1))
		else:
			if wait_retval != (0, 0):
				self._set_returncode(wait_retval)
			else:
				try:
					wait_retval = os.waitpid(self.pid, 0)
				except OSError as e:
					if e.errno != errno.ECHILD:
						raise
					del e
					self._set_returncode((self.pid, 1))
				else:
					self._set_returncode(wait_retval)

		return self.returncode
Example #18
    def _wait(self):

        if self.returncode is not None:
            return self.returncode

        if self._registered:
            if self.cancelled:
                timeout = 1000
                self.scheduler.schedule(self._reg_id, timeout=timeout)
                if self._registered:
                    try:
                        os.kill(self.pid, signal.SIGKILL)
                    except OSError as e:
                        if e.errno != errno.ESRCH:
                            raise
                        del e
                    self.scheduler.schedule(self._reg_id, timeout=timeout)
                    if self._registered:
                        self._orphan_process_warn()
            else:
                self.scheduler.schedule(self._reg_id)
            self._unregister()
            if self.returncode is not None:
                return self.returncode

        try:
            wait_retval = os.waitpid(self.pid, os.WNOHANG)
        except OSError as e:
            if e.errno != errno.ECHILD:
                raise
            del e
            self._set_returncode((self.pid, 1))
        else:
            if wait_retval != (0, 0):
                self._set_returncode(wait_retval)
            else:
                try:
                    wait_retval = os.waitpid(self.pid, 0)
                except OSError as e:
                    if e.errno != errno.ECHILD:
                        raise
                    del e
                    self._set_returncode((self.pid, 1))
                else:
                    self._set_returncode(wait_retval)

        return self.returncode
Example #19
    def _wait(self):

        if self.returncode is not None:
            return self.returncode

        if self._registered:
            if self.cancelled:
                self._wait_loop(timeout=self._cancel_timeout)
                if self._registered:
                    try:
                        os.kill(self.pid, signal.SIGKILL)
                    except OSError as e:
                        if e.errno == errno.EPERM:
                            # Reported with hardened kernel (bug #358211).
                            writemsg_level(
                                "!!! kill: (%i) - Operation not permitted\n" %
                                (self.pid, ),
                                level=logging.ERROR,
                                noiselevel=-1)
                        elif e.errno != errno.ESRCH:
                            raise
                        del e
                    self._wait_loop(timeout=self._cancel_timeout)
                    if self._registered:
                        self._orphan_process_warn()
            else:
                self._wait_loop()

            if self.returncode is not None:
                return self.returncode

        if not isinstance(self.pid, int):
            # Get debug info for bug #403697.
            raise AssertionError("%s: pid is non-integer: %s" %
                                 (self.__class__.__name__, repr(self.pid)))

        self._waitpid_loop()

        return self.returncode
Example #20
	def _wait(self):

		if self.returncode is not None:
			return self.returncode

		if self._registered:
			if self.cancelled:
				self._wait_loop(timeout=self._cancel_timeout)
				if self._registered:
					try:
						os.kill(self.pid, signal.SIGKILL)
					except OSError as e:
						if e.errno == errno.EPERM:
							# Reported with hardened kernel (bug #358211).
							writemsg_level(
								"!!! kill: (%i) - Operation not permitted\n" %
								(self.pid,), level=logging.ERROR,
								noiselevel=-1)
						elif e.errno != errno.ESRCH:
							raise
						del e
					self._wait_loop(timeout=self._cancel_timeout)
					if self._registered:
						self._orphan_process_warn()
			else:
				self._wait_loop()

			if self.returncode is not None:
				return self.returncode

		if not isinstance(self.pid, int):
			# Get debug info for bug #403697.
			raise AssertionError(
				"%s: pid is non-integer: %s" %
				(self.__class__.__name__, repr(self.pid)))

		self._waitpid_loop()

		return self.returncode
Example #21
    def _do_rsync(self, syncuri, timestamp, opts):
        updatecache_flg = False
        is_synced = False
        if timestamp != 0 and "--quiet" not in opts:
            print(">>> Checking server timestamp ...")

        rsynccommand = [self.bin_command
                        ] + self.rsync_opts + self.extra_rsync_opts

        if self.proto == 'ssh' and self.ssh_opts:
            rsynccommand.append("--rsh=ssh " + self.ssh_opts)

        if "--debug" in opts:
            print(rsynccommand)

        local_state_unchanged = False
        exitcode = os.EX_OK
        servertimestamp = 0
        # Even if there's no timestamp available locally, fetch the
        # timestamp anyway as an initial probe to verify that the server is
        # responsive.  This protects us from hanging indefinitely on a
        # connection attempt to an unresponsive server which rsync's
        # --timeout option does not prevent.

        #if True:
        # Temporary file for remote server timestamp comparison.
        # NOTE: If FEATURES=usersync is enabled then the tempfile
        # needs to be in a directory that's readable by the usersync
        # user. We assume that ${PORTAGE_TMPDIR}/portage will satisfy this
        # requirement, since that's not necessarily true for the
        # default directory used by the tempfile module.
        if self.usersync_uid is not None:
            tmpdir = os.path.join(self.settings['PORTAGE_TMPDIR'], 'portage')
            ensure_dirs_kwargs = {}
            if portage.secpass >= 1:
                ensure_dirs_kwargs['gid'] = portage.portage_gid
                ensure_dirs_kwargs['mode'] = 0o70
                ensure_dirs_kwargs['mask'] = 0
            portage.util.ensure_dirs(tmpdir, **ensure_dirs_kwargs)
        else:
            # use default dir from tempfile module
            tmpdir = None
        fd, tmpservertimestampfile = \
         tempfile.mkstemp(dir=tmpdir)
        os.close(fd)
        if self.usersync_uid is not None:
            portage.util.apply_permissions(tmpservertimestampfile,
                                           uid=self.usersync_uid)
        command = rsynccommand[:]
        command.append('--inplace')
        command.append(syncuri.rstrip("/") + \
         "/metadata/timestamp.chk")
        command.append(tmpservertimestampfile)
        content = None
        pids = []
        try:
            # Timeout here in case the server is unresponsive.  The
            # --timeout rsync option doesn't apply to the initial
            # connection attempt.
            try:
                if self.rsync_initial_timeout:
                    portage.exception.AlarmSignal.register(
                        self.rsync_initial_timeout)

                pids.extend(
                    portage.process.spawn(command,
                                          returnpid=True,
                                          **self.spawn_kwargs))
                exitcode = os.waitpid(pids[0], 0)[1]
                if self.usersync_uid is not None:
                    portage.util.apply_permissions(tmpservertimestampfile,
                                                   uid=os.getuid())
                content = portage.grabfile(tmpservertimestampfile)
            finally:
                if self.rsync_initial_timeout:
                    portage.exception.AlarmSignal.unregister()
                try:
                    os.unlink(tmpservertimestampfile)
                except OSError:
                    pass
        except portage.exception.AlarmSignal:
            # timed out
            print('timed out')
            # With waitpid and WNOHANG, only check the
            # first element of the tuple since the second
            # element may vary (bug #337465).
            if pids and os.waitpid(pids[0], os.WNOHANG)[0] == 0:
                os.kill(pids[0], signal.SIGTERM)
                os.waitpid(pids[0], 0)
            # This is the same code rsync uses for timeout.
            exitcode = 30
        else:
            if exitcode != os.EX_OK:
                if exitcode & 0xff:
                    exitcode = (exitcode & 0xff) << 8
                else:
                    exitcode = exitcode >> 8

        if content:
            try:
                servertimestamp = time.mktime(
                    time.strptime(content[0], TIMESTAMP_FORMAT))
            except (OverflowError, ValueError):
                pass
        del command, pids, content

        if exitcode == os.EX_OK:
            if (servertimestamp != 0) and (servertimestamp == timestamp):
                local_state_unchanged = True
                is_synced = True
                self.logger(self.xterm_titles,
                            ">>> Cancelling sync -- Already current.")
                print()
                print(">>>")
                print(
                    ">>> Timestamps on the server and in the local repository are the same."
                )
                print(
                    ">>> Cancelling all further sync action. You are already up to date."
                )
                print(">>>")
                print(">>> In order to force sync, remove '%s'." %
                      self.servertimestampfile)
                print(">>>")
                print()
            elif (servertimestamp != 0) and (servertimestamp < timestamp):
                self.logger(self.xterm_titles,
                            ">>> Server out of date: %s" % syncuri)
                print()
                print(">>>")
                print(">>> SERVER OUT OF DATE: %s" % syncuri)
                print(">>>")
                print(">>> In order to force sync, remove '%s'." %
                      self.servertimestampfile)
                print(">>>")
                print()
                exitcode = SERVER_OUT_OF_DATE
            elif (servertimestamp == 0) or (servertimestamp > timestamp):
                # actual sync
                command = rsynccommand[:]

                submodule_paths = self._get_submodule_paths()
                if submodule_paths:
                    # The only way to select multiple directories to
                    # sync, without calling rsync multiple times, is
                    # to use --relative.
                    command.append("--relative")
                    for path in submodule_paths:
                        # /./ is special syntax supported with the
                        # rsync --relative option.
                        command.append(syncuri + "/./" + path)
                else:
                    command.append(syncuri + "/")

                command.append(self.download_dir)

                exitcode = None
                try:
                    exitcode = portage.process.spawn(command,
                                                     **self.spawn_kwargs)
                finally:
                    if exitcode is None:
                        # interrupted
                        exitcode = 128 + signal.SIGINT

                    #   0	Success
                    #   1	Syntax or usage error
                    #   2	Protocol incompatibility
                    #   5	Error starting client-server protocol
                    #  35	Timeout waiting for daemon connection
                    if exitcode not in (0, 1, 2, 5, 35):
                        # If the exit code is not among those listed above,
                        # then we may have a partial/inconsistent sync
                        # state, so our previously read timestamp as well
                        # as the corresponding file can no longer be
                        # trusted.
                        timestamp = 0
                        try:
                            os.unlink(self.servertimestampfile)
                        except OSError:
                            pass
                    else:
                        updatecache_flg = True

                if exitcode in [0, 1, 3, 4, 11, 14, 20, 21]:
                    is_synced = True
        elif exitcode in [1, 3, 4, 11, 14, 20, 21]:
            is_synced = True
        else:
            # Code 2 indicates protocol incompatibility, which is expected
            # for servers with protocol < 29 that don't support
            # --prune-empty-directories.  Retry for a server that supports
            # at least rsync protocol version 29 (>=rsync-2.6.4).
            pass

        return local_state_unchanged, is_synced, exitcode, updatecache_flg
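
After the timestamp probe, the else branch above converts the raw os.waitpid status into an rsync-style exit code: a signal death becomes the low byte shifted up, and a normal exit becomes the plain exit code from the high byte. A sketch of that normalization in isolation:

def normalize_waitpid_status(status):
    """Map a raw waitpid status to the convention used above."""
    if status & 0xff:                 # terminated by a signal
        return (status & 0xff) << 8
    return status >> 8                # normal exit: plain exit code

assert normalize_waitpid_status(1 << 8) == 1       # exit code 1
assert normalize_waitpid_status(15) == 15 << 8     # killed by SIGTERM
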
Example #22
def spawn(mycommand, env={}, opt_name=None, fd_pipes=None, returnpid=False,
          uid=None, gid=None, groups=None, umask=None, logfile=None,
          path_lookup=True, pre_exec=None):
	"""
	Spawns a given command.
	
	@param mycommand: the command to execute
	@type mycommand: String or List (Popen style list)
	@param env: A dict of Key=Value pairs for env variables
	@type env: Dictionary
	@param opt_name: an optional name for the spawn'd process (defaults to the binary name)
	@type opt_name: String
	@param fd_pipes: A dict of mapping for pipes, { '0': stdin, '1': stdout } for example
	@type fd_pipes: Dictionary
	@param returnpid: Return the Process IDs for a successful spawn.
	NOTE: This requires the caller clean up all the PIDs, otherwise spawn will clean them.
	@type returnpid: Boolean
	@param uid: User ID to spawn as; useful for dropping privilages
	@type uid: Integer
	@param gid: Group ID to spawn as; useful for dropping privilages
	@type gid: Integer
	@param groups: Group ID's to spawn in: useful for having the process run in multiple group contexts.
	@type groups: List
	@param umask: An integer representing the umask for the process (see man chmod for umask details)
	@type umask: Integer
	@param logfile: name of a file to use for logging purposes
	@type logfile: String
	@param path_lookup: If the binary is not fully specified then look for it in PATH
	@type path_lookup: Boolean
	@param pre_exec: A function to be called with no arguments just prior to the exec call.
	@type pre_exec: callable
	
	logfile requires stdout and stderr to be assigned to this process (ie not pointed
	   somewhere else.)
	
	"""

	# mycommand is either a str or a list
	if isinstance(mycommand, basestring):
		mycommand = mycommand.split()

	if sys.hexversion < 0x3000000:
		# Avoid a potential UnicodeEncodeError from os.execve().
		env_bytes = {}
		for k, v in env.items():
			env_bytes[_unicode_encode(k, encoding=_encodings['content'])] = \
				_unicode_encode(v, encoding=_encodings['content'])
		env = env_bytes
		del env_bytes

	# If an absolute path to an executable file isn't given
	# search for it unless we've been told not to.
	binary = mycommand[0]
	if binary not in (BASH_BINARY, SANDBOX_BINARY, FAKEROOT_BINARY) and \
		(not os.path.isabs(binary) or not os.path.isfile(binary)
	    or not os.access(binary, os.X_OK)):
		binary = path_lookup and find_binary(binary) or None
		if not binary:
			raise CommandNotFound(mycommand[0])

	# If we haven't been told what file descriptors to use
	# default to propagating our stdin, stdout and stderr.
	if fd_pipes is None:
		fd_pipes = {
			0:sys.__stdin__.fileno(),
			1:sys.__stdout__.fileno(),
			2:sys.__stderr__.fileno(),
		}

	# mypids will hold the pids of all processes created.
	mypids = []

	if logfile:
		# Using a log file requires that stdout and stderr
		# are assigned to the process we're running.
		if 1 not in fd_pipes or 2 not in fd_pipes:
			raise ValueError(fd_pipes)

		# Create a pipe
		(pr, pw) = os.pipe()

		# Create a tee process, giving it our stdout and stderr
		# as well as the read end of the pipe.
		mypids.extend(spawn(('tee', '-i', '-a', logfile),
		              returnpid=True, fd_pipes={0:pr,
		              1:fd_pipes[1], 2:fd_pipes[2]}))

		# We don't need the read end of the pipe, so close it.
		os.close(pr)

		# Assign the write end of the pipe to our stdout and stderr.
		fd_pipes[1] = pw
		fd_pipes[2] = pw

	pid = os.fork()

	if pid == 0:
		try:
			_exec(binary, mycommand, opt_name, fd_pipes,
			      env, gid, groups, uid, umask, pre_exec)
		except SystemExit:
			raise
		except Exception as e:
			# We need to catch _any_ exception so that it doesn't
			# propagate out of this function and cause exiting
			# with anything other than os._exit()
			sys.stderr.write("%s:\n   %s\n" % (e, " ".join(mycommand)))
			traceback.print_exc()
			sys.stderr.flush()
			os._exit(1)

	if not isinstance(pid, int):
		raise AssertionError("fork returned non-integer: %s" % (repr(pid),))

	# Add the pid to our local and the global pid lists.
	mypids.append(pid)
	spawned_pids.append(pid)

	# If we started a tee process the write side of the pipe is no
	# longer needed, so close it.
	if logfile:
		os.close(pw)

	# If the caller wants to handle cleaning up the processes, we tell
	# it about all processes that were created.
	if returnpid:
		return mypids

	# Otherwise we clean them up.
	while mypids:

		# Pull the last reader in the pipe chain. If all processes
		# in the pipe are well behaved, it will die when the process
		# it is reading from dies.
		pid = mypids.pop(0)

		# and wait for it.
		retval = os.waitpid(pid, 0)[1]

		# When it's done, we can remove it from the
		# global pid list as well.
		spawned_pids.remove(pid)

		if retval:
			# If it failed, kill off anything else that
			# isn't dead yet.
			for pid in mypids:
				# With waitpid and WNOHANG, only check the
				# first element of the tuple since the second
				# element may vary (bug #337465).
				if os.waitpid(pid, os.WNOHANG)[0] == 0:
					os.kill(pid, signal.SIGTERM)
					os.waitpid(pid, 0)
				spawned_pids.remove(pid)

			# If it got a signal, return the signal that was sent.
			if (retval & 0xff):
				return ((retval & 0xff) << 8)

			# Otherwise, return its exit code.
			return (retval >> 8)

	# Everything succeeded
	return 0
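
The logfile handling in spawn() builds a small pipeline: a "tee -i -a logfile" child reads from a pipe, and the main child's stdout and stderr are pointed at the pipe's write end, which the parent then closes. A minimal sketch of the same wiring with subprocess (the log path and echo command are assumptions for illustration):

import os
import subprocess

pr, pw = os.pipe()
# tee reads the pipe and appends everything it sees to the log file.
tee = subprocess.Popen(["tee", "-i", "-a", "/tmp/spawn.log"], stdin=pr)
# The real child writes both streams into the pipe.
child = subprocess.Popen(["echo", "hello"], stdout=pw, stderr=pw)
# The parent no longer needs either pipe end.
os.close(pr)
os.close(pw)
child.wait()
tee.wait()   # exits once every writer has closed the pipe
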
Example #23
	def _do_rsync(self, syncuri, timestamp, opts):
		is_synced = False
		if timestamp != 0 and "--quiet" not in opts:
			print(">>> Checking server timestamp ...")

		rsynccommand = [self.bin_command] + self.rsync_opts + self.extra_rsync_opts

		if self.proto == 'ssh' and self.ssh_opts:
			rsynccommand.append("--rsh=ssh " + self.ssh_opts)

		if "--debug" in opts:
			print(rsynccommand)

		exitcode = os.EX_OK
		servertimestamp = 0
		# Even if there's no timestamp available locally, fetch the
		# timestamp anyway as an initial probe to verify that the server is
		# responsive.  This protects us from hanging indefinitely on a
		# connection attempt to an unresponsive server which rsync's
		# --timeout option does not prevent.

		#if True:
		# Temporary file for remote server timestamp comparison.
		# NOTE: If FEATURES=usersync is enabled then the tempfile
		# needs to be in a directory that's readable by the usersync
		# user. We assume that PORTAGE_TMPDIR will satisfy this
		# requirement, since that's not necessarily true for the
		# default directory used by the tempfile module.
		if self.usersync_uid is not None:
			tmpdir = self.settings['PORTAGE_TMPDIR']
		else:
			# use default dir from tempfile module
			tmpdir = None
		fd, tmpservertimestampfile = \
			tempfile.mkstemp(dir=tmpdir)
		os.close(fd)
		if self.usersync_uid is not None:
			portage.util.apply_permissions(tmpservertimestampfile,
				uid=self.usersync_uid)
		command = rsynccommand[:]
		command.append(syncuri.rstrip("/") + \
			"/metadata/timestamp.chk")
		command.append(tmpservertimestampfile)
		content = None
		pids = []
		try:
			# Timeout here in case the server is unresponsive.  The
			# --timeout rsync option doesn't apply to the initial
			# connection attempt.
			try:
				if self.rsync_initial_timeout:
					portage.exception.AlarmSignal.register(
						self.rsync_initial_timeout)

				pids.extend(portage.process.spawn(
					command, returnpid=True,
					**portage._native_kwargs(self.spawn_kwargs)))
				exitcode = os.waitpid(pids[0], 0)[1]
				if self.usersync_uid is not None:
					portage.util.apply_permissions(tmpservertimestampfile,
						uid=os.getuid())
				content = portage.grabfile(tmpservertimestampfile)
			finally:
				if self.rsync_initial_timeout:
					portage.exception.AlarmSignal.unregister()
				try:
					os.unlink(tmpservertimestampfile)
				except OSError:
					pass
		except portage.exception.AlarmSignal:
			# timed out
			print('timed out')
			# With waitpid and WNOHANG, only check the
			# first element of the tuple since the second
			# element may vary (bug #337465).
			if pids and os.waitpid(pids[0], os.WNOHANG)[0] == 0:
				os.kill(pids[0], signal.SIGTERM)
				os.waitpid(pids[0], 0)
			# This is the same code rsync uses for timeout.
			exitcode = 30
		else:
			if exitcode != os.EX_OK:
				if exitcode & 0xff:
					exitcode = (exitcode & 0xff) << 8
				else:
					exitcode = exitcode >> 8

		if content:
			try:
				servertimestamp = time.mktime(time.strptime(
					content[0], TIMESTAMP_FORMAT))
			except (OverflowError, ValueError):
				pass
		del command, pids, content

		if exitcode == os.EX_OK:
			if (servertimestamp != 0) and (servertimestamp == timestamp):
				self.logger(self.xterm_titles,
					">>> Cancelling sync -- Already current.")
				print()
				print(">>>")
				print(">>> Timestamps on the server and in the local repository are the same.")
				print(">>> Cancelling all further sync action. You are already up to date.")
				print(">>>")
				print(">>> In order to force sync, remove '%s'." % self.servertimestampfile)
				print(">>>")
				print()
				return is_synced, exitcode
			elif (servertimestamp != 0) and (servertimestamp < timestamp):
				self.logger(self.xterm_titles,
					">>> Server out of date: %s" % syncuri)
				print()
				print(">>>")
				print(">>> SERVER OUT OF DATE: %s" % syncuri)
				print(">>>")
				print(">>> In order to force sync, remove '%s'." % self.servertimestampfile)
				print(">>>")
				print()
				exitcode = SERVER_OUT_OF_DATE
			elif (servertimestamp == 0) or (servertimestamp > timestamp):
				# actual sync
				command = rsynccommand + [syncuri+"/", self.repo.location]
				exitcode = None
				try:
					exitcode = portage.process.spawn(command,
						**portage._native_kwargs(self.spawn_kwargs))
				finally:
					if exitcode is None:
						# interrupted
						exitcode = 128 + signal.SIGINT

					#   0	Success
					#   1	Syntax or usage error
					#   2	Protocol incompatibility
					#   5	Error starting client-server protocol
					#  35	Timeout waiting for daemon connection
					if exitcode not in (0, 1, 2, 5, 35):
						# If the exit code is not among those listed above,
						# then we may have a partial/inconsistent sync
						# state, so our previously read timestamp as well
						# as the corresponding file can no longer be
						# trusted.
						timestamp = 0
						try:
							os.unlink(self.servertimestampfile)
						except OSError:
							pass

				if exitcode in [0,1,3,4,11,14,20,21]:
					is_synced = True
		elif exitcode in [1,3,4,11,14,20,21]:
			is_synced = True
		else:
			# Code 2 indicates protocol incompatibility, which is expected
			# for servers with protocol < 29 that don't support
			# --prune-empty-directories.  Retry for a server that supports
			# at least rsync protocol version 29 (>=rsync-2.6.4).
			pass
		return is_synced, exitcode
Example #24
File: process.py Project: gentoo/portage
def spawn(mycommand, env=None, opt_name=None, fd_pipes=None, returnpid=False,
          uid=None, gid=None, groups=None, umask=None, cwd=None, logfile=None,
          path_lookup=True, pre_exec=None,
          close_fds=(sys.version_info < (3, 4)), unshare_net=False,
          unshare_ipc=False, unshare_mount=False, unshare_pid=False,
	  cgroup=None):
	"""
	Spawns a given command.
	
	@param mycommand: the command to execute
	@type mycommand: String or List (Popen style list)
	@param env: If env is not None, it must be a mapping that defines the environment
		variables for the new process; these are used instead of the default behavior
		of inheriting the current process's environment.
	@type env: None or Mapping
	@param opt_name: an optional name for the spawn'd process (defaults to the binary name)
	@type opt_name: String
	@param fd_pipes: A dict of mapping for pipes, { '0': stdin, '1': stdout } for example
		(default is {0:stdin, 1:stdout, 2:stderr})
	@type fd_pipes: Dictionary
	@param returnpid: Return the Process IDs for a successful spawn.
	NOTE: This requires the caller clean up all the PIDs, otherwise spawn will clean them.
	@type returnpid: Boolean
	@param uid: User ID to spawn as; useful for dropping privilages
	@type uid: Integer
	@param gid: Group ID to spawn as; useful for dropping privilages
	@type gid: Integer
	@param groups: Group ID's to spawn in: useful for having the process run in multiple group contexts.
	@type groups: List
	@param umask: An integer representing the umask for the process (see man chmod for umask details)
	@type umask: Integer
	@param cwd: Current working directory
	@type cwd: String
	@param logfile: name of a file to use for logging purposes
	@type logfile: String
	@param path_lookup: If the binary is not fully specified then look for it in PATH
	@type path_lookup: Boolean
	@param pre_exec: A function to be called with no arguments just prior to the exec call.
	@type pre_exec: callable
	@param close_fds: If True, then close all file descriptors except those
		referenced by fd_pipes (default is True for python3.3 and earlier, and False for
		python3.4 and later due to non-inheritable file descriptor behavior from PEP 446).
	@type close_fds: Boolean
	@param unshare_net: If True, networking will be unshared from the spawned process
	@type unshare_net: Boolean
	@param unshare_ipc: If True, IPC will be unshared from the spawned process
	@type unshare_ipc: Boolean
	@param unshare_mount: If True, mount namespace will be unshared and mounts will
		be private to the namespace
	@type unshare_mount: Boolean
	@param unshare_pid: If True, PID ns will be unshared from the spawned process
	@type unshare_pid: Boolean
	@param cgroup: CGroup path to bind the process to
	@type cgroup: String

	logfile requires stdout and stderr to be assigned to this process (ie not pointed
	   somewhere else.)
	
	"""

	# mycommand is either a str or a list
	if isinstance(mycommand, basestring):
		mycommand = mycommand.split()

	env = os.environ if env is None else env

	if sys.hexversion < 0x3000000:
		# Avoid a potential UnicodeEncodeError from os.execve().
		env_bytes = {}
		for k, v in env.items():
			env_bytes[_unicode_encode(k, encoding=_encodings['content'])] = \
				_unicode_encode(v, encoding=_encodings['content'])
		env = env_bytes
		del env_bytes

	# If an absolute path to an executable file isn't given
	# search for it unless we've been told not to.
	binary = mycommand[0]
	if binary not in (BASH_BINARY, SANDBOX_BINARY, FAKEROOT_BINARY) and \
		(not os.path.isabs(binary) or not os.path.isfile(binary)
	    or not os.access(binary, os.X_OK)):
		binary = path_lookup and find_binary(binary) or None
		if not binary:
			raise CommandNotFound(mycommand[0])

	# If we haven't been told what file descriptors to use
	# default to propagating our stdin, stdout and stderr.
	if fd_pipes is None:
		fd_pipes = {
			0:portage._get_stdin().fileno(),
			1:sys.__stdout__.fileno(),
			2:sys.__stderr__.fileno(),
		}

	# mypids will hold the pids of all processes created.
	mypids = []

	if logfile:
		# Using a log file requires that stdout and stderr
		# are assigned to the process we're running.
		if 1 not in fd_pipes or 2 not in fd_pipes:
			raise ValueError(fd_pipes)

		# Create a pipe
		(pr, pw) = os.pipe()

		# Create a tee process, giving it our stdout and stderr
		# as well as the read end of the pipe.
		mypids.extend(spawn(('tee', '-i', '-a', logfile),
		              returnpid=True, fd_pipes={0:pr,
		              1:fd_pipes[1], 2:fd_pipes[2]}))

		# We don't need the read end of the pipe, so close it.
		os.close(pr)

		# Assign the write end of the pipe to our stdout and stderr.
		fd_pipes[1] = pw
		fd_pipes[2] = pw

	# This caches the libc library lookup and _unshare_validator results
	# in the current process, so that results are cached for use in
	# child processes.
	unshare_flags = 0
	if unshare_net or unshare_ipc or unshare_mount or unshare_pid:
		# from /usr/include/bits/sched.h
		CLONE_NEWNS = 0x00020000
		CLONE_NEWIPC = 0x08000000
		CLONE_NEWPID = 0x20000000
		CLONE_NEWNET = 0x40000000

		if unshare_net:
			unshare_flags |= CLONE_NEWNET
		if unshare_ipc:
			unshare_flags |= CLONE_NEWIPC
		if unshare_mount:
			# NEWNS = mount namespace
			unshare_flags |= CLONE_NEWNS
		if unshare_pid:
			# we also need mount namespace for slave /proc
			unshare_flags |= CLONE_NEWPID | CLONE_NEWNS

		_unshare_validate(unshare_flags)

	# Force instantiation of portage.data.userpriv_groups before the
	# fork, so that the result is cached in the main process.
	bool(groups)

	parent_pid = os.getpid()
	pid = None
	try:
		pid = os.fork()

		if pid == 0:
			try:
				_exec(binary, mycommand, opt_name, fd_pipes,
					env, gid, groups, uid, umask, cwd, pre_exec, close_fds,
					unshare_net, unshare_ipc, unshare_mount, unshare_pid,
					unshare_flags, cgroup)
			except SystemExit:
				raise
			except Exception as e:
				# We need to catch _any_ exception so that it doesn't
				# propagate out of this function and cause exiting
				# with anything other than os._exit()
				writemsg("%s:\n   %s\n" % (e, " ".join(mycommand)),
					noiselevel=-1)
				traceback.print_exc()
				sys.stderr.flush()

	finally:
		if pid == 0 or (pid is None and os.getpid() != parent_pid):
			# Call os._exit() from a finally block in order
			# to suppress any finally blocks from earlier
			# in the call stack (see bug #345289). This
			# finally block has to be setup before the fork
			# in order to avoid a race condition.
			os._exit(1)

	if not isinstance(pid, int):
		raise AssertionError("fork returned non-integer: %s" % (repr(pid),))

	# Add the pid to our local and the global pid lists.
	mypids.append(pid)

	# If we started a tee process the write side of the pipe is no
	# longer needed, so close it.
	if logfile:
		os.close(pw)

	# If the caller wants to handle cleaning up the processes, we tell
	# it about all processes that were created.
	if returnpid:
		return mypids

	# Otherwise we clean them up.
	while mypids:

		# Pull the last reader in the pipe chain. If all processes
		# in the pipe are well behaved, it will die when the process
		# it is reading from dies.
		pid = mypids.pop(0)

		# and wait for it.
		retval = os.waitpid(pid, 0)[1]

		if retval:
			# If it failed, kill off anything else that
			# isn't dead yet.
			for pid in mypids:
				# With waitpid and WNOHANG, only check the
				# first element of the tuple since the second
				# element may vary (bug #337465).
				if os.waitpid(pid, os.WNOHANG)[0] == 0:
					os.kill(pid, signal.SIGTERM)
					os.waitpid(pid, 0)

			# If it got a signal, return the signal that was sent.
			if (retval & 0xff):
				return ((retval & 0xff) << 8)

			# Otherwise, return its exit code.
			return (retval >> 8)

	# Everything succeeded
	return 0
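
The namespace handling above composes CLONE_NEW* bits before the fork; note that requesting a PID namespace also pulls in a mount namespace so the child can mount a private /proc. A small sketch of just the flag composition (constants copied from the example, which takes them from bits/sched.h):

CLONE_NEWNS  = 0x00020000   # mount namespace
CLONE_NEWIPC = 0x08000000
CLONE_NEWPID = 0x20000000
CLONE_NEWNET = 0x40000000

def compose_unshare_flags(net=False, ipc=False, mount=False, pid=False):
    flags = 0
    if net:
        flags |= CLONE_NEWNET
    if ipc:
        flags |= CLONE_NEWIPC
    if mount:
        flags |= CLONE_NEWNS
    if pid:
        # A PID namespace also needs a private mount namespace for /proc.
        flags |= CLONE_NEWPID | CLONE_NEWNS
    return flags
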
Example #25
File: process.py Project: cgfuh/portage
def spawn(mycommand, env=None, opt_name=None, fd_pipes=None, returnpid=False,
          uid=None, gid=None, groups=None, umask=None, cwd=None, logfile=None,
          path_lookup=True, pre_exec=None,
          close_fds=(sys.version_info < (3, 4)), unshare_net=False,
          unshare_ipc=False, unshare_mount=False, unshare_pid=False,
	  cgroup=None):
	"""
	Spawns a given command.
	
	@param mycommand: the command to execute
	@type mycommand: String or List (Popen style list)
	@param env: If env is not None, it must be a mapping that defines the environment
		variables for the new process; these are used instead of the default behavior
		of inheriting the current process's environment.
	@type env: None or Mapping
	@param opt_name: an optional name for the spawn'd process (defaults to the binary name)
	@type opt_name: String
	@param fd_pipes: A dict of mapping for pipes, { '0': stdin, '1': stdout } for example
		(default is {0:stdin, 1:stdout, 2:stderr})
	@type fd_pipes: Dictionary
	@param returnpid: Return the Process IDs for a successful spawn.
	NOTE: This requires the caller clean up all the PIDs, otherwise spawn will clean them.
	@type returnpid: Boolean
	@param uid: User ID to spawn as; useful for dropping privilages
	@type uid: Integer
	@param gid: Group ID to spawn as; useful for dropping privilages
	@type gid: Integer
	@param groups: Group ID's to spawn in: useful for having the process run in multiple group contexts.
	@type groups: List
	@param umask: An integer representing the umask for the process (see man chmod for umask details)
	@type umask: Integer
	@param cwd: Current working directory
	@type cwd: String
	@param logfile: name of a file to use for logging purposes
	@type logfile: String
	@param path_lookup: If the binary is not fully specified then look for it in PATH
	@type path_lookup: Boolean
	@param pre_exec: A function to be called with no arguments just prior to the exec call.
	@type pre_exec: callable
	@param close_fds: If True, then close all file descriptors except those
		referenced by fd_pipes (default is True for python3.3 and earlier, and False for
		python3.4 and later due to non-inheritable file descriptor behavior from PEP 446).
	@type close_fds: Boolean
	@param unshare_net: If True, networking will be unshared from the spawned process
	@type unshare_net: Boolean
	@param unshare_ipc: If True, IPC will be unshared from the spawned process
	@type unshare_ipc: Boolean
	@param unshare_mount: If True, mount namespace will be unshared and mounts will
		be private to the namespace
	@type unshare_mount: Boolean
	@param unshare_pid: If True, PID ns will be unshared from the spawned process
	@type unshare_pid: Boolean
	@param cgroup: CGroup path to bind the process to
	@type cgroup: String

	logfile requires stdout and stderr to be assigned to this process (ie not pointed
	   somewhere else.)
	
	"""

	# mycommand is either a str or a list
	if isinstance(mycommand, basestring):
		mycommand = mycommand.split()

	env = os.environ if env is None else env

	if sys.hexversion < 0x3000000:
		# Avoid a potential UnicodeEncodeError from os.execve().
		env_bytes = {}
		for k, v in env.items():
			env_bytes[_unicode_encode(k, encoding=_encodings['content'])] = \
				_unicode_encode(v, encoding=_encodings['content'])
		env = env_bytes
		del env_bytes

	# If an absolute path to an executable file isn't given
	# search for it unless we've been told not to.
	binary = mycommand[0]
	if binary not in (BASH_BINARY, SANDBOX_BINARY, FAKEROOT_BINARY) and \
		(not os.path.isabs(binary) or not os.path.isfile(binary)
	    or not os.access(binary, os.X_OK)):
		binary = path_lookup and find_binary(binary) or None
		if not binary:
			raise CommandNotFound(mycommand[0])

	# If we haven't been told what file descriptors to use
	# default to propagating our stdin, stdout and stderr.
	if fd_pipes is None:
		fd_pipes = {
			0:portage._get_stdin().fileno(),
			1:sys.__stdout__.fileno(),
			2:sys.__stderr__.fileno(),
		}

	# mypids will hold the pids of all processes created.
	mypids = []

	if logfile:
		# Using a log file requires that stdout and stderr
		# are assigned to the process we're running.
		if 1 not in fd_pipes or 2 not in fd_pipes:
			raise ValueError(fd_pipes)

		# Create a pipe
		(pr, pw) = os.pipe()

		# Create a tee process, giving it our stdout and stderr
		# as well as the read end of the pipe.
		mypids.extend(spawn(('tee', '-i', '-a', logfile),
		              returnpid=True, fd_pipes={0:pr,
		              1:fd_pipes[1], 2:fd_pipes[2]}))

		# We don't need the read end of the pipe, so close it.
		os.close(pr)

		# Assign the write end of the pipe to our stdout and stderr.
		fd_pipes[1] = pw
		fd_pipes[2] = pw

	# Cache _has_ipv6() result for use in child processes.
	_has_ipv6()

	# This caches the libc library lookup and _unshare_validator results
	# in the current process, so that results are cached for use in
	# child processes.
	unshare_flags = 0
	if unshare_net or unshare_ipc or unshare_mount or unshare_pid:
		# from /usr/include/bits/sched.h
		CLONE_NEWNS = 0x00020000
		CLONE_NEWUTS = 0x04000000
		CLONE_NEWIPC = 0x08000000
		CLONE_NEWPID = 0x20000000
		CLONE_NEWNET = 0x40000000

		if unshare_net:
			# UTS namespace to override hostname
			unshare_flags |= CLONE_NEWNET | CLONE_NEWUTS
		if unshare_ipc:
			unshare_flags |= CLONE_NEWIPC
		if unshare_mount:
			# NEWNS = mount namespace
			unshare_flags |= CLONE_NEWNS
		if unshare_pid:
			# we also need mount namespace for slave /proc
			unshare_flags |= CLONE_NEWPID | CLONE_NEWNS

		_unshare_validate(unshare_flags)

	# Force instantiation of portage.data.userpriv_groups before the
	# fork, so that the result is cached in the main process.
	bool(groups)

	parent_pid = os.getpid()
	pid = None
	try:
		pid = os.fork()

		if pid == 0:
			try:
				_exec(binary, mycommand, opt_name, fd_pipes,
					env, gid, groups, uid, umask, cwd, pre_exec, close_fds,
					unshare_net, unshare_ipc, unshare_mount, unshare_pid,
					unshare_flags, cgroup)
			except SystemExit:
				raise
			except Exception as e:
				# We need to catch _any_ exception so that it doesn't
				# propagate out of this function and cause exiting
				# with anything other than os._exit()
				writemsg("%s:\n   %s\n" % (e, " ".join(mycommand)),
					noiselevel=-1)
				traceback.print_exc()
				sys.stderr.flush()

	finally:
		if pid == 0 or (pid is None and os.getpid() != parent_pid):
			# Call os._exit() from a finally block in order
			# to suppress any finally blocks from earlier
			# in the call stack (see bug #345289). This
			# finally block has to be setup before the fork
			# in order to avoid a race condition.
			os._exit(1)

	if not isinstance(pid, int):
		raise AssertionError("fork returned non-integer: %s" % (repr(pid),))

	# Add the pid to our local and the global pid lists.
	mypids.append(pid)

	# If we started a tee process the write side of the pipe is no
	# longer needed, so close it.
	if logfile:
		os.close(pw)

	# If the caller wants to handle cleaning up the processes, we tell
	# it about all processes that were created.
	if returnpid:
		return mypids

	# Otherwise we clean them up.
	while mypids:

		# Pull the last reader in the pipe chain. If all processes
		# in the pipe are well behaved, it will die when the process
		# it is reading from dies.
		pid = mypids.pop(0)

		# and wait for it.
		retval = os.waitpid(pid, 0)[1]

		if retval:
			# If it failed, kill off anything else that
			# isn't dead yet.
			for pid in mypids:
				# With waitpid and WNOHANG, only check the
				# first element of the tuple since the second
				# element may vary (bug #337465).
				if os.waitpid(pid, os.WNOHANG)[0] == 0:
					os.kill(pid, signal.SIGTERM)
					os.waitpid(pid, 0)

			# If it got a signal, return the signal that was sent.
			if (retval & 0xff):
				return ((retval & 0xff) << 8)

			# Otherwise, return its exit code.
			return (retval >> 8)

	# Everything succeeded
	return 0
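
Both spawn() variants wrap the fork in try/finally so that, whatever happens in the child before or during _exec, the child leaves through os._exit() and never unwinds into finally blocks set up earlier in the parent's call stack (bug #345289). A stripped-down sketch of that pattern (the exec'd command is a placeholder, not Portage's _exec):

import os

parent_pid = os.getpid()
pid = None
try:
    pid = os.fork()
    if pid == 0:
        # Child: exec a placeholder command; on success this never returns.
        os.execvp("true", ["true"])
finally:
    if pid == 0 or (pid is None and os.getpid() != parent_pid):
        # Only the child reaches this branch; it must not fall back into
        # the parent's call stack, so exit hard.
        os._exit(1)

# Parent continues here and reaps the child.
os.waitpid(pid, 0)
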