Example #1
0
	def _start_post_builddir_lock(self, lock_future=None, start_ipc_daemon=False):
		if lock_future is not None:
			if lock_future is not self._start_future:
				raise AssertionError('lock_future is not self._start_future')
			self._start_future = None
			if lock_future.cancelled():
				self._build_dir = None
				self.cancelled = True
				self._was_cancelled()
				self._async_wait()
				return

			lock_future.result()

		if start_ipc_daemon:
			self.settings['PORTAGE_IPC_DAEMON'] = "1"
			self._start_ipc_daemon()

		if self.fd_pipes is None:
			self.fd_pipes = {}
		null_fd = None
		if 0 not in self.fd_pipes and \
			self.phase not in self._phases_interactive_whitelist and \
			"interactive" not in self.settings.get("PROPERTIES", "").split():
			null_fd = os.open('/dev/null', os.O_RDONLY)
			self.fd_pipes[0] = null_fd

		try:
			SpawnProcess._start(self)
		finally:
			if null_fd is not None:
				os.close(null_fd)
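For reference, a minimal standalone sketch of the same stdin-redirection technique, using subprocess instead of SpawnProcess ('cat' is just an illustrative child command):

import os
import subprocess

# Redirect stdin from /dev/null so a non-interactive child
# cannot block waiting for terminal input.
null_fd = os.open('/dev/null', os.O_RDONLY)
try:
	proc = subprocess.Popen(['cat'], stdin=null_fd)
	proc.wait()  # 'cat' sees immediate EOF and exits
finally:
	os.close(null_fd)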
Example #2
0
	def _unregister(self):

		if self._reg_id is not None:
			self.scheduler.source_remove(self._reg_id)
			self._reg_id = None

		if self.input_fd is not None:
			if isinstance(self.input_fd, int):
				os.close(self.input_fd)
			else:
				self.input_fd.close()
			self.input_fd = None

		if self.stdout_fd is not None:
			os.close(self.stdout_fd)
			self.stdout_fd = None

		if self._log_file is not None:
			self._log_file.close()
			self._log_file = None

		if self._log_file_real is not None:
			# Avoid "ResourceWarning: unclosed file" since python 3.2.
			self._log_file_real.close()
			self._log_file_real = None

		self._registered = False
Example #3
0
	def _start(self):
		in_pr, in_pw = os.pipe()
		out_pr, out_pw = os.pipe()
		self._files = {}
		self._files['pipe_in'] = in_pr
		self._files['pipe_out'] = out_pw

		fcntl.fcntl(in_pr, fcntl.F_SETFL,
			fcntl.fcntl(in_pr, fcntl.F_GETFL) | os.O_NONBLOCK)

		# FD_CLOEXEC is enabled by default in Python >=3.4.
		if sys.hexversion < 0x3040000:
			try:
				fcntl.FD_CLOEXEC
			except AttributeError:
				pass
			else:
				fcntl.fcntl(in_pr, fcntl.F_SETFD,
					fcntl.fcntl(in_pr, fcntl.F_GETFD) | fcntl.FD_CLOEXEC)

		self._reg_id = self.scheduler.io_add_watch(in_pr,
			self.scheduler.IO_IN, self._output_handler)
		self._registered = True
		self._proc = SpawnProcess(
			args=[portage._python_interpreter,
				os.path.join(portage._bin_path, 'lock-helper.py'), self.path],
			env=dict(os.environ, PORTAGE_PYM_PATH=portage._pym_path),
			fd_pipes={0: out_pr, 1: in_pw, 2: sys.__stderr__.fileno()},
			scheduler=self.scheduler)
		self._proc.addExitListener(self._proc_exit)
		self._proc.start()
		os.close(out_pr)
		os.close(in_pw)
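A self-contained sketch of the pipe setup used above: non-blocking reads plus close-on-exec on the read end. On Python >= 3.4, descriptors returned by os.pipe() are already non-inheritable, so the FD_CLOEXEC step mainly matters on older interpreters:

import fcntl
import os

pr, pw = os.pipe()

# Non-blocking mode: os.read() fails with EAGAIN instead of
# blocking when no data is available.
fcntl.fcntl(pr, fcntl.F_SETFL,
	fcntl.fcntl(pr, fcntl.F_GETFL) | os.O_NONBLOCK)

# Close-on-exec: the read end is not leaked into exec'd children.
fcntl.fcntl(pr, fcntl.F_SETFD,
	fcntl.fcntl(pr, fcntl.F_GETFD) | fcntl.FD_CLOEXEC)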
Example #4
0
	def _elog_output_handler(self, fd, event):
		output = None
		if event & self.scheduler.IO_IN:
			try:
				output = os.read(fd, self._bufsize)
			except OSError as e:
				if e.errno not in (errno.EAGAIN, errno.EINTR):
					raise
		if output:
			lines = _unicode_decode(output).split('\n')
			if len(lines) == 1:
				self._buf += lines[0]
			else:
				lines[0] = self._buf + lines[0]
				self._buf = lines.pop()
				# Sink for the reporters' console output; the
				# messages are processed later via the collected keys.
				out = io.StringIO()
				for line in lines:
					funcname, phase, key, msg = line.split(' ', 3)
					self._elog_keys.add(key)
					reporter = getattr(portage.elog.messages, funcname)
					reporter(msg, phase=phase, key=key, out=out)

		if event & self.scheduler.IO_HUP:
			self.scheduler.source_remove(self._elog_reg_id)
			self._elog_reg_id = None
			os.close(self._elog_reader_fd)
			self._elog_reader_fd = None
			return False

		return True
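The split-and-keep-the-tail buffering used above can be tested in isolation; LineBuffer is a hypothetical name for this sketch:

class LineBuffer:
	"""Accumulate chunked reads and yield only complete lines."""
	def __init__(self):
		self._buf = ''

	def feed(self, data):
		lines = (self._buf + data).split('\n')
		# The last element is either '' (data ended with a newline)
		# or an incomplete line; keep it for the next chunk.
		self._buf = lines.pop()
		return lines

buf = LineBuffer()
assert buf.feed('a b\nc ') == ['a b']
assert buf.feed('d\n') == ['c d']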
Example #5
0
	def _testPipeReader(self, master_fd, slave_fd, test_string):
		"""
		Use a poll loop to read data from a pipe and assert that
		the data written to the pipe is identical to the data
		read from the pipe.
		"""

		# WARNING: It is very important to use unbuffered mode here,
		# in order to avoid issue 5380 with python3.
		master_file = os.fdopen(master_fd, 'rb', 0)
		scheduler = global_event_loop()

		consumer = PipeReader(
			input_files={"producer" : master_file},
			_use_array=self._use_array,
			scheduler=scheduler)

		producer = PopenProcess(
			pipe_reader=consumer,
			proc=subprocess.Popen(["bash", "-c", self._echo_cmd % test_string],
				stdout=slave_fd),
			scheduler=scheduler)

		producer.start()
		os.close(slave_fd)
		producer.wait()
		consumer.wait()

		self.assertEqual(producer.returncode, os.EX_OK)
		self.assertEqual(consumer.returncode, os.EX_OK)

		return consumer.getvalue().decode('ascii', 'replace')
Example #6
0
def _setup_pipes(fd_pipes, close_fds=True):
	"""Setup pipes for a forked process.

	WARNING: When not followed by exec, the close_fds behavior
	can trigger interference from destructors that close file
	descriptors. This interference happens when the garbage
	collector intermittently executes such destructors after their
	corresponding file descriptors have been re-used, leading
	to intermittent "[Errno 9] Bad file descriptor" exceptions in
	forked processes. This problem has been observed with PyPy 1.8,
	and also with CPython under some circumstances (as triggered
	by xmpppy in bug #374335). In order to close a safe subset of
	file descriptors, see portage.locks._close_fds().
	"""
	my_fds = {}
	# To protect from cases where direct assignment could
	# clobber needed fds ({1:2, 2:1}) we first dupe the fds
	# into unused fds.
	for fd in fd_pipes:
		my_fds[fd] = os.dup(fd_pipes[fd])
	# Then assign them to what they should be.
	for fd in my_fds:
		os.dup2(my_fds[fd], fd)

	if close_fds:
		# Then close _all_ fds that haven't been explicitly
		# requested to be kept open.
		for fd in get_open_fds():
			if fd not in my_fds:
				try:
					os.close(fd)
				except OSError:
					pass
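The clobber-protection idea is easiest to see with the {1: 2, 2: 1} swap mentioned in the comment; a standalone demo (remap_fds is a name invented for this sketch):

import os

def remap_fds(fd_pipes):
	"""Clobber-safe remapping, as in _setup_pipes above."""
	# Naive dup2(2, 1) followed by dup2(1, 2) would leave both
	# descriptors pointing at the original stderr. Duping every
	# source to a fresh descriptor first preserves all targets.
	my_fds = {}
	for fd, source in fd_pipes.items():
		my_fds[fd] = os.dup(source)  # dup() returns an unused fd
	for fd, tmp in my_fds.items():
		os.dup2(tmp, fd)
	# Drop the temporary duplicates (unless dup() happened to land
	# exactly on the target descriptor, making dup2() a no-op).
	for fd, tmp in my_fds.items():
		if tmp != fd:
			os.close(tmp)

remap_fds({1: 2, 2: 1})  # swap stdout and stderr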
Example #7
0
	def _unregister(self):
		"""
		Unregister from the scheduler and close open files.
		"""

		if not self.unmerge:
			# Populate the vardbapi cache for the new package
			# while its inodes are still hot.
			try:
				self.vartree.dbapi.aux_get(self.settings.mycpv, ["EAPI"])
			except KeyError:
				pass

		self._unlock_vdb()
		if self._elog_reg_id is not None:
			self.scheduler.source_remove(self._elog_reg_id)
			self._elog_reg_id = None
		if self._elog_reader_fd is not None:
			os.close(self._elog_reader_fd)
			self._elog_reader_fd = None
		if self._elog_keys is not None:
			for key in self._elog_keys:
				portage.elog.elog_process(key, self.settings,
					phasefilter=("prerm", "postrm"))
			self._elog_keys = None

		super(MergeProcess, self)._unregister()
Example #8
0
def perform_checksum(filename, hashname="MD5", calc_prelink=0):
	"""
	Run a specific checksum against a file. The filename can
	be either unicode or an encoded byte string. If filename
	is unicode then a UnicodeDecodeError will be raised if
	necessary.

	@param filename: File to run the checksum against
	@type filename: String
	@param hashname: The type of hash function to run
	@type hashname: String
	@param calc_prelink: Whether or not to reverse prelink before running the checksum
	@type calc_prelink: Integer
	@rtype: Tuple
	@return: The hash and size of the data
	"""
	global prelink_capable
	# Make sure filename is encoded with the correct encoding before
	# it is passed to spawn (for prelink) and/or the hash function.
	filename = _unicode_encode(filename,
		encoding=_encodings['fs'], errors='strict')
	myfilename = filename
	prelink_tmpfile = None
	try:
		if (calc_prelink and prelink_capable and
		    is_prelinkable_elf(filename)):
			# Create non-prelinked temporary file to checksum.
			# Files rejected by prelink are summed in place.
			try:
				tmpfile_fd, prelink_tmpfile = tempfile.mkstemp()
				try:
					retval = portage.process.spawn([PRELINK_BINARY,
						"--verify", filename], fd_pipes={1:tmpfile_fd})
				finally:
					os.close(tmpfile_fd)
				if retval == os.EX_OK:
					myfilename = prelink_tmpfile
			except portage.exception.CommandNotFound:
				# This happens during uninstallation of prelink.
				prelink_capable = False
		try:
			if hashname not in hashfunc_map:
				raise portage.exception.DigestException(hashname + \
					" hash function not available (needs dev-python/pycrypto)")
			myhash, mysize = hashfunc_map[hashname](myfilename)
		except (OSError, IOError) as e:
			if e.errno in (errno.ENOENT, errno.ESTALE):
				raise portage.exception.FileNotFound(myfilename)
			elif e.errno == portage.exception.PermissionDenied.errno:
				raise portage.exception.PermissionDenied(myfilename)
			raise
		return myhash, mysize
	finally:
		if prelink_tmpfile:
			try:
				os.unlink(prelink_tmpfile)
			except OSError as e:
				if e.errno != errno.ENOENT:
					raise
				del e
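The hashfunc_map entries referenced above ultimately hash file contents and report the size; a minimal sketch of such a function built on hashlib (_checksum_file and the chunk size are illustrative, not Portage's implementation):

import hashlib

def _checksum_file(path, hashname='MD5', bufsize=32768):
	"""Return (hexdigest, size) for the file at path."""
	hashobj = hashlib.new(hashname)
	size = 0
	with open(path, 'rb') as f:
		while True:
			data = f.read(bufsize)
			if not data:
				break
			hashobj.update(data)
			size += len(data)
	return hashobj.hexdigest(), size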
Example #9
0
	def _testPipeLogger(self, test_string):

		producer = PopenProcess(proc=subprocess.Popen(
			["bash", "-c", self._echo_cmd % test_string],
			stdout=subprocess.PIPE, stderr=subprocess.STDOUT),
			scheduler=global_event_loop())

		fd, log_file_path = tempfile.mkstemp()
		try:

			consumer = PipeLogger(background=True,
				input_fd=os.dup(producer.proc.stdout.fileno()),
				log_file_path=log_file_path)

			# Close the stdout pipe, since we duplicated it, and it
			# must be closed in order to avoid a ResourceWarning.
			producer.proc.stdout.close()
			producer.pipe_reader = consumer

			producer.start()
			producer.wait()

			self.assertEqual(producer.returncode, os.EX_OK)
			self.assertEqual(consumer.returncode, os.EX_OK)

			with open(log_file_path, 'rb') as f:
				content = f.read()

		finally:
			os.close(fd)
			os.unlink(log_file_path)

		return content.decode('ascii', 'replace')
Example #10
0
	def _testPipeLogger(self, test_string):

		producer = PopenProcess(proc=subprocess.Popen(
			["bash", "-c", self._echo_cmd % test_string],
			stdout=subprocess.PIPE, stderr=subprocess.STDOUT),
			scheduler=global_event_loop())

		fd, log_file_path = tempfile.mkstemp()
		try:

			consumer = PipeLogger(background=True,
				input_fd=producer.proc.stdout,
				log_file_path=log_file_path)

			producer.pipe_reader = consumer

			producer.start()
			producer.wait()

			self.assertEqual(producer.returncode, os.EX_OK)
			self.assertEqual(consumer.returncode, os.EX_OK)

			with open(log_file_path, 'rb') as f:
				content = f.read()

		finally:
			os.close(fd)
			os.unlink(log_file_path)

		return content.decode('ascii', 'replace')
Example #11
0
	def testLogfile(self):
		logfile = None
		try:
			fd, logfile = tempfile.mkstemp()
			os.close(fd)
			null_fd = os.open('/dev/null', os.O_RDWR)
			test_string = 2 * "blah blah blah\n"
			task_scheduler = TaskScheduler()
			proc = SpawnProcess(
				args=[BASH_BINARY, "-c",
				"echo -n '%s'" % test_string],
				env={}, fd_pipes={0:sys.stdin.fileno(), 1:null_fd, 2:null_fd},
				scheduler=task_scheduler.sched_iface,
				logfile=logfile)
			task_scheduler.add(proc)
			task_scheduler.run()
			os.close(null_fd)
			f = codecs.open(_unicode_encode(logfile,
				encoding=_encodings['fs'], errors='strict'),
				mode='r', encoding=_encodings['content'], errors='strict')
			log_content = f.read()
			f.close()
			# When logging passes through a pty, this comparison will fail
			# unless the oflag terminal attributes have the termios.OPOST
			# bit disabled. Otherwise, transformations such as \n -> \r\n
			# may occur.
			self.assertEqual(test_string, log_content)
		finally:
			if logfile:
				try:
					os.unlink(logfile)
				except EnvironmentError as e:
					if e.errno != errno.ENOENT:
						raise
					del e
Example #12
0
	def _spawn(self, args, fd_pipes=None, **kwargs):
		"""
		Fork a subprocess, apply local settings, and call fetch().
		"""

		parent_pid = os.getpid()
		pid = None
		try:
			pid = os.fork()

			if pid != 0:
				if not isinstance(pid, int):
					raise AssertionError(
						"fork returned non-integer: %s" % (repr(pid),))
				return [pid]

			rval = 1
			try:

				# Use default signal handlers in order to avoid problems
				# killing subprocesses as reported in bug #353239.
				signal.signal(signal.SIGINT, signal.SIG_DFL)
				signal.signal(signal.SIGTERM, signal.SIG_DFL)

				# Unregister SIGCHLD handler and wakeup_fd for the parent
				# process's event loop (bug 655656).
				signal.signal(signal.SIGCHLD, signal.SIG_DFL)
				try:
					wakeup_fd = signal.set_wakeup_fd(-1)
					if wakeup_fd > 0:
						os.close(wakeup_fd)
				except (ValueError, OSError):
					pass

				portage.locks._close_fds()
				# We don't exec, so use close_fds=False
				# (see _setup_pipes docstring).
				portage.process._setup_pipes(fd_pipes, close_fds=False)

				rval = self._run()
			except SystemExit:
				raise
			except:
				traceback.print_exc()
				# os._exit() skips stderr flush!
				sys.stderr.flush()
			finally:
				os._exit(rval)

		finally:
			if pid == 0 or (pid is None and os.getpid() != parent_pid):
				# Call os._exit() from a finally block in order
				# to suppress any finally blocks from earlier
				# in the call stack (see bug #345289). This
				# finally block has to be setup before the fork
				# in order to avoid a race condition.
				os._exit(1)
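The nested finally pattern above, reduced to a skeleton (fork_and_call is a name invented for this sketch):

import os

def fork_and_call(child_fn):
	"""Fork; run child_fn in the child, which never returns."""
	parent_pid = os.getpid()
	pid = None
	try:
		pid = os.fork()
		if pid != 0:
			return pid  # parent: return the child's pid
		rval = 1
		try:
			rval = child_fn()
		finally:
			# os._exit() skips atexit handlers and outer finally
			# blocks, so the child can never unwind into the
			# parent's call stack.
			os._exit(rval)
	finally:
		# Guard for the case where fork() raised after the child
		# already existed: if this process is the child, terminate.
		if pid == 0 or (pid is None and os.getpid() != parent_pid):
			os._exit(1)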
Example #13
0
def _close_fds():
	"""
	This is intended to be called after a fork, in order to close file
	descriptors for locks held by the parent process. This can be called
	safely after a fork without exec, unlike the _setup_pipes close_fds
	behavior.
	"""
	while _open_fds:
		os.close(_open_fds.pop())
Example #14
0
	def _check_sig_key(self):
		null_fd = os.open('/dev/null', os.O_RDONLY)
		popen_proc = PopenProcess(proc=subprocess.Popen(
			["gpg", "--verify", self._manifest_path],
			stdin=null_fd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT),
			pipe_reader=PipeReader())
		os.close(null_fd)
		popen_proc.pipe_reader.input_files = {
			"producer" : popen_proc.proc.stdout}
		self._start_task(popen_proc, self._check_sig_key_exit)
Example #15
0
	def _unregister(self):
		self._registered = False

		if self._files is not None:
			try:
				pipe_in = self._files.pop('pipe_in')
			except KeyError:
				pass
			else:
				self.scheduler.remove_reader(pipe_in)
				os.close(pipe_in)
Example #16
0
	def _test_lock(fd, lock_path):
		os.close(fd)
		try:
			with open(lock_path, 'a') as f:
				fcntl.lockf(f.fileno(), fcntl.LOCK_EX|fcntl.LOCK_NB)
		except EnvironmentError as e:
			if e.errno == errno.EAGAIN:
				# Parent process holds lock, as expected.
				sys.exit(0)

		# Something went wrong.
		sys.exit(1)
Example #17
0
	def _reopen_input(self):
		"""
		Re-open the input stream, in order to suppress
		POLLHUP events (bug #339976).
		"""
		self.scheduler.unregister(self._reg_id)
		os.close(self._files.pipe_in)
		self._files.pipe_in = \
			os.open(self.input_fifo, os.O_RDONLY|os.O_NONBLOCK)
		self._reg_id = self.scheduler.register(
			self._files.pipe_in,
			self._registered_events, self._input_handler)
Example #18
0
	def _start(self):
		pr, pw = os.pipe()
		self.fd_pipes = {}
		# Map pw to itself so the forked child inherits the write
		# end at the same descriptor number.
		self.fd_pipes[pw] = pw
		self._digest_pw = pw
		self._digest_pipe_reader = PipeReader(
			input_files={"input":pr},
			scheduler=self.scheduler)
		self._digest_pipe_reader.addExitListener(self._digest_pipe_reader_exit)
		self._digest_pipe_reader.start()
		ForkProcess._start(self)
		os.close(pw)
Example #19
0
def get_commit_message_with_editor(editor, message=None):
    """
	Execute editor with a temporary file as it's argument
	and return the file content afterwards.

	@param editor: An EDITOR value from the environment
	@type: string
	@param message: An iterable of lines to show in the editor.
	@type: iterable
	@rtype: string or None
	@return: A string on success or None if an error occurs.
	"""
    fd, filename = mkstemp()
    try:
        os.write(
            fd,
            _unicode_encode(
                _(
                    "\n# Please enter the commit message "
                    + "for your changes.\n# (Comment lines starting "
                    + "with '#' will not be included)\n"
                ),
                encoding=_encodings["content"],
                errors="backslashreplace",
            ),
        )
        if message:
            os.write(fd, b"#\n")
            for line in message:
                os.write(fd, _unicode_encode("#" + line, encoding=_encodings["content"], errors="backslashreplace"))
        os.close(fd)
        retval = os.system(editor + " '%s'" % filename)
        if not (os.WIFEXITED(retval) and os.WEXITSTATUS(retval) == os.EX_OK):
            return None
        try:
            with io.open(
                _unicode_encode(filename, encoding=_encodings["fs"], errors="strict"),
                mode="r",
                encoding=_encodings["content"],
                errors="replace",
            ) as f:
                mylines = f.readlines()
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
            del e
            return None
        return "".join(line for line in mylines if not line.startswith("#"))
    finally:
        try:
            os.unlink(filename)
        except OSError:
            pass
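The same edit-a-tempfile flow as a hedged standalone sketch, using subprocess.call in place of os.system (edit_text is an invented name):

import os
import subprocess
import tempfile

def edit_text(editor, initial=''):
	"""Let the user edit initial text; return the result or None."""
	fd, path = tempfile.mkstemp()
	try:
		os.write(fd, initial.encode('utf-8'))
		os.close(fd)
		# shell=True because EDITOR may contain arguments
		# (e.g. "vim -n"); the path is quoted as in the original.
		if subprocess.call("%s '%s'" % (editor, path), shell=True) != 0:
			return None
		with open(path, encoding='utf-8') as f:
			return f.read()
	finally:
		os.unlink(path)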
Example #20
0
	def _unregister(self):
		"""
		Unregister from the scheduler and close open files.
		"""

		self._registered = False

		if self._files is not None:
			for f in self._files.values():
				self.scheduler.remove_reader(f)
				os.close(f)
			self._files = None
Example #21
0
	def _unlock(self):
		if self._proc is None:
			raise AssertionError('not locked')
		if not self._acquired:
			raise AssertionError('lock not acquired yet')
		if self.returncode != os.EX_OK:
			raise AssertionError("lock process failed with returncode %s" \
				% (self.returncode,))
		if self._unlock_future is not None:
			raise AssertionError("already unlocked")
		self._unlock_future = self.scheduler.create_future()
		os.write(self._files['pipe_out'], b'\0')
		os.close(self._files['pipe_out'])
		self._files = None
Example #22
0
	def _unregister(self):
		self._registered = False

		if self._reg_id is not None:
			self.scheduler.unregister(self._reg_id)
			self._reg_id = None

		if self._files is not None:
			try:
				pipe_in = self._files.pop('pipe_in')
			except KeyError:
				pass
			else:
				os.close(pipe_in)
Example #23
0
	def unlock(self):
		if self._proc is None:
			raise AssertionError('not locked')
		if self.returncode is None:
			raise AssertionError('lock not acquired yet')
		if self.returncode != os.EX_OK:
			raise AssertionError("lock process failed with returncode %s" \
				% (self.returncode,))
		self._unlocked = True
		os.write(self._files['pipe_out'], b'\0')
		os.close(self._files['pipe_out'])
		self._files = None
		self._proc.wait()
		self._proc = None
Example #24
0
	def _unregister(self):
		"""
		Unregister from the scheduler and close open files.
		"""

		self._registered = False

		if self._reg_id is not None:
			self.scheduler.source_remove(self._reg_id)
			self._reg_id = None

		if self._files is not None:
			for f in self._files.values():
				os.close(f)
			self._files = None
Example #25
0
	def _unregister(self):
		self._registered = False

		if self._thread is not None:
			self._thread.join()
			self._thread = None

		if self._reg_id is not None:
			self.scheduler.unregister(self._reg_id)
			self._reg_id = None

		if self._files is not None:
			for f in self._files.values():
				os.close(f)
			self._files = None
Example #26
0
def hardlink_lockfile(lockfilename, max_wait=14400):
	"""Does the NFS, hardlink shuffle to ensure locking on the disk.
	We create a PRIVATE lockfile, that is just a placeholder on the disk.
	Then we HARDLINK that private file to the real lockfile name.
	If our private file ends up with two links, then we have the lock. :)
	Otherwise we lather, rinse, and repeat.
	We default to a 4 hour timeout.
	"""

	start_time = time.time()
	myhardlock = hardlock_name(lockfilename)
	reported_waiting = False
	
	while time.time() < start_time + max_wait:
		# We only need it to exist.
		myfd = os.open(myhardlock, os.O_CREAT | os.O_RDWR, 0o660)
		os.close(myfd)
	
		if not os.path.exists(myhardlock):
			raise FileNotFound(
				_("Created lockfile is missing: %(filename)s") % \
				{"filename" : myhardlock})

		try:
			os.link(myhardlock, lockfilename)
		except OSError:
			pass

		if hardlink_is_mine(myhardlock, lockfilename):
			# We have the lock.
			if reported_waiting:
				writemsg("\n", noiselevel=-1)
			return True

		if reported_waiting:
			writemsg(".", noiselevel=-1)
		else:
			reported_waiting = True
			from portage.const import PORTAGE_BIN_PATH
			msg = _("\nWaiting on (hardlink) lockfile: (one '.' per 3 seconds)\n"
				"%(bin_path)s/clean_locks can fix stuck locks.\n"
				"Lockfile: %(lockfilename)s\n") % \
				{"bin_path": PORTAGE_BIN_PATH, "lockfilename": lockfilename}
			writemsg(msg, noiselevel=-1)
		time.sleep(3)
	
	os.unlink(myhardlock)
	return False
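The core of the hardlink shuffle, a single non-blocking acquisition attempt, fits in a few lines (the function name is illustrative, and real code must also handle stale locks and retries as above):

import os

def try_hardlink_lock(lockfilename, myhardlock):
	"""One hardlink-lock attempt; True if the lock was acquired."""
	fd = os.open(myhardlock, os.O_CREAT | os.O_RDWR, 0o660)
	os.close(fd)
	try:
		# link() is atomic, even on NFS, which is the whole point.
		os.link(myhardlock, lockfilename)
	except OSError:
		pass  # another process may hold the lock
	# If the link succeeded, our private file now has two links.
	return os.stat(myhardlock).st_nlink == 2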
Example #27
0
	def _unregister(self):
		"""
		Unregister from the scheduler and close open files.
		"""

		self._registered = False

		if self.input_files is not None:
			for f in self.input_files.values():
				if isinstance(f, int):
					self.scheduler.remove_reader(f)
					os.close(f)
				else:
					self.scheduler.remove_reader(f.fileno())
					f.close()
			self.input_files = None
Example #28
0
	def _testPipeReader(self, test_string):
		"""
		Use a poll loop to read data from a pipe and assert that
		the data written to the pipe is identical to the data
		read from the pipe.
		"""

		if self._use_pty:
			got_pty, master_fd, slave_fd = _create_pty_or_pipe()
			if not got_pty:
				os.close(slave_fd)
				os.close(master_fd)
				skip_reason = "pty not acquired"
				self.portage_skip = skip_reason
				self.fail(skip_reason)
				return
		else:
			master_fd, slave_fd = os.pipe()

		# WARNING: It is very important to use unbuffered mode here,
		# in order to avoid issue 5380 with python3.
		master_file = os.fdopen(master_fd, 'rb', 0)
		slave_file = os.fdopen(slave_fd, 'wb', 0)
		task_scheduler = TaskScheduler(max_jobs=2)
		producer = SpawnProcess(
			args=["bash", "-c", self._echo_cmd % test_string],
			env=os.environ, fd_pipes={1:slave_fd},
			scheduler=task_scheduler.sched_iface)
		task_scheduler.add(producer)
		slave_file.close()

		consumer = PipeReader(
			input_files={"producer" : master_file},
			scheduler=task_scheduler.sched_iface, _use_array=self._use_array)

		task_scheduler.add(consumer)

		# This will ensure that both tasks have exited, which
		# is necessary to avoid "ResourceWarning: unclosed file"
		# warnings since Python 3.2 (and also ensures that we
		# don't leave any zombie child processes).
		task_scheduler.run()
		self.assertEqual(producer.returncode, os.EX_OK)
		self.assertEqual(consumer.returncode, os.EX_OK)

		return consumer.getvalue().decode('ascii', 'replace')
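For context, the pty-or-pipe choice made by _create_pty_or_pipe can be approximated with the standard pty module; this is a sketch under that assumption, not Portage's helper (which also handles platforms with broken pty support):

import os
import pty

def create_pty_or_pipe(use_pty):
	"""Return (got_pty, master_fd, slave_fd)."""
	if use_pty:
		try:
			master_fd, slave_fd = pty.openpty()
			return True, master_fd, slave_fd
		except OSError:
			pass  # fall back to a plain pipe
	master_fd, slave_fd = os.pipe()
	return False, master_fd, slave_fd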
Example #29
0
	def _unregister(self):
		"""
		Unregister from the scheduler and close open files.
		"""

		self._registered = False

		if self._reg_id is not None:
			self.scheduler.unregister(self._reg_id)
			self._reg_id = None

		if self._files is not None:
			for f in self._files.values():
				if isinstance(f, int):
					os.close(f)
				else:
					f.close()
			self._files = None
Example #30
0
def _get_lock_fn():
	"""
	Returns fcntl.lockf if proven to work, and otherwise returns fcntl.flock.
	On some platforms fcntl.lockf is known to be broken.
	"""
	global _lock_fn
	if _lock_fn is not None:
		return _lock_fn

	def _test_lock(fd, lock_path):
		os.close(fd)
		try:
			with open(lock_path, 'a') as f:
				fcntl.lockf(f.fileno(), fcntl.LOCK_EX|fcntl.LOCK_NB)
		except EnvironmentError as e:
			if e.errno == errno.EAGAIN:
				# Parent process holds lock, as expected.
				sys.exit(0)

		# Something went wrong.
		sys.exit(1)

	fd, lock_path = tempfile.mkstemp()
	try:
		try:
			fcntl.lockf(fd, fcntl.LOCK_EX)
		except EnvironmentError:
			pass
		else:
			proc = multiprocessing.Process(target=_test_lock,
				args=(fd, lock_path))
			proc.start()
			proc.join()
			if proc.exitcode == os.EX_OK:
				# Use fcntl.lockf because the test passed.
				_lock_fn = fcntl.lockf
				return _lock_fn
	finally:
		os.close(fd)
		os.unlink(lock_path)

	# Fall back to fcntl.flock.
	_lock_fn = fcntl.flock
	return _lock_fn
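For illustration, typical use of the returned primitive (the lock path here is hypothetical); fcntl.lockf and fcntl.flock accept the same (fd, operation) call shape, which is what makes the fallback transparent:

import fcntl

lock_fn = _get_lock_fn()
with open('/tmp/example.lock', 'a') as f:
	lock_fn(f.fileno(), fcntl.LOCK_EX)  # block until acquired
	try:
		pass  # critical section
	finally:
		lock_fn(f.fileno(), fcntl.LOCK_UN)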