def _testAsynchronousLockWaitCancel(self):
    """Cancel a lock that is waiting behind a held lock and verify
    that wait() and returncode both report a non-success result."""
    loop = global_event_loop()
    basedir = tempfile.mkdtemp()
    try:
        lock_path = os.path.join(basedir, "lock_me")

        # First locker acquires the lock immediately.
        holder = AsynchronousLock(path=lock_path, scheduler=loop)
        holder.start()
        self.assertEqual(holder.wait(), os.EX_OK)
        self.assertEqual(holder.returncode, os.EX_OK)

        # Second locker is forced into a subprocess so it really
        # blocks behind the holder.
        waiter = AsynchronousLock(path=lock_path, scheduler=loop,
            _force_async=True, _force_process=True)
        waiter.start()

        # waiter must still be pending while holder owns the lock
        self.assertEqual(waiter.poll(), None)
        self.assertEqual(waiter.returncode, None)

        # Cancel the waiter and then check wait() and returncode results.
        waiter.cancel()
        self.assertFalse(waiter.wait() == os.EX_OK)
        self.assertFalse(waiter.returncode == os.EX_OK)
        self.assertFalse(waiter.returncode is None)

        loop.run_until_complete(holder.async_unlock())
    finally:
        shutil.rmtree(basedir)
def _testAsynchronousLockWait(self):
    """A second (forced-subprocess) locker completes only after the
    first holder releases the lock asynchronously."""
    loop = global_event_loop()
    basedir = tempfile.mkdtemp()
    try:
        lock_path = os.path.join(basedir, 'lock_me')

        holder = AsynchronousLock(path=lock_path, scheduler=loop)
        holder.start()
        self.assertEqual(holder.wait(), os.EX_OK)
        self.assertEqual(holder.returncode, os.EX_OK)

        # The waiter requires _force_async=True since the
        # portage.locks module is not designed to work as intended
        # here if the same process tries to lock the same file more
        # than one time concurrently.
        waiter = AsynchronousLock(path=lock_path, scheduler=loop,
            _force_async=True, _force_process=True)
        waiter.start()

        # waiter should still be blocked behind the holder
        self.assertEqual(waiter.poll(), None)
        self.assertEqual(waiter.returncode, None)

        loop.run_until_complete(holder.async_unlock())

        # Now the waiter can acquire and then release the lock.
        self.assertEqual(waiter.wait(), os.EX_OK)
        self.assertEqual(waiter.returncode, os.EX_OK)
        loop.run_until_complete(waiter.async_unlock())
    finally:
        shutil.rmtree(basedir)
def _testAsynchronousLockWait(self):
    """A forced-subprocess waiter acquires the lock only after the
    first holder calls unlock() synchronously."""
    loop = global_event_loop()
    basedir = tempfile.mkdtemp()
    try:
        lock_path = os.path.join(basedir, 'lock_me')

        holder = AsynchronousLock(path=lock_path, scheduler=loop)
        holder.start()
        self.assertEqual(holder.wait(), os.EX_OK)
        self.assertEqual(holder.returncode, os.EX_OK)

        # The waiter requires _force_async=True since the
        # portage.locks module is not designed to work as intended
        # here if the same process tries to lock the same file more
        # than one time concurrently.
        waiter = AsynchronousLock(path=lock_path, scheduler=loop,
            _force_async=True, _force_process=True)
        waiter.start()

        # waiter should still be blocked behind the holder
        self.assertEqual(waiter.poll(), None)
        self.assertEqual(waiter.returncode, None)

        holder.unlock()

        self.assertEqual(waiter.wait(), os.EX_OK)
        self.assertEqual(waiter.returncode, os.EX_OK)
        waiter.unlock()
    finally:
        shutil.rmtree(basedir)
def _testAsynchronousLockWaitKill(self):
    """Kill the subprocess of a waiting locker and verify that the
    failure is reflected by wait() and returncode."""
    loop = global_event_loop()
    basedir = tempfile.mkdtemp()
    try:
        lock_path = os.path.join(basedir, 'lock_me')

        holder = AsynchronousLock(path=lock_path, scheduler=loop)
        holder.start()
        self.assertEqual(holder.wait(), os.EX_OK)
        self.assertEqual(holder.returncode, os.EX_OK)

        waiter = AsynchronousLock(path=lock_path, scheduler=loop,
            _force_async=True, _force_process=True)
        waiter.start()

        # waiter should be blocked behind the holder
        self.assertEqual(waiter.poll(), None)
        self.assertEqual(waiter.returncode, None)

        # Kill the waiter's process and then check wait() and
        # returncode results. This is intended to simulate a SIGINT
        # sent via the controlling tty.
        self.assertTrue(waiter._imp is not None)
        self.assertTrue(waiter._imp._proc is not None)
        self.assertTrue(waiter._imp._proc.pid is not None)
        waiter._imp._kill_test = True
        os.kill(waiter._imp._proc.pid, signal.SIGTERM)

        self.assertFalse(waiter.wait() == os.EX_OK)
        self.assertFalse(waiter.returncode == os.EX_OK)
        self.assertFalse(waiter.returncode is None)

        loop.run_until_complete(holder.async_unlock())
    finally:
        shutil.rmtree(basedir)
def _testAsynchronousLockWaitKill(self):
    """Variant using synchronous unlock(): kill the waiting locker's
    subprocess and verify failure is reported."""
    loop = global_event_loop()
    basedir = tempfile.mkdtemp()
    try:
        lock_path = os.path.join(basedir, 'lock_me')

        holder = AsynchronousLock(path=lock_path, scheduler=loop)
        holder.start()
        self.assertEqual(holder.wait(), os.EX_OK)
        self.assertEqual(holder.returncode, os.EX_OK)

        waiter = AsynchronousLock(path=lock_path, scheduler=loop,
            _force_async=True, _force_process=True)
        waiter.start()

        # waiter should be blocked behind the holder
        self.assertEqual(waiter.poll(), None)
        self.assertEqual(waiter.returncode, None)

        # Kill the waiter's process and then check wait() and
        # returncode results. This is intended to simulate a SIGINT
        # sent via the controlling tty.
        self.assertTrue(waiter._imp is not None)
        self.assertTrue(waiter._imp._proc is not None)
        self.assertTrue(waiter._imp._proc.pid is not None)
        waiter._imp._kill_test = True
        os.kill(waiter._imp._proc.pid, signal.SIGTERM)

        self.assertFalse(waiter.wait() == os.EX_OK)
        self.assertFalse(waiter.returncode == os.EX_OK)
        self.assertFalse(waiter.returncode is None)

        holder.unlock()
    finally:
        shutil.rmtree(basedir)
def _testAsynchronousLock(self):
    """Exercise lock/unlock for thread- and process-backed locks,
    crossing async/sync acquisition with async/sync release."""
    loop = global_event_loop()
    basedir = tempfile.mkdtemp()
    try:
        lock_path = os.path.join(basedir, 'lock_me')

        def acquire_and_release(use_async_unlock, **kwargs):
            # Take the lock, assert success, then release it via
            # the requested unlock path.
            async_lock = AsynchronousLock(path=lock_path,
                scheduler=loop, **kwargs)
            async_lock.start()
            self.assertEqual(async_lock.wait(), os.EX_OK)
            self.assertEqual(async_lock.returncode, os.EX_OK)
            if use_async_unlock:
                loop.run_until_complete(async_lock.async_unlock())
            else:
                async_lock.unlock()

        for force_async, async_unlock in itertools.product(
                (True, False), repeat=2):
            # Thread backend, with and without the dummy-threading
            # fallback forced.
            for force_dummy in (True, False):
                acquire_and_release(async_unlock,
                    _force_async=force_async,
                    _force_thread=True,
                    _force_dummy=force_dummy)
            # Process backend.
            acquire_and_release(async_unlock,
                _force_async=force_async,
                _force_process=True)
    finally:
        shutil.rmtree(basedir)
def testAsynchronousLock(self):
    """Exercise lock/unlock for thread- and process-backed locks
    using the PollScheduler interface."""
    sched = PollScheduler().sched_iface
    basedir = tempfile.mkdtemp()
    try:
        lock_path = os.path.join(basedir, 'lock_me')

        def check(**kwargs):
            # Acquire the lock, verify success, then release it.
            async_lock = AsynchronousLock(path=lock_path,
                scheduler=sched, **kwargs)
            async_lock.start()
            self.assertEqual(async_lock.wait(), os.EX_OK)
            self.assertEqual(async_lock.returncode, os.EX_OK)
            async_lock.unlock()

        for force_async in (True, False):
            # Thread backend, with and without dummy threading forced.
            for force_dummy in (True, False):
                check(_force_async=force_async,
                    _force_thread=True,
                    _force_dummy=force_dummy)
            # Process backend.
            check(_force_async=force_async, _force_process=True)
    finally:
        shutil.rmtree(basedir)
def _testAsynchronousLock(self):
    """Exercise thread- and process-backed locks with async unlock;
    the dummy-threading mode is only tried when the dummy_threading
    module is available."""
    loop = global_event_loop()
    basedir = tempfile.mkdtemp()
    try:
        lock_path = os.path.join(basedir, 'lock_me')
        dummy_modes = ((False, ) if dummy_threading is None
            else (True, False))

        def take_and_release(**kwargs):
            # Acquire, verify success, then release asynchronously.
            async_lock = AsynchronousLock(path=lock_path,
                scheduler=loop, **kwargs)
            async_lock.start()
            self.assertEqual(async_lock.wait(), os.EX_OK)
            self.assertEqual(async_lock.returncode, os.EX_OK)
            loop.run_until_complete(async_lock.async_unlock())

        for force_async in (True, False):
            for force_dummy in dummy_modes:
                take_and_release(_force_async=force_async,
                    _force_thread=True,
                    _force_dummy=force_dummy)
            take_and_release(_force_async=force_async,
                _force_process=True)
    finally:
        shutil.rmtree(basedir)
def builddir_unlocked(future):
    # Completion callback for the builddir lock's async_unlock()
    # future.
    # NOTE(review): ``result``, ``self`` and ``catdir_locked`` are
    # free variables from an enclosing scope that is not visible
    # here (this looks like a nested callback inside an
    # async_unlock implementation) -- confirm against the caller.
    if future.exception() is not None:
        # Propagate the unlock failure to the overall result future.
        result.set_exception(future.exception())
    else:
        # Builddir lock released: clear the lock state, then take
        # the category-directory lock (handled by catdir_locked).
        self._lock_obj = None
        self.locked = False
        self.settings.pop('PORTAGE_BUILDDIR_LOCKED', None)
        catdir_lock = AsynchronousLock(path=self._catdir,
            scheduler=self.scheduler)
        catdir_lock.addExitListener(catdir_locked)
        catdir_lock.start()
def lock(self): """ This raises an AlreadyLocked exception if lock() is called while a lock is already held. In order to avoid this, call unlock() or check whether the "locked" attribute is True or False before calling lock(). """ if self._lock_obj is not None: raise self.AlreadyLocked((self._lock_obj, )) dir_path = self.settings.get('PORTAGE_BUILDDIR') if not dir_path: raise AssertionError('PORTAGE_BUILDDIR is unset') catdir = os.path.dirname(dir_path) self._catdir = catdir try: portage.util.ensure_dirs(os.path.dirname(catdir), gid=portage.portage_gid, mode=0o70, mask=0) except PortageException: if not os.path.isdir(os.path.dirname(catdir)): raise catdir_lock = AsynchronousLock(path=catdir, scheduler=self.scheduler) catdir_lock.start() catdir_lock.wait() self._assert_lock(catdir_lock) try: try: portage.util.ensure_dirs(catdir, gid=portage.portage_gid, mode=0o70, mask=0) except PortageException: if not os.path.isdir(catdir): raise builddir_lock = AsynchronousLock(path=dir_path, scheduler=self.scheduler) builddir_lock.start() builddir_lock.wait() self._assert_lock(builddir_lock) self._lock_obj = builddir_lock self.settings['PORTAGE_BUILDIR_LOCKED'] = '1' finally: self.locked = self._lock_obj is not None catdir_lock.unlock()
def lock(self): """ This raises an AlreadyLocked exception if lock() is called while a lock is already held. In order to avoid this, call unlock() or check whether the "locked" attribute is True or False before calling lock(). """ if self._lock_obj is not None: raise self.AlreadyLocked((self._lock_obj,)) dir_path = self.settings.get('PORTAGE_BUILDDIR') if not dir_path: raise AssertionError('PORTAGE_BUILDDIR is unset') catdir = os.path.dirname(dir_path) self._catdir = catdir try: portage.util.ensure_dirs(os.path.dirname(catdir), gid=portage.portage_gid, mode=0o70, mask=0) except PortageException: if not os.path.isdir(os.path.dirname(catdir)): raise catdir_lock = AsynchronousLock(path=catdir, scheduler=self.scheduler) catdir_lock.start() catdir_lock.wait() self._assert_lock(catdir_lock) try: try: portage.util.ensure_dirs(catdir, gid=portage.portage_gid, mode=0o70, mask=0) except PortageException: if not os.path.isdir(catdir): raise builddir_lock = AsynchronousLock(path=dir_path, scheduler=self.scheduler) builddir_lock.start() builddir_lock.wait() self._assert_lock(builddir_lock) self._lock_obj = builddir_lock self.settings['PORTAGE_BUILDIR_LOCKED'] = '1' finally: self.locked = self._lock_obj is not None catdir_lock.unlock()
def unlock(self):
    """Release the build dir lock (no-op when not held) and make a
    best-effort attempt to remove the now-possibly-empty category
    directory under its own lock."""
    if self._lock_obj is None:
        return

    self._lock_obj.unlock()
    self._lock_obj = None
    self.locked = False
    self.settings.pop('PORTAGE_BUILDDIR_LOCKED', None)

    catdir_lock = AsynchronousLock(path=self._catdir,
        scheduler=self.scheduler)
    catdir_lock.start()
    if catdir_lock.wait() != os.EX_OK:
        # Could not get the catdir lock; skip cleanup entirely.
        return
    try:
        os.rmdir(self._catdir)
    except OSError:
        # Best effort: the directory may be non-empty or already gone.
        pass
    finally:
        catdir_lock.unlock()
def unlock(self):
    """
    Release the build directory lock (no-op when not held), then try
    to remove the category directory if it is empty.

    Fix: the settings key popped here was misspelled
    'PORTAGE_BUILDIR_LOCKED'; use 'PORTAGE_BUILDDIR_LOCKED' to match
    the spelling used by the lock-state handling elsewhere in this
    file, so the flag actually gets cleared.
    """
    if self._lock_obj is None:
        return

    self._lock_obj.unlock()
    self._lock_obj = None
    self.locked = False
    self.settings.pop('PORTAGE_BUILDDIR_LOCKED', None)

    catdir_lock = AsynchronousLock(path=self._catdir,
        scheduler=self.scheduler)
    catdir_lock.start()
    if catdir_lock.wait() == os.EX_OK:
        try:
            os.rmdir(self._catdir)
        except OSError as e:
            # A non-empty or already-removed category dir is the
            # expected case; anything else is a real error.
            if e.errno not in (errno.ENOENT,
                errno.ENOTEMPTY, errno.EEXIST, errno.EPERM):
                raise
        finally:
            catdir_lock.unlock()
def lock(self): """ This raises an AlreadyLocked exception if lock() is called while a lock is already held. In order to avoid this, call unlock() or check whether the "locked" attribute is True or False before calling lock(). """ if self._lock_obj is not None: raise self.AlreadyLocked((self._lock_obj, )) async_lock = AsynchronousLock(path=self.pkg_path, scheduler=self.scheduler) async_lock.start() if async_lock.wait() != os.EX_OK: # TODO: Use CompositeTask for better handling, like in EbuildPhase. raise AssertionError("AsynchronousLock failed with returncode %s" \ % (async_lock.returncode,)) self._lock_obj = async_lock self.locked = True
def lock(self): """ This raises an AlreadyLocked exception if lock() is called while a lock is already held. In order to avoid this, call unlock() or check whether the "locked" attribute is True or False before calling lock(). """ if self._lock_obj is not None: raise self.AlreadyLocked((self._lock_obj,)) async_lock = AsynchronousLock(path=self.pkg_path, scheduler=self.scheduler) async_lock.start() if async_lock.wait() != os.EX_OK: # TODO: Use CompositeTask for better handling, like in EbuildPhase. raise AssertionError("AsynchronousLock failed with returncode %s" \ % (async_lock.returncode,)) self._lock_obj = async_lock self.locked = True
def _testAsynchronousLockWaitCancel(self):
    """Variant using synchronous unlock(): cancelling a pending
    waiter must leave it with a non-success returncode."""
    loop = global_event_loop()
    basedir = tempfile.mkdtemp()
    try:
        lock_path = os.path.join(basedir, 'lock_me')

        holder = AsynchronousLock(path=lock_path, scheduler=loop)
        holder.start()
        self.assertEqual(holder.wait(), os.EX_OK)
        self.assertEqual(holder.returncode, os.EX_OK)

        waiter = AsynchronousLock(path=lock_path, scheduler=loop,
            _force_async=True, _force_process=True)
        waiter.start()

        # waiter should be blocked behind the holder
        self.assertEqual(waiter.poll(), None)
        self.assertEqual(waiter.returncode, None)

        # Cancel the waiter and then check wait() and returncode results.
        waiter.cancel()
        self.assertFalse(waiter.wait() == os.EX_OK)
        self.assertFalse(waiter.returncode == os.EX_OK)
        self.assertFalse(waiter.returncode is None)

        holder.unlock()
    finally:
        shutil.rmtree(basedir)
class _BinpkgFetcherProcess(SpawnProcess):
    """Subprocess task that downloads a binary package file via the
    configured FETCHCOMMAND/RESUMECOMMAND (possibly overridden by
    remote index metadata), and manages a lock on the destination
    pkg_path."""

    __slots__ = ("pkg", "pretend", "locked", "pkg_path", "_lock_obj")

    def _start(self):
        # Build the fetch command and environment, then delegate to
        # SpawnProcess._start() to spawn the fetcher.
        pkg = self.pkg
        pretend = self.pretend
        bintree = pkg.root_config.trees["bintree"]
        settings = bintree.settings
        pkg_path = self.pkg_path

        exists = os.path.exists(pkg_path)
        # Resume only when the existing file is flagged as incomplete
        # by the binary package tree.
        resume = exists and os.path.basename(pkg_path) in bintree.invalids
        if not (pretend or resume):
            # Remove existing file or broken symlink.
            try:
                os.unlink(pkg_path)
            except OSError:
                pass

        # urljoin doesn't work correctly with
        # unrecognized protocols like sftp
        fetchcommand = None
        resumecommand = None
        if bintree._remote_has_index:
            remote_metadata = bintree._remotepkgs[bintree.dbapi._instance_key(
                pkg.cpv)]
            rel_uri = remote_metadata.get("PATH")
            if not rel_uri:
                rel_uri = pkg.cpv + ".tbz2"
            remote_base_uri = remote_metadata["BASE_URI"]
            uri = remote_base_uri.rstrip("/") + "/" + rel_uri.lstrip("/")
            # The remote index may supply its own fetch/resume commands.
            fetchcommand = remote_metadata.get('FETCHCOMMAND')
            resumecommand = remote_metadata.get('RESUMECOMMAND')
        else:
            uri = settings["PORTAGE_BINHOST"].rstrip("/") + \
                "/" + pkg.pf + ".tbz2"

        if pretend:
            # Pretend mode: just print the URI and report success
            # without spawning anything.
            portage.writemsg_stdout("\n%s\n" % uri, noiselevel=-1)
            self.returncode = os.EX_OK
            self._async_wait()
            return

        fcmd = None
        if resume:
            fcmd = resumecommand
        else:
            fcmd = fetchcommand
        if fcmd is None:
            # No remote-supplied command: fall back to the
            # protocol-specific or generic setting.
            protocol = urllib_parse_urlparse(uri)[0]
            fcmd_prefix = "FETCHCOMMAND"
            if resume:
                fcmd_prefix = "RESUMECOMMAND"
            fcmd = settings.get(fcmd_prefix + "_" + protocol.upper())
            if not fcmd:
                fcmd = settings.get(fcmd_prefix)

        # Variables substituted into the fetch command template.
        fcmd_vars = {
            "DISTDIR": os.path.dirname(pkg_path),
            "URI": uri,
            "FILE": os.path.basename(pkg_path)
        }

        for k in ("PORTAGE_SSH_OPTS", ):
            v = settings.get(k)
            if v is not None:
                fcmd_vars[k] = v

        fetch_env = dict(settings.items())
        fetch_args = [portage.util.varexpand(x, mydict=fcmd_vars) \
            for x in portage.util.shlex_split(fcmd)]

        if self.fd_pipes is None:
            self.fd_pipes = {}
        fd_pipes = self.fd_pipes

        # Redirect all output to stdout since some fetchers like
        # wget pollute stderr (if portage detects a problem then it
        # can send its own message to stderr).
        fd_pipes.setdefault(0, portage._get_stdin().fileno())
        fd_pipes.setdefault(1, sys.__stdout__.fileno())
        fd_pipes.setdefault(2, sys.__stdout__.fileno())

        self.args = fetch_args
        self.env = fetch_env
        if settings.selinux_enabled():
            self._selinux_type = settings["PORTAGE_FETCH_T"]
        self.log_filter_file = settings.get('PORTAGE_LOG_FILTER_FILE_CMD')
        SpawnProcess._start(self)

    def _pipe(self, fd_pipes):
        """When appropriate, use a pty so that fetcher progress bars,
        like wget has, will work properly."""
        if self.background or not sys.__stdout__.isatty():
            # When the output only goes to a log file,
            # there's no point in creating a pty.
            return os.pipe()
        stdout_pipe = None
        if not self.background:
            stdout_pipe = fd_pipes.get(1)
        got_pty, master_fd, slave_fd = \
            _create_pty_or_pipe(copy_term_size=stdout_pipe)
        return (master_fd, slave_fd)

    def sync_timestamp(self):
        # If possible, update the mtime to match the remote package if
        # the fetcher didn't already do it automatically.
        bintree = self.pkg.root_config.trees["bintree"]
        if bintree._remote_has_index:
            remote_mtime = bintree._remotepkgs[bintree.dbapi._instance_key(
                self.pkg.cpv)].get("_mtime_")
            if remote_mtime is not None:
                try:
                    remote_mtime = int(remote_mtime)
                except ValueError:
                    # Malformed remote mtime: leave local mtime alone.
                    pass
                else:
                    try:
                        local_mtime = os.stat(self.pkg_path)[stat.ST_MTIME]
                    except OSError:
                        pass
                    else:
                        if remote_mtime != local_mtime:
                            try:
                                os.utime(self.pkg_path,
                                    (remote_mtime, remote_mtime))
                            except OSError:
                                # Best effort only.
                                pass

    def async_lock(self):
        """
        Acquire a lock on pkg_path asynchronously; the returned
        Future's result is None on success.

        This raises an AlreadyLocked exception if async_lock() is
        called while a lock is already held. In order to avoid this,
        call async_unlock() or check whether the "locked" attribute
        is True or False before calling async_lock().
        """
        if self._lock_obj is not None:
            raise self.AlreadyLocked((self._lock_obj, ))

        result = self.scheduler.create_future()

        def acquired_lock(async_lock):
            # Exit listener: resolve the Future according to the
            # lock task's result.
            if async_lock.wait() == os.EX_OK:
                self.locked = True
                result.set_result(None)
            else:
                result.set_exception(
                    AssertionError(
                        "AsynchronousLock failed with returncode %s"
                        % (async_lock.returncode, )))

        self._lock_obj = AsynchronousLock(path=self.pkg_path,
            scheduler=self.scheduler)
        self._lock_obj.addExitListener(acquired_lock)
        self._lock_obj.start()
        return result

    class AlreadyLocked(portage.exception.PortageException):
        # Raised when async_lock() is called while a lock is held.
        pass

    def async_unlock(self):
        # Release the pkg_path lock and return the underlying
        # async_unlock() Future.
        if self._lock_obj is None:
            raise AssertionError('already unlocked')
        result = self._lock_obj.async_unlock()
        self._lock_obj = None
        self.locked = False
        return result
def async_lock(self): """ Acquire the lock asynchronously. Notification is available via the add_done_callback method of the returned Future instance. This raises an AlreadyLocked exception if async_lock() is called while a lock is already held. In order to avoid this, call async_unlock() or check whether the "locked" attribute is True or False before calling async_lock(). @returns: Future, result is None """ if self._lock_obj is not None: raise self.AlreadyLocked((self._lock_obj, )) dir_path = self.settings.get('PORTAGE_BUILDDIR') if not dir_path: raise AssertionError('PORTAGE_BUILDDIR is unset') catdir = os.path.dirname(dir_path) self._catdir = catdir catdir_lock = AsynchronousLock(path=catdir, scheduler=self.scheduler) builddir_lock = AsynchronousLock(path=dir_path, scheduler=self.scheduler) result = self.scheduler.create_future() def catdir_locked(catdir_lock): try: self._assert_lock(catdir_lock) except AssertionError as e: result.set_exception(e) return try: portage.util.ensure_dirs(catdir, gid=portage.portage_gid, mode=0o70, mask=0) except PortageException as e: if not os.path.isdir(catdir): result.set_exception(e) return builddir_lock.addExitListener(builddir_locked) builddir_lock.start() def builddir_locked(builddir_lock): try: self._assert_lock(builddir_lock) except AssertionError as e: catdir_lock.async_unlock.add_done_callback( functools.partial(catdir_unlocked, exception=e)) return self._lock_obj = builddir_lock self.locked = True self.settings['PORTAGE_BUILDDIR_LOCKED'] = '1' catdir_lock.async_unlock().add_done_callback(catdir_unlocked) def catdir_unlocked(future, exception=None): if not (exception is None and future.exception() is None): result.set_exception(exception or future.exception()) else: result.set_result(None) try: portage.util.ensure_dirs(os.path.dirname(catdir), gid=portage.portage_gid, mode=0o70, mask=0) except PortageException: if not os.path.isdir(os.path.dirname(catdir)): raise catdir_lock.addExitListener(catdir_locked) 
catdir_lock.start() return result
class _BinpkgFetcherProcess(SpawnProcess):
    """Subprocess task that downloads a binary package file with the
    configured FETCHCOMMAND/RESUMECOMMAND and manages a lock on the
    destination pkg_path. (Older variant; see the newer one elsewhere
    in this file for remote-supplied fetch commands.)"""

    __slots__ = ("pkg", "pretend", "locked", "pkg_path", "_lock_obj")

    def _start(self):
        # Assemble the fetch command and environment, then delegate
        # to SpawnProcess._start() to spawn the fetcher.
        pkg = self.pkg
        pretend = self.pretend
        bintree = pkg.root_config.trees["bintree"]
        settings = bintree.settings
        pkg_path = self.pkg_path

        exists = os.path.exists(pkg_path)
        # Resume only when the existing file is flagged as incomplete
        # by the binary package tree.
        resume = exists and os.path.basename(pkg_path) in bintree.invalids
        if not (pretend or resume):
            # Remove existing file or broken symlink.
            try:
                os.unlink(pkg_path)
            except OSError:
                pass

        # urljoin doesn't work correctly with
        # unrecognized protocols like sftp
        if bintree._remote_has_index:
            instance_key = bintree.dbapi._instance_key(pkg.cpv)
            rel_uri = bintree._remotepkgs[instance_key].get("PATH")
            if not rel_uri:
                rel_uri = pkg.cpv + ".tbz2"
            remote_base_uri = bintree._remotepkgs[
                instance_key]["BASE_URI"]
            uri = remote_base_uri.rstrip("/") + "/" + rel_uri.lstrip("/")
        else:
            uri = settings["PORTAGE_BINHOST"].rstrip("/") + \
                "/" + pkg.pf + ".tbz2"

        if pretend:
            # Pretend mode: print the URI and report success without
            # spawning anything.
            portage.writemsg_stdout("\n%s\n" % uri, noiselevel=-1)
            self.returncode = os.EX_OK
            self._async_wait()
            return

        # Choose the protocol-specific command when configured,
        # otherwise the generic FETCHCOMMAND/RESUMECOMMAND.
        protocol = urllib_parse_urlparse(uri)[0]
        fcmd_prefix = "FETCHCOMMAND"
        if resume:
            fcmd_prefix = "RESUMECOMMAND"
        fcmd = settings.get(fcmd_prefix + "_" + protocol.upper())
        if not fcmd:
            fcmd = settings.get(fcmd_prefix)

        # Variables substituted into the fetch command template.
        fcmd_vars = {
            "DISTDIR": os.path.dirname(pkg_path),
            "URI": uri,
            "FILE": os.path.basename(pkg_path)
        }

        for k in ("PORTAGE_SSH_OPTS",):
            v = settings.get(k)
            if v is not None:
                fcmd_vars[k] = v

        fetch_env = dict(settings.items())
        fetch_args = [portage.util.varexpand(x, mydict=fcmd_vars) \
            for x in portage.util.shlex_split(fcmd)]

        if self.fd_pipes is None:
            self.fd_pipes = {}
        fd_pipes = self.fd_pipes

        # Redirect all output to stdout since some fetchers like
        # wget pollute stderr (if portage detects a problem then it
        # can send its own message to stderr).
        fd_pipes.setdefault(0, portage._get_stdin().fileno())
        fd_pipes.setdefault(1, sys.__stdout__.fileno())
        fd_pipes.setdefault(2, sys.__stdout__.fileno())

        self.args = fetch_args
        self.env = fetch_env
        if settings.selinux_enabled():
            self._selinux_type = settings["PORTAGE_FETCH_T"]
        SpawnProcess._start(self)

    def _pipe(self, fd_pipes):
        """When appropriate, use a pty so that fetcher progress bars,
        like wget has, will work properly."""
        if self.background or not sys.__stdout__.isatty():
            # When the output only goes to a log file,
            # there's no point in creating a pty.
            return os.pipe()
        stdout_pipe = None
        if not self.background:
            stdout_pipe = fd_pipes.get(1)
        got_pty, master_fd, slave_fd = \
            _create_pty_or_pipe(copy_term_size=stdout_pipe)
        return (master_fd, slave_fd)

    def sync_timestamp(self):
        # If possible, update the mtime to match the remote package if
        # the fetcher didn't already do it automatically.
        bintree = self.pkg.root_config.trees["bintree"]
        if bintree._remote_has_index:
            remote_mtime = bintree._remotepkgs[
                bintree.dbapi._instance_key(
                self.pkg.cpv)].get("_mtime_")
            if remote_mtime is not None:
                try:
                    # NOTE(review): ``long`` is the Python 2 builtin;
                    # presumably a py2/py3 compat alias is in scope in
                    # this module -- verify (the newer variant of this
                    # class uses int() here).
                    remote_mtime = long(remote_mtime)
                except ValueError:
                    pass
                else:
                    try:
                        local_mtime = os.stat(self.pkg_path)[stat.ST_MTIME]
                    except OSError:
                        pass
                    else:
                        if remote_mtime != local_mtime:
                            try:
                                os.utime(self.pkg_path,
                                    (remote_mtime, remote_mtime))
                            except OSError:
                                # Best effort only.
                                pass

    def async_lock(self):
        """
        Acquire a lock on pkg_path asynchronously; the returned
        Future's result is None on success.

        This raises an AlreadyLocked exception if async_lock() is
        called while a lock is already held. In order to avoid this,
        call async_unlock() or check whether the "locked" attribute
        is True or False before calling async_lock().
        """
        if self._lock_obj is not None:
            raise self.AlreadyLocked((self._lock_obj,))

        result = self.scheduler.create_future()

        def acquired_lock(async_lock):
            # Exit listener: resolve the Future according to the
            # lock task's result.
            if async_lock.wait() == os.EX_OK:
                self.locked = True
                result.set_result(None)
            else:
                result.set_exception(AssertionError(
                    "AsynchronousLock failed with returncode %s"
                    % (async_lock.returncode,)))

        self._lock_obj = AsynchronousLock(path=self.pkg_path,
            scheduler=self.scheduler)
        self._lock_obj.addExitListener(acquired_lock)
        self._lock_obj.start()
        return result

    class AlreadyLocked(portage.exception.PortageException):
        # Raised when async_lock() is called while a lock is held.
        pass

    def async_unlock(self):
        # Release the pkg_path lock and return the underlying
        # async_unlock() Future.
        if self._lock_obj is None:
            raise AssertionError('already unlocked')
        result = self._lock_obj.async_unlock()
        self._lock_obj = None
        self.locked = False
        return result