def jlock(self, vfs):
    """Create a lock for the journal file"""
    if self._currentlock(self._lockref) is not None:
        raise error.Abort(_('journal lock does not support nesting'))
    desc = _('journal of %s') % vfs.base
    try:
        l = lock.lock(vfs, 'namejournal.lock', 0, desc=desc)
    except error.LockHeld as inst:
        self.ui.warn(
            _("waiting for lock on %s held by %r\n") % (desc, inst.locker))
        # default to 600 seconds timeout
        l = lock.lock(
            vfs, 'namejournal.lock',
            int(self.ui.config("ui", "timeout", "600")),
            desc=desc)
        self.ui.warn(_("got lock after %s seconds\n") % l.delay)
    self._lockref = weakref.ref(l)
    return l
def jlock(self, vfs):
    """Create a lock for the journal file"""
    if self._lockref and self._lockref():
        raise error.Abort(_('journal lock does not support nesting'))
    desc = _('journal of %s') % vfs.base
    try:
        l = lock.lock(vfs, 'journal.lock', 0, desc=desc)
    except error.LockHeld as inst:
        self.ui.warn(
            _("waiting for lock on %s held by %r\n") % (desc, inst.locker))
        # default to 600 seconds timeout
        l = lock.lock(vfs, 'journal.lock',
                      int(self.ui.config("ui", "timeout", "600")),
                      desc=desc)
        self.ui.warn(_("got lock after %s seconds\n") % l.delay)
    self._lockref = weakref.ref(l)
    return l
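# Hedged sketch of calling the jlock() helpers above; `storage` (the object
# defining jlock) and `repo` are hypothetical names, not from the snippets.
# The try/finally mirrors how Mercurial locks are released by refcount.
l = storage.jlock(repo.vfs)
try:
    pass  # read or append journal entries while the lock is held
finally:
    l.release()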
def testrecursivelock(self):
    state = teststate(self, tempfile.mkdtemp(dir=os.getcwd()))
    lock = state.makelock()
    state.assertacquirecalled(True)
    state.resetacquirefn()
    lock.lock()
    # recursive lock should not call acquirefn again
    state.assertacquirecalled(False)
    lock.release()  # brings lock refcount down from 2 to 1
    state.assertreleasecalled(False)
    state.assertpostreleasecalled(False)
    state.assertlockexists(True)
    lock.release()  # releases the lock
    state.assertreleasecalled(True)
    state.assertpostreleasecalled(True)
    state.assertlockexists(False)
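# Hedged sketch (not from the snippets above) of the refcounting that
# testrecursivelock exercises, against Mercurial's lock module directly.
# Assumes a Mercurial with mercurial.vfs; the temp directory stands in
# for a repository's .hg directory.
import tempfile
from mercurial import lock as lockmod, vfs as vfsmod

demovfs = vfsmod.vfs(tempfile.mkdtemp())
l = lockmod.lock(demovfs, 'demo.lock', timeout=0, desc='demo')  # refcount 1
l.lock()     # refcount 2; the lock is not re-acquired on disk
l.release()  # refcount 1; the lock file still exists
l.release()  # refcount 0; the lock file is removed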
def get_local_path(self, url, md5sum=None, trusted_md5sum=None):
    """
    Returns a local path of the URL contents.

    If cached locally (in cache_dir), immediately returns the path.
    Otherwise, downloads the file, saves it to the cache, and returns
    the path.

    Arguments:
    - md5sum: if set, new downloads must match the indicated md5sum
      (old downloads do not)
    - trusted_md5sum: if set, this overrides the md5sum argument, and
      all existing and new downloads must match the indicated md5sum
    """
    path_to_cached_download = self.get_cached_path(url)
    if path_to_cached_download:
        return path_to_cached_download
    # This is the cache prefix, e.g.
    # "/DOWNLOAD_CACHE_ROOT/download_cache/320ef6acf360e72cbc54ad58e4d7c8d046de4d46"
    cache_prefix = self._get_target_dir(url)
    if not os.path.isdir(cache_prefix):
        os.makedirs(cache_prefix)
    # Path to lock on
    lock_path = os.path.join(cache_prefix, 'lock')
    # Provide feedback if there is another download in progress
    there_is_another_lock = False
    # mercurial lock creates a symlink at lock_path. However, we check
    # both (just in case).
    if os.path.lexists(lock_path) or os.path.exists(lock_path):
        there_is_another_lock = True
        print >> sys.stderr, 'Will stop to wait for url: %s' % url
        ColorCli.print_red(
            'Another download is in progress. Will wait up to 600 '
            'seconds. Ctrl-C to stop any time.')
    l = None
    try:
        # Grab lock for 10 minutes; lock.lock() acquires on construction,
        # so no extra l.lock() refcount bump is needed
        l = lock.lock(lock_path, timeout=600)
        if there_is_another_lock:
            ColorCli.print_green('Done waiting for other download.')
        # Call the inner method that is protected by a lock
        return self._get_local_path_singleton(
            url, md5sum=md5sum, trusted_md5sum=trusted_md5sum)
    except mercurial_error.LockHeld:
        # couldn't take the lock
        ColorCli.print_red('Timed out waiting for the lock. Exiting.')
        sys.exit(1)
    finally:
        # release even on the return path out of the try block
        if l is not None:
            l.release()
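# Hypothetical call to get_local_path() above; the cache instance, URL,
# and md5sum are illustration values only.
path = cache.get_local_path(
    'https://example.com/artifact.tar.gz',
    md5sum='d41d8cd98f00b204e9800998ecf8427e')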
def handle(self, *args, **options):
    if len(args) != 1 or args[0] not in self.dl_manager.get_actions_list():
        raise CommandError('You must specify one valid command (%s)' %
                           repr(self.dl_manager.get_actions_list()))
    command = args[0]
    repeat = options.get('repeat')
    forever = options.get('forever')
    # Only allow one cron process per command to run at a single time
    lock_path = os.path.join(settings.LOCK_PATH, '.%s.pid' % command)
    try:
        # wait at most MAX_RUN_TIME seconds for the lock
        l = lock.lock(lock_path, timeout=MAX_RUN_TIME)
        self.do(command, repeat, forever)
    except error.LockHeld:
        log.debug("Active process for command '%s', aborting.", command)
    else:
        l.release()
@contextlib.contextmanager
def flock(lockpath, description, timeout=-1):
    """A flock based lock object. Currently it is always non-blocking.

    Note that since it is flock based, you can accidentally take it multiple
    times within one process and the first one to be released will release
    all of them. So the caller needs to be careful to not create more than
    one instance per lock.
    """

    # best effort lightweight lock
    try:
        import fcntl
        fcntl.flock
    except ImportError:
        # fallback to Mercurial lock
        vfs = vfsmod.vfs(os.path.dirname(lockpath))
        with lockmod.lock(vfs, os.path.basename(lockpath), timeout=timeout):
            yield
        return
    # make sure lock file exists
    util.makedirs(os.path.dirname(lockpath))
    with open(lockpath, 'a'):
        pass
    lockfd = os.open(lockpath, os.O_RDONLY, 0o664)
    start = time.time()
    while True:
        try:
            fcntl.flock(lockfd, fcntl.LOCK_EX | fcntl.LOCK_NB)
            break
        except IOError as ex:
            if ex.errno == errno.EAGAIN:
                if timeout != -1 and time.time() - start > timeout:
                    raise error.LockHeld(errno.EAGAIN, lockpath, description,
                                         '')
                else:
                    time.sleep(0.05)
                    continue
            raise
    try:
        yield
    finally:
        fcntl.flock(lockfd, fcntl.LOCK_UN)
        os.close(lockfd)
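# Hedged usage sketch for flock() above; the path and description are made
# up for illustration.
with flock('/var/tmp/demo.flock', 'demo job', timeout=5):
    pass  # critical section: at most one process runs this at a time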
def main():
    # set up logging
    log.basicConfig(format=LOGFORMAT, level=log.DEBUG,
                    filename=LOGFILE, handler=LOGHANDLER)

    mq.set_host(config['mq_host'])
    mq.set_exchange(config['mq_exchange'])
    mq.connect()

    try:
        if not os.access(config['work_dir'], os.F_OK):
            os.makedirs(config['work_dir'])
        os.chdir(config['work_dir'])
        # look for an available (not locked) hgpusher.# in the working directory
        i = 0
        while True:
            hgp_lock = None
            work_dir = 'hgpusher.%d' % (i)
            if not os.access(work_dir, os.F_OK):
                os.makedirs(work_dir)
            try:
                print "Trying dir: %s" % (work_dir)
                hgp_lock = lock.lock(os.path.join(work_dir, '.lock'),
                                     timeout=1)
                print "Working directory: %s" % (work_dir)
                os.chdir(work_dir)
                # get rid of active dir
                if os.access('active/', os.F_OK):
                    shutil.rmtree('active/')
                os.makedirs('active/')
                mq.listen(queue=config['mq_hgp_queue'],
                          callback=message_handler,
                          routing_key='hgpusher')
            except error.LockHeld:
                # couldn't take the lock, check next workdir
                i += 1
                continue
            else:
                hgp_lock.release()
                print "Released working directory"
                break
    except os.error, e:
        log_msg('Error switching to working directory: %s' % e)
        exit(1)
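# Hedged sketch isolating the scan in main() above: probe hgpusher.0,
# hgpusher.1, ... until one lock can be taken; LockHeld marks a busy
# directory. Uses the same `lock` and `error` modules as the snippet.
i = 0
while True:
    work_dir = 'hgpusher.%d' % i
    try:
        hgp_lock = lock.lock(os.path.join(work_dir, '.lock'), timeout=1)
        break  # this work directory is now ours
    except error.LockHeld:
        i += 1  # busy; try the next numbered directory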
def run(self, targetdata, targethistory):
    ledger = repackledger()

    with lockmod.lock(
        repacklockvfs(self.repo), b"repacklock", desc=None, timeout=0
    ):
        self.repo.hook(b'prerepack')

        # Populate ledger from source
        self.data.markledger(ledger, options=self.options)
        self.history.markledger(ledger, options=self.options)

        # Run repack
        self.repackdata(ledger, targetdata)
        self.repackhistory(ledger, targethistory)

        # Call cleanup on each source
        for source in ledger.sources:
            source.cleanup(ledger)
def safelog(repo, command):
    '''boilerplate for log command

    input:
        repo: mercurial.localrepo
        command: list of strings, first is string of command run
    output: bool
        True if changes have been recorded, False otherwise
    '''
    changes = False
    if repo is not None:  # some hg commands don't require repo
        # undolog specific lock
        # allows running command during other commands when
        # otherwise legal. Could cause weird undolog states,
        # which gap handling generally covers.
        try:
            try:
                repo.vfs.makedirs('undolog')
            except OSError:
                repo.ui.debug("can't make undolog folder in .hg\n")
                return changes
            with lockmod.lock(repo.vfs, "undolog/lock", desc="undolog",
                              timeout=2):
                # developer config: undo._duringundologlock
                if repo.ui.configbool('undo', '_duringundologlock'):
                    repo.hook("duringundologlock")
                tr = lighttransaction(repo)
                with tr:
                    changes = log(repo.filtered('visible'), command, tr)
                    if changes and not ("undo" == command[0] or
                                        "redo" == command[0]):
                        _delundoredo(repo)
        except error.LockUnavailable:  # no write permissions
            repo.ui.debug("undolog lacks write permission\n")
        except error.LockHeld:  # timeout, not fatal: don't abort actual command
            # This shouldn't happen too often as it would
            # create gaps in the undo log
            repo.ui.debug("undolog lock timeout\n")
            _logtoscuba(repo.ui, 'undolog lock timeout')
    return changes
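# Hedged sketch of the soft-failure pattern in safelog() above: a held or
# unavailable lock is swallowed so the user's command still proceeds.
# `repo` is assumed to be a Mercurial localrepo as in the function above.
try:
    with lockmod.lock(repo.vfs, "undolog/lock", desc="undolog", timeout=2):
        pass  # optional bookkeeping goes here
except (error.LockUnavailable, error.LockHeld):
    pass  # best effort only; never abort the actual command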
def lock(self):
    return lockmod.lock(self._repo.vfs, self._vfspath + '.lock')
        messages = True,
        flag_check = False,
        log_file = None,
    )
    options, args = parser.parse_known_args()

    for config in options.configs:
        if not os.path.exists(config):
            log.error("Config file %s does not exist or is not valid." % config)
            sys.exit(1)

    lock_file = None
    try:
        lock_file = lock.lock(LOCK_FILE_PATH, timeout=1)

        # set up logging
        if not options.log_file:
            # log to stdout
            handler = logging.StreamHandler()
        else:
            handler = logging.handlers.RotatingFileHandler(
                options.log_file, maxBytes=50000, backupCount=5)
        if not options.verbose:
            log.setLevel(logging.INFO)
        else:
            log.setLevel(logging.DEBUG)
        log.addHandler(handler)
def main():
    # set up logging
    log.setLevel(logging.DEBUG)
    LOGHANDLER.setFormatter(LOGFORMAT)
    log.addHandler(LOGHANDLER)

    mq.connect()
    mq.declare_and_bind(config['mq_hgp_queue'], 'hgpusher')

    if len(sys.argv) > 1:
        for arg in sys.argv[1:]:
            if arg == '--purge-queue':
                # purge the autoland queue
                mq.purge_queue(config['mq_hgp_queue'], prompt=True)
                exit(0)

    try:
        config['work_dir'] = os.path.abspath(config['work_dir'])
        if not os.path.isdir(config['work_dir']):
            os.makedirs(config['work_dir'])
        os.chdir(config['work_dir'])
        # look for an available (not locked) hgpusher.# in the working directory
        i = 0
        while True:
            hgp_lock = None
            work_dir = 'hgpusher.%d' % (i)
            if not os.path.isdir(work_dir):
                os.makedirs(work_dir)
            try:
                log.debug('Trying dir: %s' % (work_dir))
                hgp_lock = lock.lock(os.path.join(work_dir, '.lock'),
                                     timeout=1)
                log.debug('Working directory: %s' % (work_dir))
                os.chdir(work_dir)
                # get rid of active dir
                try:
                    shutil.rmtree('active')
                except OSError:
                    pass
                os.makedirs('active')

                plog_handler = logging.FileHandler('permissions.log')
                plog_handler.setFormatter(LOGFORMAT)
                plog_handler.setLevel(logging.INFO)
                plog.addHandler(plog_handler)

                mq.listen(queue=config['mq_hgp_queue'],
                          callback=message_handler)
            except error.LockHeld:
                # couldn't take the lock, check next workdir
                i += 1
                continue
            finally:
                if hgp_lock:
                    hgp_lock.release()
                    log.debug('Released working directory')
            raise
    except Exception, err:
        log.error('An error occurred: %s\n%s' % (err, traceback.format_exc()))
        exit(1)
def debugwaitonrepack(repo):
    with lockmod.lock(repack.repacklockvfs(repo), b"repacklock", timeout=-1):
        return
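# Hedged sketch contrasting the two timeout policies in the repack snippets
# above; `vfs` is a hypothetical stand-in for repack.repacklockvfs(repo).
with lockmod.lock(vfs, b"repacklock", timeout=0):
    pass  # timeout=0: raise error.LockHeld at once if another repack holds it
with lockmod.lock(vfs, b"repacklock", timeout=-1):
    pass  # timeout=-1: block until the current holder releases the lock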