class ManagementLock(object):
    def __init__(self):
        self.lock = None

    def acquire(self):
        self.lock = InterProcessLock(LOCK_PATH)

        # Attempt to obtain a lock, retry every 10 seconds. Wait at most 5 minutes.
        # The retrying is necessary so we can report on stderr that we are waiting
        # for a lock. Otherwise, a user trying to run the command manually might
        # get confused why the command execution is delayed.
        if self.lock.acquire(blocking=False):
            return
        print("Another management command is running, waiting for lock...",
              file=sys.stderr)
        if self.lock.acquire(delay=10, max_delay=10, timeout=300):
            return

        self.lock = None
        raise RuntimeError("Failed to acquire lock.")

    def release(self):
        if self.lock is not None:
            self.lock.release()

    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, _exc_type, _exc_val, _exc_tb):
        self.release()
        return False
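# A minimal usage sketch for the context-manager form above, assuming LOCK_PATH,
# sys and the fasteners import come from the surrounding module; the body is a
# placeholder for the real management command:
with ManagementLock():
    print("running the management command while holding the lock")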
def _update_db(self):
    if self._prim_db:
        lock = InterProcessLock("%s.lock" % self._prim_db)
        acquired = lock.acquire(blocking=False)
        if not acquired:
            logger.debug("Waiting 60 seconds for file lock")
            acquired = lock.acquire(blocking=True, timeout=60)
        if acquired:
            try:
                with open(self._prim_db, "w", encoding="utf-8") as out:
                    # NOTE: relies on a Python 2/3 "unicode" compatibility shim
                    # being defined elsewhere in the module.
                    out.write(unicode(json.dumps(self._dbs[self._prim_db])))
                return True
            finally:
                lock.release()
        else:
            logger.error(
                "Could not update platform database: "
                "Lock acquire failed after 60 seconds"
            )
            return False
    else:
        logger.error(
            "Can't update platform database: destination database is ambiguous"
        )
        return False
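# The pattern above (a non-blocking attempt first, then a blocking retry with a
# timeout, logging only when we actually have to wait) can be reduced to a small
# helper. A minimal sketch, assuming only fasteners and the standard library; the
# helper name is illustrative, not from the source:
import logging

from fasteners import InterProcessLock

log = logging.getLogger(__name__)


def acquire_with_notice(path, timeout=60):
    """Return an acquired lock, or None if it could not be obtained in time."""
    lock = InterProcessLock(path)
    if lock.acquire(blocking=False):
        return lock
    log.debug("Waiting %s seconds for file lock %s", timeout, path)
    if lock.acquire(blocking=True, timeout=timeout):
        return lock
    return None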
class AdaptiveLock:
    timeout = 600

    def __init__(self):
        self.method = "hard_links"
        self.lock_instance = None

    def lock(self, lock_file):
        if self.method == "hard_links":
            self.lock_instance = FluflLock(
                lock_file, lifetime=60)  # seconds after which the lock is broken
            try:
                self.lock_instance.lock(
                    timeout=self.timeout)  # try for a long time
                return
            except TimeoutError:
                pass
            except OSError:
                pass
            except PermissionError:
                pass

            logger.warning(
                "Unable to use hard link-based file locks. "
                "Trying fcntl-based file locks",
                exc_info=True)
            self.method = "fcntl"

        if self.method == "fcntl":
            self.lock_instance = FcntlLock(lock_file)
            acquired = self.lock_instance.acquire(timeout=self.timeout)
            if acquired:
                return

            logger.warning(
                "Unable to use fcntl-based file locks. "
                "Disabling file locks",
                exc_info=True)
            self.method = None
            self.delay = 10.0  # use a large delay to make write collisions unlikely

    def unlock(self):
        if self.method == "hard_links":
            self.lock_instance.unlock(
                unconditionally=True)  # do not raise errors in unlock
            self.lock_instance = None
        elif self.method == "fcntl":
            self.lock_instance.release()
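# A minimal usage sketch, assuming FluflLock and FcntlLock are the hard-link and
# fcntl lock classes imported by the original module (for example flufl.lock.Lock
# and fasteners.InterProcessLock); the body is a placeholder critical section:
adaptive = AdaptiveLock()
adaptive.lock("/tmp/shared-resource.lock")
try:
    print("writing to the shared resource")  # placeholder critical section
finally:
    adaptive.unlock()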
def acquire_inter_process_lock(lock_name):
    # Lock preventing simultaneous crawling processes
    lock_name = _compute_lock_name(lock_name)
    lock = InterProcessLock('/tmp/%s' % lock_name)
    acquired_lock = lock.acquire(blocking=False)
    try:
        yield acquired_lock
    finally:
        if acquired_lock:
            lock.release()
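# A usage sketch, assuming the generator above is meant to be used through
# contextlib.contextmanager (as its yield-based body suggests) and that the
# fasteners import comes from the surrounding module; _compute_lock_name below
# is a hypothetical stand-in for the module's real helper:
from contextlib import contextmanager


def _compute_lock_name(name):
    return "%s.lock" % name  # illustrative only


crawl_lock = contextmanager(acquire_inter_process_lock)

with crawl_lock("crawler") as got_lock:
    if got_lock:
        print("lock held; crawling")  # placeholder crawl work
    else:
        print("another crawler is already running; skipping this run")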
def wrapper(self, *args):
    ret = None
    lock = InterProcessLock(self.lock_file)
    acquired = lock.acquire(blocking=False)
    if not acquired:
        self.debug("timed_mbedls_lock",
                   "Waiting %d seconds for mock file lock." % timeout)
        acquired = lock.acquire(blocking=True, timeout=timeout)
    if acquired:
        try:
            ret = wrapper.original(self, *args)
        except Exception as e:
            lock.release()
            raise e
        lock.release()
def wrapper(self, *args):
    ret = None
    lock = InterProcessLock(self.lock_file)
    acquired = lock.acquire(blocking=False)
    if not acquired:
        self.debug("timed_mbedls_lock",
                   "Waiting %d seconds for mock file lock." % timeout)
        acquired = lock.acquire(blocking=True, timeout=timeout)
    if acquired:
        try:
            ret = wrapper.original(self, *args)
        except Exception as e:
            lock.release()
            raise e
        lock.release()
    else:
        self.err("timed_mbedls_lock",
                 "Failed to acquire mock file lock in %d seconds!" % timeout)
        sys.exit(1)
    return ret
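# The wrapper above reads like the body of a locking decorator: "timeout" and
# "wrapper.original" come from an enclosing decorator factory. A minimal sketch
# of such a factory, assuming only fasteners; the names timed_lock and original
# are illustrative, not taken from the source:
import functools

from fasteners import InterProcessLock


def timed_lock(timeout):
    def decorator(func):
        @functools.wraps(func)
        def wrapper(self, *args):
            lock = InterProcessLock(self.lock_file)
            if not lock.acquire(blocking=True, timeout=timeout):
                raise RuntimeError("Failed to acquire lock in %d seconds" % timeout)
            try:
                return func(self, *args)
            finally:
                lock.release()
        wrapper.original = func
        return wrapper
    return decorator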
def _update_db(self):
    if self._prim_db:
        lock = InterProcessLock("%s.lock" % self._prim_db)
        acquired = lock.acquire(blocking=False)
        if not acquired:
            logger.debug("Waiting 60 seconds for file lock")
            acquired = lock.acquire(blocking=True, timeout=60)
        if acquired:
            try:
                with open(self._prim_db, "w", encoding="utf-8") as out:
                    out.write(unicode(
                        json.dumps(self._dbs[self._prim_db])))
                return True
            finally:
                lock.release()
        else:
            logger.error("Could not update platform database: "
                         "Lock acquire failed after 60 seconds")
            return False
    else:
        logger.error("Can't update platform database: "
                     "destination database is ambiguous")
        return False
def lock_if_check_fails(
    check,
    lock_path,
    operation=None,
    blocking=True,
    _return_acquired=False,
    **kwargs
):
    """A context manager to establish a lock conditionally on result of a check

    It is intended to be used as a lock for a specific file and/or operation,
    e.g. for `annex get`ing a file or extracting an archive, so only one
    process would be performing such an operation.

    If verification of the check fails, it tries to acquire the lock, but if
    that fails on the first try, it will rerun the check before proceeding.

    `check` and `lock_path` could be a value, or a callable, or a tuple
    composing a callable and its args.

    Unfortunately yoh did not find any way in Python 2 to have a context
    manager which just skips the entire block if some condition is met (in
    Python 3 there is ExitStack which could potentially be used).  So we would
    still need to check in the block body whether the context manager return
    value is not None.

    Note also that the used type of the lock (fasteners.InterProcessLock)
    works only across processes and would not lock within the same process
    (threads).

    Parameters
    ----------
    check: callable or (callable, args) or value
      If value (possibly after calling a callable) evaluates to True, no lock
      is acquired, and no context is executed
    lock_path: callable or (callable, args) or value
      Provides a path for the lock file, composed from that path + '.lck'
      extension
    operation: str, optional
      If provided, would be part of the locking extension
    blocking: bool, optional
      If blocking, process would be blocked until acquired and verified that
      it was acquired after it gets the lock
    _return_acquired: bool, optional
      Return also if lock was acquired.  For "private" use within DataLad
      (tests), do not rely on it in 3rd party solutions.
    **kwargs
      Passed to `.acquire` of the fasteners.InterProcessLock

    Returns
    -------
    result of check, lock[, acquired]
    """
    check1 = _get(check)
    if check1:  # we are done - nothing to do
        yield check1, None
        return
    # acquire blocking lock
    lock_filename = _get(lock_path)

    lock_filename += '.'
    if operation:
        lock_filename += operation + '-'
    lock_filename += 'lck'

    lock = InterProcessLock(lock_filename)
    acquired = False
    try:
        lgr.debug("Acquiring a lock %s", lock_filename)
        acquired = lock.acquire(blocking=blocking, **kwargs)
        lgr.debug("Acquired? lock %s: %s", lock_filename, acquired)
        if blocking:
            assert acquired
        check2 = _get(check)
        ret_lock = None if check2 else lock
        if _return_acquired:
            yield check2, ret_lock, acquired
        else:
            yield check2, ret_lock
    finally:
        if acquired:
            lgr.debug("Releasing lock %s", lock_filename)
            lock.release()
            if exists(lock_filename):
                unlink(lock_filename)
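# A usage sketch, assuming the generator above is wrapped with
# contextlib.contextmanager in the original module, that _get resolves the
# (callable, args) tuple form described in the docstring, and that the path and
# the fetching step below are placeholders:
import os.path

target_path = "/tmp/archive.tar.gz"  # illustrative path
with lock_if_check_fails(
        check=(os.path.exists, (target_path,)),
        lock_path=target_path,
        operation="download") as (already_done, lock):
    if not already_done:
        print("fetching", target_path)  # only one process performs this step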
class DownloadDirectory:
    def __init__(self, filepath, digests):
        #: The path to which to save the file after downloading
        self.filepath = Path(filepath)
        #: Expected hashes of the downloaded data, as a mapping from algorithm
        #: names to digests
        self.digests = digests
        #: The working directory in which downloaded data will be temporarily
        #: stored
        self.dirpath = self.filepath.with_name(self.filepath.name + ".dandidownload")
        #: The file in `dirpath` to which data will be written as it is
        #: received
        self.writefile = self.dirpath / "file"
        #: A `fasteners.InterProcessLock` on `dirpath`
        self.lock = None
        #: An open filehandle to `writefile`
        self.fp = None
        #: How much of the data has been downloaded so far
        self.offset = None

    def __enter__(self):
        from fasteners import InterProcessLock

        self.dirpath.mkdir(parents=True, exist_ok=True)
        self.lock = InterProcessLock(str(self.dirpath / "lock"))
        if not self.lock.acquire(blocking=False):
            raise RuntimeError(
                f"Could not acquire download lock for {self.filepath}")
        chkpath = self.dirpath / "checksum"
        try:
            with chkpath.open() as fp:
                digests = json.load(fp)
        except (FileNotFoundError, ValueError):
            digests = {}
        matching_algs = self.digests.keys() & digests.keys()
        if matching_algs and all(
                self.digests[alg] == digests[alg] for alg in matching_algs):
            # Pick up where we left off, writing to the end of the file
            lgr.debug(
                "Download directory exists and has matching checksum; "
                "resuming download"
            )
            self.fp = self.writefile.open("ab")
        else:
            # Delete the file (if it even exists) and start anew
            if not chkpath.exists():
                lgr.debug("Starting new download in new download directory")
            else:
                lgr.debug(
                    "Download directory found, but digests do not match; "
                    "starting new download"
                )
            try:
                self.writefile.unlink()
            except FileNotFoundError:
                pass
            self.fp = self.writefile.open("wb")
        with chkpath.open("w") as fp:
            json.dump(self.digests, fp)
        self.offset = self.fp.tell()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.fp.close()
        try:
            if exc_type is None:
                self.writefile.replace(self.filepath)
        finally:
            self.lock.release()
            if exc_type is None:
                rmtree(self.dirpath, ignore_errors=True)
            self.lock = None
            self.fp = None
            self.offset = None
        return False

    def append(self, blob):
        self.fp.write(blob)
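# A usage sketch for the resumable-download helper above, assuming the Path,
# json, lgr and rmtree imports of the surrounding module; the digest value and
# the chunks are placeholders, and dl.offset tells the caller where to resume:
digests = {"sha256": "0" * 64}  # placeholder expected checksum
with DownloadDirectory("archive.zip", digests) as dl:
    for chunk in [b"first", b"second"]:  # stands in for streamed HTTP chunks
        dl.append(chunk)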
class AdaptiveLock:
    def __init__(self, timeout: int = 180):
        self.timeout = timeout
        self.methods: List[str] = ["fcntl", "hard_links", "delay"]
        self.lock_instance = None

    def lock(self, lock_file):
        if self.methods[0] == "hard_links":
            self.lock_instance = FluflLock(
                lock_file, lifetime=self.timeout
            )  # seconds after which the lock is broken
            try:
                self.lock_instance.lock(
                    timeout=self.timeout)  # try for a long time
                return
            except (FluflLockError, TimeOutError):  # timeouts etc.
                pass
            except OSError:  # such as PermissionError
                pass

            logger.warning("Unable to use hard link-based file locks",
                           exc_info=True)
            self.methods.pop(0)
            self.lock(lock_file)

        elif self.methods[0] == "fcntl":
            self.lock_instance = FcntlLock(lock_file)
            acquired = self.lock_instance.acquire(timeout=self.timeout, delay=1)
            if acquired:
                return

            logger.warning("Unable to use fcntl-based file locks",
                           exc_info=True)
            self.methods.pop(0)
            self.lock(lock_file)

        else:
            # use a random delay to make write collisions unlikely
            delay = gauss(20, 5)
            if delay > 0:
                sleep(delay)

    def unlock(self):
        if self.methods[0] == "hard_links":
            assert isinstance(self.lock_instance, FluflLock)
            self.lock_instance.unlock(
                unconditionally=True)  # do not raise errors in unlock
            self.lock_instance = None
        elif self.methods[0] == "fcntl":
            assert isinstance(self.lock_instance, FcntlLock)
            self.lock_instance.release()
def run(self):
    """
    Run application.
    """
    # configure SQLAlchemy logging
    # log_level = self.logger.getEffectiveLevel()
    # logging.getLogger('sqlalchemy.engine').setLevel(log_level)

    exit_code = ExitCodes.EXIT_SUCCESS

    self.logger.info(f"{self.PROG}: Version v{__version__}")
    self.logger.debug(f"Configuration: {dict(self.config)!r}")

    try:
        path_pidfile = self.config["path_pidfile"]
        pid_lock = InterProcessLock(path_pidfile)
        pid_lock_gotten = pid_lock.acquire(blocking=False)
        if not pid_lock_gotten:
            raise AlreadyHarvesting(path_pidfile)
        self.logger.debug(
            f"Acquired PID lock {self.config['path_pidfile']!r}"
        )

        if (
            self.config["no_routes"]
            and self.config["no_vnetworks"]
            and not self.config["truncate"]
        ):
            raise NothingToDo()

        harvesting = not (
            self.config["no_routes"] and self.config["no_vnetworks"]
        )

        Session = db.ScopedSession()
        engine = create_engine(
            self.config["sqlalchemy_database_uri"], echo=False
        )
        Session.configure(bind=engine)

        if engine.name == "sqlite":
            db.configure_sqlite(self.DB_PRAGMAS)

        # TODO(damb): Implement multithreaded harvesting using a thread
        # pool.
        try:
            if harvesting:
                self.logger.info("Start harvesting.")

            if not self.config["no_routes"]:
                self._harvest_routes(Session)
            else:
                self.logger.info(
                    "Disabled processing <route></route> information."
                )

            if not self.config["no_vnetworks"]:
                self._harvest_vnetworks(Session)
            else:
                self.logger.info(
                    "Disabled processing <vnetwork></vnetwork> "
                    "information."
                )

            if harvesting:
                self.logger.info("Finished harvesting successfully.")

            if self.config["truncate"]:
                self.logger.warning("Removing outdated data.")

                session = Session()
                with db.session_guard(session) as _session:
                    num_removed_rows = db.clean(
                        _session,
                        self.config["truncate"],
                    )

                self.logger.info(
                    f"Number of rows removed: {num_removed_rows}"
                )

        except OperationalError as err:
            raise db.StationLiteDBEngineError(err)

    # TODO(damb): signal handling

    except Error as err:
        self.logger.error(err)
        exit_code = ExitCodes.EXIT_ERROR
    except Exception as err:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        self.logger.critical("Local Exception: %s" % err)
        self.logger.critical(
            "Traceback information: "
            + repr(
                traceback.format_exception(
                    exc_type, exc_value, exc_traceback
                )
            )
        )
        exit_code = ExitCodes.EXIT_ERROR
    finally:
        try:
            if pid_lock_gotten:
                pid_lock.release()
        except NameError:
            pass

    sys.exit(exit_code)
class Lock:
    """
    An inter-process and inter-thread lock. This reuses code from
    oslo.concurrency but provides non-blocking acquire. Use the
    :meth:`singleton` class method to retrieve an existing instance for
    thread-safe usage.
    """

    _instances: Dict[str, "Lock"] = dict()
    _singleton_lock = threading.Lock()

    @classmethod
    def singleton(cls, name: str, lock_path: Optional[str] = None) -> "Lock":
        """
        Retrieve an existing lock object with a given 'name' or create a new
        one. Use this method for thread-safe locks.

        :param name: Name of lock file.
        :param lock_path: Directory for lock files. Defaults to the temporary
            directory returned by :func:`tempfile.gettempdir()` if not given.
        """
        with cls._singleton_lock:
            try:
                instance = cls._instances[name]
            except KeyError:
                instance = cls(name, lock_path)
                cls._instances[name] = instance

            return instance

    def __init__(self, name: str, lock_path: Optional[str] = None) -> None:
        self.name = name
        dirname = lock_path or tempfile.gettempdir()
        lock_path = os.path.join(dirname, name)
        self._internal_lock = threading.Semaphore()
        self._external_lock = InterProcessLock(lock_path)
        self._lock = threading.RLock()

    def acquire(self) -> bool:
        """
        Attempts to acquire the given lock.

        :returns: Whether or not the acquisition succeeded.
        """
        with self._lock:
            locked_internal = self._internal_lock.acquire(blocking=False)

            if not locked_internal:
                return False

            try:
                locked_external = self._external_lock.acquire(blocking=False)
            except Exception:
                self._internal_lock.release()
                raise
            else:
                if locked_external:
                    return True
                else:
                    self._internal_lock.release()
                    return False

    def release(self) -> None:
        """Release the previously acquired lock."""
        with self._lock:
            self._external_lock.release()
            self._internal_lock.release()

    def locked(self) -> bool:
        """Checks if the lock is currently held by any thread or process."""
        with self._lock:
            gotten = self.acquire()
            if gotten:
                self.release()
            return not gotten

    def locking_pid(self) -> Optional[int]:
        """
        Returns the PID of the process which currently holds the lock or
        ``None``. This should work on macOS, OpenBSD and Linux but may fail on
        some platforms. Always use :meth:`locked` to check if the lock is held
        by any process.

        :returns: The PID of the process which currently holds the lock or
            ``None``.
        """
        with self._lock:
            if self._external_lock.acquired:
                return os.getpid()

            try:
                # don't close again in case we are the locking process
                self._external_lock._do_open()
                lockdata, fmt, pid_index = _get_lockdata()
                lockdata = fcntl.fcntl(
                    self._external_lock.lockfile, fcntl.F_GETLK, lockdata
                )
                lockdata_list = struct.unpack(fmt, lockdata)
                pid = lockdata_list[pid_index]

                if pid > 0:
                    return pid
            except OSError:
                pass

            return None
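# A usage sketch for the combined thread/process lock above; the lock name is
# illustrative, and Lock.singleton returns one shared instance per name so that
# threads within the same process contend on the same object:
lock = Lock.singleton("my-daemon")
if lock.acquire():
    try:
        print("holding the lock; doing exclusive work")
    finally:
        lock.release()
else:
    print("already locked by PID %s" % lock.locking_pid())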
def main(argv=None):
    '''Command line options.'''

    # setup argparser
    parser = argparse.ArgumentParser()

    parser.add_argument('--version', action='version',
                        version='%s v%s (%s)' % (os.path.basename(__file__),
                                                 __version__, __updated__))

    # Actions
    action_group = parser.add_argument_group("Actions",
                                              "A single action must be selected.")
    actions = action_group.add_mutually_exclusive_group(required=True)
    actions.add_argument("--report", dest="report", type=str,
                         help="Submit the given textual report", metavar="TEXT")
    actions.add_argument("--report-from-file", dest="report_file", type=str,
                         help="Submit the given file as textual report",
                         metavar="FILE")
    actions.add_argument("--cycle", dest="cycle", type=str,
                         help="Cycle the pool with the given ID", metavar="ID")
    actions.add_argument("--disable", dest="disable", type=str,
                         help="Disable the pool with the given ID", metavar="ID")
    actions.add_argument("--enable", dest="enable", type=str,
                         help="Enable the pool with the given ID", metavar="ID")

    # Options
    parser.add_argument("--keep-reporting", dest="keep_reporting", default=0,
                        type=int,
                        help="Keep reporting from the specified file with specified interval",
                        metavar="SECONDS")
    parser.add_argument("--random-offset", dest="random_offset", default=0,
                        type=int,
                        help="Random offset for the reporting interval (+/-)",
                        metavar="SECONDS")

    # Settings
    parser.add_argument("--serverhost", dest="serverhost",
                        help="Server hostname for remote signature management",
                        metavar="HOST")
    parser.add_argument("--serverport", dest="serverport", type=int,
                        help="Server port to use", metavar="PORT")
    parser.add_argument("--serverproto", dest="serverproto",
                        help="Server protocol to use (default is https)",
                        metavar="PROTO")
    parser.add_argument("--serverauthtokenfile", dest="serverauthtokenfile",
                        help="File containing the server authentication token",
                        metavar="FILE")
    parser.add_argument("--clientid", dest="clientid",
                        help="Client ID to use when submitting issues",
                        metavar="ID")

    # process options
    opts = parser.parse_args(argv)

    if opts.keep_reporting and not opts.report_file:
        print("Error: --keep-reporting is only valid with --report-from-file",
              file=sys.stderr)
        return 2

    serverauthtoken = None
    if opts.serverauthtokenfile:
        with open(opts.serverauthtokenfile) as f:
            serverauthtoken = f.read().rstrip()

    reporter = EC2Reporter(opts.serverhost, opts.serverport, opts.serverproto,
                           serverauthtoken, opts.clientid)
    report = None

    if opts.cycle:
        reporter.cycle(opts.cycle)
        return 0
    elif opts.enable:
        reporter.enable(opts.enable)
        return 0
    elif opts.disable:
        reporter.disable(opts.disable)
        return 0
    elif opts.report_file:
        if opts.keep_reporting:
            if opts.random_offset > 0:
                random.seed(reporter.clientId)

            lock = InterProcessLock(opts.report_file + ".lock")
            while True:
                if os.path.exists(opts.report_file):
                    if not lock.acquire(timeout=opts.keep_reporting):
                        continue
                    try:
                        with open(opts.report_file) as f:
                            report = f.read()
                        try:
                            reporter.report(report)
                        except RuntimeError as e:
                            # Ignore errors if the server is temporarily unavailable
                            print("Failed to contact server: %s" % e,
                                  file=sys.stderr)
                    finally:
                        lock.release()

                random_offset = 0
                if opts.random_offset:
                    random_offset = random.randint(-opts.random_offset,
                                                   opts.random_offset)
                time.sleep(opts.keep_reporting + random_offset)
        else:
            with open(opts.report_file) as f:
                report = f.read()
    else:
        report = opts.report

    reporter.report(report)
    return 0
from config import config
from fasteners import InterProcessLock

ipl = InterProcessLock(config['lock_file'])


def lock_obtained():
    print('Lock obtained, starting YeenBot...')
    from yeenbot import main
    main()


def lock_not_obtained():
    print('Lock not obtained, exiting')


# Attempt to acquire the lock
acquired = ipl.acquire(blocking=False)
try:
    if acquired:
        lock_obtained()
    else:
        lock_not_obtained()
finally:
    if acquired:
        ipl.release()
class Lock:
    """An inter-process and inter-thread lock

    This internally uses :class:`fasteners.InterProcessLock` but provides
    non-blocking acquire. It also guarantees thread-safety when using the
    :meth:`singleton` class method to create / retrieve a lock instance.

    :param path: Path of the lock file to use / create.
    """

    _instances: Dict[str, "Lock"] = {}
    _singleton_lock = threading.Lock()

    @classmethod
    def singleton(cls, path: str) -> "Lock":
        """
        Retrieve an existing lock object with a given 'name' or create a new
        one. Use this method for thread-safe locks.

        :param path: Path of the lock file to use / create.
        """
        with cls._singleton_lock:
            try:
                instance = cls._instances[path]
            except KeyError:
                instance = cls(path)
                cls._instances[path] = instance

            return instance

    def __init__(self, path: str) -> None:
        self.path = path

        self._internal_lock = threading.Semaphore()
        self._external_lock = InterProcessLock(self.path)

        self._lock = threading.RLock()

    def acquire(self) -> bool:
        """
        Attempts to acquire the given lock.

        :returns: Whether or not the acquisition succeeded.
        """
        with self._lock:
            locked_internal = self._internal_lock.acquire(blocking=False)

            if not locked_internal:
                return False

            try:
                locked_external = self._external_lock.acquire(blocking=False)
            except Exception:
                self._internal_lock.release()
                raise
            else:
                if locked_external:
                    return True
                else:
                    self._internal_lock.release()
                    return False

    def release(self) -> None:
        """Release the previously acquired lock."""
        with self._lock:
            self._external_lock.release()
            self._internal_lock.release()

    def locked(self) -> bool:
        """
        Checks if the lock is currently held by any thread or process.

        :returns: Whether the lock is acquired.
        """
        with self._lock:
            gotten = self.acquire()
            if gotten:
                self.release()
            return not gotten

    def locking_pid(self) -> Optional[int]:
        """
        Returns the PID of the process which currently holds the lock or
        ``None``. This should work on macOS, OpenBSD and Linux but may fail on
        some platforms. Always use :meth:`locked` to check if the lock is held
        by any process.

        :returns: The PID of the process which currently holds the lock or
            ``None``.
        """
        with self._lock:
            if self._external_lock.acquired:
                return os.getpid()

            try:
                # Don't close again in case we are the locking process.
                self._external_lock._do_open()
                lockdata, fmt, pid_index = _get_lockdata()
                lockdata = fcntl.fcntl(
                    self._external_lock.lockfile, fcntl.F_GETLK, lockdata
                )
                lockdata_list = struct.unpack(fmt, lockdata)
                pid = lockdata_list[pid_index]

                if pid > 0:
                    return pid
            except OSError:
                pass

            return None
def main(argv=None):
    '''Command line options.'''

    # setup argparser
    parser = argparse.ArgumentParser()

    parser.add_argument('--version', action='version',
                        version='%s v%s (%s)' % (os.path.basename(__file__),
                                                 __version__, __updated__))

    # Actions
    action_group = parser.add_argument_group(
        "Actions", "A single action must be selected.")
    actions = action_group.add_mutually_exclusive_group(required=True)
    actions.add_argument("--report", dest="report", type=str,
                         help="Submit the given textual report", metavar="TEXT")
    actions.add_argument("--report-from-file", dest="report_file", type=str,
                         help="Submit the given file as textual report",
                         metavar="FILE")
    actions.add_argument("--cycle", dest="cycle", type=str,
                         help="Cycle the pool with the given ID", metavar="ID")

    # Options
    parser.add_argument(
        "--keep-reporting", dest="keep_reporting", default=0, type=int,
        help="Keep reporting from the specified file with specified interval",
        metavar="SECONDS")
    parser.add_argument("--random-offset", dest="random_offset", default=0,
                        type=int,
                        help="Random offset for the reporting interval (+/-)",
                        metavar="SECONDS")

    # Settings
    parser.add_argument("--serverhost", dest="serverhost",
                        help="Server hostname for remote signature management",
                        metavar="HOST")
    parser.add_argument("--serverport", dest="serverport", type=int,
                        help="Server port to use", metavar="PORT")
    parser.add_argument("--serverproto", dest="serverproto",
                        help="Server protocol to use (default is https)",
                        metavar="PROTO")
    parser.add_argument("--serverauthtokenfile", dest="serverauthtokenfile",
                        help="File containing the server authentication token",
                        metavar="FILE")
    parser.add_argument("--clientid", dest="clientid",
                        help="Client ID to use when submitting issues",
                        metavar="ID")

    # process options
    opts = parser.parse_args(argv)

    if opts.keep_reporting and not opts.report_file:
        print("Error: --keep-reporting is only valid with --report-from-file",
              file=sys.stderr)
        return 2

    serverauthtoken = None
    if opts.serverauthtokenfile:
        with open(opts.serverauthtokenfile) as f:
            serverauthtoken = f.read().rstrip()

    reporter = EC2Reporter(opts.serverhost, opts.serverport, opts.serverproto,
                           serverauthtoken, opts.clientid)
    report = None

    if opts.cycle:
        reporter.cycle(opts.cycle)
        return 0
    elif opts.report_file:
        if opts.keep_reporting:
            if opts.random_offset > 0:
                random.seed(reporter.clientId)

            lock = InterProcessLock(opts.report_file + ".lock")
            while True:
                if os.path.exists(opts.report_file):
                    if not lock.acquire(timeout=opts.keep_reporting):
                        continue
                    try:
                        with open(opts.report_file) as f:
                            report = f.read()
                        try:
                            reporter.report(report)
                        except RuntimeError as e:
                            # Ignore errors if the server is temporarily unavailable
                            print("Failed to contact server: %s" % e,
                                  file=sys.stderr)
                    finally:
                        lock.release()

                random_offset = 0
                if opts.random_offset:
                    random_offset = random.randint(-opts.random_offset,
                                                   opts.random_offset)
                time.sleep(opts.keep_reporting + random_offset)
        else:
            with open(opts.report_file) as f:
                report = f.read()
    else:
        report = opts.report

    reporter.report(report)
    return 0
def run(self):
    """
    Run application.
    """
    # configure SQLAlchemy logging
    # log_level = self.logger.getEffectiveLevel()
    # logging.getLogger('sqlalchemy.engine').setLevel(log_level)

    exit_code = ExitCodes.EXIT_SUCCESS
    try:
        pid_lock = InterProcessLock(self.args.path_pidfile)
        pid_lock_gotten = pid_lock.acquire(blocking=False)
        if not pid_lock_gotten:
            raise AlreadyHarvesting(self.args.path_pidfile)
        self.logger.debug('Acquired PID lock {0!r}'.format(
            self.args.path_pidfile))

        if (self.args.no_routes and self.args.no_vnetworks and
                not self.args.truncate):
            raise NothingToDo()

        harvesting = not (self.args.no_routes and self.args.no_vnetworks)

        Session = db.ScopedSession()
        Session.configure(bind=self.args.db_engine)

        db.configure_db(self.DB_PRAGMAS)

        # TODO(damb): Implement multithreaded harvesting using a thread
        # pool.
        try:
            if harvesting:
                self.logger.info('Start harvesting.')

            if not self.args.no_routes:
                self._harvest_routes(Session)
            else:
                self.logger.warn(
                    'Disabled processing <route></route> information.')

            if not self.args.no_vnetworks:
                self._harvest_vnetworks(Session)
            else:
                self.logger.warn(
                    'Disabled processing <vnetwork></vnetwork> '
                    'information.')

            if harvesting:
                self.logger.info('Finished harvesting successfully.')

            if self.args.truncate:
                self.logger.warning('Removing outdated data.')

                session = Session()
                with db.session_guard(session) as _session:
                    num_removed_rows = db.clean(_session,
                                                self.args.truncate)

                self.logger.info('Number of rows removed: {}'.format(
                    num_removed_rows))

        except OperationalError as err:
            raise db.StationLiteDBEngineError(err)

    # TODO(damb): signal handling

    except Error as err:
        self.logger.error(err)
        exit_code = ExitCodes.EXIT_ERROR
    except Exception as err:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        self.logger.critical('Local Exception: %s' % err)
        self.logger.critical('Traceback information: ' + repr(
            traceback.format_exception(exc_type, exc_value, exc_traceback)))
        exit_code = ExitCodes.EXIT_ERROR
    finally:
        try:
            if pid_lock_gotten:
                pid_lock.release()
        except NameError:
            pass

    sys.exit(exit_code)