def lock_instance():
    """Generator guarding single-instance execution via a lock file.

    Acquires ``file_syncer.lock``; if another instance already holds it,
    prints a notice and exits the process.  Otherwise yields control to
    the caller while the lock is held, and releases the lock when the
    caller finishes -- even if the caller raises.
    """
    try:
        lock = LockFile('file_syncer.lock')
    except LockError:
        # Another instance holds the lock -> refuse to run a second copy.
        print('Can not run multiple instance of this script. Try it later! :)')
        exit()
    try:
        # Fix: the original did ``yield lock.close()``, which released the
        # lock *before* handing control back, leaving the protected work
        # unguarded.  Yield first, release afterwards, and guarantee the
        # release with ``finally``.
        yield
    finally:
        lock.close()
def send_all(block_size=500, backend=None):
    """
    Send all non-deferred messages in the queue.

    A lock file is used to ensure that this process can not be started
    again while it is already running.

    The ``block_size`` argument allows for queued messages to be iterated
    in blocks, allowing new prioritised messages to be inserted during
    iteration of a large number of queued messages.
    """
    logger.debug("Acquiring lock...")
    try:
        lock = LockFile(LOCK_PATH)
    except LockError:
        # Another sender holds the lock; never run two senders at once.
        logger.debug("Lock already in place. Exiting.")
        return
    logger.debug("Lock acquired.")

    start_time = time.time()
    sent = deferred = skipped = 0
    connection = None
    try:
        if constants.EMAIL_BACKEND_SUPPORT:
            connection = get_connection(backend=backend)
        else:
            connection = get_connection()
        blacklist = models.Blacklist.objects.values_list('email', flat=True)
        connection.open()
        for message in _message_queue(block_size):
            result = send_queued_message(message, smtp_connection=connection,
                                         blacklist=blacklist)
            if result == constants.RESULT_SENT:
                sent += 1
            elif result == constants.RESULT_FAILED:
                deferred += 1
            elif result == constants.RESULT_SKIPPED:
                skipped += 1
    finally:
        # Fix: the connection was previously closed only on the success
        # path, leaking it whenever sending raised.  Close it here
        # (best-effort, so a close failure cannot mask the original
        # error or prevent the lock release below).
        if connection is not None:
            try:
                connection.close()
            except Exception:
                logger.exception("Error closing connection.")
        logger.debug("Releasing lock...")
        lock.close()
        logger.debug("Lock released.")

    logger.debug("")
    # Anything sent/deferred/skipped is noteworthy; an empty run is not.
    if sent or deferred or skipped:
        log = logger.warning
    else:
        log = logger.info
    log("%s sent, %s deferred, %s skipped." % (sent, deferred, skipped))
    logger.debug("Completed in %.2f seconds." % (time.time() - start_time))
def process_conversion_queue(self):
    """Run the conversion queue while holding the queue lock file.

    Returns the result of ``_process_conversion_queue``, or a notice
    string when another process already owns the lock.
    """
    try:
        queue_lock = LockFile(LOCKFILE_NAME)
    except LockError:
        # A concurrent run owns the lock file; report instead of racing it.
        return "`process_conversion_queue` is locked by another process (%r)." % (
            LOCKFILE_NAME)
    try:
        return self._process_conversion_queue()
    finally:
        # Always drop the lock, whether the queue run succeeded or raised.
        queue_lock.close()
def process_conversion_queue(self):
    """Drain the conversion queue under an exclusive file lock.

    If another process holds the lock, returns a notice string instead
    of running the queue a second time.
    """
    try:
        conversion_lock = LockFile(LOCKFILE_NAME)
    except LockError:
        notice = ('`process_conversion_queue` is locked by another '
                  'process ({0}).'.format(LOCKFILE_NAME))
        return notice
    try:
        return self._process_conversion_queue()
    finally:
        # Release the lock no matter how the queue run ends.
        conversion_lock.close()
def ensure_zeo_is_running(event):
    """We start zeo after the application has performed the basic
    initialization because we cannot import opennode.oms.zodb.db until
    all grokkers are run in the correct order.
    """
    # Only relevant when the configured storage backend is ZEO.
    if get_config().get('db', 'storage_type') != 'zeo':
        return

    log.msg("Ensuring ZEO is running", system='db')
    # prevent zeo starting during unit tests etc
    global _daemon_started
    if not _daemon_started:
        return

    # Deliberately deferred import: opennode.oms.zodb.db must not be
    # imported before all grokkers have run (see docstring above).
    from opennode.oms.zodb.db import get_db_dir
    db_dir = get_db_dir()

    from zc.lockfile import LockFile, LockError
    try:
        # If we can take data.fs.lock, no other process has the storage
        # open, so it is safe to launch our own ZEO server; closing()
        # releases the lock when run_zeo returns or raises.
        with closing(LockFile(os.path.join(db_dir, 'data.fs.lock'))):
            log.msg("Starting ZEO server", system='db')
            run_zeo(db_dir)
    except LockError:
        # Lock held elsewhere -- presumably a ZEO server is already up.
        log.msg("ZEO is already running", system='db')
def process_conversion_queue(self):
    """Process the conversion queue, guarded by a file lock.

    When another process already holds the lock a notice string is
    returned and nothing is processed.
    """
    # Disabling CSRF protection
    if HAS_PLONE_PROTECT:
        alsoProvides(self.request, IDisableCSRFProtection)

    try:
        queue_lock = LockFile(LOCKFILE_NAME)
    except LockError:
        return '`process_conversion_queue` is locked by another ' + \
            'process ({0}).'.format(LOCKFILE_NAME)

    try:
        return self._process_conversion_queue()
    finally:
        # Lock is released whether processing succeeded or raised.
        queue_lock.close()
class LockTest(TestCase):
    """
    Tests for Django Mailer trying to send mail when the lock is already
    in place.
    """

    def setUp(self):
        # Buffer that will receive the captured log output.
        self.output = StringIO()
        # Stream handler funnelling DEBUG-level records into the buffer.
        self.handler = logging.StreamHandler(self.output)
        self.handler.setLevel(logging.DEBUG)
        self.handler.setFormatter(logging.Formatter('%(message)s'))
        logging.getLogger('django_mailer').addHandler(self.handler)
        # Point the engine at a unique lock file, then take the lock
        # ourselves so the engine sees it as held by another process.
        self.original_lock_path = engine.LOCK_PATH
        engine.LOCK_PATH += '.mailer-test'
        self.lock = LockFile(engine.LOCK_PATH)

    def tearDown(self):
        # Detach the capturing handler.
        logging.getLogger('django_mailer').removeHandler(self.handler)
        # Restore the original lock path and drop the lock from setUp.
        engine.LOCK_PATH = self.original_lock_path
        self.lock.close()

    def test_locked(self):
        # The lock is already held (setUp), so send_all must bail out.
        engine.send_all()
        self.output.seek(0)
        self.assertEqual(self.output.readlines()[-1].strip(),
                         'Lock already in place. Exiting.')
def _do_base_backup(config):
    """Take a PostgreSQL base backup and upload it to S3.

    Enters backup mode with ``pg_start_backup``, tars the data directory
    (excluding ``pg_xlog``) into a gzipped temp file, uploads the archive
    to the configured bucket, then leaves backup mode.

    ``config`` keys used: lock_file, data_dir, access_key, secret_key,
    bucket, prefix.
    """
    # Single-instance guard; LockError propagates if a backup is running.
    lock = LockFile(config['lock_file'])
    try:
        conn = connect("")
        cur = conn.cursor()
        label = datetime.now().strftime("%Y%m%d%H%M%S")
        cur.execute("SELECT pg_start_backup('%s');" % label)
        try:
            fi, file_name = mkstemp()
            # Fix: open in binary mode -- tarfile emits gzip bytes and a
            # text-mode file corrupts the archive on Python 3.
            fp = fdopen(fi, 'wb')
            try:
                tar_f = tarfile.open(fileobj=fp, mode='w:gz')
                # WAL segments are shipped separately; skip pg_xlog here.
                tar_f.add(config['data_dir'], arcname="",
                          exclude=lambda x: '/pg_xlog' in x)
                tar_f.close()
            finally:
                fp.close()
            s3_conn = connect_s3(config['access_key'], config['secret_key'])
            bucket = s3_conn.get_bucket(config['bucket'])
            key = bucket.new_key(config['prefix'] + "base_" + label + ".tar.gz")
            key.set_contents_from_filename(file_name)
            unlink(file_name)
        finally:
            # Fix: always leave backup mode, even when the tar/upload
            # fails; the original skipped this on any exception, leaving
            # the server stuck in backup mode.
            cur.execute("SELECT pg_stop_backup();")
    finally:
        # Fix: release the lock on any error (previously leaked).
        lock.close()
def setUp(self):
    """Capture django_mailer debug logging and pre-acquire the lock."""
    # Destination buffer for the captured log output.
    self.output = StringIO()
    # DEBUG-level stream handler writing bare messages into the buffer.
    handler = logging.StreamHandler(self.output)
    handler.setLevel(logging.DEBUG)
    handler.setFormatter(logging.Formatter('%(message)s'))
    self.handler = handler
    logging.getLogger('django_mailer').addHandler(handler)
    # Switch the engine to a unique lock file and take the lock
    # ourselves, emulating a lock held by another process.
    self.original_lock_path = engine.LOCK_PATH
    engine.LOCK_PATH += '.mailer-test'
    self.lock = LockFile(engine.LOCK_PATH)
async def start_can(self):
    """Starts the JaegerCAN interface.

    When ``config['fps']['use_lock']`` is set and no PID lock is held
    yet, acquires a lock file so only one instance drives the bus.
    Then starts the existing ``JaegerCAN`` instance, or creates one
    from ``self.can`` and returns ``True``.

    Raises
    ------
    JaegerError
        If the lock file cannot be created (usually because another
        instance is already running).
    """
    use_lock = config["fps"]["use_lock"]
    if use_lock and self.pid_lock is None:
        try:
            # Fix: exist_ok avoids the race between the former
            # os.path.exists() check and the directory creation.
            os.makedirs(os.path.dirname(LOCK_FILE), exist_ok=True)
            self.pid_lock = LockFile(LOCK_FILE)
        except Exception as err:
            # Fix: chain the original exception so the root cause
            # (e.g. the underlying LockError) is not hidden.
            raise JaegerError(
                f"Failed creating lock file {LOCK_FILE}. "
                "Probably another instance is running. "
                "If that is not the case, remove the lock file and retry."
            ) from err
    if isinstance(self.can, JaegerCAN):
        # Already have a live interface; just (re)start it.
        await self.can.start()
        return
    self.can = await JaegerCAN.create(self.can, fps=self)
    return True
# High-DPI scaling must be enabled before the QApplication is created.
QtGui.QGuiApplication.setAttribute(QtCore.Qt.AA_EnableHighDpiScaling)
MiscUtils.configure_logging()
# Queue plus dedicated thread that serialize log records from
# background workers into the logging subsystem.
log_queue = Manager().Queue()
logger_thread = threading.Thread(target=MiscUtils.logger_thread_exec, args=(log_queue, ))
logger_thread.start()
app = QtWidgets.QApplication(sys.argv)
app.setWindowIcon(QtGui.QIcon(MiscUtils.get_app_icon_path()))
app.setApplicationDisplayName(
    "Batch Media Compressor")  # TODO test + add org / ver
app.setQuitOnLastWindowClosed(False)
try:
    # Single-instance guard: the lock is held for the entire Qt event
    # loop so a second copy of the application cannot start.
    lock = LockFile(MiscUtils.get_lock_file_path())
    tray_icon = TrayIcon(log_queue)
    tray_icon.show()
    return_code = app.exec_()
    tray_icon.cleanup()
    lock.close()
except LockError:
    # Lock already held -> another instance is (probably) running.
    error_msg = "Cannot acquire lock on file {}.\n\nAnother instance of the application is probably running.".format(
        MiscUtils.get_lock_file_path())
    logging.fatal(error_msg)
    QtWidgets.QMessageBox.critical(None, "Fatal Error", error_msg,
                                   QtWidgets.QMessageBox.Ok)
    return_code = -1
logging.info("Application is being shutdown")
# Sentinel telling the logger thread to drain and stop.
log_queue.put(None)