def pull(dry_run, flavor):
    """Pull down tasks from forges and add them to your taskwarrior tasks.

    Relies on configuration in bugwarriorrc.

    :param dry_run: If truthy, report what would change without writing
        to the taskwarrior database.
    :param flavor: Optional configuration flavor used to select the main
        bugwarrior config section.
    """
    twiggy.quickSetup()
    try:
        main_section = _get_section_name(flavor)

        # Load our config file
        config = load_config(main_section)
        tw_config = TaskWarriorBase.load_config(
            get_taskrc_path(config, main_section))

        # Serialize bugwarrior runs against a single taskwarrior data dir.
        lockfile_path = os.path.join(
            os.path.expanduser(tw_config["data"]["location"]),
            "bugwarrior.lockfile")
        lockfile = PIDLockFile(lockfile_path)
        lockfile.acquire(timeout=10)
        try:
            # Get all the issues.  This can take a while.
            issue_generator = aggregate_issues(config, main_section)

            # Stuff them in the taskwarrior db as necessary
            synchronize(issue_generator, config, main_section, dry_run)
        finally:
            lockfile.release()
    except LockTimeout:
        log.name("command").critical(
            "Your taskrc repository is currently locked. "
            "Remove the file at %s if you are sure no other "
            "bugwarrior processes are currently running." % (lockfile_path)
        )
    except Exception:
        # Was a bare ``except:``, which also trapped SystemExit and
        # KeyboardInterrupt; narrowed so those propagate normally.
        log.name("command").trace("error").critical("oh noes")
def pull(dry_run, flavor, interactive, debug):
    """Pull down tasks from forges and add them to your taskwarrior tasks.

    Relies on configuration in bugwarriorrc.

    :param dry_run: If truthy, report changes without writing to taskwarrior.
    :param flavor: Optional configuration flavor selecting the main section.
    :param interactive: Forwarded to config loading — presumably enables
        interactive prompting; confirm against ``_try_load_config``.
    :param debug: Forwarded to ``aggregate_issues`` — presumably toggles
        debug behavior during aggregation; confirm against that function.
    """
    try:
        main_section = _get_section_name(flavor)
        config = _try_load_config(main_section, interactive)

        # Serialize bugwarrior runs against a single data dir.
        lockfile_path = os.path.join(get_data_path(config, main_section),
                                     'bugwarrior.lockfile')
        lockfile = PIDLockFile(lockfile_path)
        lockfile.acquire(timeout=10)
        try:
            # Get all the issues.  This can take a while.
            issue_generator = aggregate_issues(config, main_section, debug)

            # Stuff them in the taskwarrior db as necessary
            synchronize(issue_generator, config, main_section, dry_run)
        finally:
            lockfile.release()
    except LockTimeout:
        # Lazy %-args let logging defer string formatting.
        log.critical(
            'Your taskrc repository is currently locked. '
            'Remove the file at %s if you are sure no other '
            'bugwarrior processes are currently running.', lockfile_path)
    except RuntimeError as e:
        log.critical("Aborted (%s)", e)
def pull():
    """Pull down tasks from forges and add them to your taskwarrior tasks.

    Relies on configuration in ~/.bugwarriorrc.
    """
    twiggy.quickSetup()
    try:
        # Load our config file
        config = load_config()
        tw_config = TaskWarriorBase.load_config(get_taskrc_path(config))

        # Lock on the taskwarrior data dir so runs don't interleave.
        lockfile_path = os.path.join(
            os.path.expanduser(tw_config['data']['location']),
            'bugwarrior.lockfile')
        lockfile = PIDLockFile(lockfile_path)
        lockfile.acquire(timeout=10)
        try:
            # Get all the issues.  This can take a while.
            issue_generator = aggregate_issues(config)

            # Stuff them in the taskwarrior db as necessary
            synchronize(issue_generator, config)
        finally:
            lockfile.release()
    except LockTimeout:
        log.name('command').critical(
            'Your taskrc repository is currently locked. '
            'Remove the file at %s if you are sure no other '
            'bugwarrior processes are currently running.' % (lockfile_path))
    except Exception:
        # Was a bare ``except:``, which also trapped SystemExit and
        # KeyboardInterrupt; narrowed so those propagate normally.
        log.name('command').trace('error').critical('oh noes')
def pull(dry_run, flavor, interactive, debug):
    """Pull down tasks from forges and add them to your taskwarrior tasks.

    Relies on configuration in bugwarriorrc.

    :param dry_run: If truthy, report changes without writing to taskwarrior.
    :param flavor: Optional configuration flavor selecting the main section.
    :param interactive: Forwarded to config loading — presumably enables
        interactive prompting; confirm against ``_try_load_config``.
    :param debug: Forwarded to ``aggregate_issues`` — presumably toggles
        debug behavior during aggregation; confirm against that function.
    """
    try:
        main_section = _get_section_name(flavor)
        config = _try_load_config(main_section, interactive)

        # Serialize bugwarrior runs against a single data dir.
        lockfile_path = os.path.join(get_data_path(config, main_section),
                                     'bugwarrior.lockfile')
        lockfile = PIDLockFile(lockfile_path)
        lockfile.acquire(timeout=10)
        try:
            # Get all the issues.  This can take a while.
            issue_generator = aggregate_issues(config, main_section, debug)

            # Stuff them in the taskwarrior db as necessary
            synchronize(issue_generator, config, main_section, dry_run)
        finally:
            lockfile.release()
    except LockTimeout:
        # Lazy %-args let logging defer string formatting.
        log.critical('Your taskrc repository is currently locked. '
                     'Remove the file at %s if you are sure no other '
                     'bugwarrior processes are currently running.',
                     lockfile_path)
    except RuntimeError as e:
        log.exception("Aborted (%s)", e)
def run_with_lock(remove=False):
    """Run the fulltext-index update under a PID lock file.

    Guarantees at most one 'update_index' job runs at a time; a lock held
    by a dead process is broken and re-acquired.
    """
    lockfile_path = getattr(
        settings, "HYPERKITTY_JOBS_UPDATE_INDEX_LOCKFILE",
        os.path.join(gettempdir(), "hyperkitty-jobs-update-index.lock"))
    job_lock = PIDLockFile(lockfile_path)
    try:
        job_lock.acquire(timeout=-1)
    except AlreadyLocked:
        if check_pid(job_lock.read_pid()):
            logger.warning("The job 'update_index' is already running")
            return
        # The lock holder is gone: steal the lock and proceed.
        job_lock.break_lock()
        job_lock.acquire(timeout=-1)
    except LockFailed as e:
        logger.warning(
            "Could not obtain a lock for the 'update_index' job (%s)", e)
        return
    try:
        update_index(remove=remove)
    except Exception as e:
        logger.exception("Failed to update the fulltext index: %s", e)
    finally:
        job_lock.release()
def run_maestral_daemon(config_name="maestral", run=True, log_to_stdout=False):
    """
    Wraps :class:`maestral.main.Maestral` as Pyro daemon object, creates a new
    instance and start Pyro's event loop to listen for requests on a unix
    domain socket. This call will block until the event loop shuts down.

    This command will return silently if the daemon is already running.

    :param str config_name: The name of the Maestral configuration to use.
    :param bool run: If ``True``, start syncing automatically. Defaults to
        ``True``.
    :param bool log_to_stdout: If ``True``, write logs to stdout. Defaults to
        ``False``.
    """
    from maestral.main import Maestral

    sock_name = sockpath_for_config(config_name)
    pid_name = pidpath_for_config(config_name)

    lockfile = PIDLockFile(pid_name)

    # acquire PID lock file
    try:
        lockfile.acquire(timeout=1)
    except (AlreadyLocked, LockTimeout):
        # Fixed: with a positive timeout, lockfile raises LockTimeout (not
        # AlreadyLocked) while the lock is held, so the original handler
        # never fired and the timeout escaped to the caller.
        if is_pidfile_stale(lockfile):
            lockfile.break_lock()
        else:
            # Fixed: was an f-string with no placeholders.
            logger.debug("Maestral already running")
            return

    logger.debug(f"Starting Maestral daemon on socket '{sock_name}'")

    try:
        # clean up old socket, create new one
        try:
            os.remove(sock_name)
        except FileNotFoundError:
            pass

        daemon = Daemon(unixsocket=sock_name)

        # start Maestral as Pyro server
        ExposedMaestral = expose(Maestral)
        # mark stop_sync and shutdown_daemon as oneway methods
        # so that they don't block on call
        ExposedMaestral.stop_sync = oneway(ExposedMaestral.stop_sync)
        ExposedMaestral.shutdown_pyro_daemon = oneway(
            ExposedMaestral.shutdown_pyro_daemon)

        m = ExposedMaestral(config_name, run=run, log_to_stdout=log_to_stdout)
        daemon.register(m, f"maestral.{config_name}")
        daemon.requestLoop(loopCondition=m._loop_condition)
        daemon.close()
    except Exception:
        traceback.print_exc()
    finally:
        # remove PID lock
        lockfile.release()
def main():
    """Entry point for Piccolo Server.

    Configures logging, then starts the server either in the foreground or
    daemonized behind a PID lock file, depending on configuration.
    """
    serverCfg = piccolo.PiccoloServerConfig()

    # start logging
    handler = piccoloLogging(logfile=serverCfg.cfg['logging']['logfile'],
                             debug=serverCfg.cfg['logging']['debug'])
    log = logging.getLogger("piccolo.server")

    if serverCfg.cfg['daemon']['daemon']:
        import daemon
        try:
            import lockfile
        except ImportError:
            print(
                "The 'lockfile' Python module is required to run Piccolo Server. Ensure that version 0.12 or later of lockfile is installed."
            )
            sys.exit(1)
        try:
            from lockfile.pidlockfile import PIDLockFile
        except ImportError:
            print(
                "An outdated version of the 'lockfile' Python module is installed. Piccolo Server requires at least version 0.12 or later of lockfile."
            )
            sys.exit(1)
        from lockfile import AlreadyLocked, NotLocked

        # create a pid file and tidy up if required
        pidfile = PIDLockFile(serverCfg.cfg['daemon']['pid_file'], timeout=-1)
        try:
            pidfile.acquire()
        except AlreadyLocked:
            try:
                # Signal 0 only probes whether the PID exists.
                os.kill(pidfile.read_pid(), 0)
                print('Process already running!')
                # Fixed: was the site-provided ``exit()`` helper, which is
                # not guaranteed to exist in non-interactive runs.
                sys.exit(1)
            except OSError:  # No process with locked PID
                print('PID file exists but process is dead')
                pidfile.break_lock()
        try:
            pidfile.release()
        except NotLocked:
            pass

        # Kept open deliberately: DaemonContext owns it as stderr.
        pstd = open(serverCfg.cfg['daemon']['logfile'], 'w')
        with daemon.DaemonContext(pidfile=pidfile,
                                  files_preserve=[handler.stream],
                                  stderr=pstd):
            # start piccolo
            piccolo_server(serverCfg)
    else:
        # start piccolo
        piccolo_server(serverCfg)
def wrapped_f():
    """Run the wrapped callable ``f`` while holding the PID lock file.

    Closure variables from the enclosing decorator: ``filename``,
    ``timeout``, ``suppress_error`` and ``f``.
    """
    _ensure_path_exists(os.path.dirname(filename))
    lock = PIDLockFile(filename, timeout=timeout)
    try:
        lock.acquire()
    except lockfile.LockTimeout:
        if suppress_error:
            logger.info('Unable to acquire lock: %s', filename)
            # could continue, but probably safer to quit?
            os._exit(os.EX_OK)  # sys.exit raises an exception
        else:
            raise
    else:
        logger.info('Acquired lock: %s', filename)
        try:
            f()
        finally:
            # Fixed: release even when ``f`` raises, so a crash does not
            # leave a stale lock behind for the next run.
            lock.release()
            logger.info('Released lock: %s', filename)
def wrapped(*args, **kwargs):
    """Run ``f`` either in the foreground or daemonized behind a PID file.

    Closure variables from the enclosing decorator: ``pid_file``,
    ``force_daemon``, ``signal_map``, ``clean`` and ``f``.
    """
    logging.debug('Start daemon')

    if not pid_file and not force_daemon:
        # Foreground mode: install handlers and run in this process.
        if signal_map:
            for key in signal_map.keys():
                signal.signal(key, signal_map[key])
        logging.debug('Daemons pid: %s', os.getpid())
        f(*args, **kwargs)
        if clean:
            clean()
        return

    if pid_file and pid_file not in ['-']:
        pid_path = os.path.abspath(pid_file)

        # clean old pids
        pidfile = PIDLockFile(pid_path, timeout=-1)
        try:
            pidfile.acquire()
            pidfile.release()
        except (AlreadyLocked, LockTimeout):
            try:
                # Signal 0 only probes whether the PID exists.
                os.kill(pidfile.read_pid(), 0)
                # Fixed: logging.warn() is a deprecated alias.
                logging.warning('Process already running!')
                exit(2)
            except OSError:  # No process with locked PID
                pidfile.break_lock()

        pidfile = PIDLockFile(pid_path, timeout=-1)
        context = _daemon.DaemonContext(pidfile=pidfile)
    else:
        context = _daemon.DaemonContext()

    if signal_map:
        context.signal_map = signal_map

    # Fixed: the original called ``context.open()`` immediately before
    # ``with context:``; ``__enter__`` calls ``open()`` itself and
    # ``open()`` is a no-op when already open, so the explicit call
    # was redundant.
    with context:
        logging.debug('Daemons pid: %s', os.getpid())
        f(*args, **kwargs)
        if clean:
            clean()
def pull():
    """Pull down tasks from forges and add them to your taskwarrior tasks.

    Relies on configuration in ~/.bugwarriorrc.
    """
    twiggy.quickSetup()
    try:
        # Load our config file
        config = load_config()
        tw_config = TaskWarriorBase.load_config(get_taskrc_path(config))

        # Lock on the taskwarrior data dir so runs don't interleave.
        lockfile_path = os.path.join(
            os.path.expanduser(
                tw_config['data']['location']
            ),
            'bugwarrior.lockfile'
        )
        lockfile = PIDLockFile(lockfile_path)
        lockfile.acquire(timeout=10)
        try:
            # Get all the issues.  This can take a while.
            issue_generator = aggregate_issues(config)

            # Stuff them in the taskwarrior db as necessary
            synchronize(issue_generator, config)
        finally:
            lockfile.release()
    except LockTimeout:
        log.name('command').critical(
            'Your taskrc repository is currently locked. '
            'Remove the file at %s if you are sure no other '
            'bugwarrior processes are currently running.' % (
                lockfile_path
            )
        )
    except Exception:
        # Was a bare ``except:``, which also trapped SystemExit and
        # KeyboardInterrupt; narrowed so those propagate normally.
        log.name('command').trace('error').critical('oh noes')
def run_with_lock(remove=False):
    """Run the fulltext-index update under a PID lock file.

    Ensures at most one 'update_index' job runs at a time; a lock held by
    a dead process is broken and re-acquired.

    :param remove: Forwarded to ``update_index`` — presumably controls
        removal of deleted documents from the index; confirm against that
        function.
    """
    # Lock path is configurable via settings, with a tmpdir fallback.
    lock = PIDLockFile(getattr(
        settings, "HYPERKITTY_JOBS_UPDATE_INDEX_LOCKFILE",
        os.path.join(gettempdir(), "hyperkitty-jobs-update-index.lock")))
    try:
        # timeout=-1: fail immediately instead of blocking.
        lock.acquire(timeout=-1)
    except AlreadyLocked:
        if check_pid(lock.read_pid()):
            logger.warning("The job 'update_index' is already running")
            return
        else:
            # Holder is dead: steal the lock and proceed.
            lock.break_lock()
            lock.acquire(timeout=-1)
    except LockFailed as e:
        # e.g. the lock file's directory is not writable.
        logger.warning("Could not obtain a lock for the 'update_index' "
                       "job (%s)", e)
        return
    try:
        update_index(remove=remove)
    except Exception as e:  # pylint: disable-msg=broad-except
        logger.exception("Failed to update the fulltext index: %s", e)
    finally:
        lock.release()
# NOTE(review): fragment — this code begins mid-function; ``old`` (a
# psutil.Process), ``pidlock``, ``options`` and ``PermalinkServer`` are
# defined outside this view.  Indentation reconstructed; confirm against
# the full file.

# Give the previous server process 10s to exit, then force-kill it;
# permission/absence errors are deliberately ignored (best effort).
try:
    old.wait(10)
except psutil.TimeoutExpired:
    old.kill()
except psutil.AccessDenied:
    pass
except psutil.NoSuchProcess:
    pass
pidlock.break_lock()
pidlock.acquire(timeout=10)

application = PermalinkServer()
http_server = tornado.httpserver.HTTPServer(application, xheaders=True)
http_server.listen(options.port)

# Stop the IOLoop (and thus the server) on HUP/INT/TERM.
def handler(signum, frame):
    tornado.ioloop.IOLoop.instance().stop()

signal.signal(signal.SIGHUP, handler)
signal.signal(signal.SIGINT, handler)
signal.signal(signal.SIGTERM, handler)

# Tell systemd we are ready, if python-systemd is available (best effort).
try:
    from systemd.daemon import notify
    notify('READY=1\nMAINPID={}'.format(os.getpid()), True)
except ImportError:
    pass

tornado.ioloop.IOLoop.instance().start()
pidlock.release()
# NOTE(review): fragment — this code begins mid-function, inside an
# exception handler whose ``try``/``if`` open outside this view; ``old``,
# ``pidlock``, ``args``, ``logger``, ``SageCellServer`` and
# ``get_ip_address`` are defined elsewhere.  Indentation of the opening
# lines is reconstructed; confirm against the full file.
            logger.error("The process seems to be SageCell, but "
                         "can not be stopped. Its command line: %s"
                         % old.cmdline())
    else:
        logger.info("Process does not seem to be SageCell.")
except psutil.NoSuchProcess:
    logger.info("No such process exist anymore.")
logger.info("Breaking old lock.")
pidlock.break_lock()
pidlock.acquire(timeout=10)

app = SageCellServer(args.baseurl, args.dir)
listen = {'port': args.port, 'xheaders': True}
if args.interface is not None:
    # Bind to the address of the requested network interface.
    listen['address'] = get_ip_address(args.interface)
logger.info("Listening configuration: %s", listen)

# Shut down the kernel dealer and the IOLoop on HUP/INT/TERM.
def handler(signum, frame):
    logger.info("Received %s, shutting down...", signum)
    app.kernel_dealer.stop()
    app.ioloop.stop()

signal.signal(signal.SIGHUP, handler)
signal.signal(signal.SIGINT, handler)
signal.signal(signal.SIGTERM, handler)
app.listen(**listen)
app.ioloop.start()
pidlock.release()
logger.info('SageCell server stopped')
def run_maestral_daemon(config_name='maestral', run=True, log_to_stdout=False):
    """
    Wraps :class:`maestral.main.Maestral` as Pyro daemon object, creates a new
    instance and start Pyro's event loop to listen for requests on a unix
    domain socket. This call will block until the event loop shuts down.

    This command will return silently if the daemon is already running.

    :param str config_name: The name of the Maestral configuration to use.
    :param bool run: If ``True``, start syncing automatically. Defaults to
        ``True``.
    :param bool log_to_stdout: If ``True``, write logs to stdout. Defaults to
        ``False``.
    """
    import threading
    from maestral.main import Maestral

    sock_name = sockpath_for_config(config_name)
    pid_name = pidpath_for_config(config_name)

    lockfile = PIDLockFile(pid_name)

    # Signal handlers may only be installed from the main thread.
    if threading.current_thread() is threading.main_thread():
        signal.signal(signal.SIGTERM, _sigterm_handler)

    # acquire PID lock file
    try:
        lockfile.acquire(timeout=1)
    except (AlreadyLocked, LockTimeout):
        if is_pidfile_stale(lockfile):
            lockfile.break_lock()
        else:
            # Fixed: was an f-string with no placeholders.
            logger.debug('Maestral already running')
            return

    # Nice ourselves to give other processes priority. We will likely only
    # have significant CPU usage in case of many concurrent downloads.
    os.nice(10)

    logger.debug(f'Starting Maestral daemon on socket "{sock_name}"')

    try:
        # clean up old socket
        try:
            os.remove(sock_name)
        except FileNotFoundError:
            pass

        daemon = Daemon(unixsocket=sock_name)

        # start Maestral as Pyro server
        ExposedMaestral = expose(Maestral)
        # mark stop_sync and shutdown_daemon as one way
        # methods so that they don't block on call
        ExposedMaestral.stop_sync = oneway(ExposedMaestral.stop_sync)
        ExposedMaestral.pause_sync = oneway(ExposedMaestral.pause_sync)
        ExposedMaestral.shutdown_pyro_daemon = oneway(
            ExposedMaestral.shutdown_pyro_daemon)

        m = ExposedMaestral(config_name, run=run, log_to_stdout=log_to_stdout)
        daemon.register(m, f'maestral.{config_name}')
        daemon.requestLoop(loopCondition=m._loop_condition)
        daemon.close()
    except Exception:
        traceback.print_exc()
    except (KeyboardInterrupt, SystemExit):
        # Reachable despite the handler above: neither exception derives
        # from Exception.
        logger.info('Received system exit')
        sys.exit(0)
    finally:
        # remove PID lock
        lockfile.release()