Example #1
def run_with_lock(remove=False):
    lock = PIDLockFile(
        getattr(
            settings, "HYPERKITTY_JOBS_UPDATE_INDEX_LOCKFILE",
            os.path.join(gettempdir(), "hyperkitty-jobs-update-index.lock")))
    try:
        lock.acquire(timeout=-1)
    except AlreadyLocked:
        if check_pid(lock.read_pid()):
            logger.warning("The job 'update_index' is already running")
            return
        else:
            lock.break_lock()
            lock.acquire(timeout=-1)
    except LockFailed as e:
        logger.warning(
            "Could not obtain a lock for the 'update_index' "
            "job (%s)", e)
        return
    try:
        update_index(remove=remove)
    except Exception as e:
        logger.exception("Failed to update the fulltext index: %s", e)
    finally:
        lock.release()
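
Examples 1 and 16 follow the same pattern: a non-blocking acquire(timeout=-1), then, on AlreadyLocked, a check of whether the recorded PID is still alive before breaking the lock. A minimal, self-contained sketch of that pattern follows; the lock path and the _pid_is_running and run_once names are illustrative assumptions, not taken from the examples above.

import errno
import os
from tempfile import gettempdir

from lockfile import AlreadyLocked, LockFailed
from lockfile.pidlockfile import PIDLockFile


def _pid_is_running(pid):
    """Return True if a process with this PID exists (POSIX only)."""
    if pid is None:
        return False
    try:
        os.kill(pid, 0)  # signal 0 only tests for existence
    except OSError as e:
        return e.errno == errno.EPERM  # EPERM: it exists but belongs to another user
    return True


def run_once(job):
    """Run job() unless another live process already holds the lock."""
    lock = PIDLockFile(os.path.join(gettempdir(), "example-job.lock"))
    try:
        lock.acquire(timeout=-1)  # a non-positive timeout means "do not block"
    except AlreadyLocked:
        if _pid_is_running(lock.read_pid()):
            return  # another instance really is running
        lock.break_lock()  # stale lock left by a dead process
        lock.acquire(timeout=-1)
    except LockFailed:
        return  # e.g. the lock file cannot be written
    try:
        job()
    finally:
        lock.release()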
Example #2
def pull(dry_run, flavor, interactive, debug):
    """ Pull down tasks from forges and add them to your taskwarrior tasks.

    Relies on configuration in bugwarriorrc
    """

    try:
        main_section = _get_section_name(flavor)
        config = _try_load_config(main_section, interactive)

        lockfile_path = os.path.join(get_data_path(config, main_section),
                                     'bugwarrior.lockfile')
        lockfile = PIDLockFile(lockfile_path)
        lockfile.acquire(timeout=10)
        try:
            # Get all the issues.  This can take a while.
            issue_generator = aggregate_issues(config, main_section, debug)

            # Stuff them in the taskwarrior db as necessary
            synchronize(issue_generator, config, main_section, dry_run)
        finally:
            lockfile.release()
    except LockTimeout:
        log.critical('Your taskrc repository is currently locked. '
                     'Remove the file at %s if you are sure no other '
                     'bugwarrior processes are currently running.' %
                     (lockfile_path))
    except RuntimeError as e:
        log.exception("Aborted (%s)" % e)
Example #3
def pull():
    """ Pull down tasks from forges and add them to your taskwarrior tasks.

    Relies on configuration in ~/.bugwarriorrc
    """
    twiggy.quickSetup()
    try:
        # Load our config file
        config = load_config()

        tw_config = TaskWarriorBase.load_config(get_taskrc_path(config))
        lockfile_path = os.path.join(
            os.path.expanduser(tw_config['data']['location']),
            'bugwarrior.lockfile')

        lockfile = PIDLockFile(lockfile_path)
        lockfile.acquire(timeout=10)
        try:
            # Get all the issues.  This can take a while.
            issue_generator = aggregate_issues(config)

            # Stuff them in the taskwarrior db as necessary
            synchronize(issue_generator, config)
        finally:
            lockfile.release()
    except LockTimeout:
        log.name('command').critical(
            'Your taskrc repository is currently locked. '
            'Remove the file at %s if you are sure no other '
            'bugwarrior processes are currently running.' % (lockfile_path))
    except:
        log.name('command').trace('error').critical('oh noes')
Example #4
def pull(dry_run, flavor, interactive, debug):
    """ Pull down tasks from forges and add them to your taskwarrior tasks.

    Relies on configuration in bugwarriorrc
    """

    try:
        main_section = _get_section_name(flavor)
        config = _try_load_config(main_section, interactive)

        lockfile_path = os.path.join(get_data_path(config, main_section),
                                     'bugwarrior.lockfile')
        lockfile = PIDLockFile(lockfile_path)
        lockfile.acquire(timeout=10)
        try:
            # Get all the issues.  This can take a while.
            issue_generator = aggregate_issues(config, main_section, debug)

            # Stuff them in the taskwarrior db as necessary
            synchronize(issue_generator, config, main_section, dry_run)
        finally:
            lockfile.release()
    except LockTimeout:
        log.critical(
            'Your taskrc repository is currently locked. '
            'Remove the file at %s if you are sure no other '
            'bugwarrior processes are currently running.' % (
                lockfile_path
            )
        )
    except RuntimeError as e:
        log.critical("Aborted (%s)" % e)
Example #5
def pull(dry_run, flavor):
    """ Pull down tasks from forges and add them to your taskwarrior tasks.

    Relies on configuration in bugwarriorrc
    """
    twiggy.quickSetup()
    try:
        main_section = _get_section_name(flavor)

        # Load our config file
        config = load_config(main_section)

        tw_config = TaskWarriorBase.load_config(get_taskrc_path(config, main_section))
        lockfile_path = os.path.join(os.path.expanduser(tw_config["data"]["location"]), "bugwarrior.lockfile")

        lockfile = PIDLockFile(lockfile_path)
        lockfile.acquire(timeout=10)
        try:
            # Get all the issues.  This can take a while.
            issue_generator = aggregate_issues(config, main_section)

            # Stuff them in the taskwarrior db as necessary
            synchronize(issue_generator, config, main_section, dry_run)
        finally:
            lockfile.release()
    except LockTimeout:
        log.name("command").critical(
            "Your taskrc repository is currently locked. "
            "Remove the file at %s if you are sure no other "
            "bugwarrior processes are currently running." % (lockfile_path)
        )
    except:
        log.name("command").trace("error").critical("oh noes")
Example #6
def run_maestral_daemon(config_name="maestral", run=True, log_to_stdout=False):
    """
    Wraps :class:`maestral.main.Maestral` as Pyro daemon object, creates a new instance
    and start Pyro's event loop to listen for requests on a unix domain socket. This call
    will block until the event loop shuts down.

    This command will return silently if the daemon is already running.

    :param str config_name: The name of the Maestral configuration to use.
    :param bool run: If ``True``, start syncing automatically. Defaults to ``True``.
    :param bool log_to_stdout: If ``True``, write logs to stdout. Defaults to ``False``.
    """

    from maestral.main import Maestral

    sock_name = sockpath_for_config(config_name)
    pid_name = pidpath_for_config(config_name)

    lockfile = PIDLockFile(pid_name)

    # acquire PID lock file

    try:
        lockfile.acquire(timeout=1)
    except AlreadyLocked:
        if is_pidfile_stale(lockfile):
            lockfile.break_lock()
        else:
            logger.debug(f"Maestral already running")
            return

    logger.debug(f"Starting Maestral daemon on socket '{sock_name}'")

    try:
        # clean up old socket, create new one
        try:
            os.remove(sock_name)
        except FileNotFoundError:
            pass

        daemon = Daemon(unixsocket=sock_name)

        # start Maestral as Pyro server
        ExposedMaestral = expose(Maestral)
        # mark stop_sync and shutdown_daemon as oneway methods
        # so that they don't block on call
        ExposedMaestral.stop_sync = oneway(ExposedMaestral.stop_sync)
        ExposedMaestral.shutdown_pyro_daemon = oneway(
            ExposedMaestral.shutdown_pyro_daemon)
        m = ExposedMaestral(config_name, run=run, log_to_stdout=log_to_stdout)

        daemon.register(m, f"maestral.{config_name}")
        daemon.requestLoop(loopCondition=m._loop_condition)
        daemon.close()
    except Exception:
        traceback.print_exc()
    finally:
        # remove PID lock
        lockfile.release()
Example #7
 def acquire(self, timeout=None):
     owner = self.read_pid()
     if owner is not None and owner != os.getpid(
     ) and self.process_alive(owner) is False:
         log.warn(
             "Breaking lock '%s' since owning process %i is dead." %
             (self.lock_file, owner))
         self.break_lock()
     PIDLockFile.acquire(self, timeout)
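
Example 7 shows only an acquire() override taken from a PIDLockFile subclass. For context, a sketch of how such a subclass might look is below; the class name and the psutil-based process_alive helper are assumptions, not the original class this method belongs to.

import os

import psutil  # assumption: used here to check whether the lock owner is still alive
from lockfile.pidlockfile import PIDLockFile


class StaleBreakingPIDLockFile(PIDLockFile):
    """PIDLockFile variant that discards a lock held by a dead process."""

    @staticmethod
    def process_alive(pid):
        return psutil.pid_exists(pid)

    def acquire(self, timeout=None):
        owner = self.read_pid()
        if owner is not None and owner != os.getpid() and not self.process_alive(owner):
            # The recorded owner is gone; drop its lock before acquiring ourselves.
            self.break_lock()
        PIDLockFile.acquire(self, timeout)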
Example #8
def get_lock(workdir):
    pidfile = PIDLockFile(os.path.join(workdir, 'lobster.pid'), timeout=-1)
    try:
        pidfile.acquire()
    except AlreadyLocked:
        print "Another instance of lobster is accessing {0}".format(workdir)
        raise
    pidfile.break_lock()
    return pidfile
Example #9
def main():
    serverCfg = piccolo.PiccoloServerConfig()

    # start logging
    handler = piccoloLogging(logfile=serverCfg.cfg['logging']['logfile'],
                             debug=serverCfg.cfg['logging']['debug'])
    log = logging.getLogger("piccolo.server")

    if serverCfg.cfg['daemon']['daemon']:
        import daemon
        try:
            import lockfile
        except ImportError:
            print(
                "The 'lockfile' Python module is required to run Piccolo Server. Ensure that version 0.12 or later of lockfile is installed."
            )
            sys.exit(1)
        try:
            from lockfile.pidlockfile import PIDLockFile
        except ImportError:
            print(
                "An outdated version of the 'lockfile' Python module is installed. Piccolo Server requires at least version 0.12 or later of lockfile."
            )
            sys.exit(1)
        from lockfile import AlreadyLocked, NotLocked

        # create a pid file and tidy up if required
        pidfile = PIDLockFile(serverCfg.cfg['daemon']['pid_file'], timeout=-1)
        try:
            pidfile.acquire()
        except AlreadyLocked:
            try:
                os.kill(pidfile.read_pid(), 0)
                print('Process already running!')
                exit(1)
            except OSError:  #No process with locked PID
                print('PID file exists but process is dead')
                pidfile.break_lock()
        try:
            pidfile.release()
        except NotLocked:
            pass

        pstd = open(serverCfg.cfg['daemon']['logfile'], 'w')
        with daemon.DaemonContext(pidfile=pidfile,
                                  files_preserve=[handler.stream],
                                  stderr=pstd):
            # start piccolo
            piccolo_server(serverCfg)
    else:
        # start piccolo
        piccolo_server(serverCfg)
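
Example 9 combines PIDLockFile with python-daemon's DaemonContext, which takes the PID lock itself when the context is entered. A minimal hedged sketch of that combination is below; the run_daemonized helper is an illustrative name, not part of the example above.

import daemon  # python-daemon
from lockfile.pidlockfile import PIDLockFile


def run_daemonized(main_func, pid_path):
    """Sketch: daemonize and hold a PID lock for the lifetime of main_func()."""
    # DaemonContext acquires the pidfile on entry and releases it on exit.
    with daemon.DaemonContext(pidfile=PIDLockFile(pid_path, timeout=-1)):
        main_func()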
Example #10
def get_lock(workdir, force=False):
    from lockfile.pidlockfile import PIDLockFile
    from lockfile import AlreadyLocked

    pidfile = PIDLockFile(os.path.join(workdir, 'lobster.pid'), timeout=-1)
    try:
        pidfile.acquire()
    except AlreadyLocked:
        if not force:
            logger.error("another instance of lobster is accessing {0}".format(workdir))
            raise
    pidfile.break_lock()
    return pidfile
Example #11
def get_lock(workdir, force=False):
    from lockfile.pidlockfile import PIDLockFile
    from lockfile import AlreadyLocked

    pidfile = PIDLockFile(os.path.join(workdir, 'lobster.pid'), timeout=-1)
    try:
        pidfile.acquire()
    except AlreadyLocked:
        if not force:
            logger.error(
                "another instance of lobster is accessing {0}".format(workdir))
            raise
    pidfile.break_lock()
    return pidfile
Example #12
 def wrapped_f():
   _ensure_path_exists(os.path.dirname(filename))
   lock = PIDLockFile(filename, timeout=timeout)
   try:
     lock.acquire()
   except lockfile.LockTimeout:
     if suppress_error:
       logger.info('Unable to acquire lock: %s', filename)
       # could continue, but probably safer to quit?
       os._exit(os.EX_OK)  # sys.exit raises an exception
     else:
       raise
   else:
     logger.info('Acquired lock: %s', filename)
     f()
     lock.release()
     logger.info('Released lock: %s', filename)
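
Example 12 shows only the inner wrapped_f of a locking decorator; the enclosing decorator is not part of the snippet. A sketch of how a complete decorator built on this pattern could look is below. The single_instance name and its filename, timeout and suppress_error parameters are assumptions mirroring the fragment, and this sketch simply returns instead of calling os._exit as the fragment does.

import functools
import logging
import os

import lockfile
from lockfile.pidlockfile import PIDLockFile

logger = logging.getLogger(__name__)


def single_instance(filename, timeout=10, suppress_error=True):
    """Decorator: run the wrapped function only if the PID lock can be taken."""
    def decorator(f):
        @functools.wraps(f)
        def wrapped_f(*args, **kwargs):
            lock_dir = os.path.dirname(filename)
            if lock_dir:
                os.makedirs(lock_dir, exist_ok=True)  # stand-in for _ensure_path_exists
            lock = PIDLockFile(filename, timeout=timeout)
            try:
                lock.acquire()
            except lockfile.LockTimeout:
                if suppress_error:
                    logger.info('Unable to acquire lock: %s', filename)
                    return None
                raise
            try:
                return f(*args, **kwargs)
            finally:
                lock.release()
        return wrapped_f
    return decorator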
Example #13
        def wrapped(*args, **kwargs):
            logging.debug('Start daemon')
            if not pid_file and not force_daemon:
                if signal_map:
                    for key in signal_map.keys():
                        signal.signal(key, signal_map[key])
                logging.debug('Daemons pid: %s', os.getpid())
                f(*args, **kwargs)
                if clean:
                    clean()
                return
            if pid_file and pid_file not in ['-']:
                pid_path = os.path.abspath(pid_file)

                # clean old pids
                pidfile = PIDLockFile(pid_path, timeout=-1)
                try:
                    pidfile.acquire()
                    pidfile.release()
                except (AlreadyLocked, LockTimeout):
                    try:
                        os.kill(pidfile.read_pid(), 0)
                    logging.warning('Process already running!')
                        exit(2)
                    except OSError:  #No process with locked PID
                        pidfile.break_lock()

                pidfile = PIDLockFile(pid_path, timeout=-1)

                context = _daemon.DaemonContext(
                    pidfile=pidfile
                )
            else:
                context = _daemon.DaemonContext()

            if signal_map:
                context.signal_map = signal_map

            context.open()
            with context:
                logging.debug('Daemons pid: %s', os.getpid())
                f(*args, **kwargs)
                if clean:
                    clean()
Example #14
def putioCheck():
    """ Should probably be in a class """
    global instance 
    instance = putioDaemon()
    instance.getinputs(sys.argv[1:])
    instance.readconfig()
    instance.setuplogging()
    pidfile = PIDLockFile(instance.pidfile, timeout=-1)
    try:
        pidfile.acquire()
    except AlreadyLocked:
        try:
            os.kill(pidfile.read_pid(), 0)
            print('Process already running!')
            exit(1)
        except OSError:
            pidfile.break_lock()
    except Exception:
        print("Something failed:", sys.exc_info())
        exit(1)
    logging.debug('Listen is %s', instance.listen)
    if instance.listen:
       WebServer()
    signal.signal(signal.SIGTERM,handler)
    while True:
        if os.path.exists(instance.torrentdir):
            onlyfiles = [ f for f in os.listdir(instance.torrentdir) if os.path.isfile(os.path.join(instance.torrentdir,f))] 
            if len(onlyfiles):  
                client = putio.Client(instance.token)
                for torrent in onlyfiles:
                    logging.info('working on %s', torrent) 
                    # if we are listening then use the callback_url
                    callback_url = None
                    if instance.listen:
                       callback_url = 'http://'+instance.callback+'/'+instance.httppath+'/api/'+instance.token
                    logging.info('Calling add_torrent for %s with %s',torrent,callback_url)
                    client.Transfer.add_torrent(instance.torrentdir+"/"+torrent, callback_url=callback_url)
                    os.remove(instance.torrentdir+"/"+torrent)
        time.sleep(5)
Example #15
def pull():
    """ Pull down tasks from forges and add them to your taskwarrior tasks.

    Relies on configuration in ~/.bugwarriorrc
    """
    twiggy.quickSetup()
    try:
        # Load our config file
        config = load_config()

        tw_config = TaskWarriorBase.load_config(get_taskrc_path(config))
        lockfile_path = os.path.join(
            os.path.expanduser(
                tw_config['data']['location']
            ),
            'bugwarrior.lockfile'
        )

        lockfile = PIDLockFile(lockfile_path)
        lockfile.acquire(timeout=10)
        try:
            # Get all the issues.  This can take a while.
            issue_generator = aggregate_issues(config)

            # Stuff them in the taskwarrior db as necessary
            synchronize(issue_generator, config)
        finally:
            lockfile.release()
    except LockTimeout:
        log.name('command').critical(
            'Your taskrc repository is currently locked. '
            'Remove the file at %s if you are sure no other '
            'bugwarrior processes are currently running.' % (
                lockfile_path
            )
        )
    except:
        log.name('command').trace('error').critical('oh noes')
Example #16
def run_with_lock(remove=False):
    lock = PIDLockFile(getattr(
        settings, "HYPERKITTY_JOBS_UPDATE_INDEX_LOCKFILE",
        os.path.join(gettempdir(), "hyperkitty-jobs-update-index.lock")))
    try:
        lock.acquire(timeout=-1)
    except AlreadyLocked:
        if check_pid(lock.read_pid()):
            logger.warning("The job 'update_index' is already running")
            return
        else:
            lock.break_lock()
            lock.acquire(timeout=-1)
    except LockFailed as e:
        logger.warning("Could not obtain a lock for the 'update_index' "
                       "job (%s)", e)
        return
    try:
        update_index(remove=remove)
    except Exception as e: # pylint: disable-msg=broad-except
        logger.exception("Failed to update the fulltext index: %s", e)
    finally:
        lock.release()
Example #17
def _get_lock(theargs, stage):
    """Create lock file to prevent this process from running on same data.

       This uses ``PIDLockFile`` to create a pid lock file in celppdir
       directory named celprunner.<stage>.lockpid
       If pid exists it is assumed the lock is held otherwise lock
       is broken and recreated

       :param theargs: return value from argparse and should contain
                       theargs.celppdir should be set to path
       :param stage: set to stage that is being run
       :return: ``PIDLockFile`` upon success
       :raises: LockException: If there was a problem locking
       :raises: Exception: If valid pid lock file already exists
       """
    mylockfile = os.path.join(theargs.celppdir,
                              "celpprunner." + stage + ".lockpid")
    logger.debug("Looking for lock file: " + mylockfile)
    lock = PIDLockFile(mylockfile, timeout=10)

    if lock.i_am_locking():
        logger.debug("My process id" + str(lock.read_pid()) +
                     " had the lock so I am breaking")
        lock.break_lock()
        lock.acquire(timeout=10)
        return lock

    if lock.is_locked():
        logger.debug("Lock file exists checking pid")
        if psutil.pid_exists(lock.read_pid()):
            raise Exception("celpprunner with pid " + str(lock.read_pid()) +
                            " is running")

    lock.break_lock()
    logger.info("Acquiring lock")
    lock.acquire(timeout=10)
    return lock
Example #18
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('interface',
                        nargs='?',
                        help='interface to configure with DHCP')
    parser.add_argument('-v',
                        '--verbose',
                        help='Set logging level to debug',
                        action='store_true')
    parser.add_argument('--version',
                        action='version',
                        help='version',
                        version='%(prog)s ' + __version__)
    parser.add_argument('-s',
                        '--delay_selecting',
                        help='Selecting starts after a random delay.',
                        action='store_true')
    # options to look like dhclient
    parser.add_argument(
        '-sf',
        metavar='script-file',
        nargs='?',
        const=SCRIPT_PATH,
        help='Path to the network configuration script invoked by '
        'dhcpcanon when it gets a lease. Without this option '
        'dhcpcanon will configure the network by itself. '
        'If unspecified, the default /sbin/dhcpcanon-script is used, '
        'which is a copy of dhclient-script; see dhclient-script(8) '
        'for a description of this file. '
        'If dhcpcanon is running with NetworkManager, it will '
        'be called with the script nm-dhcp-helper.')
    parser.add_argument(
        '-pf',
        metavar='pid-file',
        nargs='?',
        const=PID_PATH,
        help='Path to the process ID file. If unspecified, the '
        'default /var/run/dhcpcanon.pid is used. '
        'This option is used by NetworkManager to check whether '
        'dhcpcanon is already running.')
    args = parser.parse_args()
    logger.debug('args %s', args)

    # do not put interfaces in promiscuous mode
    conf.sniff_promisc = conf.promisc = 0
    conf.checkIPaddr = 1

    if args.verbose:
        logger.setLevel(logging.DEBUG)
    logger.debug('args %s', args)
    if args.interface:
        conf.iface = args.interface
    logger.debug('interface %s' % conf.iface)
    if args.pf is not None:
        # This is only needed for nm
        pf = PIDLockFile(args.pf, timeout=5)
        try:
            pf.acquire()
            logger.debug('using pid file %s', pf)
        except AlreadyLocked as e:
            pf.break_lock()
            pf.acquire()
        except (LockTimeout, LockFailed) as e:
            logger.error(e)
    dhcpcap = DHCPCAPFSM(iface=conf.iface,
                         server_port=SERVER_PORT,
                         client_port=CLIENT_PORT,
                         scriptfile=args.sf,
                         delay_selecting=args.delay_selecting)
    dhcpcap.run()
Example #19
            try:
                old = psutil.Process(old_pid)
                if os.path.basename(__file__) in old.cmdline():
                    try:
                        old.terminate()
                        try:
                            old.wait(10)
                        except psutil.TimeoutExpired:
                            old.kill()
                    except psutil.AccessDenied:
                        pass
            except psutil.NoSuchProcess:
                pass
        pidlock.break_lock()

    pidlock.acquire(timeout=10)
    application = PermalinkServer()
    http_server = tornado.httpserver.HTTPServer(application, xheaders=True)
    http_server.listen(options.port)

    def handler(signum, frame):
        tornado.ioloop.IOLoop.instance().stop()

    signal.signal(signal.SIGHUP, handler)
    signal.signal(signal.SIGINT, handler)
    signal.signal(signal.SIGTERM, handler)

    try:
        from systemd.daemon import notify
        notify('READY=1\nMAINPID={}'.format(os.getpid()), True)
    except ImportError:
Example #20
                            old.wait(10)
                        except psutil.TimeoutExpired:
                            logger.info("Trying to kill old instance.")
                            old.kill()
                    except psutil.AccessDenied:
                        logger.error("The process seems to be SageCell, but "
                                     "can not be stopped. Its command line: %s"
                                     % old.cmdline())
                else:
                    logger.info("Process does not seem to be SageCell.")
            except psutil.NoSuchProcess:
                logger.info("No such process exist anymore.")
        logger.info("Breaking old lock.")
        pidlock.break_lock()
        
    pidlock.acquire(timeout=10)
    app = SageCellServer(args.baseurl, args.dir)
    listen = {'port': args.port, 'xheaders': True}
    if args.interface is not None:
        listen['address'] = get_ip_address(args.interface)
    logger.info("Listening configuration: %s", listen)

    def handler(signum, frame):
        logger.info("Received %s, shutting down...", signum)
        app.kernel_dealer.stop()
        app.ioloop.stop()
    
    signal.signal(signal.SIGHUP, handler)
    signal.signal(signal.SIGINT, handler)
    signal.signal(signal.SIGTERM, handler)
Example #21
def run_maestral_daemon(config_name='maestral', run=True, log_to_stdout=False):
    """
    Wraps :class:`maestral.main.Maestral` as Pyro daemon object, creates a new instance
    and start Pyro's event loop to listen for requests on a unix domain socket. This call
    will block until the event loop shuts down.

    This command will return silently if the daemon is already running.

    :param str config_name: The name of the Maestral configuration to use.
    :param bool run: If ``True``, start syncing automatically. Defaults to ``True``.
    :param bool log_to_stdout: If ``True``, write logs to stdout. Defaults to ``False``.
    """
    import threading
    from maestral.main import Maestral

    sock_name = sockpath_for_config(config_name)
    pid_name = pidpath_for_config(config_name)

    lockfile = PIDLockFile(pid_name)

    if threading.current_thread() is threading.main_thread():
        signal.signal(signal.SIGTERM, _sigterm_handler)

    # acquire PID lock file

    try:
        lockfile.acquire(timeout=1)
    except (AlreadyLocked, LockTimeout):
        if is_pidfile_stale(lockfile):
            lockfile.break_lock()
        else:
            logger.debug('Maestral already running')
            return

    # Nice ourselves to give other processes priority. We will likely only
    # have significant CPU usage in case of many concurrent downloads.
    os.nice(10)

    logger.debug(f'Starting Maestral daemon on socket "{sock_name}"')

    try:
        # clean up old socket
        try:
            os.remove(sock_name)
        except FileNotFoundError:
            pass

        daemon = Daemon(unixsocket=sock_name)

        # start Maestral as Pyro server
        ExposedMaestral = expose(Maestral)
        # mark stop_sync and shutdown_daemon as one way
        # methods so that they don't block on call
        ExposedMaestral.stop_sync = oneway(ExposedMaestral.stop_sync)
        ExposedMaestral.pause_sync = oneway(ExposedMaestral.pause_sync)
        ExposedMaestral.shutdown_pyro_daemon = oneway(
            ExposedMaestral.shutdown_pyro_daemon)
        m = ExposedMaestral(config_name, run=run, log_to_stdout=log_to_stdout)

        daemon.register(m, f'maestral.{config_name}')
        daemon.requestLoop(loopCondition=m._loop_condition)
        daemon.close()
    except Exception:
        traceback.print_exc()
    except (KeyboardInterrupt, SystemExit):
        logger.info('Received system exit')
        sys.exit(0)
    finally:
        lockfile.release()
Example #22
 def acquire(self, *args, **kwargs):
     kwargs.update({'timeout': 0})
     PIDLockFile.acquire(self, *args, **kwargs)
Example #23
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('interface', nargs='?',
                        help='interface to configure with DHCP')
    parser.add_argument('-v', '--verbose',
                        help='Set logging level to debug',
                        action='store_true')
    parser.add_argument('--version', action='version',
                        help='version',
                        version='%(prog)s ' + __version__)
    parser.add_argument('-s', '--delay_selecting',
                        help='Selecting starts after a random delay.',
                        action='store_true')
    # options to look like dhclient
    parser.add_argument(
        '-sf', metavar='script-file', nargs='?',
        const=SCRIPT_PATH,
        help='Path to the network configuration script invoked by '
             'dhcpcanon when it gets a lease. Without this option '
             'dhcpcanon will configure the network by itself. '
             'If unspecified, the default /sbin/dhcpcanon-script is used, '
             'which is a copy of dhclient-script; see dhclient-script(8) '
             'for a description of this file. '
             'If dhcpcanon is running with NetworkManager, it will '
             'be called with the script nm-dhcp-helper.')
    parser.add_argument(
        '-pf', metavar='pid-file', nargs='?',
        const=PID_PATH,
        help='Path to the process ID file. If unspecified, the '
             'default /var/run/dhcpcanon.pid is used. '
             'This option is used by NetworkManager to check whether '
             'dhcpcanon is already running.')
    args = parser.parse_args()
    logger.debug('args %s', args)

    # do not put interfaces in promiscuous mode
    conf.sniff_promisc = conf.promisc = 0
    conf.checkIPaddr = 1

    if args.verbose:
        logger.setLevel(logging.DEBUG)
    logger.debug('args %s', args)
    if args.interface:
        conf.iface = args.interface
    logger.debug('interface %s' % conf.iface)
    if args.pf is not None:
        # This is only needed for nm
        pf = PIDLockFile(args.pf, timeout=5)
        try:
            pf.acquire()
            logger.debug('using pid file %s', pf)
        except AlreadyLocked as e:
            pf.break_lock()
            pf.acquire()
        except (LockTimeout, LockFailed) as e:
            logger.error(e)
    dhcpcap = DHCPCAPFSM(iface=conf.iface,
                         server_port=SERVER_PORT,
                         client_port=CLIENT_PORT,
                         scriptfile=args.sf,
                         delay_selecting=args.delay_selecting)
    dhcpcap.run()