Code Example #1
def main():
    p = create_arg_parser()
    args = p.parse_args()
    conf = config.get_config(args)
    config.configure_logging(args, conf)
    def_args = [args, conf]
    def_kwargs = {}
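    # Dispatch table: each sub-command name maps to its entry point ('f')
    # plus the positional ('a') and keyword ('kw') arguments it receives.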
    known_commands = {
        'parse': {
            'f': parse.main,
            'a': def_args,
            'kw': def_kwargs
        },
        'plot': {
            'f': plot.main,
            'a': def_args,
            'kw': def_kwargs
        },
        'simulate': {
            'f': simulate.main,
            'a': def_args,
            'kw': def_kwargs
        },
        'statistics': {
            'f': statistics.main,
            'a': def_args,
            'kw': def_kwargs
        },
    }
    if args.command not in known_commands:
        p.print_help()
        return
    rand.init(args.seed)
    c = known_commands[args.command]
    exit(c['f'](*c['a'], **c['kw']))
Code Example #2
def main() -> None:
    argparser = make_argparser()
    args_ = argparser.parse_args()

    log_level = LOG_LEVELS.get(args_.verbose, logging.DEBUG)
    config.configure_logging(log_level)

    logger.debug(f"Parsed CLI args: {extractns(args_)}")

    if not hasattr(args_, "request"):
        argparser.print_help()
        sys.exit()

    args = merge_config(args_, USERCFG)
    REQUEST_HANDLERS[args["request"]](args)
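LOG_LEVELS, merge_config, and REQUEST_HANDLERS are defined elsewhere in that project. The lookup with a DEBUG fallback suggests a mapping from the parsed verbosity value to a logging level, roughly like the following sketch (an assumption, not the project's actual definition):

import logging

# Assumed shape of the mapping: higher verbosity selects a chattier level;
# values outside the mapping fall back to logging.DEBUG via dict.get().
LOG_LEVELS = {
    0: logging.WARNING,
    1: logging.INFO,
    2: logging.DEBUG,
}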
Code Example #3
File: cli.py  Project: AlirezaErshadi/axrelay
def parse_config(argv, optparser, require_config=True):
    from ConfigParser import RawConfigParser  # Python 2 module; the Python 3 equivalent is configparser

    opts, args = optparser.parse_args(argv)

    # Set up logging: the dictionary below follows the logging.config.dictConfig schema.
    logging_opts = {
        'version': 1,
        'disable_existing_loggers': False,

        'handlers': {
            'console': {
                'class': 'logging.StreamHandler',
                'formatter': 'default',
            },
        },

        'root': {
            'level': opts.loglevel,
            'handlers': ['console', ]
        },

        'formatters': {
            'default': {
                'format': '%(asctime)s %(levelname)-8s %(message)s',
            }
        }
    }
    if (opts.logfile is not None):
        logging_opts['handlers']['file'] = {
            'class': 'logging.handlers.WatchedFileHandler',
            'filename': opts.logfile,
            'formatter': 'default'
        }
        logging_opts['root']['handlers'].append('file')

    configure_logging(logging_opts)

    config = RawConfigParser()
    if not config.read(opts.config_file) and require_config:
        msg = "Could not read config file %s" % opts.config_file
        log.error(msg)
        sys.exit(msg)

    return opts, args, config
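The logging_opts dictionary above follows the logging.config.dictConfig schema ('version', 'handlers', 'root', 'formatters'), so the configure_logging helper it is passed to is most likely a thin wrapper along these lines (a sketch, not the project's actual code):

import logging.config

def configure_logging(logging_opts):
    # Apply the dictConfig-style dictionary assembled in parse_config().
    logging.config.dictConfig(logging_opts)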
Code Example #4
def main():
    p = create_arg_parser()
    args = p.parse_args()
    conf = config.get_config(args)
    config.configure_logging(args, conf)
    def_args = [args, conf]
    def_kwargs = {}
    known_commands = {
        'client': {
            'f': client.main,
            'a': def_args,
            'kw': def_kwargs
        },
        'server': {
            'f': server.main,
            'a': def_args,
            'kw': def_kwargs
        },
    }
    if args.command not in known_commands:
        p.print_help()
        return
    c = known_commands[args.command]
    exit(c['f'](*c['a'], **c['kw']))
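Examples #1 and #4 share the same dispatch-table pattern; their config.get_config and config.configure_logging helpers are project-specific and not shown on this page. A minimal sketch of what such a helper could look like, assuming a hypothetical --log-level argument and a dict-like conf object with a 'log_level' fallback:

import logging

def configure_logging(args, conf):
    # Hypothetical sketch only: prefer an explicit --log-level argument,
    # fall back to the loaded configuration, and default to INFO.
    level_name = getattr(args, 'log_level', None) or conf.get('log_level', 'INFO')
    logging.basicConfig(
        level=getattr(logging, str(level_name).upper(), logging.INFO),
        format='%(asctime)s %(levelname)-8s %(name)s: %(message)s')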
Code Example #5
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-c',
        '--config',
        type=pathlib.Path,
        help='Load this configuration file instead of the default files.')
    parser.add_argument('-v',
                        '--verbose',
                        action='store_true',
                        help='Show configuration before starting, '
                        'and asyncio task status at shutdown.')
    parser.add_argument('-V',
                        '--version',
                        action='store_true',
                        help='Show the version of Flamenco Worker and stop.')
    parser.add_argument(
        '-r',
        '--reregister',
        action='store_true',
        help="Erases authentication information and re-registers this worker "
        "at the Manager. WARNING: this can cause duplicate worker information "
        "in the Manager's database.")
    parser.add_argument(
        '-d',
        '--debug',
        action='store_true',
        help="Enables debug logging for Flamenco Worker's own log entries. "
        "Edit the logging config in flamenco-worker.cfg "
        "for more powerful options.")
    parser.add_argument(
        '-t',
        '--test',
        action='store_true',
        help="Starts up in testing mode, in which only a handful of "
        "test-specific task types are accepted. This overrides the task_types "
        "in the configuration file.")
    parser.add_argument('-1',
                        '--single',
                        action='store_true',
                        help="Runs a single tasks, then exits.")
    args = parser.parse_args()

    if args.version:
        from . import __version__
        print(__version__)
        raise SystemExit()

    # Load configuration
    from . import config
    confparser = config.load_config(args.config, args.verbose, args.test)
    config.configure_logging(confparser, enable_debug=args.debug)

    log = logging.getLogger(__name__)
    log.debug('Starting, pid=%d', os.getpid())

    log_startup()

    if args.test:
        log.warning('Test mode enabled, overriding task_types=%r',
                    confparser.value('task_types'))

    if args.reregister:
        log.warning(
            'Erasing worker_id and worker_secret so we can attempt re-registration.'
        )
        confparser.erase('worker_id')
        confparser.erase('worker_secret')

    if args.single:
        log.info(
            'Running in single-task mode, will stop after performing one task.'
        )

    # Find the Manager using UPnP/SSDP if we have no manager_url.
    if not confparser.value('manager_url'):
        from . import ssdp_discover

        try:
            manager_url = ssdp_discover.find_flamenco_manager()
        except ssdp_discover.DiscoveryFailed:
            log.fatal('Unable to find Flamenco Manager via UPnP/SSDP.')
            raise SystemExit(1)

        log.info('Found Flamenco Manager at %s', manager_url)
        confparser.setvalue('manager_url', manager_url)

    # Patch AsyncIO
    from . import patch_asyncio
    patch_asyncio.patch_asyncio()

    # Construct the AsyncIO loop
    loop = construct_asyncio_loop()
    if args.verbose:
        log.debug('Enabling AsyncIO debugging')
        loop.set_debug(True)
    shutdown_future = loop.create_future()
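    # This future is handed to the components constructed below;
    # do_clean_shutdown() cancels it to signal them to stop.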

    # Piece all the components together.
    from . import runner, worker, upstream, upstream_update_queue, may_i_run, __version__

    fmanager = upstream.FlamencoManager(
        manager_url=confparser.value('manager_url'),
        flamenco_worker_version=__version__,
    )

    tuqueue = upstream_update_queue.TaskUpdateQueue(
        db_fname=confparser.value('task_update_queue_db'),
        manager=fmanager,
        shutdown_future=shutdown_future,
    )
    trunner = runner.TaskRunner(
        shutdown_future=shutdown_future,
        subprocess_pid_file=confparser.value('subprocess_pid_file'),
    )

    pretask_check_params = parse_pretask_check_config(confparser, log)

    fworker = worker.FlamencoWorker(
        manager=fmanager,
        trunner=trunner,
        tuqueue=tuqueue,
        task_types=confparser.value('task_types').split(),
        worker_id=confparser.value('worker_id'),
        worker_secret=confparser.value('worker_secret'),
        worker_registration_secret=confparser.value(
            'worker_registration_secret'),
        loop=loop,
        shutdown_future=shutdown_future,
        push_log_max_interval=confparser.interval_secs(
            'push_log_max_interval_seconds'),
        push_log_max_entries=confparser.value('push_log_max_entries', int),
        push_act_max_interval=confparser.interval_secs(
            'push_act_max_interval_seconds'),
        initial_state='testing' if args.test else 'awake',
        run_single_task=args.single,
        pretask_check_params=pretask_check_params,
    )

    mir = may_i_run.MayIRun(
        manager=fmanager,
        worker=fworker,
        poll_interval=confparser.interval_secs('may_i_run_interval_seconds'),
        loop=loop,
    )

    def shutdown(signum, stackframe):
        """Perform a clean shutdown."""

        # Raise an exception, so that the exception is bubbled upwards, until
        # the asyncio loop stops executing the current task. Only then can we
        # run things like loop.run_until_complete(mir_work_task).
        log.warning('Shutting down due to signal %i', signum)
        raise KeyboardInterrupt()

    def sleep(signum, stackframe):
        log.warning('Going asleep due to signal %i', signum)
        fworker.go_to_state_asleep()

    def wakeup(signum, stackframe):
        log.warning('Waking up due to signal %i', signum)
        fworker.go_to_state_awake()

    # Shut down cleanly upon TERM signal
    import signal
    signal.signal(signal.SIGTERM, shutdown)
    signal.signal(signal.SIGINT, shutdown)

    if hasattr(signal, 'SIGUSR1'):
        # Windows doesn't have USR1/2 signals.
        signal.signal(signal.SIGUSR1, sleep)
        signal.signal(signal.SIGUSR2, wakeup)

    if hasattr(signal, 'SIGPOLL'):
        # Not sure how cross-platform SIGPOLL is.
        signal.signal(signal.SIGPOLL, asyncio_report_tasks)

    # Start asynchronous tasks.
    asyncio.ensure_future(tuqueue.work(loop=loop))
    mir_work_task = asyncio.ensure_future(mir.work())

    def do_clean_shutdown():
        shutdown_future.cancel()
        mir_work_task.cancel()
        try:
            loop.run_until_complete(asyncio.wait_for(mir_work_task, 5))
        except requests.exceptions.ConnectionError:
            log.warning(
                "Unable to connect to HTTP server, but that's fine as we're shutting down."
            )
        except asyncio.TimeoutError:
            log.debug("Timeout waiting for may-I-run task, "
                      "but that's fine as we're shutting down.")
        except KeyboardInterrupt:
            log.info(
                'Keyboard interrupt while shutting down, ignoring as we are shutting down.'
            )

        fworker.shutdown()

        async def stop_loop():
            log.info('Waiting to give tasks the time to stop gracefully')
            await asyncio.sleep(1)
            loop.stop()

        loop.run_until_complete(stop_loop())

    try:
        loop.run_until_complete(fworker.startup())
        fworker.mainloop()
    except worker.UnableToRegisterError:
        # The worker will have logged something, we'll just shut down cleanly.
        pass
    except KeyboardInterrupt:
        do_clean_shutdown()
    except:
        log.exception('Uncaught exception!')
    else:
        do_clean_shutdown()

    # Report on the asyncio task status
    if args.verbose:
        asyncio_report_tasks()

    log.warning('Closing asyncio loop')
    loop.close()
    log.warning('Flamenco Worker is shut down')