Example 1
def worker(args):
    env = os.environ.copy()
    env['AIRFLOW_HOME'] = settings.AIRFLOW_HOME

    # Celery worker
    from airflow.executors.celery_executor import app as celery_app
    from celery.bin import worker

    worker = worker.worker(app=celery_app)
    options = {
        'optimization': 'fair',
        'O': 'fair',
        'queues': args.queues,
        'concurrency': args.concurrency,
    }

    if args.daemon:
        pid, stdout, stderr, log_file = setup_locations(
            "worker", args.pid, args.stdout, args.stderr, args.log_file)
        handle = setup_logging(log_file)
        stdout = open(stdout, 'w+')
        stderr = open(stderr, 'w+')

        ctx = daemon.DaemonContext(
            pidfile=TimeoutPIDLockFile(pid, -1),
            files_preserve=[handle],
            stdout=stdout,
            stderr=stderr,
        )
        with ctx:
            sp = subprocess.Popen(['airflow', 'serve_logs'], env=env)
            worker.run(**options)
            sp.kill()

        stdout.close()
        stderr.close()
    else:
        signal.signal(signal.SIGINT, sigint_handler)
        signal.signal(signal.SIGTERM, sigint_handler)

        sp = subprocess.Popen(['airflow', 'serve_logs'], env=env)

        worker.run(**options)
        sp.kill()
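All of these snippets share one daemonization pattern: build a daemon.DaemonContext around a TimeoutPIDLockFile, optionally redirect stdout/stderr, and run the service inside the context. Below is a minimal, self-contained sketch of that pattern; the paths are illustrative placeholders, not taken from any of the projects quoted here.

import daemon
from daemon.pidfile import TimeoutPIDLockFile

def run_service():
    pass  # long-running work goes here

# A non-positive acquire_timeout (the examples pass -1) makes acquiring the
# pidfile lock fail immediately if another instance already holds it.
pidfile = TimeoutPIDLockFile('/tmp/myservice.pid', acquire_timeout=-1)

with open('/tmp/myservice.out', 'w+') as out, open('/tmp/myservice.err', 'w+') as err:
    ctx = daemon.DaemonContext(pidfile=pidfile, stdout=out, stderr=err)
    with ctx:
        # Past this point the process has forked twice, detached from the
        # controlling terminal, and redirected its standard streams.
        run_service()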
Example 2
def flower(args):
    """Starts Flower, Celery monitoring tool"""
    broker = conf.get('celery', 'BROKER_URL')
    address = '--address={}'.format(args.hostname)
    port = '--port={}'.format(args.port)
    api = ''  # pylint: disable=redefined-outer-name
    if args.broker_api:
        api = '--broker_api=' + args.broker_api

    url_prefix = ''
    if args.url_prefix:
        url_prefix = '--url-prefix=' + args.url_prefix

    basic_auth = ''
    if args.basic_auth:
        basic_auth = '--basic_auth=' + args.basic_auth

    flower_conf = ''
    if args.flower_conf:
        flower_conf = '--conf=' + args.flower_conf

    if args.daemon:
        pid, stdout, stderr, _ = setup_locations("flower", args.pid, args.stdout, args.stderr, args.log_file)
        stdout = open(stdout, 'w+')
        stderr = open(stderr, 'w+')

        ctx = daemon.DaemonContext(
            pidfile=TimeoutPIDLockFile(pid, -1),
            stdout=stdout,
            stderr=stderr,
        )

        with ctx:
            os.execvp("flower", ['flower', '-b',
                                 broker, address, port, api, flower_conf, url_prefix, basic_auth])

        stdout.close()
        stderr.close()
    else:
        signal.signal(signal.SIGINT, sigint_handler)
        signal.signal(signal.SIGTERM, sigint_handler)

        os.execvp("flower", ['flower', '-b',
                             broker, address, port, api, flower_conf, url_prefix, basic_auth])
Example 3
    def __init__(self):
        NetworkCore.__init__(self)

        #: The :class:`Bcfg2.Server.SSLServer.XMLRPCServer` instance
        #: powering this server core
        self.server = None

        daemon_args = dict(uid=Bcfg2.Options.setup.daemon_uid,
                           gid=Bcfg2.Options.setup.daemon_gid,
                           umask=int(Bcfg2.Options.setup.umask, 8),
                           detach_process=True,
                           files_preserve=self._logfilehandles())
        if Bcfg2.Options.setup.daemon:
            daemon_args['pidfile'] = TimeoutPIDLockFile(
                Bcfg2.Options.setup.daemon, acquire_timeout=5)
        #: The :class:`daemon.DaemonContext` used to drop
        #: privileges, write the PID file (with :class:`PidFile`),
        #: and daemonize this core.
        self.context = daemon.DaemonContext(**daemon_args)
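Example 3 layers privilege dropping onto the same pattern. A hedged sketch of what those DaemonContext arguments do (names and paths are illustrative, not from Bcfg2):

import logging
import daemon
from daemon.pidfile import TimeoutPIDLockFile

handler = logging.FileHandler('/var/log/myservice.log')
logging.getLogger().addHandler(handler)

ctx = daemon.DaemonContext(
    uid=1000,                         # switch to this user after forking
    gid=1000,                         # ...and to this group
    umask=0o022,                      # file-creation mask for the daemon
    detach_process=True,              # force the double fork even when started by an init system
    files_preserve=[handler.stream],  # keep the log file descriptor open across the fork
    pidfile=TimeoutPIDLockFile('/var/run/myservice.pid', acquire_timeout=5),
)
with ctx:
    logging.getLogger().info('running with dropped privileges')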
Example 4
def flower(args):
    """Starts Flower, Celery monitoring tool"""
    options = [
        "flower",
        conf.get('celery', 'BROKER_URL'),
        f"--address={args.hostname}",
        f"--port={args.port}",
    ]

    if args.broker_api:
        options.append(f"--broker-api={args.broker_api}")

    if args.url_prefix:
        options.append(f"--url-prefix={args.url_prefix}")

    if args.basic_auth:
        options.append(f"--basic-auth={args.basic_auth}")

    if args.flower_conf:
        options.append(f"--conf={args.flower_conf}")

    if args.daemon:
        pidfile, stdout, stderr, _ = setup_locations(
            process="flower",
            pid=args.pid,
            stdout=args.stdout,
            stderr=args.stderr,
            log=args.log_file,
        )
        with open(stdout, "a") as stdout, open(stderr, "a") as stderr:
            stdout.truncate(0)
            stderr.truncate(0)

            ctx = daemon.DaemonContext(
                pidfile=TimeoutPIDLockFile(pidfile, -1),
                stdout=stdout,
                stderr=stderr,
                umask=int(settings.DAEMON_UMASK, 8),
            )
            with ctx:
                celery_app.start(options)
    else:
        celery_app.start(options)
Example 5
def kerberos(args):
    """Start a kerberos ticket renewer"""
    print(settings.HEADER)

    if args.daemon:
        pid, stdout, stderr, _ = setup_locations(
            "kerberos", args.pid, args.stdout, args.stderr, args.log_file
        )
        with open(stdout, 'w+') as stdout_handle, open(stderr, 'w+') as stderr_handle:
            ctx = daemon.DaemonContext(
                pidfile=TimeoutPIDLockFile(pid, -1),
                stdout=stdout_handle,
                stderr=stderr_handle,
            )

            with ctx:
                krb.run(principal=args.principal, keytab=args.keytab)
    else:
        krb.run(principal=args.principal, keytab=args.keytab)
Example 6
def scheduler(args):
    """Starts Airflow Scheduler"""
    print(settings.HEADER)
    job = SchedulerJob(
        subdir=process_subdir(args.subdir),
        num_runs=args.num_runs,
        do_pickle=args.do_pickle,
    )
    scheduler_name = SchedulerFactory.get_scheduler_name()
    if scheduler_name == SchedulerFactory.DEFAULT_SCHEDULER:
        pass
    elif scheduler_name == SchedulerFactory.EVENT_BASED_SCHEDULER:
        job = EventBasedSchedulerJob(dag_directory=process_subdir(args.subdir),
                                     server_uri=args.server_uri)
    else:
        scheduler_class = SchedulerFactory.get_default_scheduler()
        job = scheduler_class()

    if args.daemon:
        pid, stdout, stderr, log_file = setup_locations(
            "scheduler", args.pid, args.stdout, args.stderr, args.log_file)
        handle = setup_logging(log_file)
        stdout = open(stdout, 'w+')
        stderr = open(stderr, 'w+')

        ctx = daemon.DaemonContext(
            pidfile=TimeoutPIDLockFile(pid, -1),
            files_preserve=[handle],
            stdout=stdout,
            stderr=stderr,
        )
        with ctx:
            job.run()

        stdout.close()
        stderr.close()
    else:
        signal.signal(signal.SIGINT, sigint_handler)
        signal.signal(signal.SIGTERM, sigint_handler)
        signal.signal(signal.SIGQUIT, sigquit_handler)
        job.run()
Example 7
def flower(args):
    """Starts Flower, Celery monitoring tool"""
    options = [
        conf.get('celery', 'BROKER_URL'),
        f"--address={args.hostname}",
        f"--port={args.port}",
    ]

    if args.broker_api:
        options.append(f"--broker-api={args.broker_api}")

    if args.url_prefix:
        options.append(f"--url-prefix={args.url_prefix}")

    if args.basic_auth:
        options.append(f"--basic-auth={args.basic_auth}")

    if args.flower_conf:
        options.append(f"--conf={args.flower_conf}")

    flower_cmd = FlowerCommand()

    if args.daemon:
        pidfile, stdout, stderr, _ = setup_locations(
            process="flower",
            pid=args.pid,
            stdout=args.stdout,
            stderr=args.stderr,
            log=args.log_file,
        )
        with open(stdout, "w+") as stdout, open(stderr, "w+") as stderr:
            ctx = daemon.DaemonContext(
                pidfile=TimeoutPIDLockFile(pidfile, -1),
                stdout=stdout,
                stderr=stderr,
            )
            with ctx:
                flower_cmd.execute_from_commandline(argv=options)
    else:
        flower_cmd.execute_from_commandline(argv=options)
Example 8
def kerberos(args):  # noqa
    print(settings.HEADER)
    import airflow.security.kerberos

    if args.daemon:
        pid, stdout, stderr, log_file = setup_locations("kerberos", args.pid, args.stdout, args.stderr, args.log_file)
        stdout = open(stdout, 'w+')
        stderr = open(stderr, 'w+')

        ctx = daemon.DaemonContext(
            pidfile=TimeoutPIDLockFile(pid, -1),
            stdout=stdout,
            stderr=stderr,
        )

        with ctx:
            airflow.security.kerberos.run()

        stdout.close()
        stderr.close()
    else:
        airflow.security.kerberos.run()
Example 9
def triggerer(args):
    """Starts Airflow Triggerer"""
    settings.MASK_SECRETS_IN_LOGS = True
    print(settings.HEADER)
    job = TriggererJob(capacity=args.capacity)

    if args.daemon:
        pid, stdout, stderr, log_file = setup_locations(
            "triggerer", args.pid, args.stdout, args.stderr, args.log_file)
        handle = setup_logging(log_file)
        with open(stdout, 'w+') as stdout_handle, open(stderr,
                                                       'w+') as stderr_handle:
            ctx = daemon.DaemonContext(
                pidfile=TimeoutPIDLockFile(pid, -1),
                files_preserve=[handle],
                stdout=stdout_handle,
                stderr=stderr_handle,
            )
            with ctx:
                job.run()

    else:
        # There is a bug in CPython (fixed in March 2022 but not yet released) that
        # makes asyncio handle SIGTERM improperly by using async-unsafe
        # functions, causing the triggerer to receive SIGPIPE while handling
        # SIGTERM/SIGINT and to deadlock itself. Until the fix is released
        # we should rely on the standard handling of these signals rather than
        # adding our own signal handlers. It seems that even a signal handler
        # that just ran exit(0) caused a race condition that led to the hanging.
        #
        # More details:
        #   * https://bugs.python.org/issue39622
        #   * https://github.com/python/cpython/issues/83803
        #
        # signal.signal(signal.SIGINT, sigint_handler)
        # signal.signal(signal.SIGTERM, sigint_handler)
        signal.signal(signal.SIGQUIT, sigquit_handler)
        job.run()
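sigquit_handler is referenced but not defined in these snippets; in Airflow it dumps the stack trace of every live thread. A handler along these lines reproduces that behavior (a sketch, not the project's exact implementation):

import signal
import sys
import threading
import traceback

def sigquit_handler(signum, frame):
    # On SIGQUIT (Ctrl-\), print the stack of every thread, then keep running.
    id_to_name = {t.ident: t.name for t in threading.enumerate()}
    for thread_id, stack in sys._current_frames().items():
        print(f"--- Thread {id_to_name.get(thread_id, '?')} ({thread_id}) ---")
        traceback.print_stack(stack)

signal.signal(signal.SIGQUIT, sigquit_handler)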
Example 10
def scheduler(args):
    """Starts Airflow Scheduler"""
    print(settings.HEADER)

    if args.daemon:
        pid, stdout, stderr, log_file = setup_locations(
            "scheduler", args.pid, args.stdout, args.stderr, args.log_file
        )
        handle = setup_logging(log_file)
        with open(stdout, 'w+') as stdout_handle, open(stderr, 'w+') as stderr_handle:
            ctx = daemon.DaemonContext(
                pidfile=TimeoutPIDLockFile(pid, -1),
                files_preserve=[handle],
                stdout=stdout_handle,
                stderr=stderr_handle,
            )
            with ctx:
                _run_scheduler_job(args=args)
    else:
        signal.signal(signal.SIGINT, sigint_handler)
        signal.signal(signal.SIGTERM, sigint_handler)
        signal.signal(signal.SIGQUIT, sigquit_handler)
        _run_scheduler_job(args=args)
Example 11
def flower(args):
    broka = conf.get('celery', 'BROKER_URL')
    address = '--address={}'.format(args.hostname)
    port = '--port={}'.format(args.port)
    api = ''
    if args.broker_api:
        api = '--broker_api=' + args.broker_api

    flower_conf = ''
    if args.flower_conf:
        flower_conf = '--conf=' + args.flower_conf

    if args.daemon:
        pid, stdout, stderr, log_file = setup_locations(
            "flower", args.pid, args.stdout, args.stderr, args.log_file)
        stdout = open(stdout, 'w+')
        stderr = open(stderr, 'w+')

        ctx = daemon.DaemonContext(
            pidfile=TimeoutPIDLockFile(pid, -1),
            stdout=stdout,
            stderr=stderr,
        )

        with ctx:
            os.execvp("flower",
                      ['flower', '-b', broka, address, port, api, flower_conf])

        stdout.close()
        stderr.close()
    else:
        signal.signal(signal.SIGINT, sigint_handler)
        signal.signal(signal.SIGTERM, sigint_handler)

        os.execvp("flower",
                  ['flower', '-b', broka, address, port, api, flower_conf])
Example 12
def main():
    parser = argparse.ArgumentParser(description='manage ansible execution')

    parser.add_argument('--version', action='version', version=VERSION)

    parser.add_argument('command', choices=['run', 'start', 'stop', 'is-alive'])

    parser.add_argument('private_data_dir',
                        help='Base directory containing Runner metadata (project, inventory, etc)')

    group = parser.add_mutually_exclusive_group()

    group.add_argument("-m", "--module", default=DEFAULT_RUNNER_MODULE,
                       help="Invoke an Ansible module directly without a playbook")

    group.add_argument("-p", "--playbook", default=DEFAULT_RUNNER_PLAYBOOK,
                       help="The name of the playbook to execute")

    group.add_argument("-r", "--role", default=DEFAULT_RUNNER_ROLE,
                       help="Invoke an Ansible role directly without a playbook")

    parser.add_argument("--hosts",
                        help="Define the set of hosts to execute against")

    parser.add_argument("-i", "--ident",
                        default=uuid4(),
                        help="An identifier that will be used when generating the"
                             "artifacts directory and can be used to uniquely identify a playbook run")

    parser.add_argument("--rotate-artifacts",
                        default=0,
                        type=int,
                        help="Automatically clean up old artifact directories after a given number has been created, the default is 0 which disables rotation")

    parser.add_argument("--roles-path", default=DEFAULT_ROLES_PATH,
                        help="Path to the Ansible roles directory")

    parser.add_argument("--role-vars",
                        help="Variables to pass to the role at runtime")

    parser.add_argument("--role-skip-facts", action="store_true", default=False,
                        help="Disable fact collection when executing a role directly")
    parser.add_argument("--artifact-dir",
                        help="Optional Path for the artifact root directory, by default it is located inside the private data dir")

    parser.add_argument("--inventory",
                        help="Override the default inventory location in private_data_dir")

    parser.add_argument("-j", "--json", action="store_true",
                        help="Output the json event structure to stdout instead of Ansible output")

    parser.add_argument("-v", action="count",
                        help="Increase the verbosity with multiple v's (up to 5) of the ansible-playbook output")

    parser.add_argument("-q", "--quiet", action="store_true",
                        help="Disable all output")

    parser.add_argument("--cmdline",
                        help="Command line options to pass to ansible-playbook at execution time")
    parser.add_argument("--debug", action="store_true",
                        help="Enable Runner debug output logging")

    parser.add_argument("--logfile",
                        help="Log output messages to a file")

    parser.add_argument("-a", "--args", dest='module_args',
                        help="Module arguments")

    args = parser.parse_args()

    output.configure()

    # enable or disable debug mode
    output.set_debug('enable' if args.debug else 'disable')

    # set the output logfile
    if args.logfile:
        output.set_logfile(args.logfile)

    output.debug('starting debug logging')

    # get the absolute path for start since it is a daemon
    args.private_data_dir = os.path.abspath(args.private_data_dir)

    pidfile = os.path.join(args.private_data_dir, 'pid')

    try:
        os.makedirs(args.private_data_dir)
    except OSError as exc:
        if exc.errno == errno.EEXIST and os.path.isdir(args.private_data_dir):
            pass
        else:
            raise

    if args.command != 'run':
        stderr_path = os.path.join(args.private_data_dir, 'daemon.log')
        if not os.path.exists(stderr_path):
            os.close(os.open(stderr_path, os.O_CREAT, stat.S_IRUSR | stat.S_IWUSR))
        stderr = open(stderr_path, 'w+')

    if args.command in ('start', 'run'):

        if args.command == 'start':
            import daemon
            from daemon.pidfile import TimeoutPIDLockFile
            context = daemon.DaemonContext(
                pidfile=TimeoutPIDLockFile(pidfile),
                stderr=stderr
            )
        else:
            context = threading.Lock()

        with context:
            with role_manager(args) as args:
                run_options = dict(private_data_dir=args.private_data_dir,
                                   ident=args.ident,
                                   playbook=args.playbook,
                                   module=args.module,
                                   module_args=args.module_args,
                                   host_pattern=args.hosts,
                                   verbosity=args.v,
                                   quiet=args.quiet,
                                   rotate_artifacts=args.rotate_artifacts,
                                   ignore_logging=False,
                                   json_mode=args.json)

                if args.hosts is not None:
                    run_options.update(inventory=args.hosts)

                if args.cmdline:
                    run_options['cmdline'] = args.cmdline

                res = run(**run_options)
            sys.exit(res.rc)

    try:
        with open(pidfile, 'r') as f:
            pid = int(f.readline())
    except IOError:
        sys.exit(1)

    if args.command == 'stop':
        try:
            with open(os.path.join(args.private_data_dir, 'args'), 'r') as args:
                Runner.handle_termination(pid)
        except IOError:
            Runner.handle_termination(pid)

    elif args.command == 'is-alive':
        try:
            os.kill(pid, signal.SIG_DFL)
            sys.exit(0)
        except OSError:
            sys.exit(1)
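The is-alive branch works because signal.SIG_DFL compares equal to 0 on CPython, so os.kill(pid, signal.SIG_DFL) sends no signal at all: the kernel merely checks that the target process exists. A more explicit spelling of the same probe:

import os

def pid_is_alive(pid: int) -> bool:
    try:
        os.kill(pid, 0)  # signal 0: existence (and permission) check only
    except ProcessLookupError:
        return False     # no such process
    except PermissionError:
        return True      # the process exists, we just may not signal it
    return True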
Example 13
def main(sys_args=None):
    """Main entry point for ansible-runner executable

    When the ```ansible-runner``` command is executed, this function
    is the main entry point that is called and executed.

    :param sys_args: List of arguments to be parsed by the parser
    :type sys_args: list

    :returns: an instance of SystemExit
    :rtype: SystemExit
    """

    parser = argparse.ArgumentParser(
        prog='ansible-runner',
        description=
        "Use 'ansible-runner' (with no arguments) to see basic usage")
    subparser = parser.add_subparsers(
        help="Command to invoke",
        dest='command',
        description="COMMAND PRIVATE_DATA_DIR [ARGS]")
    add_args_to_parser(parser, DEFAULT_CLI_ARGS['generic_args'])
    subparser.required = True

    # positional options
    run_subparser = subparser.add_parser(
        'run', help="Run ansible-runner in the foreground")
    add_args_to_parser(run_subparser, DEFAULT_CLI_ARGS['positional_args'])
    start_subparser = subparser.add_parser(
        'start', help="Start an ansible-runner process in the background")
    add_args_to_parser(start_subparser, DEFAULT_CLI_ARGS['positional_args'])
    stop_subparser = subparser.add_parser(
        'stop',
        help="Stop an ansible-runner process that's running in the background")
    add_args_to_parser(stop_subparser, DEFAULT_CLI_ARGS['positional_args'])
    isalive_subparser = subparser.add_parser(
        'is-alive',
        help=
        "Check if a an ansible-runner process in the background is still running."
    )
    add_args_to_parser(isalive_subparser, DEFAULT_CLI_ARGS['positional_args'])

    # streaming commands
    transmit_subparser = subparser.add_parser(
        'transmit', help="Send a job to a remote ansible-runner process")
    add_args_to_parser(transmit_subparser, DEFAULT_CLI_ARGS['positional_args'])

    worker_subparser = subparser.add_parser(
        'worker', help="Execute work streamed from a controlling instance")
    worker_subparser.add_argument(
        "--private-data-dir",
        help="base directory containing the ansible-runner metadata "
        "(project, inventory, env, etc)",
    )

    process_subparser = subparser.add_parser(
        'process',
        help=
        "Receive the output of remote ansible-runner work and distribute the results"
    )
    add_args_to_parser(process_subparser, DEFAULT_CLI_ARGS['positional_args'])

    # adhoc command exec
    adhoc_subparser = subparser.add_parser(
        'adhoc', help="Run ansible adhoc commands in an Execution Environment")
    adhoc_subparser.add_argument(
        "--private-data-dir",
        help="base directory containing the ansible-runner metadata "
        "(project, inventory, env, etc)",
    )
    add_args_to_parser(adhoc_subparser, DEFAULT_CLI_ARGS['execenv_cli_group'])

    # playbook command exec
    playbook_subparser = subparser.add_parser(
        'playbook',
        help="Run ansible-playbook commands in an Execution Environment")
    playbook_subparser.add_argument(
        "--private-data-dir",
        help="base directory containing the ansible-runner metadata "
        "(project, inventory, env, etc)",
    )
    add_args_to_parser(playbook_subparser,
                       DEFAULT_CLI_ARGS['execenv_cli_group'])

    # generic args for all subparsers
    add_args_to_parser(run_subparser, DEFAULT_CLI_ARGS['generic_args'])
    add_args_to_parser(start_subparser, DEFAULT_CLI_ARGS['generic_args'])
    add_args_to_parser(stop_subparser, DEFAULT_CLI_ARGS['generic_args'])
    add_args_to_parser(isalive_subparser, DEFAULT_CLI_ARGS['generic_args'])
    add_args_to_parser(adhoc_subparser, DEFAULT_CLI_ARGS['generic_args'])
    add_args_to_parser(playbook_subparser, DEFAULT_CLI_ARGS['generic_args'])
    add_args_to_parser(transmit_subparser, DEFAULT_CLI_ARGS['generic_args'])
    add_args_to_parser(worker_subparser, DEFAULT_CLI_ARGS['generic_args'])
    add_args_to_parser(process_subparser, DEFAULT_CLI_ARGS['generic_args'])

    # runner group
    ansible_runner_group_options = (
        "Ansible Runner Options",
        "configuration options for controlling the ansible-runner "
        "runtime environment.",
    )
    base_runner_group = parser.add_argument_group(
        *ansible_runner_group_options)
    run_runner_group = run_subparser.add_argument_group(
        *ansible_runner_group_options)
    start_runner_group = start_subparser.add_argument_group(
        *ansible_runner_group_options)
    stop_runner_group = stop_subparser.add_argument_group(
        *ansible_runner_group_options)
    isalive_runner_group = isalive_subparser.add_argument_group(
        *ansible_runner_group_options)
    transmit_runner_group = transmit_subparser.add_argument_group(
        *ansible_runner_group_options)
    add_args_to_parser(base_runner_group, DEFAULT_CLI_ARGS['runner_group'])
    add_args_to_parser(run_runner_group, DEFAULT_CLI_ARGS['runner_group'])
    add_args_to_parser(start_runner_group, DEFAULT_CLI_ARGS['runner_group'])
    add_args_to_parser(stop_runner_group, DEFAULT_CLI_ARGS['runner_group'])
    add_args_to_parser(isalive_runner_group, DEFAULT_CLI_ARGS['runner_group'])
    add_args_to_parser(transmit_runner_group, DEFAULT_CLI_ARGS['runner_group'])

    # mutually exclusive group
    run_mutually_exclusive_group = run_subparser.add_mutually_exclusive_group()
    start_mutually_exclusive_group = start_subparser.add_mutually_exclusive_group(
    )
    stop_mutually_exclusive_group = stop_subparser.add_mutually_exclusive_group(
    )
    isalive_mutually_exclusive_group = isalive_subparser.add_mutually_exclusive_group(
    )
    transmit_mutually_exclusive_group = transmit_subparser.add_mutually_exclusive_group(
    )
    add_args_to_parser(run_mutually_exclusive_group,
                       DEFAULT_CLI_ARGS['mutually_exclusive_group'])
    add_args_to_parser(start_mutually_exclusive_group,
                       DEFAULT_CLI_ARGS['mutually_exclusive_group'])
    add_args_to_parser(stop_mutually_exclusive_group,
                       DEFAULT_CLI_ARGS['mutually_exclusive_group'])
    add_args_to_parser(isalive_mutually_exclusive_group,
                       DEFAULT_CLI_ARGS['mutually_exclusive_group'])
    add_args_to_parser(transmit_mutually_exclusive_group,
                       DEFAULT_CLI_ARGS['mutually_exclusive_group'])

    # ansible options
    ansible_options = (
        "Ansible Options",
        "control the ansible[-playbook] execution environment",
    )
    run_ansible_group = run_subparser.add_argument_group(*ansible_options)
    start_ansible_group = start_subparser.add_argument_group(*ansible_options)
    stop_ansible_group = stop_subparser.add_argument_group(*ansible_options)
    isalive_ansible_group = isalive_subparser.add_argument_group(
        *ansible_options)
    transmit_ansible_group = transmit_subparser.add_argument_group(
        *ansible_options)
    add_args_to_parser(run_ansible_group, DEFAULT_CLI_ARGS['ansible_group'])
    add_args_to_parser(start_ansible_group, DEFAULT_CLI_ARGS['ansible_group'])
    add_args_to_parser(stop_ansible_group, DEFAULT_CLI_ARGS['ansible_group'])
    add_args_to_parser(isalive_ansible_group,
                       DEFAULT_CLI_ARGS['ansible_group'])
    add_args_to_parser(transmit_ansible_group,
                       DEFAULT_CLI_ARGS['ansible_group'])

    # roles group
    roles_group_options = (
        "Ansible Role Options",
        "configuration options for directly executing Ansible roles",
    )
    run_roles_group = run_subparser.add_argument_group(*roles_group_options)
    start_roles_group = start_subparser.add_argument_group(
        *roles_group_options)
    stop_roles_group = stop_subparser.add_argument_group(*roles_group_options)
    isalive_roles_group = isalive_subparser.add_argument_group(
        *roles_group_options)
    transmit_roles_group = transmit_subparser.add_argument_group(
        *roles_group_options)
    add_args_to_parser(run_roles_group, DEFAULT_CLI_ARGS['roles_group'])
    add_args_to_parser(start_roles_group, DEFAULT_CLI_ARGS['roles_group'])
    add_args_to_parser(stop_roles_group, DEFAULT_CLI_ARGS['roles_group'])
    add_args_to_parser(isalive_roles_group, DEFAULT_CLI_ARGS['roles_group'])
    add_args_to_parser(transmit_roles_group, DEFAULT_CLI_ARGS['roles_group'])

    # modules groups

    modules_group_options = (
        "Ansible Module Options",
        "configuration options for directly executing Ansible modules",
    )
    run_modules_group = run_subparser.add_argument_group(
        *modules_group_options)
    start_modules_group = start_subparser.add_argument_group(
        *modules_group_options)
    stop_modules_group = stop_subparser.add_argument_group(
        *modules_group_options)
    isalive_modules_group = isalive_subparser.add_argument_group(
        *modules_group_options)
    transmit_modules_group = transmit_subparser.add_argument_group(
        *modules_group_options)
    add_args_to_parser(run_modules_group, DEFAULT_CLI_ARGS['modules_group'])
    add_args_to_parser(start_modules_group, DEFAULT_CLI_ARGS['modules_group'])
    add_args_to_parser(stop_modules_group, DEFAULT_CLI_ARGS['modules_group'])
    add_args_to_parser(isalive_modules_group,
                       DEFAULT_CLI_ARGS['modules_group'])
    add_args_to_parser(transmit_modules_group,
                       DEFAULT_CLI_ARGS['modules_group'])

    # playbook options
    playbook_group_options = (
        "Ansible Playbook Options",
        "configuation options for executing Ansible playbooks",
    )
    run_playbook_group = run_subparser.add_argument_group(
        *playbook_group_options)
    start_playbook_group = start_subparser.add_argument_group(
        *playbook_group_options)
    stop_playbook_group = stop_subparser.add_argument_group(
        *playbook_group_options)
    isalive_playbook_group = isalive_subparser.add_argument_group(
        *playbook_group_options)
    transmit_playbook_group = transmit_subparser.add_argument_group(
        *playbook_group_options)
    add_args_to_parser(run_playbook_group, DEFAULT_CLI_ARGS['playbook_group'])
    add_args_to_parser(start_playbook_group,
                       DEFAULT_CLI_ARGS['playbook_group'])
    add_args_to_parser(stop_playbook_group, DEFAULT_CLI_ARGS['playbook_group'])
    add_args_to_parser(isalive_playbook_group,
                       DEFAULT_CLI_ARGS['playbook_group'])
    add_args_to_parser(transmit_playbook_group,
                       DEFAULT_CLI_ARGS['playbook_group'])

    # container group
    container_group_options = (
        "Ansible Container Options",
        "configuation options for executing Ansible playbooks",
    )
    run_container_group = run_subparser.add_argument_group(
        *container_group_options)
    start_container_group = start_subparser.add_argument_group(
        *container_group_options)
    stop_container_group = stop_subparser.add_argument_group(
        *container_group_options)
    isalive_container_group = isalive_subparser.add_argument_group(
        *container_group_options)
    transmit_container_group = transmit_subparser.add_argument_group(
        *container_group_options)
    adhoc_container_group = adhoc_subparser.add_argument_group(
        *container_group_options)
    playbook_container_group = playbook_subparser.add_argument_group(
        *container_group_options)
    add_args_to_parser(run_container_group,
                       DEFAULT_CLI_ARGS['container_group'])
    add_args_to_parser(start_container_group,
                       DEFAULT_CLI_ARGS['container_group'])
    add_args_to_parser(stop_container_group,
                       DEFAULT_CLI_ARGS['container_group'])
    add_args_to_parser(isalive_container_group,
                       DEFAULT_CLI_ARGS['container_group'])
    add_args_to_parser(transmit_container_group,
                       DEFAULT_CLI_ARGS['container_group'])
    add_args_to_parser(adhoc_container_group,
                       DEFAULT_CLI_ARGS['container_group'])
    add_args_to_parser(playbook_container_group,
                       DEFAULT_CLI_ARGS['container_group'])

    if len(sys.argv) == 1:
        parser.print_usage()
        print_common_usage()
        parser.exit(status=0)

    if ('playbook' in sys.argv) or ('adhoc' in sys.argv):
        args, leftover_args = parser.parse_known_args(sys_args)
    else:
        args = parser.parse_args(sys_args)

    vargs = vars(args)

    # FIXME - Probably a more elegant way to handle this.
    # set some state about CLI Exec Env
    cli_execenv_cmd = ""

    if vargs.get('command') in ('adhoc', 'playbook'):
        cli_execenv_cmd = vargs.get('command')

        if not leftover_args:
            parser.exit(
                status=1,
                message=
                "The {} subcommand requires arguments to pass to Ansible inside the container.\n"
                .format(vargs.get('command')))

    if vargs.get('command') in ('worker', 'process', 'adhoc', 'playbook'):
        if not vargs.get('private_data_dir'):
            temp_private_dir = tempfile.mkdtemp()
            vargs['private_data_dir'] = temp_private_dir
            if vargs.get('keep_files', False):
                print("ANSIBLE-RUNNER: keeping temporary data directory: {}".
                      format(temp_private_dir))
            else:

                @atexit.register
                def conditionally_clean_cli_execenv_tempdir():
                    shutil.rmtree(temp_private_dir)

    if vargs.get('command') in ('start', 'run', 'transmit'):
        if vargs.get('hosts') and not (vargs.get('module')
                                       or vargs.get('role')):
            parser.exit(
                status=1,
                message="The --hosts option can only be used with -m or -r\n")
        if not (vargs.get('module')
                or vargs.get('role')) and not vargs.get('playbook'):
            parser.exit(
                status=1,
                message=
                "The -p option must be specified when not using -m or -r\n")

    output.configure()

    # enable or disable debug mode
    output.set_debug('enable' if vargs.get('debug') else 'disable')

    # set the output logfile
    if ('logfile' in args) and vargs.get('logfile'):
        output.set_logfile(vargs.get('logfile'))

    output.debug('starting debug logging')

    # get the absolute path for start since it is a daemon
    vargs['private_data_dir'] = os.path.abspath(vargs.get('private_data_dir'))

    pidfile = os.path.join(vargs.get('private_data_dir'), 'pid')

    try:
        os.makedirs(vargs.get('private_data_dir'), mode=0o700)
    except OSError as exc:
        if exc.errno == errno.EEXIST and os.path.isdir(
                vargs.get('private_data_dir')):
            pass
        else:
            raise

    stderr_path = None
    context = None
    if vargs.get('command') not in ('run', 'transmit', 'worker', 'adhoc',
                                    'playbook'):
        stderr_path = os.path.join(vargs.get('private_data_dir'), 'daemon.log')
        if not os.path.exists(stderr_path):
            os.close(
                os.open(stderr_path, os.O_CREAT, stat.S_IRUSR | stat.S_IWUSR))

    if vargs.get('command') in ('start', 'run', 'transmit', 'worker',
                                'process', 'adhoc', 'playbook'):

        if vargs.get('command') == 'start':
            import daemon
            from daemon.pidfile import TimeoutPIDLockFile
            context = daemon.DaemonContext(pidfile=TimeoutPIDLockFile(pidfile))
        else:
            context = threading.Lock()

        streamer = None
        if vargs.get('command') in ('transmit', 'worker', 'process'):
            streamer = vargs.get('command')

        with context:
            with role_manager(vargs) as vargs:
                run_options = dict(
                    private_data_dir=vargs.get('private_data_dir'),
                    ident=vargs.get('ident'),
                    binary=vargs.get('binary'),
                    playbook=vargs.get('playbook'),
                    module=vargs.get('module'),
                    module_args=vargs.get('module_args'),
                    host_pattern=vargs.get('hosts'),
                    verbosity=vargs.get('v'),
                    quiet=vargs.get('quiet'),
                    rotate_artifacts=vargs.get('rotate_artifacts'),
                    ignore_logging=False,
                    json_mode=vargs.get('json'),
                    omit_event_data=vargs.get('omit_event_data'),
                    only_failed_event_data=vargs.get('only_failed_event_data'),
                    inventory=vargs.get('inventory'),
                    forks=vargs.get('forks'),
                    project_dir=vargs.get('project_dir'),
                    artifact_dir=vargs.get('artifact_dir'),
                    roles_path=[vargs.get('roles_path')]
                    if vargs.get('roles_path') else None,
                    process_isolation=vargs.get('process_isolation'),
                    process_isolation_executable=vargs.get(
                        'process_isolation_executable'),
                    process_isolation_path=vargs.get('process_isolation_path'),
                    process_isolation_hide_paths=vargs.get(
                        'process_isolation_hide_paths'),
                    process_isolation_show_paths=vargs.get(
                        'process_isolation_show_paths'),
                    process_isolation_ro_paths=vargs.get(
                        'process_isolation_ro_paths'),
                    container_image=vargs.get('container_image'),
                    container_volume_mounts=vargs.get(
                        'container_volume_mounts'),
                    container_options=vargs.get('container_options'),
                    directory_isolation_base_path=vargs.get(
                        'directory_isolation_base_path'),
                    resource_profiling=vargs.get('resource_profiling'),
                    resource_profiling_base_cgroup=vargs.get(
                        'resource_profiling_base_cgroup'),
                    resource_profiling_cpu_poll_interval=vargs.get(
                        'resource_profiling_cpu_poll_interval'),
                    resource_profiling_memory_poll_interval=vargs.get(
                        'resource_profiling_memory_poll_interval'),
                    resource_profiling_pid_poll_interval=vargs.get(
                        'resource_profiling_pid_poll_interval'),
                    resource_profiling_results_dir=vargs.get(
                        'resource_profiling_results_dir'),
                    limit=vargs.get('limit'),
                    streamer=streamer,
                    cli_execenv_cmd=cli_execenv_cmd)
                if vargs.get('command') in ('adhoc', 'playbook'):
                    run_options['cmdline'] = sys.argv[sys.argv.
                                                      index(leftover_args[0]):]
                    run_options['process_isolation'] = True
                    run_options['process_isolation_executable'] = vargs.get(
                        'container_runtime')

                try:
                    res = run(**run_options)
                except Exception:
                    exc = traceback.format_exc()
                    if stderr_path:
                        open(stderr_path, 'w+').write(exc)
                    else:
                        sys.stderr.write(exc)
                    return 1
            return (res.rc)

    try:
        with open(pidfile, 'r') as f:
            pid = int(f.readline())
    except IOError:
        return (1)

    if vargs.get('command') == 'stop':
        Runner.handle_termination(pid, pidfile=pidfile)
        return (0)

    elif vargs.get('command') == 'is-alive':
        try:
            os.kill(pid, signal.SIG_DFL)
            return (0)
        except OSError:
            return (1)
Example 14
def webserver(args):
    """Starts Airflow Webserver"""
    print(settings.HEADER)

    access_logfile = args.access_logfile or conf.get('webserver', 'access_logfile')
    error_logfile = args.error_logfile or conf.get('webserver', 'error_logfile')
    num_workers = args.workers or conf.get('webserver', 'workers')
    worker_timeout = (args.worker_timeout or
                      conf.get('webserver', 'web_server_worker_timeout'))
    ssl_cert = args.ssl_cert or conf.get('webserver', 'web_server_ssl_cert')
    ssl_key = args.ssl_key or conf.get('webserver', 'web_server_ssl_key')
    if not ssl_cert and ssl_key:
        raise AirflowException(
            'An SSL certificate must also be provided for use with ' + ssl_key)
    if ssl_cert and not ssl_key:
        raise AirflowException(
            'An SSL key must also be provided for use with ' + ssl_cert)

    if args.debug:
        print(
            "Starting the web server on port {0} and host {1}.".format(
                args.port, args.hostname))
        app, _ = create_app(None, testing=conf.getboolean('core', 'unit_test_mode'))
        app.run(debug=True, use_reloader=not app.config['TESTING'],
                port=args.port, host=args.hostname,
                ssl_context=(ssl_cert, ssl_key) if ssl_cert and ssl_key else None)
    else:
        os.environ['SKIP_DAGS_PARSING'] = 'True'
        app = cached_app(None)
        pid, stdout, stderr, log_file = setup_locations(
            "webserver", args.pid, args.stdout, args.stderr, args.log_file)
        os.environ.pop('SKIP_DAGS_PARSING')
        if args.daemon:
            handle = setup_logging(log_file)
            stdout = open(stdout, 'w+')
            stderr = open(stderr, 'w+')

        print(
            textwrap.dedent('''\
                Running the Gunicorn Server with:
                Workers: {num_workers} {workerclass}
                Host: {hostname}:{port}
                Timeout: {worker_timeout}
                Logfiles: {access_logfile} {error_logfile}
                =================================================================\
            '''.format(num_workers=num_workers, workerclass=args.workerclass,
                       hostname=args.hostname, port=args.port,
                       worker_timeout=worker_timeout, access_logfile=access_logfile,
                       error_logfile=error_logfile)))

        run_args = [
            'gunicorn',
            '-w', str(num_workers),
            '-k', str(args.workerclass),
            '-t', str(worker_timeout),
            '-b', args.hostname + ':' + str(args.port),
            '-n', 'airflow-webserver',
            '-p', str(pid),
            '-c', 'python:airflow.www.gunicorn_config',
        ]

        if args.access_logfile:
            run_args += ['--access-logfile', str(args.access_logfile)]

        if args.error_logfile:
            run_args += ['--error-logfile', str(args.error_logfile)]

        if args.daemon:
            run_args += ['-D']

        if ssl_cert:
            run_args += ['--certfile', ssl_cert, '--keyfile', ssl_key]

        webserver_module = 'www'
        run_args += ["airflow." + webserver_module + ".app:cached_app()"]

        gunicorn_master_proc = None

        def kill_proc(dummy_signum, dummy_frame):  # pylint: disable=unused-argument
            gunicorn_master_proc.terminate()
            gunicorn_master_proc.wait()
            sys.exit(0)

        def monitor_gunicorn(gunicorn_master_proc):
            # These run forever until SIG{INT, TERM, KILL, ...} signal is sent
            if conf.getint('webserver', 'worker_refresh_interval') > 0:
                master_timeout = conf.getint('webserver', 'web_server_master_timeout')
                restart_workers(gunicorn_master_proc, num_workers, master_timeout)
            else:
                while gunicorn_master_proc.poll() is None:
                    time.sleep(1)

                sys.exit(gunicorn_master_proc.returncode)

        if args.daemon:
            base, ext = os.path.splitext(pid)
            ctx = daemon.DaemonContext(
                pidfile=TimeoutPIDLockFile(base + "-monitor" + ext, -1),
                files_preserve=[handle],
                stdout=stdout,
                stderr=stderr,
                signal_map={
                    signal.SIGINT: kill_proc,
                    signal.SIGTERM: kill_proc
                },
            )
            with ctx:
                subprocess.Popen(run_args, close_fds=True)

                # Reading pid file directly, since Popen#pid doesn't
                # seem to return the right value with DaemonContext.
                while True:
                    try:
                        with open(pid) as file:
                            gunicorn_master_proc_pid = int(file.read())
                            break
                    except OSError:
                        log.debug("Waiting for gunicorn's pid file to be created.")
                        time.sleep(0.1)

                gunicorn_master_proc = psutil.Process(gunicorn_master_proc_pid)
                monitor_gunicorn(gunicorn_master_proc)

            stdout.close()
            stderr.close()
        else:
            gunicorn_master_proc = subprocess.Popen(run_args, close_fds=True)

            signal.signal(signal.SIGINT, kill_proc)
            signal.signal(signal.SIGTERM, kill_proc)

            monitor_gunicorn(gunicorn_master_proc)
Example 15
def webserver(args):
    """Starts Airflow Webserver"""
    print(settings.HEADER)

    # Check for old/insecure config, and fail safe (i.e. don't launch) if the config is wildly insecure.
    if conf.get('webserver', 'secret_key') == 'temporary_key':
        from rich import print as rich_print

        rich_print(
            "[red][bold]ERROR:[/bold] The `secret_key` setting under the webserver config has an insecure "
            "value - Airflow has failed safe and refuses to start. Please change this value to a new, "
            "per-environment, randomly generated string, for example using this command `[cyan]openssl rand "
            "-hex 30[/cyan]`",
            file=sys.stderr,
        )
        sys.exit(1)

    access_logfile = args.access_logfile or conf.get('webserver',
                                                     'access_logfile')
    error_logfile = args.error_logfile or conf.get('webserver',
                                                   'error_logfile')
    access_logformat = args.access_logformat or conf.get(
        'webserver', 'access_logformat')
    num_workers = args.workers or conf.get('webserver', 'workers')
    worker_timeout = args.worker_timeout or conf.get(
        'webserver', 'web_server_worker_timeout')
    ssl_cert = args.ssl_cert or conf.get('webserver', 'web_server_ssl_cert')
    ssl_key = args.ssl_key or conf.get('webserver', 'web_server_ssl_key')
    if not ssl_cert and ssl_key:
        raise AirflowException(
            'An SSL certificate must also be provided for use with ' + ssl_key)
    if ssl_cert and not ssl_key:
        raise AirflowException(
            'An SSL key must also be provided for use with ' + ssl_cert)

    if args.debug:
        print(
            f"Starting the web server on port {args.port} and host {args.hostname}."
        )
        app = create_app(testing=conf.getboolean('core', 'unit_test_mode'))
        app.run(
            debug=True,
            use_reloader=not app.config['TESTING'],
            port=args.port,
            host=args.hostname,
            ssl_context=(ssl_cert, ssl_key) if ssl_cert and ssl_key else None,
        )
    else:

        pid_file, stdout, stderr, log_file = setup_locations(
            "webserver", args.pid, args.stdout, args.stderr, args.log_file)

        # Check if webserver is already running if not, remove old pidfile
        check_if_pidfile_process_is_running(pid_file=pid_file,
                                            process_name="webserver")

        print(
            textwrap.dedent(f'''\
                Running the Gunicorn Server with:
                Workers: {num_workers} {args.workerclass}
                Host: {args.hostname}:{args.port}
                Timeout: {worker_timeout}
                Logfiles: {access_logfile} {error_logfile}
                Access Logformat: {access_logformat}
                ================================================================='''
                            ))

        run_args = [
            sys.executable,
            '-m',
            'gunicorn',
            '--workers',
            str(num_workers),
            '--worker-class',
            str(args.workerclass),
            '--timeout',
            str(worker_timeout),
            '--bind',
            args.hostname + ':' + str(args.port),
            '--name',
            'airflow-webserver',
            '--pid',
            pid_file,
            '--config',
            'python:airflow.www.gunicorn_config',
        ]

        if args.access_logfile:
            run_args += ['--access-logfile', str(args.access_logfile)]

        if args.error_logfile:
            run_args += ['--error-logfile', str(args.error_logfile)]

        if args.access_logformat and args.access_logformat.strip():
            run_args += ['--access-logformat', str(args.access_logformat)]

        if args.daemon:
            run_args += ['--daemon']

        if ssl_cert:
            run_args += ['--certfile', ssl_cert, '--keyfile', ssl_key]

        run_args += ["airflow.www.app:cached_app()"]

        gunicorn_master_proc = None

        def kill_proc(signum, _):
            log.info("Received signal: %s. Closing gunicorn.", signum)
            gunicorn_master_proc.terminate()
            with suppress(TimeoutError):
                gunicorn_master_proc.wait(timeout=30)
            if gunicorn_master_proc.poll() is not None:
                gunicorn_master_proc.kill()
            sys.exit(0)

        def monitor_gunicorn(gunicorn_master_pid: int):
            # Register signal handlers
            signal.signal(signal.SIGINT, kill_proc)
            signal.signal(signal.SIGTERM, kill_proc)

            # These run forever until SIG{INT, TERM, KILL, ...} signal is sent
            GunicornMonitor(
                gunicorn_master_pid=gunicorn_master_pid,
                num_workers_expected=num_workers,
                master_timeout=conf.getint('webserver',
                                           'web_server_master_timeout'),
                worker_refresh_interval=conf.getint('webserver',
                                                    'worker_refresh_interval',
                                                    fallback=30),
                worker_refresh_batch_size=conf.getint(
                    'webserver', 'worker_refresh_batch_size', fallback=1),
                reload_on_plugin_change=conf.getboolean(
                    'webserver', 'reload_on_plugin_change', fallback=False),
            ).start()

        if args.daemon:
            # This makes possible errors get reported before daemonization
            os.environ['SKIP_DAGS_PARSING'] = 'True'
            app = create_app(None)
            os.environ.pop('SKIP_DAGS_PARSING')

            handle = setup_logging(log_file)

            base, ext = os.path.splitext(pid_file)
            with open(stdout, 'w+') as stdout, open(stderr, 'w+') as stderr:
                ctx = daemon.DaemonContext(
                    pidfile=TimeoutPIDLockFile(f"{base}-monitor{ext}", -1),
                    files_preserve=[handle],
                    stdout=stdout,
                    stderr=stderr,
                )
                with ctx:
                    subprocess.Popen(run_args, close_fds=True)

                    # Reading the pid of the gunicorn master, as it will differ
                    # from the pid of the process spawned above.
                    while True:
                        sleep(0.1)
                        gunicorn_master_proc_pid = read_pid_from_pidfile(
                            pid_file)
                        if gunicorn_master_proc_pid:
                            break

                    # Run Gunicorn monitor
                    gunicorn_master_proc = psutil.Process(
                        gunicorn_master_proc_pid)
                    monitor_gunicorn(gunicorn_master_proc.pid)

        else:
            with subprocess.Popen(run_args,
                                  close_fds=True) as gunicorn_master_proc:
                monitor_gunicorn(gunicorn_master_proc.pid)
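Both webserver variants re-read the gunicorn pidfile instead of trusting Popen.pid: started with --daemon, gunicorn forks away from the launcher process, so the pid that Popen reports is not the master's. A generic version of that wait loop, assuming the daemonized process eventually writes its pid to the file:

import time

def wait_for_pidfile(path, interval=0.1, timeout=30.0):
    # Poll `path` until a daemonized process has written its pid there.
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        try:
            with open(path) as f:
                content = f.read().strip()
            if content:
                return int(content)
        except (OSError, ValueError):
            pass  # file not created yet, or only partially written
        time.sleep(interval)
    raise TimeoutError(f'no pid appeared in {path} within {timeout}s')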
Example 16
def main():
    # Workaround for development
    modpath = os.path.realpath(os.path.join(
        os.path.dirname(os.path.realpath(__file__)),
        '..',
    ))
    if modpath not in sys.path:
        sys.path.insert(0, modpath)

    parser = argparse.ArgumentParser()
    parser.add_argument('restart', nargs='?')
    parser.add_argument('--foreground', '-f', action='store_true')
    parser.add_argument('--disable-loop-monitor', '-L', action='store_true')
    parser.add_argument('--plugins-dirs', '-p', action='append')
    parser.add_argument('--debug-level', choices=[
        'TRACE',
        'DEBUG',
        'INFO',
        'WARN',
        'ERROR',
    ])
    parser.add_argument('--log-handler', choices=[
        'console',
        'file',
    ])
    args = parser.parse_args()

    #  Logger
    if args.log_handler:
        log_handlers = [args.log_handler]
    else:
        log_handlers = ['console' if args.foreground else 'file']

    if args.debug_level is None and args.foreground:
        debug_level = 'TRACE'
    else:
        debug_level = args.debug_level or 'DEBUG'

    _logger = logger.Logger('middleware', debug_level)
    get_logger = _logger.getLogger()

    pidpath = '/var/run/middlewared.pid'

    if args.restart:
        if os.path.exists(pidpath):
            with open(pidpath, 'r') as f:
                pid = int(f.read().strip())
            try:
                os.kill(pid, 15)
            except ProcessLookupError as e:
                if e.errno != errno.ESRCH:
                    raise

    if not args.foreground:
        _logger.configure_logging('file')
        daemonc = DaemonContext(
            pidfile=TimeoutPIDLockFile(pidpath),
            detach_process=True,
            stdout=logger.LoggerStream(get_logger),
            stderr=logger.LoggerStream(get_logger),
        )
        daemonc.open()
    elif 'file' in log_handlers:
        _logger.configure_logging('file')
        sys.stdout = sys.stderr = _logger.stream()
    elif 'console' in log_handlers:
        _logger.configure_logging('console')
    else:
        _logger.configure_logging('file')

    setproctitle.setproctitle('middlewared')
    # Workaround to tell django to not set up logging on its own
    os.environ['MIDDLEWARED'] = str(os.getpid())

    if args.foreground:
        with open(pidpath, "w") as _pidfile:
            _pidfile.write(f"{str(os.getpid())}\n")

    Middleware(
        loop_monitor=not args.disable_loop_monitor,
        plugins_dirs=args.plugins_dirs,
        debug_level=debug_level,
    ).run()
    if not args.foreground:
        daemonc.close()
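Unlike the other examples, this one calls DaemonContext.open() and close() explicitly rather than using a with block. The two forms are interchangeable, since DaemonContext is a context manager whose __enter__ calls open() and whose __exit__ calls close(); the explicit form suits code like main() above, where daemonization and shutdown happen in different places.

import daemon
from daemon.pidfile import TimeoutPIDLockFile

ctx = daemon.DaemonContext(pidfile=TimeoutPIDLockFile('/var/run/svc.pid'))

ctx.open()       # equivalent to entering `with ctx:`
try:
    pass         # run the service here
finally:
    ctx.close()  # equivalent to leaving the `with` block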
Example 17
def worker(args):
    """Starts Airflow Celery worker"""
    env = os.environ.copy()
    env['AIRFLOW_HOME'] = settings.AIRFLOW_HOME

    if not settings.validate_session():
        print("Worker exiting... database connection precheck failed! ")
        sys.exit(1)

    # Celery worker
    from airflow.executors.celery_executor import app as celery_app
    from celery.bin import worker  # pylint: disable=redefined-outer-name

    autoscale = args.autoscale
    if autoscale is None and conf.has_option("celery", "worker_autoscale"):
        autoscale = conf.get("celery", "worker_autoscale")
    worker = worker.worker(app=celery_app)   # pylint: disable=redefined-outer-name
    options = {
        'optimization': 'fair',
        'O': 'fair',
        'queues': args.queues,
        'concurrency': args.concurrency,
        'autoscale': autoscale,
        'hostname': args.celery_hostname,
        'loglevel': conf.get('core', 'LOGGING_LEVEL'),
    }

    if conf.has_option("celery", "pool"):
        options["pool"] = conf.get("celery", "pool")

    if args.daemon:
        pid, stdout, stderr, log_file = setup_locations("worker",
                                                        args.pid,
                                                        args.stdout,
                                                        args.stderr,
                                                        args.log_file)
        handle = setup_logging(log_file)
        stdout = open(stdout, 'w+')
        stderr = open(stderr, 'w+')

        ctx = daemon.DaemonContext(
            pidfile=TimeoutPIDLockFile(pid, -1),
            files_preserve=[handle],
            stdout=stdout,
            stderr=stderr,
        )
        with ctx:
            sub_proc = subprocess.Popen(['airflow', 'serve_logs'], env=env, close_fds=True)
            worker.run(**options)
            sub_proc.kill()

        stdout.close()
        stderr.close()
    else:
        signal.signal(signal.SIGINT, sigint_handler)
        signal.signal(signal.SIGTERM, sigint_handler)

        sub_proc = subprocess.Popen(['airflow', 'serve_logs'], env=env, close_fds=True)

        worker.run(**options)
        sub_proc.kill()
Example 18
def run_manager():
    parser = argparse.ArgumentParser(description='Multipile', )
    parser.add_argument('-r', '--root_path', help='The rootpath for mailpile')
    parser.add_argument('-m',
                        '--mailpile_bin',
                        help='The mailpile executable',
                        default='mailpile')
    parser.add_argument('-b',
                        '--backend',
                        help='the backend to use',
                        default='fork',
                        choices=['fork', 'docker'])
    parser.add_argument('--bind',
                        help="bind to interface. Default 127.0.0.1",
                        default='127.0.0.1')
    parser.add_argument('--sslcert',
                        help='The SSL certificate to use',
                        default=None)
    parser.add_argument('--sslkey', help='The SSL key to use', default=None)
    parser.add_argument('--debug',
                        help='Set log level to debug',
                        default=False,
                        action='store_true')
    parser.add_argument(
        '--daemon',
        help='start in daemon mode and put process into background',
        default=False,
        action='store_true')
    parser.add_argument('--pidfile',
                        help='path for pid file. By default none is created',
                        default=None)
    parser.add_argument('--log-config',
                        help='Provide a python logging config file',
                        default=None)
    parser.add_argument(
        '--leap-provider',
        '-lp',
        help='Specify the LEAP provider this dispatcher will connect to',
        default='localhost')
    parser.add_argument(
        '--leap-provider-ca',
        '-lpc',
        dest='leap_provider_ca',
        help='Specify the LEAP provider CA to use to validate connections',
        default=True)
    parser.add_argument(
        '--leap-provider-fingerprint',
        '-lpf',
        dest='leap_provider_fingerprint',
        help=
        'Specify the LEAP provider fingerprint to use to validate connections',
        default=None)
    group = parser.add_mutually_exclusive_group()
    group.add_argument('--mailpile-virtualenv',
                       help='Use specified virtual env for mailpile',
                       default=None)
    group.add_argument('--auto-mailpile-virtualenv',
                       dest='auto_venv',
                       help='Bootstrap virtualenv for mailpile',
                       default=False,
                       action='store_true')

    args = parser.parse_args(args=filter_args())

    if args.sslcert:
        ssl_config = SSLConfig(args.sslcert, args.sslkey,
                               latest_available_ssl_version())
    else:
        ssl_config = None

    venv = args.mailpile_virtualenv
    mailpile_bin = args.mailpile_bin

    if args.auto_venv:
        venv, mailpile_bin = prepare_venv(args.root_path)

    if args.root_path is None or not os.path.isdir(args.root_path):
        raise ValueError('root path %s not found!' % args.root_path)

    log_level = logging.DEBUG if args.debug else logging.INFO
    log_config = args.log_config

    provider_ca = args.leap_provider_ca if args.leap_provider_fingerprint is None else False

    manager = DispatcherManager(
        args.root_path,
        mailpile_bin,
        ssl_config,
        args.leap_provider,
        mailpile_virtualenv=venv,
        provider=args.backend,
        leap_provider_ca=provider_ca,
        leap_provider_fingerprint=args.leap_provider_fingerprint,
        bindaddr=args.bind)

    if args.daemon:
        pidfile = TimeoutPIDLockFile(
            args.pidfile,
            acquire_timeout=PID_ACQUIRE_TIMEOUT_IN_S) if args.pidfile else None
        can_use_pidfile(pidfile)
        with daemon.DaemonContext(pidfile=pidfile):
            # Init logging only after the subprocess has been spawned; otherwise there might be hiccups.
            init_logging('manager', level=log_level, config_file=log_config)
            manager.serve_forever()
    else:
        init_logging('manager', level=log_level, config_file=log_config)
        manager.serve_forever()
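
can_use_pidfile above is a project helper; a rough equivalent built only on the lockfile API could look like this (a sketch, not the dispatcher's actual implementation):

import os

def can_use_pidfile(pidfile):
    # Fail fast if another live process already owns the pidfile.
    if pidfile is None:
        return
    existing_pid = pidfile.read_pid()
    if existing_pid is None:
        return
    try:
        os.kill(existing_pid, 0)  # signal 0 only probes for existence
    except OSError:
        pidfile.break_lock()      # stale pidfile, safe to reclaim
    else:
        raise SystemExit('already running as pid %d' % existing_pid)
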
Example no. 19
def main():
    global SETUID_USER, SETGID_GROUP
    options = parse_args()
    SETUID_USER = options.user
    SETGID_GROUP = options.group
    root = desktop.lib.paths.get_run_root()
    log_dir = os.path.join(root, options.log_dir)

    if options.show_supervisees:
        for name, supervisee in get_supervisees().items():
            if name not in options.supervisee_exclusions:
                print(name)
        sys.exit(0)

    # Let our children know
    os.environ['DESKTOP_LOG_DIR'] = log_dir

    if not os.path.exists(log_dir):
        os.makedirs(log_dir)

    setup_user_info()

    pid_file = os.path.abspath(os.path.join(root, options.pid_file))
    pidfile_context = TimeoutPIDLockFile(pid_file, LOCKFILE_TIMEOUT)

    existing_pid = pidfile_context.read_pid()
    if existing_pid:
        cmdline = get_pid_cmdline(existing_pid)
        if not cmdline.strip():
            # pid is not actually running
            pidfile_context.break_lock()
        else:
            LOG.error(
                "Pid file %s indicates that Hue is already running (pid %d)" %
                (pid_file, existing_pid))
            sys.exit(1)
    elif pidfile_context.is_locked():
        # If there's no pidfile but there is a lock, it's a strange situation,
        # but we should break the lock because it doesn't seem to be actually running
        logging.warn("No existing pid file, but lock exists. Breaking lock.")
        pidfile_context.break_lock()

    if options.daemonize:
        if sys.version_info[0] > 2:
            outfile = open_file(os.path.join(log_dir, 'supervisor.out'), 'ba+',
                                0)
        else:
            outfile = open_file(os.path.join(log_dir, 'supervisor.out'), 'a+',
                                0)
        context = daemon.DaemonContext(
            working_directory=root,
            pidfile=pidfile_context,
            stdout=outfile,
            stderr=outfile,
        )

        context.signal_map = {
            signal.SIGTERM: sig_handler,
        }

        context.open()
    os.umask(0o22)

    # Log initialization must come after daemonization, which closes all open files.
    # Log statements before this point go to stderr.
    _init_log(log_dir)

    sups = []
    try:
        for name, supervisee in get_supervisees().items():

            if name in options.supervisee_exclusions:
                continue

            if supervisee.drop_root:
                preexec_fn = drop_privileges
            else:
                preexec_fn = None

            if options.daemonize:
                if sys.version_info[0] > 2:
                    log_stdout = open_file(
                        os.path.join(log_dir, name + '.out'), 'ba+', 0)
                else:
                    log_stdout = open_file(
                        os.path.join(log_dir, name + '.out'), 'a+', 0)
                log_stderr = log_stdout
            else:
                # Passing None to subprocess.Popen later makes the subprocess inherit the
                # standard fds from the supervisor
                log_stdout = None
                log_stderr = None
            sup = Supervisor(supervisee.cmdv,
                             stdout=log_stdout,
                             stderr=log_stderr,
                             preexec_fn=preexec_fn)
            sup.start()
            sups.append(sup)

        wait_loop(sups, options)
    except BaseException:
        LOG.exception("Exception in supervisor main loop")
        shutdown(sups)  # shutdown() exits the process

    return 0
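
Note that the supervisor enters the daemon context with context.open() instead of a with-block, so the rest of main() continues unchanged inside the detached child. Condensed to its essentials (paths and sig_handler are placeholders):

import signal
import daemon
from daemon.pidfile import TimeoutPIDLockFile

def sig_handler(signum, frame):
    raise SystemExit('received signal %d' % signum)

context = daemon.DaemonContext(
    working_directory='/srv/app',
    pidfile=TimeoutPIDLockFile('/srv/app/app.pid', 5),
)
context.signal_map = {signal.SIGTERM: sig_handler}
context.open()    # from here on we are the daemonized child
# ... set up logging, start supervised children ...
context.close()
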
Example no. 20
    handler = logging.handlers.SysLogHandler(address='/dev/log')
    logger.addHandler(handler)
    return logger


cfg = None
logger = None
check_thread = None
updater = None
pid = None

if ((len(sys.argv) == 3)
        and (sys.argv[2] in ['start', 'stop', 'reload', 'status'])):
    cfg = readConf(sys.argv[1])
    updater = Updater(token=cfg['TOKEN'], use_context=True)
    pid = TimeoutPIDLockFile(cfg['PIDFILE'], cfg['LOCK_WAIT_TIMEOUT'])
    if sys.argv[2] == 'stop':
        if pid.is_locked():
            pidNumber = pid.read_pid()
            os.kill(pidNumber, signal.SIGHUP)
            sleep(15)
            if psutil.pid_exists(pidNumber):
                os.kill(pidNumber, signal.SIGTERM)
                sleep(5)
                if psutil.pid_exists(pidNumber) or pid.is_locked():
                    sys.stderr.write(cfg['MONITOR_NAME'] +
                                     " Bot can't be stopped")
                    sys.exit(1)
        sys.exit(0)
    elif sys.argv[2] == 'reload':
        if pid.is_locked():
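
The stop branch above escalates from SIGHUP to SIGTERM before giving up; the same idea as a reusable helper (a sketch, assuming psutil is available):

import os
import signal
from time import sleep

import psutil

def stop_daemon(pid_number, grace=15, final=5):
    # Ask politely first, then escalate if the process ignores us.
    os.kill(pid_number, signal.SIGHUP)
    sleep(grace)
    if psutil.pid_exists(pid_number):
        os.kill(pid_number, signal.SIGTERM)
        sleep(final)
    return not psutil.pid_exists(pid_number)
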
Example no. 21
    private_data_dir = args.private_data_dir
    pidfile = os.path.join(private_data_dir, 'pid')

    if args.command == 'start':
        # create a file to log stderr in case the daemonized process throws
        # an exception before it gets to `pexpect.spawn`
        stderr_path = os.path.join(private_data_dir, 'artifacts', 'daemon.log')
        if not os.path.exists(stderr_path):
            os.mknod(stderr_path, stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR)
        stderr = open(stderr_path, 'w+')

        import daemon
        from daemon.pidfile import TimeoutPIDLockFile
        context = daemon.DaemonContext(
            pidfile=TimeoutPIDLockFile(pidfile),
            stderr=stderr
        )
        with context:
            __run__(private_data_dir)
        sys.exit(0)

    try:
        with open(pidfile, 'r') as f:
            pid = int(f.readline())
    except IOError:
        sys.exit(1)

    if args.command == 'stop':
        try:
            with open(os.path.join(private_data_dir, 'args'), 'r') as args:
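
This example pre-creates daemon.log before forking so that an exception in the daemonized child still leaves a trace. os.mknod works for that but is not available on every platform; an os.open-based equivalent (a sketch) is:

import os
import stat

def ensure_stderr_file(path):
    # Create an owner-only regular file if it does not exist yet.
    if not os.path.exists(path):
        os.close(os.open(path, os.O_CREAT, stat.S_IRUSR | stat.S_IWUSR))
    return open(path, 'w+')
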
Example no. 22
def main(sys_args=None):
    """Main entry point for ansible-runner executable

    When the ```ansible-runner``` command is executed, this function
    is the main entry point that is called and executed.

    :param sys_args: List of arguments to be parsed by the parser
    :type sys_args: list

    :returns: an instance of SystemExit
    :rtype: SystemExit
    """

    parser = AnsibleRunnerArgumentParser(
        prog='ansible-runner',
        description=
        "Use 'ansible-runner' (with no arguments) to see basic usage")
    subparser = parser.add_subparsers(
        help="Command to invoke",
        dest='command',
        description="COMMAND PRIVATE_DATA_DIR [ARGS]")
    add_args_to_parser(parser, DEFAULT_CLI_ARGS['generic_args'])
    subparser.required = True

    # positional options
    run_subparser = subparser.add_parser(
        'run', help="Run ansible-runner in the foreground")
    add_args_to_parser(run_subparser, DEFAULT_CLI_ARGS['positional_args'])
    add_args_to_parser(run_subparser, DEFAULT_CLI_ARGS['playbook_group'])
    start_subparser = subparser.add_parser(
        'start', help="Start an ansible-runner process in the background")
    add_args_to_parser(start_subparser, DEFAULT_CLI_ARGS['positional_args'])
    add_args_to_parser(start_subparser, DEFAULT_CLI_ARGS['playbook_group'])
    stop_subparser = subparser.add_parser(
        'stop',
        help="Stop an ansible-runner process that's running in the background")
    add_args_to_parser(stop_subparser, DEFAULT_CLI_ARGS['positional_args'])
    isalive_subparser = subparser.add_parser(
        'is-alive',
        help=
        "Check if a an ansible-runner process in the background is still running."
    )
    add_args_to_parser(isalive_subparser, DEFAULT_CLI_ARGS['positional_args'])

    # streaming commands
    transmit_subparser = subparser.add_parser(
        'transmit', help="Send a job to a remote ansible-runner process")
    add_args_to_parser(transmit_subparser, DEFAULT_CLI_ARGS['positional_args'])

    worker_subparser = subparser.add_parser(
        'worker', help="Execute work streamed from a controlling instance")
    worker_subcommands = worker_subparser.add_subparsers(
        help="Sub-sub command to invoke",
        dest='worker_subcommand',
        description="ansible-runner worker [sub-sub-command]",
    )
    cleanup_command = worker_subcommands.add_parser(
        'cleanup',
        help=
        "Cleanup private_data_dir patterns from prior jobs and supporting temporary folders.",
    )
    cleanup.add_cleanup_args(cleanup_command)

    worker_subparser.add_argument(
        "--private-data-dir",
        help="base directory containing the ansible-runner metadata "
        "(project, inventory, env, etc)",
    )

    worker_subparser.add_argument(
        "--worker-info",
        dest="worker_info",
        action="store_true",
        help=
        "show the execution node's Ansible Runner version along with its memory and CPU capacities"
    )
    worker_subparser.add_argument(
        "--delete",
        dest="delete_directory",
        action="store_true",
        default=False,
        help=
        ("Delete existing folder (and everything in it) in the location specified by --private-data-dir. "
         "The directory will be re-populated when the streamed data is unpacked. "
         "Using this will also assure that the directory is deleted when the job finishes."
         ))
    process_subparser = subparser.add_parser(
        'process',
        help=
        "Receive the output of remote ansible-runner work and distribute the results"
    )
    add_args_to_parser(process_subparser, DEFAULT_CLI_ARGS['positional_args'])

    # generic args for all subparsers
    add_args_to_parser(run_subparser, DEFAULT_CLI_ARGS['generic_args'])
    add_args_to_parser(start_subparser, DEFAULT_CLI_ARGS['generic_args'])
    add_args_to_parser(stop_subparser, DEFAULT_CLI_ARGS['generic_args'])
    add_args_to_parser(isalive_subparser, DEFAULT_CLI_ARGS['generic_args'])
    add_args_to_parser(transmit_subparser, DEFAULT_CLI_ARGS['generic_args'])
    add_args_to_parser(worker_subparser, DEFAULT_CLI_ARGS['generic_args'])
    add_args_to_parser(process_subparser, DEFAULT_CLI_ARGS['generic_args'])

    # runner group
    ansible_runner_group_options = (
        "Ansible Runner Options",
        "configuration options for controlling the ansible-runner "
        "runtime environment.",
    )
    base_runner_group = parser.add_argument_group(
        *ansible_runner_group_options)
    run_runner_group = run_subparser.add_argument_group(
        *ansible_runner_group_options)
    start_runner_group = start_subparser.add_argument_group(
        *ansible_runner_group_options)
    stop_runner_group = stop_subparser.add_argument_group(
        *ansible_runner_group_options)
    isalive_runner_group = isalive_subparser.add_argument_group(
        *ansible_runner_group_options)
    transmit_runner_group = transmit_subparser.add_argument_group(
        *ansible_runner_group_options)
    add_args_to_parser(base_runner_group, DEFAULT_CLI_ARGS['runner_group'])
    add_args_to_parser(run_runner_group, DEFAULT_CLI_ARGS['runner_group'])
    add_args_to_parser(start_runner_group, DEFAULT_CLI_ARGS['runner_group'])
    add_args_to_parser(stop_runner_group, DEFAULT_CLI_ARGS['runner_group'])
    add_args_to_parser(isalive_runner_group, DEFAULT_CLI_ARGS['runner_group'])
    add_args_to_parser(transmit_runner_group, DEFAULT_CLI_ARGS['runner_group'])

    # mutually exclusive group
    run_mutually_exclusive_group = run_subparser.add_mutually_exclusive_group()
    start_mutually_exclusive_group = start_subparser.add_mutually_exclusive_group(
    )
    stop_mutually_exclusive_group = stop_subparser.add_mutually_exclusive_group(
    )
    isalive_mutually_exclusive_group = isalive_subparser.add_mutually_exclusive_group(
    )
    transmit_mutually_exclusive_group = transmit_subparser.add_mutually_exclusive_group(
    )
    add_args_to_parser(run_mutually_exclusive_group,
                       DEFAULT_CLI_ARGS['mutually_exclusive_group'])
    add_args_to_parser(start_mutually_exclusive_group,
                       DEFAULT_CLI_ARGS['mutually_exclusive_group'])
    add_args_to_parser(stop_mutually_exclusive_group,
                       DEFAULT_CLI_ARGS['mutually_exclusive_group'])
    add_args_to_parser(isalive_mutually_exclusive_group,
                       DEFAULT_CLI_ARGS['mutually_exclusive_group'])
    add_args_to_parser(transmit_mutually_exclusive_group,
                       DEFAULT_CLI_ARGS['mutually_exclusive_group'])

    # ansible options
    ansible_options = (
        "Ansible Options",
        "control the ansible[-playbook] execution environment",
    )
    run_ansible_group = run_subparser.add_argument_group(*ansible_options)
    start_ansible_group = start_subparser.add_argument_group(*ansible_options)
    stop_ansible_group = stop_subparser.add_argument_group(*ansible_options)
    isalive_ansible_group = isalive_subparser.add_argument_group(
        *ansible_options)
    transmit_ansible_group = transmit_subparser.add_argument_group(
        *ansible_options)
    add_args_to_parser(run_ansible_group, DEFAULT_CLI_ARGS['ansible_group'])
    add_args_to_parser(start_ansible_group, DEFAULT_CLI_ARGS['ansible_group'])
    add_args_to_parser(stop_ansible_group, DEFAULT_CLI_ARGS['ansible_group'])
    add_args_to_parser(isalive_ansible_group,
                       DEFAULT_CLI_ARGS['ansible_group'])
    add_args_to_parser(transmit_ansible_group,
                       DEFAULT_CLI_ARGS['ansible_group'])

    # roles group
    roles_group_options = (
        "Ansible Role Options",
        "configuration options for directly executing Ansible roles",
    )
    run_roles_group = run_subparser.add_argument_group(*roles_group_options)
    start_roles_group = start_subparser.add_argument_group(
        *roles_group_options)
    stop_roles_group = stop_subparser.add_argument_group(*roles_group_options)
    isalive_roles_group = isalive_subparser.add_argument_group(
        *roles_group_options)
    transmit_roles_group = transmit_subparser.add_argument_group(
        *roles_group_options)
    add_args_to_parser(run_roles_group, DEFAULT_CLI_ARGS['roles_group'])
    add_args_to_parser(start_roles_group, DEFAULT_CLI_ARGS['roles_group'])
    add_args_to_parser(stop_roles_group, DEFAULT_CLI_ARGS['roles_group'])
    add_args_to_parser(isalive_roles_group, DEFAULT_CLI_ARGS['roles_group'])
    add_args_to_parser(transmit_roles_group, DEFAULT_CLI_ARGS['roles_group'])

    # modules groups

    modules_group_options = (
        "Ansible Module Options",
        "configuration options for directly executing Ansible modules",
    )
    run_modules_group = run_subparser.add_argument_group(
        *modules_group_options)
    start_modules_group = start_subparser.add_argument_group(
        *modules_group_options)
    stop_modules_group = stop_subparser.add_argument_group(
        *modules_group_options)
    isalive_modules_group = isalive_subparser.add_argument_group(
        *modules_group_options)
    transmit_modules_group = transmit_subparser.add_argument_group(
        *modules_group_options)
    add_args_to_parser(run_modules_group, DEFAULT_CLI_ARGS['modules_group'])
    add_args_to_parser(start_modules_group, DEFAULT_CLI_ARGS['modules_group'])
    add_args_to_parser(stop_modules_group, DEFAULT_CLI_ARGS['modules_group'])
    add_args_to_parser(isalive_modules_group,
                       DEFAULT_CLI_ARGS['modules_group'])
    add_args_to_parser(transmit_modules_group,
                       DEFAULT_CLI_ARGS['modules_group'])

    # container group
    container_group_options = (
        "Ansible Container Options",
        "configuation options for executing Ansible playbooks",
    )
    run_container_group = run_subparser.add_argument_group(
        *container_group_options)
    start_container_group = start_subparser.add_argument_group(
        *container_group_options)
    stop_container_group = stop_subparser.add_argument_group(
        *container_group_options)
    isalive_container_group = isalive_subparser.add_argument_group(
        *container_group_options)
    transmit_container_group = transmit_subparser.add_argument_group(
        *container_group_options)
    add_args_to_parser(run_container_group,
                       DEFAULT_CLI_ARGS['container_group'])
    add_args_to_parser(start_container_group,
                       DEFAULT_CLI_ARGS['container_group'])
    add_args_to_parser(stop_container_group,
                       DEFAULT_CLI_ARGS['container_group'])
    add_args_to_parser(isalive_container_group,
                       DEFAULT_CLI_ARGS['container_group'])
    add_args_to_parser(transmit_container_group,
                       DEFAULT_CLI_ARGS['container_group'])

    args = parser.parse_args(sys_args)

    vargs = vars(args)

    if vargs.get('command') == 'worker':
        if vargs.get('worker_subcommand') == 'cleanup':
            cleanup.run_cleanup(vargs)
            parser.exit(0)
        if vargs.get('worker_info'):
            cpu = get_cpu_count()
            mem = get_mem_in_bytes()
            errors = []
            uuid = ensure_uuid()
            if not isinstance(mem, int):
                errors.append(mem)
                mem = None
            if "Could not find" in uuid:
                errors.append(uuid)
                uuid = None
            info = {
                'errors': errors,
                'mem_in_bytes': mem,
                'cpu_count': cpu,
                'runner_version': VERSION,
                'uuid': uuid,
            }
            print(safe_dump(info, default_flow_style=True))
            parser.exit(0)

        private_data_dir = vargs.get('private_data_dir')
        delete_directory = vargs.get('delete_directory', False)
        if private_data_dir and delete_directory:
            shutil.rmtree(private_data_dir, ignore_errors=True)
            register_for_cleanup(private_data_dir)
        elif private_data_dir is None:
            temp_private_dir = tempfile.mkdtemp()
            vargs['private_data_dir'] = temp_private_dir
            register_for_cleanup(temp_private_dir)

    if vargs.get('command') == 'process':
        # the process command is the final destination of artifacts; the user
        # expects private_data_dir not to be cleaned up
        if not vargs.get('private_data_dir'):
            temp_private_dir = tempfile.mkdtemp()
            vargs['private_data_dir'] = temp_private_dir

    if vargs.get('command') in ('start', 'run', 'transmit'):
        if vargs.get('hosts') and not (vargs.get('module')
                                       or vargs.get('role')):
            parser.exit(
                status=1,
                message="The --hosts option can only be used with -m or -r\n")
        if not (vargs.get('module')
                or vargs.get('role')) and not vargs.get('playbook'):
            parser.exit(
                status=1,
                message=
                "The -p option must be specified when not using -m or -r\n")

    output.configure()

    # enable or disable debug mode
    output.set_debug('enable' if vargs.get('debug') else 'disable')

    # set the output logfile
    if ('logfile' in args) and vargs.get('logfile'):
        output.set_logfile(vargs.get('logfile'))

    output.debug('starting debug logging')

    # get the absolute path for start since it is a daemon
    vargs['private_data_dir'] = os.path.abspath(vargs.get('private_data_dir'))

    pidfile = os.path.join(vargs.get('private_data_dir'), 'pid')

    try:
        os.makedirs(vargs.get('private_data_dir'), mode=0o700)
    except OSError as exc:
        if exc.errno == errno.EEXIST and os.path.isdir(
                vargs.get('private_data_dir')):
            pass
        else:
            raise

    stderr_path = None
    context = None
    if vargs.get('command') not in ('run', 'transmit', 'worker'):
        stderr_path = os.path.join(vargs.get('private_data_dir'), 'daemon.log')
        if not os.path.exists(stderr_path):
            os.close(
                os.open(stderr_path, os.O_CREAT, stat.S_IRUSR | stat.S_IWUSR))

    if vargs.get('command') in ('start', 'run', 'transmit', 'worker',
                                'process'):

        if vargs.get('command') == 'start':
            import daemon
            from daemon.pidfile import TimeoutPIDLockFile
            context = daemon.DaemonContext(pidfile=TimeoutPIDLockFile(pidfile))
        else:
            context = threading.Lock()

        streamer = None
        if vargs.get('command') in ('transmit', 'worker', 'process'):
            streamer = vargs.get('command')

        with context:
            with role_manager(vargs) as vargs:
                run_options = dict(
                    private_data_dir=vargs.get('private_data_dir'),
                    ident=vargs.get('ident'),
                    binary=vargs.get('binary'),
                    playbook=vargs.get('playbook'),
                    module=vargs.get('module'),
                    module_args=vargs.get('module_args'),
                    host_pattern=vargs.get('hosts'),
                    verbosity=vargs.get('v'),
                    quiet=vargs.get('quiet'),
                    rotate_artifacts=vargs.get('rotate_artifacts'),
                    ignore_logging=False,
                    json_mode=vargs.get('json'),
                    omit_event_data=vargs.get('omit_event_data'),
                    only_failed_event_data=vargs.get('only_failed_event_data'),
                    inventory=vargs.get('inventory'),
                    forks=vargs.get('forks'),
                    project_dir=vargs.get('project_dir'),
                    artifact_dir=vargs.get('artifact_dir'),
                    roles_path=[vargs.get('roles_path')]
                    if vargs.get('roles_path') else None,
                    process_isolation=vargs.get('process_isolation'),
                    process_isolation_executable=vargs.get(
                        'process_isolation_executable'),
                    process_isolation_path=vargs.get('process_isolation_path'),
                    process_isolation_hide_paths=vargs.get(
                        'process_isolation_hide_paths'),
                    process_isolation_show_paths=vargs.get(
                        'process_isolation_show_paths'),
                    process_isolation_ro_paths=vargs.get(
                        'process_isolation_ro_paths'),
                    container_image=vargs.get('container_image'),
                    container_volume_mounts=vargs.get(
                        'container_volume_mounts'),
                    container_options=vargs.get('container_options'),
                    directory_isolation_base_path=vargs.get(
                        'directory_isolation_base_path'),
                    resource_profiling=vargs.get('resource_profiling'),
                    resource_profiling_base_cgroup=vargs.get(
                        'resource_profiling_base_cgroup'),
                    resource_profiling_cpu_poll_interval=vargs.get(
                        'resource_profiling_cpu_poll_interval'),
                    resource_profiling_memory_poll_interval=vargs.get(
                        'resource_profiling_memory_poll_interval'),
                    resource_profiling_pid_poll_interval=vargs.get(
                        'resource_profiling_pid_poll_interval'),
                    resource_profiling_results_dir=vargs.get(
                        'resource_profiling_results_dir'),
                    cmdline=vargs.get('cmdline'),
                    limit=vargs.get('limit'),
                    streamer=streamer)
                try:
                    res = run(**run_options)
                except Exception:
                    exc = traceback.format_exc()
                    if stderr_path:
                        with open(stderr_path, 'w+') as stderr_file:
                            stderr_file.write(exc)
                    else:
                        sys.stderr.write(exc)
                    return 1
            return res.rc

    try:
        with open(pidfile, 'r') as f:
            pid = int(f.readline())
    except IOError:
        return 1

    if vargs.get('command') == 'stop':
        Runner.handle_termination(pid, pidfile=pidfile)
        return 0

    elif vargs.get('command') == 'is-alive':
        try:
            os.kill(pid, signal.SIG_DFL)
            return 0
        except OSError:
            return 1
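
The is-alive branch relies on a classic trick: on CPython, signal.SIG_DFL compares equal to 0, and sending "signal 0" with os.kill delivers nothing but still raises OSError when the pid does not exist (or cannot be signalled). Factored out (a sketch):

import os

def is_alive(pid):
    # Probe a pid without actually delivering a signal.
    try:
        os.kill(pid, 0)
        return True
    except OSError:
        return False
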
Example no. 23
def webserver(args):
    """Starts Airflow Webserver"""
    print(settings.HEADER)

    access_logfile = args.access_logfile or conf.get('webserver',
                                                     'access_logfile')
    error_logfile = args.error_logfile or conf.get('webserver',
                                                   'error_logfile')
    num_workers = args.workers or conf.get('webserver', 'workers')
    worker_timeout = args.worker_timeout or conf.get(
        'webserver', 'web_server_worker_timeout')
    ssl_cert = args.ssl_cert or conf.get('webserver', 'web_server_ssl_cert')
    ssl_key = args.ssl_key or conf.get('webserver', 'web_server_ssl_key')
    if not ssl_cert and ssl_key:
        raise AirflowException(
            'An SSL certificate must also be provided for use with ' + ssl_key)
    if ssl_cert and not ssl_key:
        raise AirflowException(
            'An SSL key must also be provided for use with ' + ssl_cert)

    if args.debug:
        print(
            f"Starting the web server on port {args.port} and host {args.hostname}."
        )
        app = create_app(testing=conf.getboolean('core', 'unit_test_mode'))
        app.run(
            debug=True,
            use_reloader=not app.config['TESTING'],
            port=args.port,
            host=args.hostname,
            ssl_context=(ssl_cert, ssl_key) if ssl_cert and ssl_key else None,
        )
    else:
        # This pre-warms the cache and surfaces possible errors
        # earlier (i.e. before daemonization)
        os.environ['SKIP_DAGS_PARSING'] = 'True'
        app = cached_app(None)
        os.environ.pop('SKIP_DAGS_PARSING')

        pid_file, stdout, stderr, log_file = setup_locations(
            "webserver", args.pid, args.stdout, args.stderr, args.log_file)

        # Check if webserver is already running if not, remove old pidfile
        check_if_pidfile_process_is_running(pid_file=pid_file,
                                            process_name="webserver")

        print(
            textwrap.dedent('''\
                Running the Gunicorn Server with:
                Workers: {num_workers} {workerclass}
                Host: {hostname}:{port}
                Timeout: {worker_timeout}
                Logfiles: {access_logfile} {error_logfile}
                =================================================================\
            '''.format(
                num_workers=num_workers,
                workerclass=args.workerclass,
                hostname=args.hostname,
                port=args.port,
                worker_timeout=worker_timeout,
                access_logfile=access_logfile,
                error_logfile=error_logfile,
            )))

        run_args = [
            'gunicorn',
            '--workers',
            str(num_workers),
            '--worker-class',
            str(args.workerclass),
            '--timeout',
            str(worker_timeout),
            '--bind',
            args.hostname + ':' + str(args.port),
            '--name',
            'airflow-webserver',
            '--pid',
            pid_file,
            '--config',
            'python:airflow.www.gunicorn_config',
        ]

        if args.access_logfile:
            run_args += ['--access-logfile', str(args.access_logfile)]

        if args.error_logfile:
            run_args += ['--error-logfile', str(args.error_logfile)]

        if args.daemon:
            run_args += ['--daemon']

        if ssl_cert:
            run_args += ['--certfile', ssl_cert, '--keyfile', ssl_key]

        run_args += ["airflow.www.app:cached_app()"]

        gunicorn_master_proc = None

        def kill_proc(signum, _):  # pylint: disable=unused-argument
            log.info("Received signal: %s. Closing gunicorn.", signum)
            gunicorn_master_proc.terminate()
            with suppress(TimeoutError):
                gunicorn_master_proc.wait(timeout=30)
            if gunicorn_master_proc.poll() is None:
                # still running after the grace period; force-kill
                gunicorn_master_proc.kill()
            sys.exit(0)

        def monitor_gunicorn(gunicorn_master_pid: int):
            # Register signal handlers
            signal.signal(signal.SIGINT, kill_proc)
            signal.signal(signal.SIGTERM, kill_proc)

            # These run forever until SIG{INT, TERM, KILL, ...} signal is sent
            GunicornMonitor(
                gunicorn_master_pid=gunicorn_master_pid,
                num_workers_expected=num_workers,
                master_timeout=conf.getint('webserver',
                                           'web_server_master_timeout'),
                worker_refresh_interval=conf.getint('webserver',
                                                    'worker_refresh_interval',
                                                    fallback=30),
                worker_refresh_batch_size=conf.getint(
                    'webserver', 'worker_refresh_batch_size', fallback=1),
                reload_on_plugin_change=conf.getboolean(
                    'webserver', 'reload_on_plugin_change', fallback=False),
            ).start()

        if args.daemon:
            handle = setup_logging(log_file)

            base, ext = os.path.splitext(pid_file)
            with open(stdout, 'w+') as stdout, open(stderr, 'w+') as stderr:
                ctx = daemon.DaemonContext(
                    pidfile=TimeoutPIDLockFile(f"{base}-monitor{ext}", -1),
                    files_preserve=[handle],
                    stdout=stdout,
                    stderr=stderr,
                )
                with ctx:
                    subprocess.Popen(run_args, close_fds=True)

                    # Read the pid of the gunicorn master, as it will differ
                    # from that of the process spawned above.
                    while True:
                        sleep(0.1)
                        gunicorn_master_proc_pid = read_pid_from_pidfile(
                            pid_file)
                        if gunicorn_master_proc_pid:
                            break

                    # Run Gunicorn monitor
                    gunicorn_master_proc = psutil.Process(
                        gunicorn_master_proc_pid)
                    monitor_gunicorn(gunicorn_master_proc.pid)

        else:
            gunicorn_master_proc = subprocess.Popen(run_args, close_fds=True)
            monitor_gunicorn(gunicorn_master_proc.pid)
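
The daemon branch above busy-waits until gunicorn writes its master pid. The same loop with a timeout added, so a failed launch cannot hang the monitor forever (a sketch; read_pid_from_pidfile is presumably the lockfile.pidlockfile helper the example already uses):

from time import monotonic, sleep

from lockfile.pidlockfile import read_pid_from_pidfile

def wait_for_pid(pid_file, timeout=60.0, poll=0.1):
    # Return the pid recorded in pid_file, or None if it never appears.
    deadline = monotonic() + timeout
    while monotonic() < deadline:
        pid = read_pid_from_pidfile(pid_file)
        if pid:
            return pid
        sleep(poll)
    return None
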
Example no. 24
def main(sys_args=None):
    """Main entry point for ansible-runner executable

    When the ```ansible-runner``` command is executed, this function
    is the main entry point that is called and executed.

    :param sys_args: List of arguments to be parsed by the parser
    :type sys_args: list

    :returns: an instance of SystemExit
    :rtype: SystemExit
    """
    parser = argparse.ArgumentParser(
        description=
        "Use 'ansible-runner' (with no arguments) to see basic usage")

    parser.add_argument('--version', action='version', version=VERSION)

    # positional options

    parser.add_argument(
        "command",
        choices=["run", "start", "stop", "is-alive"],
        metavar="COMMAND",
        help="command directive for controlling ansible-runner execution "
        "(one of 'run', 'start', 'stop', 'is-alive')"
        #help="command directive controlling ansible-runner execution"
    )

    parser.add_argument(
        'private_data_dir',
        help="base directory cotnaining the ansible-runner metadata "
        "(project, inventory, env, etc)")

    # mutually exclusive group

    group = parser.add_mutually_exclusive_group()

    group.add_argument(
        "-p",
        "--playbook",
        default=DEFAULT_RUNNER_PLAYBOOK,
        help="invoke an Ansible playbook from the ansible-runner project "
        "(See Ansible Playbook Options below)")

    group.add_argument(
        "-m",
        "--module",
        default=DEFAULT_RUNNER_MODULE,
        help="invoke an Ansible module directly without a playbook "
        "(See Ansible Module Options below)")

    group.add_argument(
        "-r",
        "--role",
        default=DEFAULT_RUNNER_ROLE,
        help="invoke an Ansible role directly without a playbook "
        "(See Ansible Role Options below)")

    # ansible-runner options

    runner_group = parser.add_argument_group(
        "Ansible Runner Options",
        "configuration options for controlling the ansible-runner "
        "runtime environment.")

    runner_group.add_argument(
        "--debug",
        action="store_true",
        help="enable ansible-runner debug output logging (default=False)")

    runner_group.add_argument(
        "--logfile", help="log output messages to a file (default=None)")

    runner_group.add_argument(
        "-b",
        "--binary",
        default=DEFAULT_RUNNER_BINARY,
        help="specifies the full path pointing to the Ansible binaries "
        "(default={})".format(DEFAULT_RUNNER_BINARY))

    runner_group.add_argument(
        "-i",
        "--ident",
        default=DEFAULT_UUID,
        help="an identifier that will be used when generating the artifacts "
        "directory and can be used to uniquely identify a playbook run "
        "(default={})".format(DEFAULT_UUID))

    runner_group.add_argument(
        "--rotate-artifacts",
        default=0,
        type=int,
        help="automatically clean up old artifact directories after a given "
        "number have been created (default=0, disabled)")

    runner_group.add_argument(
        "--artifact-dir",
        help="optional path for the artifact root directory "
        "(default=<private_data_dir>/artifacts)")

    runner_group.add_argument(
        "--project-dir",
        help="optional path for the location of the playbook content directory "
        "(default=<private_data_dir/project)")

    runner_group.add_argument(
        "--inventory",
        help="optional path for the location of the inventory content directory "
        "(default=<private_data_dir>/inventory)")

    runner_group.add_argument(
        "-j",
        "--json",
        action="store_true",
        help="output the JSON event structure to stdout instead of "
        "Ansible output (default=False)")

    runner_group.add_argument(
        "--omit-event-data",
        action="store_true",
        help="Omits including extra event data in the callback payloads "
        "or the Runner payload data files "
        "(status and stdout still included)")

    runner_group.add_argument(
        "--only-failed-event-data",
        action="store_true",
        help="Only adds extra event data for failed tasks in the callback "
        "payloads or the Runner payload data files "
        "(status and stdout still included for other events)")

    runner_group.add_argument(
        "-q",
        "--quiet",
        action="store_true",
        help="disable all messages sent to stdout/stderr (default=False)")

    runner_group.add_argument(
        "-v",
        action="count",
        help="increase the verbosity with multiple v's (up to 5) of the "
        "ansible-playbook output (default=None)")

    # ansible options

    ansible_group = parser.add_argument_group(
        "Ansible Options",
        "control the ansible[-playbook] execution environment")

    ansible_group.add_argument(
        "--limit",
        help="matches Ansible's ```--limit``` parameter to further constrain "
        "the inventory to be used (default=None)")

    ansible_group.add_argument(
        "--cmdline",
        help="command line options to pass to ansible-playbook at "
        "execution time (default=None)")

    ansible_group.add_argument(
        "--hosts",
        help="define the set of hosts to execute against (default=None) "
        "Note: this parameter only works with -m or -r")

    ansible_group.add_argument(
        "--forks",
        help="matches Ansible's ```--forks``` parameter to set the number "
        "of conconurent processes (default=None)")

    # roles group

    roles_group = parser.add_argument_group(
        "Ansible Role Options",
        "configuration options for directly executing Ansible roles")

    roles_group.add_argument(
        "--roles-path",
        default=DEFAULT_ROLES_PATH,
        help="path used to locate the role to be executed (default=None)")

    roles_group.add_argument(
        "--role-vars",
        help="set of variables to be passed to the role at run time in the "
        "form of 'key1=value1 key2=value2 keyN=valueN'(default=None)")

    roles_group.add_argument(
        "--role-skip-facts",
        action="store_true",
        default=False,
        help="disable fact collection when the role is executed (default=False)"
    )

    # modules groups

    modules_group = parser.add_argument_group(
        "Ansible Module Options",
        "configuration options for directly executing Ansible modules")

    modules_group.add_argument(
        "-a",
        "--args",
        dest='module_args',
        help="set of arguments to be passed to the module at run time in the "
        "form of 'key1=value1 key2=value2 keyN=valueN'(default=None)")

    # playbook options
    playbook_group = parser.add_argument_group(
        "Ansible Playbook Options",
        "configuation options for executing Ansible playbooks")

    playbook_group.add_argument(
        "--process-isolation",
        dest="process_isolation",
        action="store_true",
        help="limits what directories on the filesystem the playbook run "
        "has access to, defaults to /tmp (default=False)")

    playbook_group.add_argument(
        "--process-isolation-executable",
        dest="process_isolation_executable",
        default="bwrap",
        help="process isolation executable that will be used. (default=bwrap)")

    playbook_group.add_argument(
        "--process-isolation-path",
        dest="process_isolation_path",
        default="/tmp",
        help="path that an isolated playbook run will use for staging. "
        "(default=/tmp)")

    playbook_group.add_argument(
        "--process-isolation-hide-paths",
        dest="process_isolation_hide_paths",
        nargs='*',
        help="list of paths on the system that should be hidden from the "
        "playbook run (default=None)")

    playbook_group.add_argument(
        "--process-isolation-show-paths",
        dest="process_isolation_show_paths",
        nargs='*',
        help="list of paths on the system that should be exposed to the "
        "playbook run (default=None)")

    playbook_group.add_argument(
        "--process-isolation-ro-paths",
        dest="process_isolation_ro_paths",
        nargs='*',
        help="list of paths on the system that should be exposed to the "
        "playbook run as read-only (default=None)")

    playbook_group.add_argument(
        "--directory-isolation-base-path",
        dest="directory_isolation_base_path",
        help="copies the project directory to a location in this directory "
        "to prevent multiple simultaneous executions from conflicting "
        "(default=None)")

    playbook_group.add_argument(
        "--resource-profiling",
        dest='resource_profiling',
        action="store_true",
        help="Records resource utilization during playbook execution")

    playbook_group.add_argument(
        "--resource-profiling-base-cgroup",
        dest='resource_profiling_base_cgroup',
        default="ansible-runner",
        help=
        "Top-level cgroup used to collect information on resource utilization. Defaults to ansible-runner"
    )

    playbook_group.add_argument(
        "--resource-profiling-cpu-poll-interval",
        dest='resource_profiling_cpu_poll_interval',
        default=0.25,
        help=
        "Interval (in seconds) between CPU polling for determining CPU usage. Defaults to 0.25"
    )

    playbook_group.add_argument(
        "--resource-profiling-memory-poll-interval",
        dest='resource_profiling_memory_poll_interval',
        default=0.25,
        help=
        "Interval (in seconds) between memory polling for determining memory usage. Defaults to 0.25"
    )

    playbook_group.add_argument(
        "--resource-profiling-pid-poll-interval",
        dest='resource_profiling_pid_poll_interval',
        default=0.25,
        help=
        "Interval (in seconds) between polling PID count for determining number of processes used. Defaults to 0.25"
    )

    playbook_group.add_argument(
        "--resource-profiling-results-dir",
        dest='resource_profiling_results_dir',
        help=
        "Directory where profiling data files should be saved. Defaults to None (profiling_data folder under private data dir is used in this case)."
    )

    if len(sys.argv) == 1:
        parser.print_usage()
        print_common_usage()
        parser.exit(status=0)

    args = parser.parse_args(sys_args)

    if args.command in ('start', 'run'):
        if args.hosts and not (args.module or args.role):
            parser.exit(
                status=1,
                message="The --hosts option can only be used with -m or -r\n")
        if not (args.module or args.role) and not args.playbook:
            parser.exit(
                status=1,
                message=
                "The -p option must be specified when not using -m or -r\n")

    output.configure()

    # enable or disable debug mode
    output.set_debug('enable' if args.debug else 'disable')

    # set the output logfile
    if args.logfile:
        output.set_logfile(args.logfile)

    output.debug('starting debug logging')

    # get the absolute path for start since it is a daemon
    args.private_data_dir = os.path.abspath(args.private_data_dir)

    pidfile = os.path.join(args.private_data_dir, 'pid')

    try:
        os.makedirs(args.private_data_dir, mode=0o700)
    except OSError as exc:
        if exc.errno == errno.EEXIST and os.path.isdir(args.private_data_dir):
            pass
        else:
            raise

    stderr_path = None
    context = None
    if args.command != 'run':
        stderr_path = os.path.join(args.private_data_dir, 'daemon.log')
        if not os.path.exists(stderr_path):
            os.close(
                os.open(stderr_path, os.O_CREAT, stat.S_IRUSR | stat.S_IWUSR))

    if args.command in ('start', 'run'):

        if args.command == 'start':
            import daemon
            from daemon.pidfile import TimeoutPIDLockFile
            context = daemon.DaemonContext(pidfile=TimeoutPIDLockFile(pidfile))
        else:
            context = threading.Lock()

        with context:
            with role_manager(args) as args:
                run_options = dict(
                    private_data_dir=args.private_data_dir,
                    ident=args.ident,
                    binary=args.binary,
                    playbook=args.playbook,
                    module=args.module,
                    module_args=args.module_args,
                    host_pattern=args.hosts,
                    verbosity=args.v,
                    quiet=args.quiet,
                    rotate_artifacts=args.rotate_artifacts,
                    ignore_logging=False,
                    json_mode=args.json,
                    omit_event_data=args.omit_event_data,
                    only_failed_event_data=args.only_failed_event_data,
                    inventory=args.inventory,
                    forks=args.forks,
                    project_dir=args.project_dir,
                    artifact_dir=args.artifact_dir,
                    roles_path=[args.roles_path] if args.roles_path else None,
                    process_isolation=args.process_isolation,
                    process_isolation_executable=args.
                    process_isolation_executable,
                    process_isolation_path=args.process_isolation_path,
                    process_isolation_hide_paths=args.
                    process_isolation_hide_paths,
                    process_isolation_show_paths=args.
                    process_isolation_show_paths,
                    process_isolation_ro_paths=args.process_isolation_ro_paths,
                    directory_isolation_base_path=args.
                    directory_isolation_base_path,
                    resource_profiling=args.resource_profiling,
                    resource_profiling_base_cgroup=args.
                    resource_profiling_base_cgroup,
                    resource_profiling_cpu_poll_interval=args.
                    resource_profiling_cpu_poll_interval,
                    resource_profiling_memory_poll_interval=args.
                    resource_profiling_memory_poll_interval,
                    resource_profiling_pid_poll_interval=args.
                    resource_profiling_pid_poll_interval,
                    resource_profiling_results_dir=args.
                    resource_profiling_results_dir,
                    limit=args.limit)
                if args.cmdline:
                    run_options['cmdline'] = args.cmdline

                try:
                    res = run(**run_options)
                except Exception:
                    exc = traceback.format_exc()
                    if stderr_path:
                        with open(stderr_path, 'w+') as stderr_file:
                            stderr_file.write(exc)
                    else:
                        sys.stderr.write(exc)
                    return 1
            return res.rc

    try:
        with open(pidfile, 'r') as f:
            pid = int(f.readline())
    except IOError:
        return 1

    if args.command == 'stop':
        Runner.handle_termination(pid, pidfile=pidfile)
        return 0

    elif args.command == 'is-alive':
        try:
            os.kill(pid, signal.SIG_DFL)
            return 0
        except OSError:
            return 1
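
Both ansible-runner variants on this page reuse one code path for run and start by choosing the context manager at runtime: a DaemonContext when backgrounding, and a plain threading.Lock, which also supports the with protocol, as a harmless stand-in otherwise. The shape of that trick (a sketch):

import threading

def execution_context(daemonize, pidfile_path):
    if daemonize:
        import daemon
        from daemon.pidfile import TimeoutPIDLockFile
        return daemon.DaemonContext(pidfile=TimeoutPIDLockFile(pidfile_path))
    # a Lock enters and exits cleanly, so the caller's with-block needs no branching
    return threading.Lock()

with execution_context(False, '/tmp/runner.pid'):
    pass  # run the job here
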
Example no. 25
def main():
    args = parse_argv(sys.argv[1:])

    if args.version:
        print('Sendria %s' % __version__)
        sys.exit(0)

    # Do we just want to stop a running daemon?
    if args.stop:
        logger.get().msg(
            'stopping Sendria',
            debug='enabled' if args.debug else 'disabled',
            pidfile=str(args.pidfile) if args.pidfile else None,
        )
        stop(args.pidfile)
        sys.exit(0)

    logger.get().msg(
        'starting Sendria',
        debug='enabled' if args.debug else 'disabled',
        pidfile=str(args.pidfile) if args.pidfile else None,
        db=str(args.db),
        foreground='true' if args.foreground else 'false',
    )

    # Check if the static folder is writable
    if args.autobuild_assets and not os.access(STATIC_DIR, os.W_OK):
        exit_err('autobuilding assets requires write access to %s' %
                 STATIC_DIR)

    if not args.autobuild_assets and (not ASSETS_DIR.exists()
                                      or not list(ASSETS_DIR.glob('*'))):
        exit_err(
            'assets not found. Generate assets using: webassets -m sendria.build_assets build',
            0)

    daemon_kw = {}

    if args.foreground:
        # Do not detach and keep std streams open
        daemon_kw.update({
            'detach_process': False,
            'stdin': sys.stdin,
            'stdout': sys.stdout,
            'stderr': sys.stderr,
        })

    if args.pidfile:
        if args.pidfile.exists():
            pid = read_pidfile(args.pidfile)
            if not pid_exists(pid):
                logger.get().msg(
                    'deleting obsolete PID file (process %s does not exist)' %
                    pid,
                    pid=pid)
                args.pidfile.unlink()
        daemon_kw['pidfile'] = TimeoutPIDLockFile(str(args.pidfile), 5)

    # Unload threading module to avoid error on exit (it's loaded by lockfile)
    if 'threading' in sys.modules:
        del sys.modules['threading']

    context = daemon.DaemonContext(**daemon_kw)
    with context:
        loop = asyncio.get_event_loop()

        run_sendria_servers(loop, args)

        loop.run_forever()

    logger.get().msg('stop signal received')
    loop.close()

    logger.get().msg('terminating')
    sys.exit(0)
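
Sendria's --foreground switch works by telling DaemonContext not to detach and handing it the current standard streams; stripped down (a sketch):

import sys

import daemon

def make_context(foreground):
    kw = {}
    if foreground:
        # stay attached to the terminal: no double-fork, keep std streams
        kw.update(detach_process=False,
                  stdin=sys.stdin, stdout=sys.stdout, stderr=sys.stderr)
    return daemon.DaemonContext(**kw)
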
Example no. 26
def main(sys_args=None):
    parser = argparse.ArgumentParser(description='manage ansible execution')

    parser.add_argument('--version', action='version', version=VERSION)

    parser.add_argument('command',
                        choices=['run', 'start', 'stop', 'is-alive'])

    parser.add_argument(
        'private_data_dir',
        help=
        'Base directory containing Runner metadata (project, inventory, etc.)')

    group = parser.add_mutually_exclusive_group()

    group.add_argument(
        "-m",
        "--module",
        default=DEFAULT_RUNNER_MODULE,
        help="Invoke an Ansible module directly without a playbook")

    group.add_argument("-p",
                       "--playbook",
                       default=DEFAULT_RUNNER_PLAYBOOK,
                       help="The name of the playbook to execute")

    group.add_argument(
        "-r",
        "--role",
        default=DEFAULT_RUNNER_ROLE,
        help="Invoke an Ansible role directly without a playbook")

    parser.add_argument("-b",
                        "--binary",
                        default=DEFAULT_RUNNER_BINARY,
                        help="The full path to ansible[-playbook] binary")

    parser.add_argument("--hosts",
                        help="Define the set of hosts to execute against")

    parser.add_argument(
        "-i",
        "--ident",
        default=uuid4(),
        help="An identifier that will be used when generating the"
        "artifacts directory and can be used to uniquely identify a playbook run"
    )

    parser.add_argument(
        "--rotate-artifacts",
        default=0,
        type=int,
        help=
        "Automatically clean up old artifact directories after a given number has been created, the default is 0 which disables rotation"
    )

    parser.add_argument("--roles-path",
                        default=DEFAULT_ROLES_PATH,
                        help="Path to the Ansible roles directory")

    parser.add_argument("--role-vars",
                        help="Variables to pass to the role at runtime")

    parser.add_argument(
        "--role-skip-facts",
        action="store_true",
        default=False,
        help="Disable fact collection when executing a role directly")

    parser.add_argument(
        "--artifact-dir",
        help=
        "Optional Path for the artifact root directory, by default it is located inside the private data dir"
    )

    parser.add_argument(
        "--project-dir",
        help=
        "Optional Path for the location of the playbook content directory, by default this is 'project' inside the private data dir"
    )

    parser.add_argument(
        "--inventory",
        help="Override the default inventory location in private_data_dir")

    parser.add_argument("--forks", help="Set Ansible concurrency via Forks")

    parser.add_argument(
        "-j",
        "--json",
        action="store_true",
        help=
        "Output the json event structure to stdout instead of Ansible output")

    parser.add_argument(
        "-v",
        action="count",
        help=
        "Increase the verbosity with multiple v's (up to 5) of the ansible-playbook output"
    )

    parser.add_argument("-q",
                        "--quiet",
                        action="store_true",
                        help="Disable all output")

    parser.add_argument(
        "--cmdline",
        help=
        "Command line options to pass to ansible-playbook at execution time")

    parser.add_argument("--debug",
                        action="store_true",
                        help="Enable Runner debug output logging")

    parser.add_argument("--logfile", help="Log output messages to a file")

    parser.add_argument("-a",
                        "--args",
                        dest='module_args',
                        help="Module arguments")

    parser.add_argument(
        "--process-isolation",
        dest='process_isolation',
        action="store_true",
        help=
        "Limits what directories on the filesystem the playbook run has access to, defaults to /tmp"
    )

    parser.add_argument(
        "--process-isolation-executable",
        dest='process_isolation_executable',
        default="bwrap",
        help="Process isolation executable that will be used. Defaults to bwrap"
    )

    parser.add_argument(
        "--process-isolation-path",
        dest='process_isolation_path',
        default="/tmp",
        help=
        "Path that an isolated playbook run will use for staging. Defaults to /tmp"
    )

    parser.add_argument(
        "--process-isolation-hide-paths",
        dest='process_isolation_hide_paths',
        help="List of paths on the system that should be hidden from the "
             "playbook run")

    parser.add_argument(
        "--process-isolation-show-paths",
        dest='process_isolation_show_paths',
        help="List of paths on the system that should be exposed to the "
             "playbook run")

    parser.add_argument(
        "--process-isolation-ro-paths",
        dest='process_isolation_ro_paths',
        help="List of paths on the system that should be exposed to the "
             "playbook run as read-only")

    parser.add_argument(
        "--directory-isolation-base-path",
        dest='directory_isolation_base_path',
        help="Copies the project directory to a location in this directory "
             "to prevent multiple simultaneous executions from conflicting")

    parser.add_argument(
        "--limit",
        help="Matches ansible's --limit parameter to further constrain "
             "the inventory to be used")

    args = parser.parse_args(sys_args)

    output.configure()

    # enable or disable debug mode
    output.set_debug('enable' if args.debug else 'disable')

    # set the output logfile
    if args.logfile:
        output.set_logfile(args.logfile)

    output.debug('starting debug logging')

    # resolve private_data_dir to an absolute path, since the daemonized
    # 'start' command detaches and changes its working directory
    args.private_data_dir = os.path.abspath(args.private_data_dir)

    pidfile = os.path.join(args.private_data_dir, 'pid')

    try:
        os.makedirs(args.private_data_dir, mode=0o700)
    except OSError as exc:
        if exc.errno == errno.EEXIST and os.path.isdir(args.private_data_dir):
            pass
        else:
            raise

    stderr_path = None
    if args.command != 'run':
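        # pre-create the daemon log with owner-only permissions so the
        # detached process has somewhere to report failures; os.open with
        # O_CREAT (and no O_TRUNC) leaves an existing file untouched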
        stderr_path = os.path.join(args.private_data_dir, 'daemon.log')
        if not os.path.exists(stderr_path):
            os.close(
                os.open(stderr_path, os.O_CREAT, stat.S_IRUSR | stat.S_IWUSR))

    if args.command in ('start', 'run'):

        if args.command == 'start':
            import daemon
            from daemon.pidfile import TimeoutPIDLockFile
            context = daemon.DaemonContext(pidfile=TimeoutPIDLockFile(pidfile))
        else:
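            # 'run' stays in the foreground; threading.Lock provides the
            # same context-manager interface as DaemonContext, so the code
            # below can treat both commands uniformly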
            context = threading.Lock()

        with context:
            with role_manager(args) as args:
                run_options = dict(
                    private_data_dir=args.private_data_dir,
                    ident=args.ident,
                    binary=args.binary,
                    playbook=args.playbook,
                    module=args.module,
                    module_args=args.module_args,
                    host_pattern=args.hosts,
                    verbosity=args.v,
                    quiet=args.quiet,
                    rotate_artifacts=args.rotate_artifacts,
                    ignore_logging=False,
                    json_mode=args.json,
                    inventory=args.inventory,
                    forks=args.forks,
                    project_dir=args.project_dir,
                    roles_path=[args.roles_path] if args.roles_path else None,
                    process_isolation=args.process_isolation,
                    process_isolation_executable=args.process_isolation_executable,
                    process_isolation_path=args.process_isolation_path,
                    process_isolation_hide_paths=args.process_isolation_hide_paths,
                    process_isolation_show_paths=args.process_isolation_show_paths,
                    process_isolation_ro_paths=args.process_isolation_ro_paths,
                    directory_isolation_base_path=args.directory_isolation_base_path,
                    limit=args.limit)
                if args.cmdline:
                    run_options['cmdline'] = args.cmdline

                try:
                    res = run(**run_options)
                except Exception:
                    exc = traceback.format_exc()
                    if stderr_path:
                        with open(stderr_path, 'w+') as f:
                            f.write(exc)
                    else:
                        sys.stderr.write(exc)
                    return 1
            return res.rc

    try:
        with open(pidfile, 'r') as f:
            pid = int(f.readline())
    except IOError:
        return 1

    if args.command == 'stop':
        Runner.handle_termination(pid)
        return 0

    elif args.command == 'is-alive':
        try:
            # signal.SIG_DFL is 0, so this delivers no signal; it only
            # checks that the process exists
            os.kill(pid, signal.SIG_DFL)
            return 0
        except OSError:
            return 1
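
The is-alive branch above works because sending signal 0 performs only an existence check. A minimal, self-contained sketch of the same pidfile convention (the pidfile path passed in is hypothetical; the real CLI derives it from private_data_dir):

import os

def is_alive(pidfile):
    """Return True if the process recorded in pidfile is running.

    Sketch of the liveness check used above: os.kill(pid, 0) delivers
    no signal and raises OSError when no such process exists
    (signal.SIG_DFL equals 0).
    """
    try:
        with open(pidfile, 'r') as f:
            pid = int(f.readline())
    except (IOError, ValueError):
        return False
    try:
        os.kill(pid, 0)
        return True
    except OSError:
        return False

# hypothetical usage, e.g. after starting a daemon in /tmp/demo:
# is_alive('/tmp/demo/pid')
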
Example no. 27
def main():
    #  Logger
    _logger = logger.Logger('middleware')
    get_logger = _logger.getLogger()

    # Workaround for development
    modpath = os.path.realpath(os.path.join(
        os.path.dirname(os.path.realpath(__file__)),
        '..',
    ))
    if modpath not in sys.path:
        sys.path.insert(0, modpath)

    parser = argparse.ArgumentParser()
    parser.add_argument('restart', nargs='?')
    parser.add_argument('--foreground', '-f', action='store_true')
    parser.add_argument('--debug-level', default='DEBUG', choices=[
        'DEBUG',
        'INFO',
        'WARN',
        'ERROR',
    ])
    parser.add_argument('--log-handler', choices=[
        'console',
        'file',
    ])
    args = parser.parse_args()

    if args.log_handler:
        log_handlers = [args.log_handler]
    else:
        log_handlers = ['console' if args.foreground else 'file']

    pidpath = '/var/run/middlewared.pid'

    if args.restart:
        if os.path.exists(pidpath):
            with open(pidpath, 'r') as f:
                pid = int(f.read().strip())
            os.kill(pid, 15)  # 15 == SIGTERM

    if not args.foreground:
        _logger.configure_logging('file')
        daemonc = DaemonContext(
            pidfile=TimeoutPIDLockFile(pidpath),
            detach_process=True,
            stdout=logger.LoggerStream(get_logger),
            stderr=logger.LoggerStream(get_logger),
        )
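        # open() detaches the process immediately; the matching close() at
        # the end of main() releases the pidfile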
        daemonc.open()
    elif 'file' in log_handlers:
        _logger.configure_logging('file')
        sys.stdout = logger.LoggerStream(get_logger)
        sys.stderr = logger.LoggerStream(get_logger)
    elif 'console' in log_handlers:
        _logger.configure_logging('console')
    else:
        _logger.configure_logging('file')

    setproctitle.setproctitle('middlewared')
    # Workaround to tell Django not to set up logging on its own
    os.environ['MIDDLEWARED'] = str(os.getpid())

    Middleware().run()
    if not args.foreground:
        daemonc.close()
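
logger.LoggerStream is not shown in this snippet. Assuming it is a file-like adapter that forwards writes to a standard logging.Logger (so that the daemon's stdout/stderr end up in the log file), a minimal stand-in could look like this:

import logging

class LoggerStream:
    """File-like object that forwards writes to a logging.Logger.

    A hypothetical stand-in for the project's logger.LoggerStream; the
    forwarding behavior is an assumption, not the real implementation.
    """

    def __init__(self, logger, level=logging.DEBUG):
        self.logger = logger
        self.level = level

    def write(self, message):
        # skip the bare newlines that print() emits as separate writes
        message = message.strip()
        if message:
            self.logger.log(self.level, message)

    def flush(self):
        # required by the file protocol; logging handlers flush themselves
        pass
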
Example no. 28
def main():
    parser = argparse.ArgumentParser(description='manage ansible execution')

    parser.add_argument('--version', action='version', version=VERSION)

    parser.add_argument('command', choices=['run', 'start', 'stop', 'is-alive'])

    parser.add_argument('private_data_dir',
                        help='Base directory containing Runner metadata (project, inventory, etc.)')

    group = parser.add_mutually_exclusive_group()

    group.add_argument("-p", "--playbook", default=DEFAULT_RUNNER_PLAYBOOK,
                       help="The name of the playbook to execute")

    group.add_argument("-r", "--role", default=DEFAULT_RUNNER_ROLE,
                       help="Invoke an Ansible role directly without a playbook")

    parser.add_argument("--hosts",
                        help="Define the set of hosts to execute against")

    parser.add_argument("-i", "--ident",
                        default=uuid4(),
                        help="An identifier that will be used when generating the"
                             "artifacts directory and can be used to uniquely identify a playbook run")

    parser.add_argument("--roles-path", default=DEFAULT_ROLES_PATH,
                        help="Path to the Ansible roles directory")

    parser.add_argument("--role-vars",
                        help="Variables to pass to the role at runtime")

    parser.add_argument("--role-skip-facts", action="store_true", default=False,
                        help="Disable fact collection when executing a role directly")

    parser.add_argument("--inventory")
    parser.add_argument("-j", "--json", action="store_true",
                        help="Output the json event structure to stdout instead of Ansible output")

    parser.add_argument("-v", action="count",
                        help="Increase the verbosity with multiple v's (up to 5) of the ansible-playbook output")

    args = parser.parse_args()

    pidfile = os.path.join(args.private_data_dir, 'pid')

    try:
        os.makedirs(args.private_data_dir)
    except OSError as exc:
        if exc.errno == errno.EEXIST and os.path.isdir(args.private_data_dir):
            pass
        else:
            raise

    if args.command != 'run':
        stderr_path = os.path.join(args.private_data_dir, 'daemon.log')
        if not os.path.exists(stderr_path):
            os.close(os.open(stderr_path, os.O_CREAT, stat.S_IRUSR | stat.S_IWUSR))
        stderr = open(stderr_path, 'w+')

    if args.command in ('start', 'run'):
        if args.role:

            role = {'name': args.role}
            if args.role_vars:
                role_vars = {}
                for item in shlex.split(args.role_vars):
                    key, value = item.split('=', 1)  # split on the first '=' only
                    role_vars[key] = value
                role['vars'] = role_vars

            kwargs = dict(private_data_dir=args.private_data_dir,
                          json_mode=args.json)

            playbook = None
            tmpvars = None

            rc = 255

            try:
                play = [{'hosts': args.hosts if args.hosts is not None else "all",
                         'gather_facts': not args.role_skip_facts,
                         'roles': [role]}]

                path = os.path.abspath(os.path.join(args.private_data_dir, 'project'))
                filename = str(uuid4().hex)

                playbook = dump_artifact(json.dumps(play), path, filename)
                kwargs['playbook'] = playbook
                print('using playbook file %s' % playbook)

                if args.inventory:
                    inventory_file = os.path.abspath(os.path.join(args.private_data_dir, 'inventory', args.inventory))
                    kwargs['inventory'] = inventory_file
                    print('using inventory file %s' % inventory_file)

                roles_path = args.roles_path or os.path.join(args.private_data_dir, 'roles')
                roles_path = os.path.abspath(roles_path)
                print('setting ANSIBLE_ROLES_PATH to %s' % roles_path)

                # the envvars passed to run() will overwrite an existing
                # envvars file, so capture its current contents (if any)
                # and restore them once done
                envvars = {}
                curvars = os.path.join(args.private_data_dir, 'env/envvars')
                if os.path.exists(curvars):
                    with open(curvars, 'rb') as f:
                        tmpvars = f.read()
                        envvars = safe_load(tmpvars)
                envvars['ANSIBLE_ROLES_PATH'] = roles_path
                kwargs['envvars'] = envvars

                res = run(**kwargs)
                rc = res.rc

            finally:
                if playbook and os.path.isfile(playbook):
                    os.remove(playbook)

                # if a previous envvars existed in the private_data_dir,
                # restore the original file contents
                if tmpvars:
                    with open(curvars, 'wb') as f:
                        f.write(tmpvars)

            sys.exit(rc)

        elif args.command == 'start':
            import daemon
            from daemon.pidfile import TimeoutPIDLockFile
            context = daemon.DaemonContext(
                pidfile=TimeoutPIDLockFile(pidfile),
                stderr=stderr
            )
        else:
            context = threading.Lock()

        with context:
            run_options = dict(private_data_dir=args.private_data_dir,
                               ident=args.ident,
                               playbook=args.playbook,
                               verbosity=args.v,
                               json_mode=args.json)
            if args.hosts is not None:
                run_options.update(inventory=args.hosts)
            run(**run_options)
            sys.exit(0)

    try:
        with open(pidfile, 'r') as f:
            pid = int(f.readline())
    except IOError:
        sys.exit(1)

    if args.command == 'stop':
        try:
            with open(os.path.join(args.private_data_dir, 'args'), 'r') as f:
                Runner.handle_termination(pid, json.load(f), 'bwrap')
        except IOError:
            Runner.handle_termination(pid, [], 'bwrap')

    elif args.command == 'is-alive':
        try:
            os.kill(pid, signal.SIG_DFL)
            sys.exit(0)
        except OSError:
            sys.exit(1)
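
dump_artifact used above comes from ansible-runner's utility module and is not shown in the snippet. Assuming its contract is "write the serialized content to path/filename and return the resulting file path" (the real helper also creates missing directories and takes a file lock), a simplified stand-in:

import os

def dump_artifact(obj, path, filename):
    """Write obj to path/filename and return the full path.

    Simplified sketch of the assumed contract; the real ansible-runner
    helper also handles locking and permissions.
    """
    os.makedirs(path, exist_ok=True)  # ensure the target directory exists
    artifact_path = os.path.join(path, filename)
    with open(artifact_path, 'w') as f:
        f.write(obj)
    return artifact_path
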
Example no. 29
File: run.py Project: MBcom/awx
    args = parser.parse_args()

    private_data_dir = args.private_data_dir
    pidfile = os.path.join(private_data_dir, 'pid')

    if args.command == 'start':
        # create a file to log stderr in case the daemonized process throws
        # an exception before it gets to `pexpect.spawn`
        stderr_path = os.path.join(private_data_dir, 'artifacts', 'daemon.log')
        if not os.path.exists(stderr_path):
            os.mknod(stderr_path, stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR)
        stderr = open(stderr_path, 'w+')

        import daemon
        from daemon.pidfile import TimeoutPIDLockFile
        context = daemon.DaemonContext(pidfile=TimeoutPIDLockFile(pidfile),
                                       stderr=stderr)
        with context:
            __run__(private_data_dir)
        sys.exit(0)

    try:
        with open(pidfile, 'r') as f:
            pid = int(f.readline())
    except IOError:
        sys.exit(1)

    if args.command == 'stop':
        try:
            with open(os.path.join(private_data_dir, 'args'), 'r') as f:
                handle_termination(pid, json.load(f), 'bwrap')
Example no. 30
def main():
    parser = argparse.ArgumentParser(description='manage ansible execution')

    parser.add_argument('--version', action='version', version=VERSION)

    parser.add_argument('command', choices=['run', 'start', 'stop', 'is-alive'])

    parser.add_argument('private_data_dir',
                        help='Base directory containing Runner metadata (project, inventory, etc.)')

    group = parser.add_mutually_exclusive_group()

    group.add_argument("-p", "--playbook", default=DEFAULT_RUNNER_PLAYBOOK,
                       help="The name of the playbook to execute")

    group.add_argument("-r", "--role", default=DEFAULT_RUNNER_ROLE,
                       help="Invoke an Ansible role directly without a playbook")

    parser.add_argument("--hosts",
                        help="Define the set of hosts to execute against")

    parser.add_argument("-i", "--ident",
                        default=uuid4(),
                        help="An identifier that will be used when generating the"
                             "artifacts directory and can be used to uniquely identify a playbook run")

    parser.add_argument("--roles-path", default=DEFAULT_ROLES_PATH,
                        help="Path to the Ansible roles directory")

    parser.add_argument("--role-vars",
                        help="Variables to pass to the role at runtime")

    parser.add_argument("--role-skip-facts", action="store_true", default=False,
                        help="Disable fact collection when executing a role directly")
    parser.add_argument("--artifact-dir",
                        help="Optional Path for the artifact root directory, by default it is located inside the private data dir")

    parser.add_argument("--inventory",
                        help="Override the default inventory location in private_data_dir")

    parser.add_argument("-j", "--json", action="store_true",
                        help="Output the json event structure to stdout instead of Ansible output")

    parser.add_argument("-v", action="count",
                        help="Increase the verbosity with multiple v's (up to 5) of the ansible-playbook output")

    parser.add_argument("-q", "--quiet", action="store_true",
                        help="Disable all output")

    parser.add_argument("--cmdline",
                        help="Command line options to pass to ansible-playbook at execution time")
    parser.add_argument("--debug", action="store_true",
                        help="Enable debug output logging")

    parser.add_argument("--logfile",
                        help="Log output messages to a file")

    args = parser.parse_args()

    output.configure()

    # enable or disable debug mode
    output.set_debug('enable' if args.debug else 'disable')

    # set the output logfile
    if args.logfile:
        output.set_logfile(args.logfile)

    output.debug('starting debug logging')

    pidfile = os.path.join(args.private_data_dir, 'pid')

    try:
        os.makedirs(args.private_data_dir)
    except OSError as exc:
        if exc.errno == errno.EEXIST and os.path.isdir(args.private_data_dir):
            pass
        else:
            raise

    if args.command != 'run':
        stderr_path = os.path.join(args.private_data_dir, 'daemon.log')
        if not os.path.exists(stderr_path):
            os.close(os.open(stderr_path, os.O_CREAT, stat.S_IRUSR | stat.S_IWUSR))
        stderr = open(stderr_path, 'w+')

    if args.command in ('start', 'run'):
        if args.role:
            role = {'name': args.role}
            if args.role_vars:
                role_vars = {}
                for item in shlex.split(args.role_vars):
                    key, value = item.split('=', 1)  # split on the first '=' only
                    role_vars[key] = value
                role['vars'] = role_vars

            kwargs = dict(private_data_dir=args.private_data_dir,
                          json_mode=args.json)
            if args.artifact_dir:
                kwargs['artifact_dir'] = args.artifact_dir

            project_path = os.path.abspath(os.path.join(args.private_data_dir, 'project'))
            project_exists = os.path.exists(project_path)

            env_path = os.path.join(args.private_data_dir, 'env')
            env_exists = os.path.exists(env_path)

            envvars_path = os.path.join(args.private_data_dir, 'env/envvars')
            envvars_exists = os.path.exists(envvars_path)

            if args.cmdline:
                kwargs['cmdline'] = args.cmdline

            playbook = None
            tmpvars = None

            rc = 255
            errmsg = None

            try:
                play = [{'hosts': args.hosts if args.hosts is not None else "all",
                         'gather_facts': not args.role_skip_facts,
                         'roles': [role]}]

                filename = str(uuid4().hex)

                playbook = dump_artifact(json.dumps(play), project_path, filename)
                kwargs['playbook'] = playbook
                output.debug('using playbook file %s' % playbook)

                if args.inventory:
                    inventory_file = os.path.abspath(os.path.join(args.private_data_dir, 'inventory', args.inventory))
                    if not os.path.exists(inventory_file):
                        raise AnsibleRunnerException('location specified by --inventory does not exist')
                    kwargs['inventory'] = inventory_file
                    output.debug('using inventory file %s' % inventory_file)

                roles_path = args.roles_path or os.path.join(args.private_data_dir, 'roles')
                roles_path = os.path.abspath(roles_path)
                output.debug('setting ANSIBLE_ROLES_PATH to %s' % roles_path)

                envvars = {}
                if envvars_exists:
                    with open(envvars_path, 'rb') as f:
                        tmpvars = f.read()
                        envvars = safe_load(tmpvars)

                envvars['ANSIBLE_ROLES_PATH'] = roles_path
                kwargs['envvars'] = envvars

                res = run(**kwargs)
                rc = res.rc

            except AnsibleRunnerException as exc:
                errmsg = str(exc)

            finally:
                if not project_exists and os.path.exists(project_path):
                    logger.debug('removing dynamically generated project folder')
                    shutil.rmtree(project_path)
                elif playbook and os.path.isfile(playbook):
                    logger.debug('removing dynamically generated playbook')
                    os.remove(playbook)

                # if a previous envvars existed in the private_data_dir,
                # restore the original file contents
                if tmpvars:
                    with open(envvars_path, 'wb') as f:
                        f.write(tmpvars)
                elif not envvars_exists and os.path.exists(envvars_path):
                    logger.debug('removing dynamically generated envvars file')
                    os.remove(envvars_path)

                # since ansible-runner created the env folder, remove it
                if not env_exists and os.path.exists(env_path):
                    logger.debug('removing dynamically generated env folder')
                    shutil.rmtree(env_path)

            if errmsg:
                print('ansible-runner: ERROR: %s' % errmsg)

            sys.exit(rc)

        elif args.command == 'start':
            import daemon
            from daemon.pidfile import TimeoutPIDLockFile
            context = daemon.DaemonContext(
                pidfile=TimeoutPIDLockFile(pidfile),
                stderr=stderr
            )
        else:
            context = threading.Lock()

        with context:
            run_options = dict(private_data_dir=args.private_data_dir,
                               ident=args.ident,
                               playbook=args.playbook,
                               verbosity=args.v,
                               quiet=args.quiet,
                               json_mode=args.json)

            if args.hosts is not None:
                run_options.update(inventory=args.hosts)

            if args.cmdline:
                run_options['cmdline'] = args.cmdline

            run(**run_options)
            sys.exit(0)

    try:
        with open(pidfile, 'r') as f:
            pid = int(f.readline())
    except IOError:
        sys.exit(1)

    if args.command == 'stop':
        try:
            with open(os.path.join(args.private_data_dir, 'args'), 'r') as f:
                Runner.handle_termination(pid, json.load(f), 'bwrap')
        except IOError:
            Runner.handle_termination(pid, [], 'bwrap')

    elif args.command == 'is-alive':
        try:
            os.kill(pid, signal.SIG_DFL)
            sys.exit(0)
        except OSError:
            sys.exit(1)
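
For reference, the play that the role branch above dumps into the project directory is just a one-item list. Assuming a hypothetical invocation such as ansible-runner run /tmp/demo -r myrole --role-vars 'foo=bar', the generated structure would be:

play = [{
    'hosts': 'all',        # no --hosts given, so the play targets "all"
    'gather_facts': True,  # --role-skip-facts was not set
    'roles': [
        {'name': 'myrole', 'vars': {'foo': 'bar'}},
    ],
}]
# json.dumps(play) is then written to project/<uuid4().hex> via dump_artifact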