Example 1
	def stop(self):
		pidFile = TimeoutPIDLockFile(self._pidFile)

		if not self._isRunningAndBreak(pidFile):
			raise NotRunning()

		pid = pidFile.read_pid()
		try:
			os.kill(pid, signal.SIGTERM)
		except OSError:
			# the process may already be gone or the PID invalid; nothing to do
			pass
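A note on the helper used above: stop() relies on self._isRunningAndBreak(pidFile), which the example does not show. A minimal sketch of what such a check typically does (clear a stale lock and report whether the daemon is still running), using psutil in the same spirit as Example 2 below; the helper body here is an assumption, not the project's actual code:

import psutil
from daemon.pidfile import TimeoutPIDLockFile


def is_running_and_break(pid_file: TimeoutPIDLockFile) -> bool:
    """Return True if the locked PID belongs to a live process;
    break the lock and return False when it is stale or absent."""
    pid = pid_file.read_pid()
    if pid is None:
        return False
    if not psutil.pid_exists(pid):
        # lock file left behind by a process that is no longer running
        pid_file.break_lock()
        return False
    return True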
Example 2
    def _daemonize(self):
        import psutil
        from daemon import DaemonContext
        try:
            from daemon.pidfile import TimeoutPIDLockFile as PIDLockFile
        except ImportError:
            from daemon.pidlockfile import PIDLockFile

        pidlock = PIDLockFile('/var/run/fedmsg/%s.pid' % self.name)

        pid = pidlock.read_pid()
        if pid and not psutil.pid_exists(pid):
            self.log.warn("PID file exists but with no proc:  coup d'etat!")
            pidlock.break_lock()

        output = open('/var/log/fedmsg/%s.log' % self.name, 'a')
        daemon = DaemonContext(pidfile=pidlock, stdout=output, stderr=output)
        daemon.terminate = self._handle_signal

        with daemon:
            return self.run()
Example 3
def kerberos(args):
    """Start a kerberos ticket renewer"""
    print(settings.HEADER)

    if args.daemon:
        pid, stdout, stderr, _ = setup_locations(
            "kerberos", args.pid, args.stdout, args.stderr, args.log_file
        )
        with open(stdout, 'w+') as stdout_handle, open(stderr, 'w+') as stderr_handle:
            ctx = daemon.DaemonContext(
                pidfile=TimeoutPIDLockFile(pid, -1),
                stdout=stdout_handle,
                stderr=stderr_handle,
            )

            with ctx:
                krb.run(principal=args.principal, keytab=args.keytab)
    else:
        krb.run(principal=args.principal, keytab=args.keytab)
Example 4
def scheduler(args):
    """Starts Airflow Scheduler"""
    print(settings.HEADER)
    job = SchedulerJob(
        subdir=process_subdir(args.subdir),
        num_runs=args.num_runs,
        do_pickle=args.do_pickle,
    )
    scheduler_name = SchedulerFactory.get_scheduler_name()
    if scheduler_name == SchedulerFactory.DEFAULT_SCHEDULER:
        pass
    elif scheduler_name == SchedulerFactory.EVENT_BASED_SCHEDULER:
        job = EventBasedSchedulerJob(dag_directory=process_subdir(args.subdir),
                                     server_uri=args.server_uri)
    else:
        scheduler_class = SchedulerFactory.get_default_scheduler()
        job = scheduler_class()

    if args.daemon:
        pid, stdout, stderr, log_file = setup_locations(
            "scheduler", args.pid, args.stdout, args.stderr, args.log_file)
        handle = setup_logging(log_file)
        stdout = open(stdout, 'w+')
        stderr = open(stderr, 'w+')

        ctx = daemon.DaemonContext(
            pidfile=TimeoutPIDLockFile(pid, -1),
            files_preserve=[handle],
            stdout=stdout,
            stderr=stderr,
        )
        with ctx:
            job.run()

        stdout.close()
        stderr.close()
    else:
        signal.signal(signal.SIGINT, sigint_handler)
        signal.signal(signal.SIGTERM, sigint_handler)
        signal.signal(signal.SIGQUIT, sigquit_handler)
        job.run()
Example 5
def start_daemon(pidfile_path, log_path, acquire_timeout=5):
    pidfile = TimeoutPIDLockFile(pidfile_path,
                                 acquire_timeout=acquire_timeout)
    if is_pidfile_stale(pidfile):
        pidfile.break_lock()
    if pidfile.is_locked():
        pid = pidfile.read_pid()
        if pid is not None:
            puts(colored.red('Already running at pid: %d' % pid))
        else:
            puts(colored.red('Already running'))
        return None
    logfile = open(log_path, 'w+t')
    puts(colored.blue('Starting'))
    return DaemonContext(pidfile=pidfile, stdout=logfile, stderr=logfile)
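start_daemon() returns either None or a DaemonContext that has not been entered yet, so the caller is expected to enter it itself. A brief usage sketch under that assumption (serve() and the paths are illustrative placeholders):

ctx = start_daemon('/tmp/myapp.pid', '/tmp/myapp.log')
if ctx is not None:
    with ctx:       # detaches the process and acquires the PID lock here
        serve()     # placeholder for the daemon's main work loop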
Example 6
def flower(args):
    """Starts Flower, Celery monitoring tool"""
    options = [
        conf.get('celery', 'BROKER_URL'),
        f"--address={args.hostname}",
        f"--port={args.port}",
    ]

    if args.broker_api:
        options.append(f"--broker-api={args.broker_api}")

    if args.url_prefix:
        options.append(f"--url-prefix={args.url_prefix}")

    if args.basic_auth:
        options.append(f"--basic-auth={args.basic_auth}")

    if args.flower_conf:
        options.append(f"--conf={args.flower_conf}")

    flower_cmd = FlowerCommand()

    if args.daemon:
        pidfile, stdout, stderr, _ = setup_locations(
            process="flower",
            pid=args.pid,
            stdout=args.stdout,
            stderr=args.stderr,
            log=args.log_file,
        )
        with open(stdout, "w+") as stdout, open(stderr, "w+") as stderr:
            ctx = daemon.DaemonContext(
                pidfile=TimeoutPIDLockFile(pidfile, -1),
                stdout=stdout,
                stderr=stderr,
            )
            with ctx:
                flower_cmd.execute_from_commandline(argv=options)
    else:
        flower_cmd.execute_from_commandline(argv=options)
Example 7
def kerberos(args):  # noqa
    print(settings.HEADER)
    import airflow.security.kerberos

    if args.daemon:
        pid, stdout, stderr, log_file = setup_locations("kerberos", args.pid, args.stdout, args.stderr, args.log_file)
        stdout = open(stdout, 'w+')
        stderr = open(stderr, 'w+')

        ctx = daemon.DaemonContext(
            pidfile=TimeoutPIDLockFile(pid, -1),
            stdout=stdout,
            stderr=stderr,
        )

        with ctx:
            airflow.security.kerberos.run()

        stdout.close()
        stderr.close()
    else:
        airflow.security.kerberos.run()
Example 8
def triggerer(args):
    """Starts Airflow Triggerer"""
    settings.MASK_SECRETS_IN_LOGS = True
    print(settings.HEADER)
    job = TriggererJob(capacity=args.capacity)

    if args.daemon:
        pid, stdout, stderr, log_file = setup_locations(
            "triggerer", args.pid, args.stdout, args.stderr, args.log_file)
        handle = setup_logging(log_file)
        with open(stdout, 'w+') as stdout_handle, open(stderr,
                                                       'w+') as stderr_handle:
            ctx = daemon.DaemonContext(
                pidfile=TimeoutPIDLockFile(pid, -1),
                files_preserve=[handle],
                stdout=stdout_handle,
                stderr=stderr_handle,
            )
            with ctx:
                job.run()

    else:
        # There is a bug in CPython (fixed in March 2022 but not yet released) that
        # makes asyncio handle SIGTERM improperly by using async-unsafe
        # functions; the triggerer can receive SIGPIPE while handling
        # SIGTERM/SIGINT and deadlock itself. Until the bug is handled
        # we should rely on the standard handling of the signals rather than
        # adding our own signal handlers. It seems that even if our signal handler
        # just ran exit(0), it caused a race condition that led to the hanging.
        #
        # More details:
        #   * https://bugs.python.org/issue39622
        #   * https://github.com/python/cpython/issues/83803
        #
        # signal.signal(signal.SIGINT, sigint_handler)
        # signal.signal(signal.SIGTERM, sigint_handler)
        signal.signal(signal.SIGQUIT, sigquit_handler)
        job.run()
Example 9
def scheduler(args):
    """Starts Airflow Scheduler"""
    print(settings.HEADER)

    if args.daemon:
        pid, stdout, stderr, log_file = setup_locations(
            "scheduler", args.pid, args.stdout, args.stderr, args.log_file
        )
        handle = setup_logging(log_file)
        with open(stdout, 'w+') as stdout_handle, open(stderr, 'w+') as stderr_handle:
            ctx = daemon.DaemonContext(
                pidfile=TimeoutPIDLockFile(pid, -1),
                files_preserve=[handle],
                stdout=stdout_handle,
                stderr=stderr_handle,
            )
            with ctx:
                _run_scheduler_job(args=args)
    else:
        signal.signal(signal.SIGINT, sigint_handler)
        signal.signal(signal.SIGTERM, sigint_handler)
        signal.signal(signal.SIGQUIT, sigquit_handler)
        _run_scheduler_job(args=args)
Example 10
def flower(args):
    broka = conf.get('celery', 'BROKER_URL')
    address = '--address={}'.format(args.hostname)
    port = '--port={}'.format(args.port)
    api = ''
    if args.broker_api:
        api = '--broker_api=' + args.broker_api

    flower_conf = ''
    if args.flower_conf:
        flower_conf = '--conf=' + args.flower_conf

    if args.daemon:
        pid, stdout, stderr, log_file = setup_locations(
            "flower", args.pid, args.stdout, args.stderr, args.log_file)
        stdout = open(stdout, 'w+')
        stderr = open(stderr, 'w+')

        ctx = daemon.DaemonContext(
            pidfile=TimeoutPIDLockFile(pid, -1),
            stdout=stdout,
            stderr=stderr,
        )

        with ctx:
            os.execvp("flower",
                      ['flower', '-b', broka, address, port, api, flower_conf])

        stdout.close()
        stderr.close()
    else:
        signal.signal(signal.SIGINT, sigint_handler)
        signal.signal(signal.SIGTERM, sigint_handler)

        os.execvp("flower",
                  ['flower', '-b', broka, address, port, api, flower_conf])
Example 11
def main(sys_args=None):
    """Main entry point for ansible-runner executable

    When the ```ansible-runner``` command is executed, this function
    is the main entry point that is called and executed.

    :param sys_args: List of arguments to be parsed by the parser
    :type sys_args: list

    :returns: an instance of SystemExit
    :rtype: SystemExit
    """
    parser = argparse.ArgumentParser(
        description=
        "Use 'ansible-runner' (with no arguments) to see basic usage")

    parser.add_argument('--version', action='version', version=VERSION)

    # positional options

    parser.add_argument(
        "command",
        choices=["run", "start", "stop", "is-alive"],
        metavar="COMMAND",
        help="command directive for controlling ansible-runner execution "
        "(one of 'run', 'start', 'stop', 'is-alive')"
        #help="command directive controlling ansible-runner execution"
    )

    parser.add_argument(
        'private_data_dir',
        help="base directory cotnaining the ansible-runner metadata "
        "(project, inventory, env, etc)")

    # mutually exclusive group

    group = parser.add_mutually_exclusive_group()

    group.add_argument(
        "-p",
        "--playbook",
        default=DEFAULT_RUNNER_PLAYBOOK,
        help="invoke an Ansible playbook from the ansible-runner project "
        "(See Ansible Playbook Options below)")

    group.add_argument(
        "-m",
        "--module",
        default=DEFAULT_RUNNER_MODULE,
        help="invoke an Ansible module directly without a playbook "
        "(See Ansible Module Options below)")

    group.add_argument(
        "-r",
        "--role",
        default=DEFAULT_RUNNER_ROLE,
        help="invoke an Ansible role directly without a playbook "
        "(See Ansible Role Options below)")

    # ansible-runner options

    runner_group = parser.add_argument_group(
        "Ansible Runner Options",
        "configuration options for controlling the ansible-runner "
        "runtime environment.")

    runner_group.add_argument(
        "--debug",
        action="store_true",
        help="enable ansible-runner debug output logging (default=False)")

    runner_group.add_argument(
        "--logfile", help="log output messages to a file (default=None)")

    runner_group.add_argument(
        "-b",
        "--binary",
        default=DEFAULT_RUNNER_BINARY,
        help="specifies the full path pointing to the Ansible binaries "
        "(default={})".format(DEFAULT_RUNNER_BINARY))

    runner_group.add_argument(
        "-i",
        "--ident",
        default=DEFAULT_UUID,
        help="an identifier that will be used when generating the artifacts "
        "directory and can be used to uniquely identify a playbook run "
        "(default={})".format(DEFAULT_UUID))

    runner_group.add_argument(
        "--rotate-artifacts",
        default=0,
        type=int,
        help="automatically clean up old artifact directories after a given "
        "number have been created (default=0, disabled)")

    runner_group.add_argument(
        "--artifact-dir",
        help="optional path for the artifact root directory "
        "(default=<private_data_dir>/artifacts)")

    runner_group.add_argument(
        "--project-dir",
        help="optional path for the location of the playbook content directory "
        "(default=<private_data_dir/project)")

    runner_group.add_argument(
        "--inventory",
        help="optional path for the location of the inventory content directory "
        "(default=<private_data_dir>/inventory)")

    runner_group.add_argument(
        "-j",
        "--json",
        action="store_true",
        help="output the JSON event structure to stdout instead of "
        "Ansible output (default=False)")

    runner_group.add_argument(
        "--omit-event-data",
        action="store_true",
        help="Omits including extra event data in the callback payloads "
        "or the Runner payload data files "
        "(status and stdout still included)")

    runner_group.add_argument(
        "--only-failed-event-data",
        action="store_true",
        help="Only adds extra event data for failed tasks in the callback "
        "payloads or the Runner payload data files "
        "(status and stdout still included for other events)")

    runner_group.add_argument(
        "-q",
        "--quiet",
        action="store_true",
        help="disable all messages sent to stdout/stderr (default=False)")

    runner_group.add_argument(
        "-v",
        action="count",
        help="increase the verbosity with multiple v's (up to 5) of the "
        "ansible-playbook output (default=None)")

    # ansible options

    ansible_group = parser.add_argument_group(
        "Ansible Options",
        "control the ansible[-playbook] execution environment")

    ansible_group.add_argument(
        "--limit",
        help="matches Ansible's ```--limit``` parameter to further constrain "
        "the inventory to be used (default=None)")

    ansible_group.add_argument(
        "--cmdline",
        help="command line options to pass to ansible-playbook at "
        "execution time (default=None)")

    ansible_group.add_argument(
        "--hosts",
        help="define the set of hosts to execute against (default=None) "
        "Note: this parameter only works with -m or -r")

    ansible_group.add_argument(
        "--forks",
        help="matches Ansible's ```--forks``` parameter to set the number "
        "of conconurent processes (default=None)")

    # roles group

    roles_group = parser.add_argument_group(
        "Ansible Role Options",
        "configuration options for directly executing Ansible roles")

    roles_group.add_argument(
        "--roles-path",
        default=DEFAULT_ROLES_PATH,
        help="path used to locate the role to be executed (default=None)")

    roles_group.add_argument(
        "--role-vars",
        help="set of variables to be passed to the role at run time in the "
        "form of 'key1=value1 key2=value2 keyN=valueN'(default=None)")

    roles_group.add_argument(
        "--role-skip-facts",
        action="store_true",
        default=False,
        help="disable fact collection when the role is executed (default=False)"
    )

    # modules groups

    modules_group = parser.add_argument_group(
        "Ansible Module Options",
        "configuration options for directly executing Ansible modules")

    modules_group.add_argument(
        "-a",
        "--args",
        dest='module_args',
        help="set of arguments to be passed to the module at run time in the "
        "form of 'key1=value1 key2=value2 keyN=valueN'(default=None)")

    # playbook options
    playbook_group = parser.add_argument_group(
        "Ansible Playbook Options",
        "configuation options for executing Ansible playbooks")

    playbook_group.add_argument(
        "--process-isolation",
        dest="process_isolation",
        action="store_true",
        help="limits what directories on the filesystem the playbook run "
        "has access to, defaults to /tmp (default=False)")

    playbook_group.add_argument(
        "--process-isolation-executable",
        dest="process_isolation_executable",
        default="bwrap",
        help="process isolation executable that will be used. (default=bwrap)")

    playbook_group.add_argument(
        "--process-isolation-path",
        dest="process_isolation_path",
        default="/tmp",
        help="path that an isolated playbook run will use for staging. "
        "(default=/tmp)")

    playbook_group.add_argument(
        "--process-isolation-hide-paths",
        dest="process_isolation_hide_paths",
        nargs='*',
        help="list of paths on the system that should be hidden from the "
        "playbook run (default=None)")

    playbook_group.add_argument(
        "--process-isolation-show-paths",
        dest="process_isolation_show_paths",
        nargs='*',
        help="list of paths on the system that should be exposed to the "
        "playbook run (default=None)")

    playbook_group.add_argument(
        "--process-isolation-ro-paths",
        dest="process_isolation_ro_paths",
        nargs='*',
        help="list of paths on the system that should be exposed to the "
        "playbook run as read-only (default=None)")

    playbook_group.add_argument(
        "--directory-isolation-base-path",
        dest="directory_isolation_base_path",
        help="copies the project directory to a location in this directory "
        "to prevent multiple simultaneous executions from conflicting "
        "(default=None)")

    playbook_group.add_argument(
        "--resource-profiling",
        dest='resource_profiling',
        action="store_true",
        help="Records resource utilization during playbook execution")

    playbook_group.add_argument(
        "--resource-profiling-base-cgroup",
        dest='resource_profiling_base_cgroup',
        default="ansible-runner",
        help=
        "Top-level cgroup used to collect information on resource utilization. Defaults to ansible-runner"
    )

    playbook_group.add_argument(
        "--resource-profiling-cpu-poll-interval",
        dest='resource_profiling_cpu_poll_interval',
        default=0.25,
        help=
        "Interval (in seconds) between CPU polling for determining CPU usage. Defaults to 0.25"
    )

    playbook_group.add_argument(
        "--resource-profiling-memory-poll-interval",
        dest='resource_profiling_memory_poll_interval',
        default=0.25,
        help=
        "Interval (in seconds) between memory polling for determining memory usage. Defaults to 0.25"
    )

    playbook_group.add_argument(
        "--resource-profiling-pid-poll-interval",
        dest='resource_profiling_pid_poll_interval',
        default=0.25,
        help=
        "Interval (in seconds) between polling PID count for determining number of processes used. Defaults to 0.25"
    )

    playbook_group.add_argument(
        "--resource-profiling-results-dir",
        dest='resource_profiling_results_dir',
        help=
        "Directory where profiling data files should be saved. Defaults to None (profiling_data folder under private data dir is used in this case)."
    )

    if len(sys.argv) == 1:
        parser.print_usage()
        print_common_usage()
        parser.exit(status=0)

    args = parser.parse_args(sys_args)

    if args.command in ('start', 'run'):
        if args.hosts and not (args.module or args.role):
            parser.exit(
                status=1,
                message="The --hosts option can only be used with -m or -r\n")
        if not (args.module or args.role) and not args.playbook:
            parser.exit(
                status=1,
                message=
                "The -p option must be specified when not using -m or -r\n")

    output.configure()

    # enable or disable debug mode
    output.set_debug('enable' if args.debug else 'disable')

    # set the output logfile
    if args.logfile:
        output.set_logfile(args.logfile)

    output.debug('starting debug logging')

    # get the absolute path for start since it is a daemon
    args.private_data_dir = os.path.abspath(args.private_data_dir)

    pidfile = os.path.join(args.private_data_dir, 'pid')

    try:
        os.makedirs(args.private_data_dir, mode=0o700)
    except OSError as exc:
        if exc.errno == errno.EEXIST and os.path.isdir(args.private_data_dir):
            pass
        else:
            raise

    stderr_path = None
    context = None
    if args.command != 'run':
        stderr_path = os.path.join(args.private_data_dir, 'daemon.log')
        if not os.path.exists(stderr_path):
            os.close(
                os.open(stderr_path, os.O_CREAT, stat.S_IRUSR | stat.S_IWUSR))

    if args.command in ('start', 'run'):

        if args.command == 'start':
            import daemon
            from daemon.pidfile import TimeoutPIDLockFile
            context = daemon.DaemonContext(pidfile=TimeoutPIDLockFile(pidfile))
        else:
            context = threading.Lock()

        with context:
            with role_manager(args) as args:
                run_options = dict(
                    private_data_dir=args.private_data_dir,
                    ident=args.ident,
                    binary=args.binary,
                    playbook=args.playbook,
                    module=args.module,
                    module_args=args.module_args,
                    host_pattern=args.hosts,
                    verbosity=args.v,
                    quiet=args.quiet,
                    rotate_artifacts=args.rotate_artifacts,
                    ignore_logging=False,
                    json_mode=args.json,
                    omit_event_data=args.omit_event_data,
                    only_failed_event_data=args.only_failed_event_data,
                    inventory=args.inventory,
                    forks=args.forks,
                    project_dir=args.project_dir,
                    artifact_dir=args.artifact_dir,
                    roles_path=[args.roles_path] if args.roles_path else None,
                    process_isolation=args.process_isolation,
                    process_isolation_executable=args.
                    process_isolation_executable,
                    process_isolation_path=args.process_isolation_path,
                    process_isolation_hide_paths=args.
                    process_isolation_hide_paths,
                    process_isolation_show_paths=args.
                    process_isolation_show_paths,
                    process_isolation_ro_paths=args.process_isolation_ro_paths,
                    directory_isolation_base_path=args.
                    directory_isolation_base_path,
                    resource_profiling=args.resource_profiling,
                    resource_profiling_base_cgroup=args.
                    resource_profiling_base_cgroup,
                    resource_profiling_cpu_poll_interval=args.
                    resource_profiling_cpu_poll_interval,
                    resource_profiling_memory_poll_interval=args.
                    resource_profiling_memory_poll_interval,
                    resource_profiling_pid_poll_interval=args.
                    resource_profiling_pid_poll_interval,
                    resource_profiling_results_dir=args.
                    resource_profiling_results_dir,
                    limit=args.limit)
                if args.cmdline:
                    run_options['cmdline'] = args.cmdline

                try:
                    res = run(**run_options)
                except Exception:
                    exc = traceback.format_exc()
                    if stderr_path:
                        with open(stderr_path, 'w+') as f:
                            f.write(exc)
                    else:
                        sys.stderr.write(exc)
                    return 1
            return (res.rc)

    try:
        with open(pidfile, 'r') as f:
            pid = int(f.readline())
    except IOError:
        return (1)

    if args.command == 'stop':
        Runner.handle_termination(pid, pidfile=pidfile)
        return (0)

    elif args.command == 'is-alive':
        try:
            os.kill(pid, signal.SIG_DFL)
            return (0)
        except OSError:
            return (1)
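The is-alive branch above calls os.kill(pid, signal.SIG_DFL); SIG_DFL has the numeric value 0, so no signal is actually delivered and the call only checks that the PID exists and can be signalled. The same probe written out on its own, with pid_is_alive as an illustrative name:

import errno
import os


def pid_is_alive(pid: int) -> bool:
    try:
        os.kill(pid, 0)  # signal 0: existence/permission check, nothing is sent
    except OSError as exc:
        # EPERM means the process exists but is owned by another user
        return exc.errno == errno.EPERM
    return True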
Example 12
def main():
    parser = argparse.ArgumentParser(description='manage ansible execution')

    parser.add_argument('--version', action='version', version=VERSION)

    parser.add_argument('command', choices=['run', 'start', 'stop', 'is-alive'])

    parser.add_argument('private_data_dir',
                        help='Base directory containing Runner metadata (project, inventory, etc)')

    group = parser.add_mutually_exclusive_group()

    group.add_argument("-p", "--playbook", default=DEFAULT_RUNNER_PLAYBOOK,
                       help="The name of the playbook to execute")

    group.add_argument("-r", "--role", default=DEFAULT_RUNNER_ROLE,
                       help="Invoke an Ansible role directly without a playbook")

    parser.add_argument("--hosts",
                        help="Define the set of hosts to execute against")

    parser.add_argument("-i", "--ident",
                        default=uuid4(),
                        help="An identifier that will be used when generating the"
                             "artifacts directory and can be used to uniquely identify a playbook run")

    parser.add_argument("--roles-path", default=DEFAULT_ROLES_PATH,
                        help="Path to the Ansible roles directory")

    parser.add_argument("--role-vars",
                        help="Variables to pass to the role at runtime")

    parser.add_argument("--role-skip-facts", action="store_true", default=False,
                        help="Disable fact collection when executing a role directly")
    parser.add_argument("--artifact-dir",
                        help="Optional Path for the artifact root directory, by default it is located inside the private data dir")

    parser.add_argument("--inventory",
                        help="Override the default inventory location in private_data_dir")

    parser.add_argument("-j", "--json", action="store_true",
                        help="Output the json event structure to stdout instead of Ansible output")

    parser.add_argument("-v", action="count",
                        help="Increase the verbosity with multiple v's (up to 5) of the ansible-playbook output")

    parser.add_argument("-q", "--quiet", action="store_true",
                        help="Disable all output")

    parser.add_argument("--cmdline",
                        help="Command line options to pass to ansible-playbook at execution time")
    parser.add_argument("--debug", action="store_true",
                        help="Enable debug output logging")

    parser.add_argument("--logfile",
                        help="Log output messages to a file")

    args = parser.parse_args()

    output.configure()

    # enable or disable debug mode
    output.set_debug('enable' if args.debug else 'disable')

    # set the output logfile
    if args.logfile:
        output.set_logfile(args.logfile)

    output.debug('starting debug logging')

    pidfile = os.path.join(args.private_data_dir, 'pid')

    try:
        os.makedirs(args.private_data_dir)
    except OSError as exc:
        if exc.errno == errno.EEXIST and os.path.isdir(args.private_data_dir):
            pass
        else:
            raise

    if args.command != 'run':
        stderr_path = os.path.join(args.private_data_dir, 'daemon.log')
        if not os.path.exists(stderr_path):
            os.close(os.open(stderr_path, os.O_CREAT, stat.S_IRUSR | stat.S_IWUSR))
        stderr = open(stderr_path, 'w+')

    if args.command in ('start', 'run'):
        if args.role:
            role = {'name': args.role}
            if args.role_vars:
                role_vars = {}
                for item in shlex.split(args.role_vars):
                    key, value = item.split('=')
                    role_vars[key] = value
                role['vars'] = role_vars

            kwargs = dict(private_data_dir=args.private_data_dir,
                          json_mode=args.json)
            if args.artifact_dir:
                kwargs['artifact_dir'] = args.artifact_dir

            project_path = os.path.abspath(os.path.join(args.private_data_dir, 'project'))
            project_exists = os.path.exists(project_path)

            env_path = os.path.join(args.private_data_dir, 'env')
            env_exists = os.path.exists(env_path)

            envvars_path = os.path.join(args.private_data_dir, 'env/envvars')
            envvars_exists = os.path.exists(envvars_path)

            if args.cmdline:
                kwargs['cmdline'] = args.cmdline

            playbook = None
            tmpvars = None

            rc = 255
            errmsg = None

            try:
                play = [{'hosts': args.hosts if args.hosts is not None else "all",
                         'gather_facts': not args.role_skip_facts,
                         'roles': [role]}]

                filename = str(uuid4().hex)

                playbook = dump_artifact(json.dumps(play), project_path, filename)
                kwargs['playbook'] = playbook
                output.debug('using playbook file %s' % playbook)

                if args.inventory:
                    inventory_file = os.path.abspath(os.path.join(args.private_data_dir, 'inventory', args.inventory))
                    if not os.path.exists(inventory_file):
                        raise AnsibleRunnerException('location specified by --inventory does not exist')
                    kwargs['inventory'] = inventory_file
                    output.debug('using inventory file %s' % inventory_file)

                roles_path = args.roles_path or os.path.join(args.private_data_dir, 'roles')
                roles_path = os.path.abspath(roles_path)
                output.debug('setting ANSIBLE_ROLES_PATH to %s' % roles_path)

                envvars = {}
                if envvars_exists:
                    with open(envvars_path, 'rb') as f:
                        tmpvars = f.read()
                        envvars = safe_load(tmpvars)

                envvars['ANSIBLE_ROLES_PATH'] = roles_path
                kwargs['envvars'] = envvars

                res = run(**kwargs)
                rc = res.rc

            except AnsibleRunnerException as exc:
                errmsg = str(exc)

            finally:
                if not project_exists and os.path.exists(project_path):
                    logger.debug('removing dynamically generated project folder')
                    shutil.rmtree(project_path)
                elif playbook and os.path.isfile(playbook):
                    logger.debug('removing dynamically generated playbook')
                    os.remove(playbook)

                # if a previous envvars existed in the private_data_dir,
                # restore the original file contents
                if tmpvars:
                    with open(envvars_path, 'wb') as f:
                        f.write(tmpvars)
                elif not envvars_exists and os.path.exists(envvars_path):
                    logger.debug('removing dynamically generated envvars folder')
                    os.remove(envvars_path)

                # since ansible-runner created the env folder, remove it
                if not env_exists and os.path.exists(env_path):
                    logger.debug('removing dynamically generated env folder')
                    shutil.rmtree(env_path)

            if errmsg:
                print('ansible-runner: ERROR: %s' % errmsg)

            sys.exit(rc)

        elif args.command == 'start':
            import daemon
            from daemon.pidfile import TimeoutPIDLockFile
            context = daemon.DaemonContext(
                pidfile=TimeoutPIDLockFile(pidfile),
                stderr=stderr
            )
        else:
            context = threading.Lock()

        with context:
            run_options = dict(private_data_dir=args.private_data_dir,
                               ident=args.ident,
                               playbook=args.playbook,
                               verbosity=args.v,
                               quiet=args.quiet,
                               json_mode=args.json)

            if args.hosts is not None:
                run_options.update(inventory=args.hosts)

            if args.cmdline:
                run_options['cmdline'] = args.cmdline

            run(**run_options)
            sys.exit(0)

    try:
        with open(pidfile, 'r') as f:
            pid = int(f.readline())
    except IOError:
        sys.exit(1)

    if args.command == 'stop':
        try:
            with open(os.path.join(args.private_data_dir, 'args'), 'r') as args:
                Runner.handle_termination(pid, json.load(args), 'bwrap')
        except IOError:
            Runner.handle_termination(pid, [], 'bwrap')

    elif args.command == 'is-alive':
        try:
            os.kill(pid, signal.SIG_DFL)
            sys.exit(0)
        except OSError:
            sys.exit(1)
Example 13
def run_proxy():
    parser = argparse.ArgumentParser()
    parser.add_argument('-m', '--manager', help='hostname:port of the manager')
    parser.add_argument('--banner',
                        help='banner file to show on login screen',
                        default='_login_screen_message.html')
    parser.add_argument('--bind',
                        help="interface to bind to (default: 127.0.0.1)",
                        default='127.0.0.1')
    parser.add_argument('--sslcert',
                        help='proxy HTTP server SSL certificate',
                        default=None)
    parser.add_argument('--sslkey',
                        help='proxy HTTP server SSL key',
                        default=None)
    parser.add_argument('--fingerprint',
                        help='pin certificate to fingerprint',
                        default=None)
    parser.add_argument(
        '--disable-verifyhostname',
        help=
        'disable hostname verification; if fingerprint is specified it gets precedence',
        dest="verify_hostname",
        action='store_false',
        default=None)
    parser.add_argument('--debug',
                        help='set log level to debug and auto reload files',
                        default=False,
                        action='store_true')
    parser.add_argument('--log-config',
                        help='provide a python logging config file',
                        default=None)
    parser.add_argument(
        '--daemon',
        help='start in daemon mode and put process into background',
        default=False,
        action='store_true')
    parser.add_argument('--pidfile',
                        help='path for pid file. By default none is created',
                        default=None)

    args = parser.parse_args(args=filter_args())

    manager_hostname, manager_port = args.manager.split(':')
    certfile = args.sslcert if args.sslcert else None
    keyfile = args.sslkey if args.sslcert else None
    manager_cafile = certfile if args.fingerprint is None else False

    log_level = logging.DEBUG if args.debug else logging.INFO
    log_config = args.log_config

    client = PixelatedDispatcherClient(manager_hostname,
                                       manager_port,
                                       cacert=manager_cafile,
                                       fingerprint=args.fingerprint,
                                       assert_hostname=args.verify_hostname)
    client.validate_connection()

    dispatcher = DispatcherProxy(client,
                                 bindaddr=args.bind,
                                 keyfile=keyfile,
                                 certfile=certfile,
                                 banner=args.banner,
                                 debug=args.debug)

    if args.daemon:
        pidfile = TimeoutPIDLockFile(
            args.pidfile,
            acquire_timeout=PID_ACQUIRE_TIMEOUT_IN_S) if args.pidfile else None
        can_use_pidfile(pidfile)
        with daemon.DaemonContext(pidfile=pidfile):
            # init logging only after we have spawned the subprocess; otherwise there might be some hiccups
            init_logging('proxy', level=log_level, config_file=log_config)
            dispatcher.serve_forever()
    else:
        init_logging('proxy', level=log_level, config_file=log_config)
        dispatcher.serve_forever()
Example 14
def webserver(args):
    """Starts Airflow Webserver"""
    print(settings.HEADER)

    # Check for old/insecure config, and fail safe (i.e. don't launch) if the config is wildly insecure.
    if conf.get('webserver', 'secret_key') == 'temporary_key':
        from rich import print as rich_print

        rich_print(
            "[red][bold]ERROR:[/bold] The `secret_key` setting under the webserver config has an insecure "
            "value - Airflow has failed safe and refuses to start. Please change this value to a new, "
            "per-environment, randomly generated string, for example using this command `[cyan]openssl rand "
            "-hex 30[/cyan]`",
            file=sys.stderr,
        )
        sys.exit(1)

    access_logfile = args.access_logfile or conf.get('webserver',
                                                     'access_logfile')
    error_logfile = args.error_logfile or conf.get('webserver',
                                                   'error_logfile')
    access_logformat = args.access_logformat or conf.get(
        'webserver', 'access_logformat')
    num_workers = args.workers or conf.get('webserver', 'workers')
    worker_timeout = args.worker_timeout or conf.get(
        'webserver', 'web_server_worker_timeout')
    ssl_cert = args.ssl_cert or conf.get('webserver', 'web_server_ssl_cert')
    ssl_key = args.ssl_key or conf.get('webserver', 'web_server_ssl_key')
    if not ssl_cert and ssl_key:
        raise AirflowException(
            'An SSL certificate must also be provided for use with ' + ssl_key)
    if ssl_cert and not ssl_key:
        raise AirflowException(
            'An SSL key must also be provided for use with ' + ssl_cert)

    if args.debug:
        print(
            f"Starting the web server on port {args.port} and host {args.hostname}."
        )
        app = create_app(testing=conf.getboolean('core', 'unit_test_mode'))
        app.run(
            debug=True,
            use_reloader=not app.config['TESTING'],
            port=args.port,
            host=args.hostname,
            ssl_context=(ssl_cert, ssl_key) if ssl_cert and ssl_key else None,
        )
    else:

        pid_file, stdout, stderr, log_file = setup_locations(
            "webserver", args.pid, args.stdout, args.stderr, args.log_file)

        # Check if webserver is already running if not, remove old pidfile
        check_if_pidfile_process_is_running(pid_file=pid_file,
                                            process_name="webserver")

        print(
            textwrap.dedent(f'''\
                Running the Gunicorn Server with:
                Workers: {num_workers} {args.workerclass}
                Host: {args.hostname}:{args.port}
                Timeout: {worker_timeout}
                Logfiles: {access_logfile} {error_logfile}
                Access Logformat: {access_logformat}
                ================================================================='''
                            ))

        run_args = [
            sys.executable,
            '-m',
            'gunicorn',
            '--workers',
            str(num_workers),
            '--worker-class',
            str(args.workerclass),
            '--timeout',
            str(worker_timeout),
            '--bind',
            args.hostname + ':' + str(args.port),
            '--name',
            'airflow-webserver',
            '--pid',
            pid_file,
            '--config',
            'python:airflow.www.gunicorn_config',
        ]

        if args.access_logfile:
            run_args += ['--access-logfile', str(args.access_logfile)]

        if args.error_logfile:
            run_args += ['--error-logfile', str(args.error_logfile)]

        if args.access_logformat and args.access_logformat.strip():
            run_args += ['--access-logformat', str(args.access_logformat)]

        if args.daemon:
            run_args += ['--daemon']

        if ssl_cert:
            run_args += ['--certfile', ssl_cert, '--keyfile', ssl_key]

        run_args += ["airflow.www.app:cached_app()"]

        gunicorn_master_proc = None

        def kill_proc(signum, _):
            log.info("Received signal: %s. Closing gunicorn.", signum)
            gunicorn_master_proc.terminate()
            with suppress(TimeoutError):
                gunicorn_master_proc.wait(timeout=30)
            if gunicorn_master_proc.poll() is not None:
                gunicorn_master_proc.kill()
            sys.exit(0)

        def monitor_gunicorn(gunicorn_master_pid: int):
            # Register signal handlers
            signal.signal(signal.SIGINT, kill_proc)
            signal.signal(signal.SIGTERM, kill_proc)

            # These run forever until SIG{INT, TERM, KILL, ...} signal is sent
            GunicornMonitor(
                gunicorn_master_pid=gunicorn_master_pid,
                num_workers_expected=num_workers,
                master_timeout=conf.getint('webserver',
                                           'web_server_master_timeout'),
                worker_refresh_interval=conf.getint('webserver',
                                                    'worker_refresh_interval',
                                                    fallback=30),
                worker_refresh_batch_size=conf.getint(
                    'webserver', 'worker_refresh_batch_size', fallback=1),
                reload_on_plugin_change=conf.getboolean(
                    'webserver', 'reload_on_plugin_change', fallback=False),
            ).start()

        if args.daemon:
            # This makes possible errors get reported before daemonization
            os.environ['SKIP_DAGS_PARSING'] = 'True'
            app = create_app(None)
            os.environ.pop('SKIP_DAGS_PARSING')

            handle = setup_logging(log_file)

            base, ext = os.path.splitext(pid_file)
            with open(stdout, 'w+') as stdout, open(stderr, 'w+') as stderr:
                ctx = daemon.DaemonContext(
                    pidfile=TimeoutPIDLockFile(f"{base}-monitor{ext}", -1),
                    files_preserve=[handle],
                    stdout=stdout,
                    stderr=stderr,
                )
                with ctx:
                    subprocess.Popen(run_args, close_fds=True)

                    # Read the PID of the gunicorn master, as it will differ from
                    # that of the process spawned above.
                    while True:
                        sleep(0.1)
                        gunicorn_master_proc_pid = read_pid_from_pidfile(
                            pid_file)
                        if gunicorn_master_proc_pid:
                            break

                    # Run Gunicorn monitor
                    gunicorn_master_proc = psutil.Process(
                        gunicorn_master_proc_pid)
                    monitor_gunicorn(gunicorn_master_proc.pid)

        else:
            with subprocess.Popen(run_args,
                                  close_fds=True) as gunicorn_master_proc:
                monitor_gunicorn(gunicorn_master_proc.pid)
Example 15
class WebApp(object):
    '''TODO'''

    def __init__(self,
                 reqhandler,
                 description='',
                 support_ssl=True,
                 support_cors=True,
                 support_daemon=False,
                 auth_type='cookie',
                 db_bases={},
                 log_fmt=None):
        '''TODO

        reqhandler will be replaced
        '''

        if support_daemon:
            try:
                DaemonContext
            except NameError:
                exit('You need the python-daemon package '
                     'to use daemon mode.')
        for name, d in db_bases.items():
            if not is_str(name):
                exit('db_bases keys should be strings')
            if name.replace(' ', '') != name:
                exit('db_bases keys cannot contain spaces.')
            if 'base' not in d:
                exit("db_bases items should contain a 'base' key.")
            if not is_base(d['base']):
                exit('db_bases base should be a declarative base.')
        self._is_configured = False
        self._delete_tmp_userfile = False
        # access.log is for http.server (which writes to stderr)
        self.access_log = None
        self.httpd = self.url = self.pidlockfile = None
        self.reqhandler = reqhandler
        self.auth_type = auth_type
        self.db_bases = db_bases
        self.log_fmt = log_fmt
        if self.log_fmt is None:
            self.log_fmt = \
                '[%(asctime)s] %(name)s (%(threadName)s): %(message)s'
        self.conf = Conf(skip=['action',
                               'config',
                               'save_config',
                               'debug_log',
                               'add_users'])

        self.parser = argparse.ArgumentParser(
            formatter_class=argparse.ArgumentDefaultsHelpFormatter,
            description=description)
        if support_daemon:
            self.parser.add_argument(
                'action', nargs='?',
                choices=['start', 'stop', 'restart', 'status'],
                default='start',
                help='Action to take on the running daemon.')
        listen_parser = self.parser.add_argument_group(
            'Listen options')
        listen_parser.add_argument(
            '-a', '--address', dest='address',
            default='127.0.0.1', metavar='IP',
            help='Address of interface to bind to.')
        listen_parser.add_argument(
            '-p', '--port', dest='port', metavar='PORT', type=int,
            help=('HTTP port to listen on. Default is 80 if not '
                  'over SSL or 443 if over SSL.'))

        if support_ssl:
            ssl_parser = self.parser.add_argument_group('SSL options')
            ssl_parser.add_argument(
                '-s', '--ssl', dest='ssl', default=False,
                action='store_true', help='Use SSL.')
            ssl_parser.add_argument(
                '--no-ssl', dest='ssl', action='store_false',
                help=("Don't use SSL. This is the default, but can "
                      "be used to override configuration file "
                      "setting."))
            ssl_parser.add_argument(
                '--cert', dest='certfile', metavar='FILE',
                help='PEM file containing the server certificate.')
            ssl_parser.add_argument(
                '--key', dest='keyfile', metavar='FILE',
                help=('PEM file containing the private key for the '
                      'server certificate.'))

        if auth_type:
            auth_parser = self.parser.add_argument_group(
                'Authentication options')
            if auth_type == 'jwt':
                auth_parser.add_argument(
                    '--jwt-key', dest='jwt_key', metavar='PASSWORD',
                    help=('A password to use for symmetric signing '
                          'of JWT or a passphrase used to decrypt '
                          'the private key (for asymmetric '
                          'algorithms). If none is given and the '
                          'algorithm is symmetric, then a random '
                          'one is generated (meaning all JWT keys '
                          'become invalid upon restart). If none is '
                          'given and the algorithm is asymmetric, '
                          'the key must be decrypted. If "-" is '
                          'supplied, then it is read from stdin.'))
                auth_parser.add_argument(
                    '--jwt-priv-key', dest='jwt_priv_key',
                    help=('A private PEM key for use with asymmetric '
                          'JWT encodings.'))
                auth_parser.add_argument(
                    '--jwt-algo', dest='jwt_algo',
                    help=('The algorithm used to encode JWTs. '
                          'Default is HS256 if no private key is '
                          'given, otherwise RS256.'))
            auth_parser.add_argument(
                '--userfile', dest='userfile', metavar='FILE',
                help=('File containing one username:password[:roles] '
                      'per line. roles is an optional comma-'
                      'separated list of roles.'))
            auth_parser.add_argument(
                '--add-users', dest='add_users', action='store_true',
                default=False,
                help=('Prompt to create users. Entries will be '
                      'appended to --userfile if given. '
                      '--userfile-plain determines if the passwords '
                      'are hashed before written to the --userfile.'))
            auth_parser.add_argument(
                '--userfile-plain', dest='userfile_hashed',
                default=True, action='store_false',
                help='The passwords in userfile are in cleartext.')
            auth_parser.add_argument(
                '--userfile-hashed', dest='userfile_hashed',
                action='store_true',
                help=('The passwords in userfile are hashed. This is '
                      'the default, but can be used to override '
                      'configuration file setting.'))
            auth_parser.add_argument(
                '--hash-type', dest='userfile_hash_type', nargs='?',
                const=None, default=None,
                choices=reqhandler._supported_hashes,
                help=('The hashing algorithm to use. Specifying this '
                      'option without an argument overrides the one '
                      'in the configuration file and resets the '
                      'hashing to none (plaintext).'))

        if support_cors:
            cors_parser = self.parser.add_argument_group(
                'CORS options')
            cors_parser.add_argument(
                '-X', '--enable-cors', dest='cors',
                default=False, action='store_true',
                help='Enable CORS support.')
            cors_parser.add_argument(
                '--disable-cors', dest='cors', action='store_false',
                help=('Disable CORS support. This is the default, '
                      'but can be used to override configuration '
                      'file setting.'))
            cors_origin_parser = \
                cors_parser.add_mutually_exclusive_group()
            cors_origin_parser.add_argument(
                '--allowed-origins', dest='cors_origins',
                default=['*'], metavar='Origin', nargs='*',
                help='Allowed origins for CORS requests.')
            cors_origin_parser.add_argument(
                '--allow-any-origin', dest='cors_origins',
                action='store_const', const=['{ECHO}'],
                help=('Allow any origin, i.e. echo back '
                      'the requesting origin.'))
            cors_parser.add_argument(
                '--allowed-headers', dest='cors_headers',
                default=['Accept', 'Accept-Language',
                         'Content-Language', 'Content-Type',
                         'Authorization'],
                metavar='Header: Value', nargs='*',
                help='Headers allowed for CORS requests.')
            cors_parser.add_argument(
                '--allowed-methods', dest='cors_methods',
                default=['POST', 'GET', 'OPTIONS', 'HEAD'],
                metavar='Method', nargs='*',
                help=('Methods allowed for CORS requests. OPTIONS '
                      'to one of the special endpoints always '
                      'return the allowed methods of that endpoint.'))
            cors_parser.add_argument(
                '--allow-credentials', dest='cors_creds',
                default=False, action='store_true',
                help='Allow sending credentials with CORS requests')
            cors_parser.add_argument(
                '--disallow-credentials', dest='cors_creds',
                action='store_false',
                help=('Do not allow sending credentials with CORS '
                      'requests. This is the default, but can be '
                      'used to override configuration file setting.'))

        if db_bases:
            db_parser = self.parser.add_argument_group(
                'Database options')
            for name in self.db_bases.keys():
                db_parser.add_argument(
                    '--{}-dburl'.format(name),
                    dest='{}_dburl'.format(name),
                    metavar=('dialect://[username:password@]'
                             'host/database'),
                    help='URL of the {} database.'.format(name))

        misc_http_parser = self.parser.add_argument_group(
            'Other HTTP options')
        misc_http_parser.add_argument(
            '-H', '--headers', dest='headers',
            default=[], metavar='Header: Value', nargs='*',
            help='Additional headers to include in the response.')

        misc_server_parser = self.parser.add_argument_group(
            'Logging and process options')
        if support_daemon:
            misc_server_parser.add_argument(
                '-P', '--pidfile', dest='pidfile', metavar='FILE',
                default='/var/run/pyhttpd.pid',
                help='Path to pidfile when running in daemon mode.')
            misc_server_parser.add_argument(
                '-d', '--daemon', dest='daemonize',
                default=False, action='store_true',
                help='Run as a daemon.')
            misc_server_parser.add_argument(
                '-f', '--foreground', dest='daemonize',
                action='store_false',
                help=('Run in foreground. This is the default, '
                      'but can be used to override configuration '
                      'file setting.'))
        misc_server_parser.add_argument(
            '-c', '--config', dest='config', metavar='FILE',
            help=('Configuration file. Command-line options take '
                  'precedence.'))
        misc_server_parser.add_argument(
            '--save-config', dest='save_config',
            default=False, action='store_true',
            help='Update or create the configuration file.')
        misc_server_parser.add_argument(
            '-r', '--webroot', dest='webroot', metavar='DIR',
            default='/var/www/html',
            help=('Directory to serve files from. '
                  'Current working directory will be changed to it.'))
        misc_server_parser.add_argument(
            '--multithread', dest='multithread',
            default=True, action='store_true',
            help=('Use multi-threading support. This is the default, '
                  'but can be used to override configuration '
                  'file setting.'))
        misc_server_parser.add_argument(
            '--no-multithread', dest='multithread',
            action='store_false',
            help='Disable multi-threading support.')
        if support_daemon:
            misc_server_parser.add_argument(
                '-l', '--logdir', dest='logdir', metavar='DIR',
                help=('Directory that will hold the log files. '
                      'Default when running in daemon mode is '
                      '/var/log/pyhttpd. Default in foreground mode '
                      'is to print all output to the console.'))
        else:
            misc_server_parser.add_argument(
                '-l', '--logdir', dest='logdir', metavar='DIR',
                help=('Directory that will hold the log files. '
                      'Default is to print all output to '
                      'the console.'))
        misc_server_parser.add_argument(
            '--log', dest='log', nargs='+',
            action='append', default=[], metavar='PACKAGE [FILENAME]',
            help=('Enable logging output for the given package. '
                  'FILENAME will be stored in --logdir if given '
                  '(otherwise ignored and output goes to the '
                  'console). Default is <PACKAGE>.log. Only INFO '
                  'level messages go in FILENAME, WARNING and ERROR '
                  'go to error.log (or stderr if no --logdir). '
                  'Note that printing of request lines to '
                  'access.log (or stderr) is always enabled. '
                  'This option can be given multiple times.'))
        misc_server_parser.add_argument(
            '--request-log', dest='request_log', nargs='?',
            const='request.log', metavar='[FILENAME]',
            help=('Enable logging of full requests. FILENAME '
                  'defaults to request.log if not given.'))
        misc_server_parser.add_argument(
            '--debug-log', dest='debug_log', nargs='+',
            action='append', default=[], metavar='PACKAGE [FILENAME]',
            help=('Enable debugging output for the given package. '
                  'FILENAME defaults to debug.log. Note that this '
                  'option is not saved in the configuration file. '
                  'Otherwise the behaviour is similar to --log.'))
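        # A hypothetical invocation exercising the options defined above
        # (program name, paths and package names are illustrative only):
        #
        #   pyhttpd --daemon -P /var/run/pyhttpd.pid -r /var/www/html \
        #       -l /var/log/pyhttpd --log mypackage mypackage.log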

    def configure(self):
        '''Parse the command line and configuration file and prepare the
        requested action. May only be called once.'''

        if self._is_configured:
            raise RuntimeError("'configure' can be called only once.")

        args = self.parser.parse_args()
        #### Load/save/update config file
        if args.config is not None:
            try:
                self.update_config(args.config)
            except FileNotFoundError as e:  # XXX
                if not args.save_config:
                    exit(e)
        # update without overriding values loaded from the conf file
        # with non-explicitly set values (defaults)
        self.parser.parse_args(namespace=self.conf)
        # do not raise AttributeError but return None for command-line
        # options which are not supported
        self.conf._error_on_missing = False
        self.action = self.conf.action
        if self.action is None:
            self.action = 'start'
        if self.conf.save_config:
            if self.conf.config is None:
                exit('--save-config requires --config.')
            self.save_config(self.conf.config)

        #### Preliminary checks and directory creation
        if self.conf.port is None:
            self.conf.port = 443 if self.conf.ssl else 80
        if self.conf.ssl:
            if self.conf.certfile is None \
                    or self.conf.keyfile is None:
                exit('--certfile and --keyfile must be given')
        if self.conf.daemonize or self.action != 'start':
            make_dirs(self.conf.pidfile, is_file=True)
            self.pidlockfile = TimeoutPIDLockFile(
                os.path.abspath(self.conf.pidfile), 3)
            if self.action in ['stop', 'status']:
                self.conf.daemonize = False
        if self.action in ['start', 'restart']:
            self._prepare_for_start()

        self.url = '{proto}://{addr}:{port}'.format(
            proto='https' if self.conf.ssl else 'http',
            addr=self.conf.address,
            port=self.conf.port)
        self._is_configured = True

    def update_config(self, conffile):
        '''Load settings from a JSON configuration file, substituting
        $VAR references with values from the environment.'''

        with open(conffile, 'r') as f:
            content_raw = f.read()
        # return an empty value for missing env variables
        env = AwesomeDict(os.environ).set_defaults({'.*': ''})
        content = Template(content_raw).substitute(env)
        try:
            settings = json.loads(content)
        except _JSONDecodeError as e:
            exit('Invalid configuration file: {}'.format(e))
        self.conf._update(settings)

    def save_config(self, conffile):
        '''Write the current configuration to the given file as JSON.'''

        with open(conffile, 'w') as f:
            json.dump(self.conf._to_dict(), f, indent=2)
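
    # A minimal configuration file consumed by update_config (and produced by
    # save_config) might look like the following; the keys are illustrative
    # and $VARS are expanded from the environment before the JSON is parsed:
    #
    #   {
    #     "port": 8080,
    #     "webroot": "$HOME/www",
    #     "logdir": "/var/log/pyhttpd",
    #     "daemonize": false
    #   }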

    def run(self):
        '''Run the configured action (start, stop, restart or status).'''

        if not self._is_configured:
            self.configure()
        if self.action == 'restart':
            self._stop()
            self.action = 'start'
        getattr(self, '_{}'.format(self.action))()

    def _prepare_for_start(self):
        def send_custom_headers(reqself):
            super(self.reqhandler, reqself).send_custom_headers()
            return self._send_cors_headers(reqself)

        #### Preliminary checks and directory creation
        if self.conf.logdir is None and self.conf.daemonize:
            self.conf.logdir = '/var/log/pyhttpd'
        if self.conf.logdir is not None:
            self.conf.logdir = os.path.abspath(
                self.conf.logdir).rstrip('/')
            make_dirs(self.conf.logdir, mode=0o700)
        # check webroot
        self.conf.webroot = self.conf.webroot.rstrip('/')
        #  self.conf.webroot = self.conf.webroot.strip('/')
        #  if not os.path.abspath(self.conf.webroot).startswith(
        #          os.getcwd()):
        #      exit('The given webroot is outside the current root')
        ensure_exists(self.conf.webroot, is_file=False)
        # check userfile
        if self.conf.userfile is not None \
                and not self.conf.add_users:
            ensure_exists(self.conf.userfile, is_file=True)
        for name in self.db_bases.keys():
            url = getattr(self.conf, '{}_dburl'.format(name))
            if not url:
                exit(('You must specify the {}_dburl '
                      'configuration option or the --{}-dburl '
                      'command-line option.').format(name, name))
            conn = parse_db_url(url)
            if not conn:
                exit('Invalid database URL: {}'.format(url))
            if conn['dialect'] == 'sqlite' and \
                    conn['database'] not in [':memory:', None]:
                make_dirs(conn['database'], is_file=True)

        #### Create the new request handler class
        attrs = {}
        if self.conf.cors:
            attrs.update({
                'send_custom_headers': send_custom_headers})
        if self.auth_type is not None:
            attrs.update({
                '_is_SSL': self.conf.ssl,
                '_pwd_type': self.conf.userfile_hash_type})
        self.reqhandler = type(
            '{}Custom'.format(self.reqhandler.__name__),
            (self.reqhandler,), attrs)

        #### Read users from stdin
        if self.conf.add_users:
            # if userfile is not given, use a temporary file
            if self.conf.userfile is None:
                self._delete_tmp_userfile = True
                self.conf.userfile = \
                    tempfile.NamedTemporaryFile(
                        mode='a', delete=False)
            else:
                self.conf.userfile = open(self.conf.userfile, 'a')
            sys.stdout.write('Creating users\n')
            transformer = None
            if self.conf.userfile_hash_type is not None \
                    and self.conf.userfile_hashed:
                # we need to hash the passwords here, since userfile
                # may already contain existing hashes
                transformer = getattr(
                    self.reqhandler,
                    '_transform_password_{}'.format(
                        self.conf.userfile_hash_type))
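            # each userfile entry is written as "username:password:roles"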
            while True:
                username = read_line('Enter username (blank to quit)')
                if not username:
                    break
                raw_password = read_line(
                    'Enter password (blank for random)')
                roles = read_line('Enter comma-separated roles')
                if not raw_password:
                    raw_password = randstr(16, skip=':')
                if transformer is not None:
                    password = transformer(raw_password)
                else:
                    password = raw_password
                self.conf.userfile.write(
                    '{}:{}:{}\n'.format(username, password, roles))
                sys.stdout.write(
                    'Created user {} with password {}\n'.format(
                        username, raw_password))
            self.conf.userfile.close()
            # close the file, so load_users_from_file can start
            # reading it from the start
            self.conf.userfile = self.conf.userfile.name

        #### JWT keys
        if self.auth_type == 'jwt':
            if self.conf.jwt_algo is None:
                if self.conf.jwt_priv_key is None:
                    self.conf.jwt_algo = 'HS256'
                else:
                    self.conf.jwt_algo = 'RS256'
            if self.conf.jwt_key is None \
                    and self.conf.jwt_algo.startswith('HS'):
                self.conf.jwt_key = randstr(16)
            elif self.conf.jwt_key == '-':
                self.conf.jwt_key = read_line('Enter JWT passphrase')
            self.reqhandler.set_JWT_keys(
                passphrase=self.conf.jwt_key,
                algorithm=self.conf.jwt_algo,
                privkey=self.conf.jwt_priv_key)

    def _start(self):
        if self.conf.logdir is not None:
            self.access_log = open('{}/{}'.format(
                self.conf.logdir, 'access.log'), 'ab')

        sys.stderr.write('Starting server on {}\n'.format(self.url))
        #### Daemonize
        if self.conf.daemonize:
            assert self.access_log is not None
            if self.pidlockfile.is_locked():
                exit('PID file {} already locked'.format(
                    self.pidlockfile.path))
            daemon = DaemonContext(
                working_directory=os.getcwd(),
                umask=0o077,
                pidfile=self.pidlockfile,
                signal_map={SIGTERM: None},  # let me handle signals
                stderr=self.access_log)
            daemon.open()

        #### Setup logging
        log_dest = {
            'REQUEST': [],
            'DEBUG': self.conf.debug_log,
            'INFO': self.conf.log,
            'ERROR': self.conf.log}
        if self.conf.logdir is not None:
            log_dest['INFO'].append([__name__, 'event.log'])
        if self.conf.request_log is not None:
            log_dest['REQUEST'].append(
                [MY_PKG_NAME, self.conf.request_log])
        self.loggers = get_loggers(log_dest,
                                   logdir=self.conf.logdir,
                                   fmt=self.log_fmt)

        #### Connect to the databases
        # This has to be done after daemonization because the sockets
        # may be closed
        for name, d in self.db_bases.items():
            base = d['base']
            url = getattr(self.conf, '{}_dburl'.format(name))
            session_kargs = d.get('session_args', {})
            engine_kargs = d.get('engine_args', {})
            cache = d.get('cache', False)
            DBConnection(base,
                         url,
                         session_kargs=session_kargs,
                         engine_kargs=engine_kargs)
            if cache:  # ETag support
                self.reqhandler.enable_client_cache(name, base)

        #### Load users
        if self.conf.userfile is not None:
            self.reqhandler.load_users_from_file(
                self.conf.userfile,
                plaintext=not self.conf.userfile_hashed)
            if self._delete_tmp_userfile:
                os.remove(self.conf.userfile)

        #### Create the server
        # This has to be done after daemonization because it binds to
        # the listening port at creation time
        srv_cls = HTTPServer
        if self.conf.multithread:
            srv_cls = ThreadingHTTPServer
        self.httpd = srv_cls((self.conf.address, self.conf.port),
                             self.reqhandler)
        if self.conf.ssl:
            self.httpd.socket = ssl.wrap_socket(
                self.httpd.socket,
                keyfile=self.conf.keyfile,
                certfile=self.conf.certfile,
                server_side=True)

        #### Setup signal handlers
        signal(SIGTERM, self._term_sighandler)
        signal(SIGINT, self._term_sighandler)

        #### Redirect stderr to access.log
        if self.conf.logdir is not None and not self.conf.daemonize:
            # running in foreground, but logging to logdir, redirect
            # stderr to access.log as http.server writes to stderr
            os.dup2(self.access_log.fileno(), sys.stderr.fileno())

        #### Change working directory and run
        os.chdir(self.conf.webroot)
        self._log_event('Starting server on {}'.format(self.url))
        self.httpd.serve_forever()

    def _stop(self):
        pid = self._get_pid(break_stale=True)
        if pid is None:
            return
        if pid <= 0:
            # os.kill raised no exception for a non-positive PID when this
            # was tested, but signalling it would hit the whole process
            # group, so refuse to continue
            sys.stderr.write('Invalid PID: {}\n'.format(pid))
            return
        try:
            os.kill(pid, SIGTERM)
        except OSError as e:
            exit('Failed to terminate process {}: {}'.format(
                pid, e), e.errno)
        # wait
        max_wait = 5
        interval = 0.5
        curr_wait = 0
        try:
            while self._is_pid_running():
                if curr_wait >= max_wait:
                    exit('Failed to terminate process {}'.format(pid))
                sleep(interval)
                curr_wait += interval
        except KeyboardInterrupt:
            pass

    def _status(self):
        pid = self._get_pid()
        if pid is None:
            exit('Server is not running', 1)
        if not self._is_pid_running():
            exit('Server is not running, pidfile is old', -1)
        exit('Server is running, pid = {}'.format(pid), 0)

    def _log_event(self, message):
        try:
            self.loggers[__name__]
        except KeyError:
            return
        self.loggers[__name__].info(message)

    def _term_sighandler(self, signo, stack_frame):
        self._log_event('Stopping server on {}'.format(self.url))
        self.httpd.server_close()
        self._log_event('Stopped server on {}'.format(self.url))

        if self.access_log is not None:
            self.access_log.close()
        sys.exit(0)

    def _send_cors_headers(self, reqself):
        def get_cors(what):
            res = getattr(self.conf, 'cors_{}'.format(what))
            if isinstance(res, list):
                return ', '.join(res)
            return res

        for h in self.conf.headers:
            reqself.send_header(*re.split(': *', h, maxsplit=1))
        cors_origins = get_cors('origins')
        if cors_origins is not None:
            cors_origins = urllib.parse.unquote_plus(
                cors_origins)
        if cors_origins == '{ECHO}':
            cors_origins = reqself.headers.get('Origin')
            if not cors_origins:
                cors_origins = '*'
        cors_headers = get_cors('headers')
        cors_methods = get_cors('methods')
        cors_creds = get_cors('creds')
        if cors_origins:
            reqself.send_header('Access-Control-Allow-Origin',
                                cors_origins)
        if cors_headers:
            reqself.send_header('Access-Control-Allow-Headers',
                                cors_headers)
        if cors_methods:
            reqself.send_header('Access-Control-Allow-Methods',
                                cors_methods)
        if cors_creds:
            reqself.send_header('Access-Control-Allow-Credentials',
                                'true')

    def _get_pid(self, break_stale=False):
        if self.pidlockfile is None:
            sys.stderr.write('No PID file supplied\n')
            return None
        if not self.pidlockfile.is_locked():
            #  sys.stderr.write(
            #      'PID file {} not locked\n'.format(
            #          self.pidlockfile.path))
            return None
        if not self._is_pid_running() and break_stale:
            sys.stderr.write(
                'PID file {} is old, removing\n'.format(
                    self.pidlockfile.path))
            self.pidlockfile.break_lock()
            return None
        return self.pidlockfile.read_pid()

    def _is_pid_running(self):
        '''Adapted from daemon.runner'''

        pid = self.pidlockfile.read_pid()
        if pid is not None:
            try:
                os.kill(pid, SIG_DFL)
            except ProcessLookupError:
                return False
            except OSError as e:
                # Under Python 2, process lookup error is an OSError.
                if e.errno == errno.ESRCH:
                    # The specified PID does not exist.
                    return False
            return True
        return False
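
# A minimal, self-contained sketch of the stale-pidfile handling that
# _get_pid/_is_pid_running implement above; the path and timeout are
# illustrative only, not part of the class:
import os
from daemon.pidfile import TimeoutPIDLockFile

lock = TimeoutPIDLockFile('/tmp/example.pid', acquire_timeout=3)
if lock.is_locked():
    pid = lock.read_pid()
    if pid is None:
        lock.break_lock()        # unreadable pidfile, treat it as stale
    else:
        try:
            os.kill(pid, 0)      # signal 0 only checks that the PID exists
        except ProcessLookupError:
            lock.break_lock()    # no such process, so the lock is stale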
Esempio n. 16
0
def worker(args):
    """Starts Airflow Celery worker"""
    env = os.environ.copy()
    env['AIRFLOW_HOME'] = settings.AIRFLOW_HOME

    if not settings.validate_session():
        print("Worker exiting... database connection precheck failed! ")
        sys.exit(1)

    # Celery worker
    from airflow.executors.celery_executor import app as celery_app
    from celery.bin import worker  # pylint: disable=redefined-outer-name

    autoscale = args.autoscale
    if autoscale is None and conf.has_option("celery", "worker_autoscale"):
        autoscale = conf.get("celery", "worker_autoscale")
    worker = worker.worker(app=celery_app)   # pylint: disable=redefined-outer-name
    options = {
        'optimization': 'fair',
        'O': 'fair',
        'queues': args.queues,
        'concurrency': args.concurrency,
        'autoscale': autoscale,
        'hostname': args.celery_hostname,
        'loglevel': conf.get('core', 'LOGGING_LEVEL'),
    }

    if conf.has_option("celery", "pool"):
        options["pool"] = conf.get("celery", "pool")

    if args.daemon:
        pid, stdout, stderr, log_file = setup_locations("worker",
                                                        args.pid,
                                                        args.stdout,
                                                        args.stderr,
                                                        args.log_file)
        handle = setup_logging(log_file)
        stdout = open(stdout, 'w+')
        stderr = open(stderr, 'w+')

        ctx = daemon.DaemonContext(
            pidfile=TimeoutPIDLockFile(pid, -1),
            files_preserve=[handle],
            stdout=stdout,
            stderr=stderr,
        )
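        # files_preserve=[handle] keeps the logging handler's file descriptor
        # open across daemonization; DaemonContext otherwise closes open
        # descriptors when it activates.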
        with ctx:
            sub_proc = subprocess.Popen(['airflow', 'serve_logs'], env=env, close_fds=True)
            worker.run(**options)
            sub_proc.kill()

        stdout.close()
        stderr.close()
    else:
        signal.signal(signal.SIGINT, sigint_handler)
        signal.signal(signal.SIGTERM, sigint_handler)

        sub_proc = subprocess.Popen(['airflow', 'serve_logs'], env=env, close_fds=True)

        worker.run(**options)
        sub_proc.kill()
Esempio n. 17
0
def main(sys_args=None):
    parser = argparse.ArgumentParser(description='manage ansible execution')

    parser.add_argument('--version', action='version', version=VERSION)

    parser.add_argument('command',
                        choices=['run', 'start', 'stop', 'is-alive'])

    parser.add_argument(
        'private_data_dir',
        help=
        'Base directory containing Runner metadata (project, inventory, etc.)')

    group = parser.add_mutually_exclusive_group()

    group.add_argument(
        "-m",
        "--module",
        default=DEFAULT_RUNNER_MODULE,
        help="Invoke an Ansible module directly without a playbook")

    group.add_argument("-p",
                       "--playbook",
                       default=DEFAULT_RUNNER_PLAYBOOK,
                       help="The name of the playbook to execute")

    group.add_argument(
        "-r",
        "--role",
        default=DEFAULT_RUNNER_ROLE,
        help="Invoke an Ansible role directly without a playbook")

    parser.add_argument("-b",
                        "--binary",
                        default=DEFAULT_RUNNER_BINARY,
                        help="The full path to ansible[-playbook] binary")

    parser.add_argument("--hosts",
                        help="Define the set of hosts to execute against")

    parser.add_argument(
        "-i",
        "--ident",
        default=uuid4(),
        help="An identifier that will be used when generating the"
        "artifacts directory and can be used to uniquely identify a playbook run"
    )

    parser.add_argument(
        "--rotate-artifacts",
        default=0,
        type=int,
        help=
        "Automatically clean up old artifact directories after a given number has been created, the default is 0 which disables rotation"
    )

    parser.add_argument("--roles-path",
                        default=DEFAULT_ROLES_PATH,
                        help="Path to the Ansible roles directory")

    parser.add_argument("--role-vars",
                        help="Variables to pass to the role at runtime")

    parser.add_argument(
        "--role-skip-facts",
        action="store_true",
        default=False,
        help="Disable fact collection when executing a role directly")

    parser.add_argument(
        "--artifact-dir",
        help=
        "Optional Path for the artifact root directory, by default it is located inside the private data dir"
    )

    parser.add_argument(
        "--project-dir",
        help=
        "Optional Path for the location of the playbook content directory, by default this is 'project' inside the private data dir"
    )

    parser.add_argument(
        "--inventory",
        help="Override the default inventory location in private_data_dir")

    parser.add_argument("--forks", help="Set Ansible concurrency via Forks")

    parser.add_argument(
        "-j",
        "--json",
        action="store_true",
        help=
        "Output the json event structure to stdout instead of Ansible output")

    parser.add_argument(
        "-v",
        action="count",
        help=
        "Increase the verbosity with multiple v's (up to 5) of the ansible-playbook output"
    )

    parser.add_argument("-q",
                        "--quiet",
                        action="store_true",
                        help="Disable all output")

    parser.add_argument(
        "--cmdline",
        help=
        "Command line options to pass to ansible-playbook at execution time")

    parser.add_argument("--debug",
                        action="store_true",
                        help="Enable Runner debug output logging")

    parser.add_argument("--logfile", help="Log output messages to a file")

    parser.add_argument("-a",
                        "--args",
                        dest='module_args',
                        help="Module arguments")

    parser.add_argument(
        "--process-isolation",
        dest='process_isolation',
        action="store_true",
        help=
        "Limits what directories on the filesystem the playbook run has access to, defaults to /tmp"
    )

    parser.add_argument(
        "--process-isolation-executable",
        dest='process_isolation_executable',
        default="bwrap",
        help="Process isolation executable that will be used. Defaults to bwrap"
    )

    parser.add_argument(
        "--process-isolation-path",
        dest='process_isolation_path',
        default="/tmp",
        help=
        "Path that an isolated playbook run will use for staging. Defaults to /tmp"
    )

    parser.add_argument(
        "--process-isolation-hide-paths",
        dest='process_isolation_hide_paths',
        help=
        "List of paths on the system that should be hidden from the playbook run"
    )

    parser.add_argument(
        "--process-isolation-show-paths",
        dest='process_isolation_show_paths',
        help=
        "List of paths on the system that should be exposed to the playbook run"
    )

    parser.add_argument(
        "--process-isolation-ro-paths",
        dest='process_isolation_ro_paths',
        help=
        "List of paths on the system that should be exposed to the playbook run as read-only"
    )

    parser.add_argument(
        "--directory-isolation-base-path",
        dest='directory_isolation_base_path',
        help=
        "Copies the project directory to a location in this directory to prevent multiple simultaneous executions from conflicting"
    )

    parser.add_argument(
        "--limit",
        help=
        "Matches ansible's ``--limit`` parameter to further constrain the inventory to be used"
    )

    args = parser.parse_args(sys_args)

    output.configure()

    # enable or disable debug mode
    output.set_debug('enable' if args.debug else 'disable')

    # set the output logfile
    if args.logfile:
        output.set_logfile(args.logfile)

    output.debug('starting debug logging')

    # get the absolute path for start since it is a daemon
    args.private_data_dir = os.path.abspath(args.private_data_dir)

    pidfile = os.path.join(args.private_data_dir, 'pid')

    try:
        os.makedirs(args.private_data_dir, mode=0o700)
    except OSError as exc:
        if exc.errno == errno.EEXIST and os.path.isdir(args.private_data_dir):
            pass
        else:
            raise

    stderr_path = None
    if args.command != 'run':
        stderr_path = os.path.join(args.private_data_dir, 'daemon.log')
        if not os.path.exists(stderr_path):
            os.close(
                os.open(stderr_path, os.O_CREAT, stat.S_IRUSR | stat.S_IWUSR))

    if args.command in ('start', 'run'):

        if args.command == 'start':
            import daemon
            from daemon.pidfile import TimeoutPIDLockFile
            context = daemon.DaemonContext(pidfile=TimeoutPIDLockFile(pidfile))
        else:
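            # a plain lock stands in for the DaemonContext here so the same
            # "with context:" block works for both foreground and daemon runs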
            context = threading.Lock()

        with context:
            with role_manager(args) as args:
                run_options = dict(
                    private_data_dir=args.private_data_dir,
                    ident=args.ident,
                    binary=args.binary,
                    playbook=args.playbook,
                    module=args.module,
                    module_args=args.module_args,
                    host_pattern=args.hosts,
                    verbosity=args.v,
                    quiet=args.quiet,
                    rotate_artifacts=args.rotate_artifacts,
                    ignore_logging=False,
                    json_mode=args.json,
                    inventory=args.inventory,
                    forks=args.forks,
                    project_dir=args.project_dir,
                    roles_path=[args.roles_path] if args.roles_path else None,
                    process_isolation=args.process_isolation,
                    process_isolation_executable=args.process_isolation_executable,
                    process_isolation_path=args.process_isolation_path,
                    process_isolation_hide_paths=args.process_isolation_hide_paths,
                    process_isolation_show_paths=args.process_isolation_show_paths,
                    process_isolation_ro_paths=args.process_isolation_ro_paths,
                    directory_isolation_base_path=args.directory_isolation_base_path,
                    limit=args.limit)
                if args.cmdline:
                    run_options['cmdline'] = args.cmdline

                try:
                    res = run(**run_options)
                except Exception:
                    exc = traceback.format_exc()
                    if stderr_path:
                        open(stderr_path, 'w+').write(exc)
                    else:
                        sys.stderr.write(exc)
                    return 1
            return (res.rc)

    try:
        with open(pidfile, 'r') as f:
            pid = int(f.readline())
    except IOError:
        return (1)

    if args.command == 'stop':
        Runner.handle_termination(pid)
        return (0)

    elif args.command == 'is-alive':
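        # signal.SIG_DFL has the value 0, so os.kill here only tests whether
        # the PID still exists without actually signalling the process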
        try:
            os.kill(pid, signal.SIG_DFL)
            return (0)
        except OSError:
            return (1)
Esempio n. 18
0
def main(sys_args=None):
    """Main entry point for ansible-runner executable

    When the ```ansible-runner``` command is executed, this function
    is the main entry point that is called and executed.

    :param sys_args: List of arguments to be parsed by the parser
    :type sys_args: list

    :returns: the exit code of the requested command
    :rtype: int
    """

    parser = AnsibleRunnerArgumentParser(
        prog='ansible-runner',
        description=
        "Use 'ansible-runner' (with no arguments) to see basic usage")
    subparser = parser.add_subparsers(
        help="Command to invoke",
        dest='command',
        description="COMMAND PRIVATE_DATA_DIR [ARGS]")
    add_args_to_parser(parser, DEFAULT_CLI_ARGS['generic_args'])
    subparser.required = True

    # positional options
    run_subparser = subparser.add_parser(
        'run', help="Run ansible-runner in the foreground")
    add_args_to_parser(run_subparser, DEFAULT_CLI_ARGS['positional_args'])
    add_args_to_parser(run_subparser, DEFAULT_CLI_ARGS['playbook_group'])
    start_subparser = subparser.add_parser(
        'start', help="Start an ansible-runner process in the background")
    add_args_to_parser(start_subparser, DEFAULT_CLI_ARGS['positional_args'])
    add_args_to_parser(start_subparser, DEFAULT_CLI_ARGS['playbook_group'])
    stop_subparser = subparser.add_parser(
        'stop',
        help="Stop an ansible-runner process that's running in the background")
    add_args_to_parser(stop_subparser, DEFAULT_CLI_ARGS['positional_args'])
    isalive_subparser = subparser.add_parser(
        'is-alive',
        help=
        "Check if a an ansible-runner process in the background is still running."
    )
    add_args_to_parser(isalive_subparser, DEFAULT_CLI_ARGS['positional_args'])

    # streaming commands
    transmit_subparser = subparser.add_parser(
        'transmit', help="Send a job to a remote ansible-runner process")
    add_args_to_parser(transmit_subparser, DEFAULT_CLI_ARGS['positional_args'])

    worker_subparser = subparser.add_parser(
        'worker', help="Execute work streamed from a controlling instance")
    worker_subcommands = worker_subparser.add_subparsers(
        help="Sub-sub command to invoke",
        dest='worker_subcommand',
        description="ansible-runner worker [sub-sub-command]",
    )
    cleanup_command = worker_subcommands.add_parser(
        'cleanup',
        help=
        "Cleanup private_data_dir patterns from prior jobs and supporting temporary folders.",
    )
    cleanup.add_cleanup_args(cleanup_command)

    worker_subparser.add_argument(
        "--private-data-dir",
        help="base directory containing the ansible-runner metadata "
        "(project, inventory, env, etc)",
    )

    worker_subparser.add_argument(
        "--worker-info",
        dest="worker_info",
        action="store_true",
        help=
        "show the execution node's Ansible Runner version along with its memory and CPU capacities"
    )
    worker_subparser.add_argument(
        "--delete",
        dest="delete_directory",
        action="store_true",
        default=False,
        help=
        ("Delete existing folder (and everything in it) in the location specified by --private-data-dir. "
         "The directory will be re-populated when the streamed data is unpacked. "
         "Using this will also assure that the directory is deleted when the job finishes."
         ))
    process_subparser = subparser.add_parser(
        'process',
        help=
        "Receive the output of remote ansible-runner work and distribute the results"
    )
    add_args_to_parser(process_subparser, DEFAULT_CLI_ARGS['positional_args'])

    # generic args for all subparsers
    add_args_to_parser(run_subparser, DEFAULT_CLI_ARGS['generic_args'])
    add_args_to_parser(start_subparser, DEFAULT_CLI_ARGS['generic_args'])
    add_args_to_parser(stop_subparser, DEFAULT_CLI_ARGS['generic_args'])
    add_args_to_parser(isalive_subparser, DEFAULT_CLI_ARGS['generic_args'])
    add_args_to_parser(transmit_subparser, DEFAULT_CLI_ARGS['generic_args'])
    add_args_to_parser(worker_subparser, DEFAULT_CLI_ARGS['generic_args'])
    add_args_to_parser(process_subparser, DEFAULT_CLI_ARGS['generic_args'])

    # runner group
    ansible_runner_group_options = (
        "Ansible Runner Options",
        "configuration options for controlling the ansible-runner "
        "runtime environment.",
    )
    base_runner_group = parser.add_argument_group(
        *ansible_runner_group_options)
    run_runner_group = run_subparser.add_argument_group(
        *ansible_runner_group_options)
    start_runner_group = start_subparser.add_argument_group(
        *ansible_runner_group_options)
    stop_runner_group = stop_subparser.add_argument_group(
        *ansible_runner_group_options)
    isalive_runner_group = isalive_subparser.add_argument_group(
        *ansible_runner_group_options)
    transmit_runner_group = transmit_subparser.add_argument_group(
        *ansible_runner_group_options)
    add_args_to_parser(base_runner_group, DEFAULT_CLI_ARGS['runner_group'])
    add_args_to_parser(run_runner_group, DEFAULT_CLI_ARGS['runner_group'])
    add_args_to_parser(start_runner_group, DEFAULT_CLI_ARGS['runner_group'])
    add_args_to_parser(stop_runner_group, DEFAULT_CLI_ARGS['runner_group'])
    add_args_to_parser(isalive_runner_group, DEFAULT_CLI_ARGS['runner_group'])
    add_args_to_parser(transmit_runner_group, DEFAULT_CLI_ARGS['runner_group'])

    # mutually exclusive group
    run_mutually_exclusive_group = run_subparser.add_mutually_exclusive_group()
    start_mutually_exclusive_group = start_subparser.add_mutually_exclusive_group(
    )
    stop_mutually_exclusive_group = stop_subparser.add_mutually_exclusive_group(
    )
    isalive_mutually_exclusive_group = isalive_subparser.add_mutually_exclusive_group(
    )
    transmit_mutually_exclusive_group = transmit_subparser.add_mutually_exclusive_group(
    )
    add_args_to_parser(run_mutually_exclusive_group,
                       DEFAULT_CLI_ARGS['mutually_exclusive_group'])
    add_args_to_parser(start_mutually_exclusive_group,
                       DEFAULT_CLI_ARGS['mutually_exclusive_group'])
    add_args_to_parser(stop_mutually_exclusive_group,
                       DEFAULT_CLI_ARGS['mutually_exclusive_group'])
    add_args_to_parser(isalive_mutually_exclusive_group,
                       DEFAULT_CLI_ARGS['mutually_exclusive_group'])
    add_args_to_parser(transmit_mutually_exclusive_group,
                       DEFAULT_CLI_ARGS['mutually_exclusive_group'])

    # ansible options
    ansible_options = (
        "Ansible Options",
        "control the ansible[-playbook] execution environment",
    )
    run_ansible_group = run_subparser.add_argument_group(*ansible_options)
    start_ansible_group = start_subparser.add_argument_group(*ansible_options)
    stop_ansible_group = stop_subparser.add_argument_group(*ansible_options)
    isalive_ansible_group = isalive_subparser.add_argument_group(
        *ansible_options)
    transmit_ansible_group = transmit_subparser.add_argument_group(
        *ansible_options)
    add_args_to_parser(run_ansible_group, DEFAULT_CLI_ARGS['ansible_group'])
    add_args_to_parser(start_ansible_group, DEFAULT_CLI_ARGS['ansible_group'])
    add_args_to_parser(stop_ansible_group, DEFAULT_CLI_ARGS['ansible_group'])
    add_args_to_parser(isalive_ansible_group,
                       DEFAULT_CLI_ARGS['ansible_group'])
    add_args_to_parser(transmit_ansible_group,
                       DEFAULT_CLI_ARGS['ansible_group'])

    # roles group
    roles_group_options = (
        "Ansible Role Options",
        "configuration options for directly executing Ansible roles",
    )
    run_roles_group = run_subparser.add_argument_group(*roles_group_options)
    start_roles_group = start_subparser.add_argument_group(
        *roles_group_options)
    stop_roles_group = stop_subparser.add_argument_group(*roles_group_options)
    isalive_roles_group = isalive_subparser.add_argument_group(
        *roles_group_options)
    transmit_roles_group = transmit_subparser.add_argument_group(
        *roles_group_options)
    add_args_to_parser(run_roles_group, DEFAULT_CLI_ARGS['roles_group'])
    add_args_to_parser(start_roles_group, DEFAULT_CLI_ARGS['roles_group'])
    add_args_to_parser(stop_roles_group, DEFAULT_CLI_ARGS['roles_group'])
    add_args_to_parser(isalive_roles_group, DEFAULT_CLI_ARGS['roles_group'])
    add_args_to_parser(transmit_roles_group, DEFAULT_CLI_ARGS['roles_group'])

    # modules groups

    modules_group_options = (
        "Ansible Module Options",
        "configuration options for directly executing Ansible modules",
    )
    run_modules_group = run_subparser.add_argument_group(
        *modules_group_options)
    start_modules_group = start_subparser.add_argument_group(
        *modules_group_options)
    stop_modules_group = stop_subparser.add_argument_group(
        *modules_group_options)
    isalive_modules_group = isalive_subparser.add_argument_group(
        *modules_group_options)
    transmit_modules_group = transmit_subparser.add_argument_group(
        *modules_group_options)
    add_args_to_parser(run_modules_group, DEFAULT_CLI_ARGS['modules_group'])
    add_args_to_parser(start_modules_group, DEFAULT_CLI_ARGS['modules_group'])
    add_args_to_parser(stop_modules_group, DEFAULT_CLI_ARGS['modules_group'])
    add_args_to_parser(isalive_modules_group,
                       DEFAULT_CLI_ARGS['modules_group'])
    add_args_to_parser(transmit_modules_group,
                       DEFAULT_CLI_ARGS['modules_group'])

    # container group
    container_group_options = (
        "Ansible Container Options",
        "configuation options for executing Ansible playbooks",
    )
    run_container_group = run_subparser.add_argument_group(
        *container_group_options)
    start_container_group = start_subparser.add_argument_group(
        *container_group_options)
    stop_container_group = stop_subparser.add_argument_group(
        *container_group_options)
    isalive_container_group = isalive_subparser.add_argument_group(
        *container_group_options)
    transmit_container_group = transmit_subparser.add_argument_group(
        *container_group_options)
    add_args_to_parser(run_container_group,
                       DEFAULT_CLI_ARGS['container_group'])
    add_args_to_parser(start_container_group,
                       DEFAULT_CLI_ARGS['container_group'])
    add_args_to_parser(stop_container_group,
                       DEFAULT_CLI_ARGS['container_group'])
    add_args_to_parser(isalive_container_group,
                       DEFAULT_CLI_ARGS['container_group'])
    add_args_to_parser(transmit_container_group,
                       DEFAULT_CLI_ARGS['container_group'])

    args = parser.parse_args(sys_args)

    vargs = vars(args)

    if vargs.get('command') == 'worker':
        if vargs.get('worker_subcommand') == 'cleanup':
            cleanup.run_cleanup(vargs)
            parser.exit(0)
        if vargs.get('worker_info'):
            cpu = get_cpu_count()
            mem = get_mem_in_bytes()
            errors = []
            uuid = ensure_uuid()
            if not isinstance(mem, int):
                errors.append(mem)
                mem = None
            if "Could not find" in uuid:
                errors.append(uuid)
                uuid = None
            info = {
                'errors': errors,
                'mem_in_bytes': mem,
                'cpu_count': cpu,
                'runner_version': VERSION,
                'uuid': uuid,
            }
            print(safe_dump(info, default_flow_style=True))
            parser.exit(0)

        private_data_dir = vargs.get('private_data_dir')
        delete_directory = vargs.get('delete_directory', False)
        if private_data_dir and delete_directory:
            shutil.rmtree(private_data_dir, ignore_errors=True)
            register_for_cleanup(private_data_dir)
        elif private_data_dir is None:
            temp_private_dir = tempfile.mkdtemp()
            vargs['private_data_dir'] = temp_private_dir
            register_for_cleanup(temp_private_dir)

    if vargs.get('command') == 'process':
        # the process command is the final destination of artifacts; the user expects private_data_dir not to be cleaned up
        if not vargs.get('private_data_dir'):
            temp_private_dir = tempfile.mkdtemp()
            vargs['private_data_dir'] = temp_private_dir

    if vargs.get('command') in ('start', 'run', 'transmit'):
        if vargs.get('hosts') and not (vargs.get('module')
                                       or vargs.get('role')):
            parser.exit(
                status=1,
                message="The --hosts option can only be used with -m or -r\n")
        if not (vargs.get('module')
                or vargs.get('role')) and not vargs.get('playbook'):
            parser.exit(
                status=1,
                message=
                "The -p option must be specified when not using -m or -r\n")

    output.configure()

    # enable or disable debug mode
    output.set_debug('enable' if vargs.get('debug') else 'disable')

    # set the output logfile
    if ('logfile' in args) and vargs.get('logfile'):
        output.set_logfile(vargs.get('logfile'))

    output.debug('starting debug logging')

    # get the absolute path for start since it is a daemon
    vargs['private_data_dir'] = os.path.abspath(vargs.get('private_data_dir'))

    pidfile = os.path.join(vargs.get('private_data_dir'), 'pid')

    try:
        os.makedirs(vargs.get('private_data_dir'), mode=0o700)
    except OSError as exc:
        if exc.errno == errno.EEXIST and os.path.isdir(
                vargs.get('private_data_dir')):
            pass
        else:
            raise

    stderr_path = None
    context = None
    if vargs.get('command') not in ('run', 'transmit', 'worker'):
        stderr_path = os.path.join(vargs.get('private_data_dir'), 'daemon.log')
        if not os.path.exists(stderr_path):
            os.close(
                os.open(stderr_path, os.O_CREAT, stat.S_IRUSR | stat.S_IWUSR))

    if vargs.get('command') in ('start', 'run', 'transmit', 'worker',
                                'process'):

        if vargs.get('command') == 'start':
            import daemon
            from daemon.pidfile import TimeoutPIDLockFile
            context = daemon.DaemonContext(pidfile=TimeoutPIDLockFile(pidfile))
        else:
            context = threading.Lock()

        streamer = None
        if vargs.get('command') in ('transmit', 'worker', 'process'):
            streamer = vargs.get('command')

        with context:
            with role_manager(vargs) as vargs:
                run_options = dict(
                    private_data_dir=vargs.get('private_data_dir'),
                    ident=vargs.get('ident'),
                    binary=vargs.get('binary'),
                    playbook=vargs.get('playbook'),
                    module=vargs.get('module'),
                    module_args=vargs.get('module_args'),
                    host_pattern=vargs.get('hosts'),
                    verbosity=vargs.get('v'),
                    quiet=vargs.get('quiet'),
                    rotate_artifacts=vargs.get('rotate_artifacts'),
                    ignore_logging=False,
                    json_mode=vargs.get('json'),
                    omit_event_data=vargs.get('omit_event_data'),
                    only_failed_event_data=vargs.get('only_failed_event_data'),
                    inventory=vargs.get('inventory'),
                    forks=vargs.get('forks'),
                    project_dir=vargs.get('project_dir'),
                    artifact_dir=vargs.get('artifact_dir'),
                    roles_path=[vargs.get('roles_path')]
                    if vargs.get('roles_path') else None,
                    process_isolation=vargs.get('process_isolation'),
                    process_isolation_executable=vargs.get(
                        'process_isolation_executable'),
                    process_isolation_path=vargs.get('process_isolation_path'),
                    process_isolation_hide_paths=vargs.get(
                        'process_isolation_hide_paths'),
                    process_isolation_show_paths=vargs.get(
                        'process_isolation_show_paths'),
                    process_isolation_ro_paths=vargs.get(
                        'process_isolation_ro_paths'),
                    container_image=vargs.get('container_image'),
                    container_volume_mounts=vargs.get(
                        'container_volume_mounts'),
                    container_options=vargs.get('container_options'),
                    directory_isolation_base_path=vargs.get(
                        'directory_isolation_base_path'),
                    resource_profiling=vargs.get('resource_profiling'),
                    resource_profiling_base_cgroup=vargs.get(
                        'resource_profiling_base_cgroup'),
                    resource_profiling_cpu_poll_interval=vargs.get(
                        'resource_profiling_cpu_poll_interval'),
                    resource_profiling_memory_poll_interval=vargs.get(
                        'resource_profiling_memory_poll_interval'),
                    resource_profiling_pid_poll_interval=vargs.get(
                        'resource_profiling_pid_poll_interval'),
                    resource_profiling_results_dir=vargs.get(
                        'resource_profiling_results_dir'),
                    cmdline=vargs.get('cmdline'),
                    limit=vargs.get('limit'),
                    streamer=streamer)
                try:
                    res = run(**run_options)
                except Exception:
                    exc = traceback.format_exc()
                    if stderr_path:
                        open(stderr_path, 'w+').write(exc)
                    else:
                        sys.stderr.write(exc)
                    return 1
            return (res.rc)

    try:
        with open(pidfile, 'r') as f:
            pid = int(f.readline())
    except IOError:
        return (1)

    if vargs.get('command') == 'stop':
        Runner.handle_termination(pid, pidfile=pidfile)
        return (0)

    elif vargs.get('command') == 'is-alive':
        try:
            os.kill(pid, signal.SIG_DFL)
            return (0)
        except OSError:
            return (1)
Esempio n. 19
0
    handler = logging.handlers.SysLogHandler(address='/dev/log')
    logger.addHandler(handler)
    return logger


cfg = None
logger = None
check_thread = None
updater = None
pid = None

if ((len(sys.argv) == 3)
        and (sys.argv[2] in ['start', 'stop', 'reload', 'status'])):
    cfg = readConf(sys.argv[1])
    updater = Updater(token=cfg['TOKEN'], use_context=True)
    pid = TimeoutPIDLockFile(cfg['PIDFILE'], cfg['LOCK_WAIT_TIMEOUT'])
    if sys.argv[2] == 'stop':
        if pid.is_locked():
            pidNumber = pid.read_pid()
            os.kill(pidNumber, signal.SIGHUP)
            sleep(15)
            if psutil.pid_exists(pidNumber):
                os.kill(pidNumber, signal.SIGTERM)
                sleep(5)
                if psutil.pid_exists(pidNumber) or pid.is_locked():
                    sys.stderr.write(cfg['MONITOR_NAME'] +
                                     " Bot can't be stopped")
                    sys.exit(1)
        sys.exit(0)
    elif sys.argv[2] == 'reload':
        if pid.is_locked():
Esempio n. 20
0
def run_manager():
    parser = argparse.ArgumentParser(description='Multipile', )
    parser.add_argument('-r', '--root_path', help='The rootpath for mailpile')
    parser.add_argument('-m',
                        '--mailpile_bin',
                        help='The mailpile executable',
                        default='mailpile')
    parser.add_argument('-b',
                        '--backend',
                        help='the backend to use',
                        default='fork',
                        choices=['fork', 'docker'])
    parser.add_argument('--bind',
                        help="bind to interface. Default 127.0.0.1",
                        default='127.0.0.1')
    parser.add_argument('--sslcert',
                        help='The SSL certificate to use',
                        default=None)
    parser.add_argument('--sslkey', help='The SSL key to use', default=None)
    parser.add_argument('--debug',
                        help='Set log level to debug',
                        default=False,
                        action='store_true')
    parser.add_argument(
        '--daemon',
        help='start in daemon mode and put process into background',
        default=False,
        action='store_true')
    parser.add_argument('--pidfile',
                        help='path for pid file. By default none is created',
                        default=None)
    parser.add_argument('--log-config',
                        help='Provide a python logging config file',
                        default=None)
    parser.add_argument(
        '--leap-provider',
        '-lp',
        help='Specify the LEAP provider this dispatcher will connect to',
        default='localhost')
    parser.add_argument(
        '--leap-provider-ca',
        '-lpc',
        dest='leap_provider_ca',
        help='Specify the LEAP provider CA to use to validate connections',
        default=True)
    parser.add_argument(
        '--leap-provider-fingerprint',
        '-lpf',
        dest='leap_provider_fingerprint',
        help=
        'Specify the LEAP provider fingerprint to use to validate connections',
        default=None)
    group = parser.add_mutually_exclusive_group()
    group.add_argument('--mailpile-virtualenv',
                       help='Use specified virtual env for mailpile',
                       default=None)
    group.add_argument('--auto-mailpile-virtualenv',
                       dest='auto_venv',
                       help='Bootstrap a virtualenv for mailpile',
                       default=False,
                       action='store_true')

    args = parser.parse_args(args=filter_args())

    if args.sslcert:
        ssl_config = SSLConfig(args.sslcert, args.sslkey,
                               latest_available_ssl_version())
    else:
        ssl_config = None

    venv = args.mailpile_virtualenv
    mailpile_bin = args.mailpile_bin

    if args.auto_venv:
        venv, mailpile_bin = prepare_venv(args.root_path)

    if args.root_path is None or not os.path.isdir(args.root_path):
        raise ValueError('root path %s not found!' % args.root_path)

    log_level = logging.DEBUG if args.debug else logging.INFO
    log_config = args.log_config

    provider_ca = args.leap_provider_ca if args.leap_provider_fingerprint is None else False

    manager = DispatcherManager(
        args.root_path,
        mailpile_bin,
        ssl_config,
        args.leap_provider,
        mailpile_virtualenv=venv,
        provider=args.backend,
        leap_provider_ca=provider_ca,
        leap_provider_fingerprint=args.leap_provider_fingerprint,
        bindaddr=args.bind)

    if args.daemon:
        pidfile = TimeoutPIDLockFile(
            args.pidfile,
            acquire_timeout=PID_ACQUIRE_TIMEOUT_IN_S) if args.pidfile else None
        can_use_pidfile(pidfile)
        with daemon.DaemonContext(pidfile=pidfile):
            # init logging only after we have spawned the sub-process; otherwise there might be some hiccups
            init_logging('manager', level=log_level, config_file=log_config)
            manager.serve_forever()
    else:
        init_logging('manager', level=log_level, config_file=log_config)
        manager.serve_forever()
Esempio n. 21
0
def main():
    # Workaround for development
    modpath = os.path.realpath(os.path.join(
        os.path.dirname(os.path.realpath(__file__)),
        '..',
    ))
    if modpath not in sys.path:
        sys.path.insert(0, modpath)

    parser = argparse.ArgumentParser()
    parser.add_argument('restart', nargs='?')
    parser.add_argument('--foreground', '-f', action='store_true')
    parser.add_argument('--disable-loop-monitor', '-L', action='store_true')
    parser.add_argument('--plugins-dirs', '-p', action='append')
    parser.add_argument('--debug-level', choices=[
        'TRACE',
        'DEBUG',
        'INFO',
        'WARN',
        'ERROR',
    ])
    parser.add_argument('--log-handler', choices=[
        'console',
        'file',
    ])
    args = parser.parse_args()

    #  Logger
    if args.log_handler:
        log_handlers = [args.log_handler]
    else:
        log_handlers = ['console' if args.foreground else 'file']

    if args.debug_level is None and args.foreground:
        debug_level = 'TRACE'
    else:
        debug_level = args.debug_level or 'DEBUG'

    _logger = logger.Logger('middleware', debug_level)
    get_logger = _logger.getLogger()

    pidpath = '/var/run/middlewared.pid'

    if args.restart:
        if os.path.exists(pidpath):
            with open(pidpath, 'r') as f:
                pid = int(f.read().strip())
            try:
                os.kill(pid, 15)
            except ProcessLookupError as e:
                if e.errno != errno.ESRCH:
                    raise

    if not args.foreground:
        _logger.configure_logging('file')
        daemonc = DaemonContext(
            pidfile=TimeoutPIDLockFile(pidpath),
            detach_process=True,
            stdout=logger.LoggerStream(get_logger),
            stderr=logger.LoggerStream(get_logger),
        )
        daemonc.open()
    elif 'file' in log_handlers:
        _logger.configure_logging('file')
        sys.stdout = sys.stderr = _logger.stream()
    elif 'console' in log_handlers:
        _logger.configure_logging('console')
    else:
        _logger.configure_logging('file')

    setproctitle.setproctitle('middlewared')
    # Workaround to tell django to not set up logging on its own
    os.environ['MIDDLEWARED'] = str(os.getpid())

    if args.foreground:
        with open(pidpath, "w") as _pidfile:
            _pidfile.write(f"{str(os.getpid())}\n")

    Middleware(
        loop_monitor=not args.disable_loop_monitor,
        plugins_dirs=args.plugins_dirs,
        debug_level=debug_level,
    ).run()
    if not args.foreground:
        daemonc.close()
Esempio n. 22
0
def main():
    parser = argparse.ArgumentParser(description='manage ansible execution')

    parser.add_argument('--version', action='version', version=VERSION)

    parser.add_argument('command', choices=['run', 'start', 'stop', 'is-alive'])

    parser.add_argument('private_data_dir',
                        help='Base directory containing Runner metadata (project, inventory, etc.)')

    group = parser.add_mutually_exclusive_group()

    group.add_argument("-m", "--module", default=DEFAULT_RUNNER_MODULE,
                       help="Invoke an Ansible module directly without a playbook")

    group.add_argument("-p", "--playbook", default=DEFAULT_RUNNER_PLAYBOOK,
                       help="The name of the playbook to execute")

    group.add_argument("-r", "--role", default=DEFAULT_RUNNER_ROLE,
                       help="Invoke an Ansible role directly without a playbook")

    parser.add_argument("--hosts",
                        help="Define the set of hosts to execute against")

    parser.add_argument("-i", "--ident",
                        default=uuid4(),
                        help="An identifier that will be used when generating the"
                             "artifacts directory and can be used to uniquely identify a playbook run")

    parser.add_argument("--rotate-artifacts",
                        default=0,
                        type=int,
                        help="Automatically clean up old artifact directories after a given number has been created, the default is 0 which disables rotation")

    parser.add_argument("--roles-path", default=DEFAULT_ROLES_PATH,
                        help="Path to the Ansible roles directory")

    parser.add_argument("--role-vars",
                        help="Variables to pass to the role at runtime")

    parser.add_argument("--role-skip-facts", action="store_true", default=False,
                        help="Disable fact collection when executing a role directly")
    parser.add_argument("--artifact-dir",
                        help="Optional Path for the artifact root directory, by default it is located inside the private data dir")

    parser.add_argument("--inventory",
                        help="Override the default inventory location in private_data_dir")

    parser.add_argument("-j", "--json", action="store_true",
                        help="Output the json event structure to stdout instead of Ansible output")

    parser.add_argument("-v", action="count",
                        help="Increase the verbosity with multiple v's (up to 5) of the ansible-playbook output")

    parser.add_argument("-q", "--quiet", action="store_true",
                        help="Disable all output")

    parser.add_argument("--cmdline",
                        help="Command line options to pass to ansible-playbook at execution time")
    parser.add_argument("--debug", action="store_true",
                        help="Enable Runner debug output logging")

    parser.add_argument("--logfile",
                        help="Log output messages to a file")

    parser.add_argument("-a", "--args", dest='module_args',
                        help="Module arguments")

    args = parser.parse_args()

    output.configure()

    # enable or disable debug mode
    output.set_debug('enable' if args.debug else 'disable')

    # set the output logfile
    if args.logfile:
        output.set_logfile(args.logfile)

    output.debug('starting debug logging')

    # get the absolute path for start since it is a daemon
    args.private_data_dir = os.path.abspath(args.private_data_dir)

    pidfile = os.path.join(args.private_data_dir, 'pid')

    try:
        os.makedirs(args.private_data_dir)
    except OSError as exc:
        if exc.errno == errno.EEXIST and os.path.isdir(args.private_data_dir):
            pass
        else:
            raise

    if args.command != 'run':
        stderr_path = os.path.join(args.private_data_dir, 'daemon.log')
        if not os.path.exists(stderr_path):
            os.close(os.open(stderr_path, os.O_CREAT, stat.S_IRUSR | stat.S_IWUSR))
        stderr = open(stderr_path, 'w+')

    if args.command in ('start', 'run'):

        if args.command == 'start':
            import daemon
            from daemon.pidfile import TimeoutPIDLockFile
            context = daemon.DaemonContext(
                pidfile=TimeoutPIDLockFile(pidfile),
                stderr=stderr
            )
        else:
            context = threading.Lock()

        with context:
            with role_manager(args) as args:
                run_options = dict(private_data_dir=args.private_data_dir,
                                   ident=args.ident,
                                   playbook=args.playbook,
                                   module=args.module,
                                   module_args=args.module_args,
                                   host_pattern=args.hosts,
                                   verbosity=args.v,
                                   quiet=args.quiet,
                                   rotate_artifacts=args.rotate_artifacts,
                                   ignore_logging=False,
                                   json_mode=args.json)

                if args.hosts is not None:
                    run_options.update(inventory=args.hosts)

                if args.cmdline:
                    run_options['cmdline'] = args.cmdline

                res = run(**run_options)
            sys.exit(res.rc)

    try:
        with open(pidfile, 'r') as f:
            pid = int(f.readline())
    except IOError:
        sys.exit(1)

    if args.command == 'stop':
        try:
            with open(os.path.join(args.private_data_dir, 'args'), 'r') as args:
                Runner.handle_termination(pid)
        except IOError:
            Runner.handle_termination(pid)

    elif args.command == 'is-alive':
        try:
            os.kill(pid, signal.SIG_DFL)
            sys.exit(0)
        except OSError:
            sys.exit(1)
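The is-alive branch above uses the classic "signal 0" probe: signal.SIG_DFL evaluates to 0, and kill(2) with signal 0 performs the existence and permission checks without delivering anything. A small self-contained sketch of the same probe (the helper name is an assumption):

import os


def pid_is_alive(pid):
    """Return True if a process with this pid currently exists."""
    try:
        os.kill(pid, 0)  # signal 0: no signal is delivered, only the checks run
    except ProcessLookupError:
        return False
    except PermissionError:
        return True  # the process exists but belongs to another user
    return True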
Example 23
def webserver(args):
    """Starts Airflow Webserver"""
    print(settings.HEADER)

    access_logfile = args.access_logfile or conf.get('webserver', 'access_logfile')
    error_logfile = args.error_logfile or conf.get('webserver', 'error_logfile')
    num_workers = args.workers or conf.get('webserver', 'workers')
    worker_timeout = (args.worker_timeout or
                      conf.get('webserver', 'web_server_worker_timeout'))
    ssl_cert = args.ssl_cert or conf.get('webserver', 'web_server_ssl_cert')
    ssl_key = args.ssl_key or conf.get('webserver', 'web_server_ssl_key')
    if not ssl_cert and ssl_key:
        raise AirflowException(
            'An SSL certificate must also be provided for use with ' + ssl_key)
    if ssl_cert and not ssl_key:
        raise AirflowException(
            'An SSL key must also be provided for use with ' + ssl_cert)

    if args.debug:
        print(
            "Starting the web server on port {0} and host {1}.".format(
                args.port, args.hostname))
        app, _ = create_app(None, testing=conf.getboolean('core', 'unit_test_mode'))
        app.run(debug=True, use_reloader=not app.config['TESTING'],
                port=args.port, host=args.hostname,
                ssl_context=(ssl_cert, ssl_key) if ssl_cert and ssl_key else None)
    else:
        os.environ['SKIP_DAGS_PARSING'] = 'True'
        app = cached_app(None)
        pid, stdout, stderr, log_file = setup_locations(
            "webserver", args.pid, args.stdout, args.stderr, args.log_file)
        os.environ.pop('SKIP_DAGS_PARSING')
        if args.daemon:
            handle = setup_logging(log_file)
            stdout = open(stdout, 'w+')
            stderr = open(stderr, 'w+')

        print(
            textwrap.dedent('''\
                Running the Gunicorn Server with:
                Workers: {num_workers} {workerclass}
                Host: {hostname}:{port}
                Timeout: {worker_timeout}
                Logfiles: {access_logfile} {error_logfile}
                =================================================================\
            '''.format(num_workers=num_workers, workerclass=args.workerclass,
                       hostname=args.hostname, port=args.port,
                       worker_timeout=worker_timeout, access_logfile=access_logfile,
                       error_logfile=error_logfile)))

        run_args = [
            'gunicorn',
            '-w', str(num_workers),
            '-k', str(args.workerclass),
            '-t', str(worker_timeout),
            '-b', args.hostname + ':' + str(args.port),
            '-n', 'airflow-webserver',
            '-p', str(pid),
            '-c', 'python:airflow.www.gunicorn_config',
        ]

        if args.access_logfile:
            run_args += ['--access-logfile', str(args.access_logfile)]

        if args.error_logfile:
            run_args += ['--error-logfile', str(args.error_logfile)]

        if args.daemon:
            run_args += ['-D']

        if ssl_cert:
            run_args += ['--certfile', ssl_cert, '--keyfile', ssl_key]

        webserver_module = 'www'
        run_args += ["airflow." + webserver_module + ".app:cached_app()"]

        gunicorn_master_proc = None

        def kill_proc(dummy_signum, dummy_frame):  # pylint: disable=unused-argument
            gunicorn_master_proc.terminate()
            gunicorn_master_proc.wait()
            sys.exit(0)

        def monitor_gunicorn(gunicorn_master_proc):
            # These run forever until SIG{INT, TERM, KILL, ...} signal is sent
            if conf.getint('webserver', 'worker_refresh_interval') > 0:
                master_timeout = conf.getint('webserver', 'web_server_master_timeout')
                restart_workers(gunicorn_master_proc, num_workers, master_timeout)
            else:
                while gunicorn_master_proc.poll() is None:
                    time.sleep(1)

                sys.exit(gunicorn_master_proc.returncode)

        if args.daemon:
            base, ext = os.path.splitext(pid)
            ctx = daemon.DaemonContext(
                pidfile=TimeoutPIDLockFile(base + "-monitor" + ext, -1),
                files_preserve=[handle],
                stdout=stdout,
                stderr=stderr,
                signal_map={
                    signal.SIGINT: kill_proc,
                    signal.SIGTERM: kill_proc
                },
            )
            with ctx:
                subprocess.Popen(run_args, close_fds=True)

                # Reading pid file directly, since Popen#pid doesn't
                # seem to return the right value with DaemonContext.
                while True:
                    try:
                        with open(pid) as file:
                            gunicorn_master_proc_pid = int(file.read())
                            break
                    except OSError:
                        log.debug("Waiting for gunicorn's pid file to be created.")
                        time.sleep(0.1)

                gunicorn_master_proc = psutil.Process(gunicorn_master_proc_pid)
                monitor_gunicorn(gunicorn_master_proc)

            stdout.close()
            stderr.close()
        else:
            gunicorn_master_proc = subprocess.Popen(run_args, close_fds=True)

            signal.signal(signal.SIGINT, kill_proc)
            signal.signal(signal.SIGTERM, kill_proc)

            monitor_gunicorn(gunicorn_master_proc)
Example 24
def main():
    global SETUID_USER, SETGID_GROUP
    options = parse_args()
    SETUID_USER = options.user
    SETGID_GROUP = options.group
    root = desktop.lib.paths.get_run_root()
    log_dir = os.path.join(root, options.log_dir)

    if options.show_supervisees:
        for name, supervisee in get_supervisees().items():
            if name not in options.supervisee_exclusions:
                print(name)
        sys.exit(0)

    # Let our children know
    os.environ['DESKTOP_LOG_DIR'] = log_dir

    if not os.path.exists(log_dir):
        os.makedirs(log_dir)

    setup_user_info()

    pid_file = os.path.abspath(os.path.join(root, options.pid_file))
    pidfile_context = TimeoutPIDLockFile(pid_file, LOCKFILE_TIMEOUT)

    existing_pid = pidfile_context.read_pid()
    if existing_pid:
        cmdline = get_pid_cmdline(existing_pid)
        if not cmdline.strip():
            # pid is not actually running
            pidfile_context.break_lock()
        else:
            LOG.error(
                "Pid file %s indicates that Hue is already running (pid %d)" %
                (pid_file, existing_pid))
            sys.exit(1)
    elif pidfile_context.is_locked():
        # If there's no pidfile but there is a lock, it's a strange situation,
        # but we should break the lock because it doesn't seem to be actually running
        logging.warning("No existing pid file, but lock exists. Breaking lock.")
        pidfile_context.break_lock()

    if options.daemonize:
        if sys.version_info[0] > 2:
            outfile = open_file(os.path.join(log_dir, 'supervisor.out'), 'ba+',
                                0)
        else:
            outfile = open_file(os.path.join(log_dir, 'supervisor.out'), 'a+',
                                0)
        context = daemon.DaemonContext(
            working_directory=root,
            pidfile=pidfile_context,
            stdout=outfile,
            stderr=outfile,
        )

        context.signal_map = {
            signal.SIGTERM: sig_handler,
        }

        context.open()
    os.umask(0o22)

    # Log initialization must come after daemonization, which closes all open files.
    # Log statements before this point go to stderr.
    _init_log(log_dir)

    sups = []
    try:
        for name, supervisee in get_supervisees().items():

            if name in options.supervisee_exclusions:
                continue

            if supervisee.drop_root:
                preexec_fn = drop_privileges
            else:
                preexec_fn = None

            if options.daemonize:
                if sys.version_info[0] > 2:
                    log_stdout = open_file(
                        os.path.join(log_dir, name + '.out'), 'ba+', 0)
                else:
                    log_stdout = open_file(
                        os.path.join(log_dir, name + '.out'), 'a+', 0)
                log_stderr = log_stdout
            else:
                # Passing None to subprocess.Popen later makes the subprocess inherit the
                # standard fds from the supervisor
                log_stdout = None
                log_stderr = None
            sup = Supervisor(supervisee.cmdv,
                             stdout=log_stdout,
                             stderr=log_stderr,
                             preexec_fn=preexec_fn)
            sup.start()
            sups.append(sup)

        wait_loop(sups, options)
    except BaseException as ex:
        LOG.exception("Exception in supervisor main loop")
        shutdown(sups)  # shutdown() exits the process

    return 0
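The stale-lock handling above (read_pid(), then break_lock() when the recorded pid is no longer running) is a common companion to TimeoutPIDLockFile. A compact sketch of the same check, using psutil in place of Hue's get_pid_cmdline helper (psutil and the example path are assumptions, not part of the original):

import sys

import psutil
from daemon.pidfile import TimeoutPIDLockFile

LOCKFILE_TIMEOUT = 5


def ensure_single_instance(pid_file='/tmp/example-supervisor.pid'):
    pidfile_context = TimeoutPIDLockFile(pid_file, LOCKFILE_TIMEOUT)
    existing_pid = pidfile_context.read_pid()
    if existing_pid and not psutil.pid_exists(existing_pid):
        # Stale pidfile: the recorded process no longer exists.
        pidfile_context.break_lock()
    elif existing_pid:
        sys.exit('already running (pid %d)' % existing_pid)
    return pidfile_context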
Example 25
def main(sys_args=None):
    """Main entry point for ansible-runner executable

    When the ```ansible-runner``` command is executed, this function
    is the main entry point that is called and executed.

    :param sys_args: List of arguments to be parsed by the parser
    :type sys_args: list

    :returns: an instance of SystemExit
    :rtype: SystemExit
    """

    parser = argparse.ArgumentParser(
        prog='ansible-runner',
        description=
        "Use 'ansible-runner' (with no arguments) to see basic usage")
    subparser = parser.add_subparsers(
        help="Command to invoke",
        dest='command',
        description="COMMAND PRIVATE_DATA_DIR [ARGS]")
    add_args_to_parser(parser, DEFAULT_CLI_ARGS['generic_args'])
    subparser.required = True

    # positional options
    run_subparser = subparser.add_parser(
        'run', help="Run ansible-runner in the foreground")
    add_args_to_parser(run_subparser, DEFAULT_CLI_ARGS['positional_args'])
    start_subparser = subparser.add_parser(
        'start', help="Start an ansible-runner process in the background")
    add_args_to_parser(start_subparser, DEFAULT_CLI_ARGS['positional_args'])
    stop_subparser = subparser.add_parser(
        'stop',
        help="Stop an ansible-runner process that's running in the background")
    add_args_to_parser(stop_subparser, DEFAULT_CLI_ARGS['positional_args'])
    isalive_subparser = subparser.add_parser(
        'is-alive',
        help=
        "Check if a an ansible-runner process in the background is still running."
    )
    add_args_to_parser(isalive_subparser, DEFAULT_CLI_ARGS['positional_args'])

    # streaming commands
    transmit_subparser = subparser.add_parser(
        'transmit', help="Send a job to a remote ansible-runner process")
    add_args_to_parser(transmit_subparser, DEFAULT_CLI_ARGS['positional_args'])

    worker_subparser = subparser.add_parser(
        'worker', help="Execute work streamed from a controlling instance")
    worker_subparser.add_argument(
        "--private-data-dir",
        help="base directory containing the ansible-runner metadata "
        "(project, inventory, env, etc)",
    )

    process_subparser = subparser.add_parser(
        'process',
        help=
        "Receive the output of remote ansible-runner work and distribute the results"
    )
    add_args_to_parser(process_subparser, DEFAULT_CLI_ARGS['positional_args'])

    # adhoc command exec
    adhoc_subparser = subparser.add_parser(
        'adhoc', help="Run ansible adhoc commands in an Execution Environment")
    adhoc_subparser.add_argument(
        "--private-data-dir",
        help="base directory containing the ansible-runner metadata "
        "(project, inventory, env, etc)",
    )
    add_args_to_parser(adhoc_subparser, DEFAULT_CLI_ARGS['execenv_cli_group'])

    # playbook command exec
    playbook_subparser = subparser.add_parser(
        'playbook',
        help="Run ansible-playbook commands in an Execution Environment")
    playbook_subparser.add_argument(
        "--private-data-dir",
        help="base directory containing the ansible-runner metadata "
        "(project, inventory, env, etc)",
    )
    add_args_to_parser(playbook_subparser,
                       DEFAULT_CLI_ARGS['execenv_cli_group'])

    # generic args for all subparsers
    add_args_to_parser(run_subparser, DEFAULT_CLI_ARGS['generic_args'])
    add_args_to_parser(start_subparser, DEFAULT_CLI_ARGS['generic_args'])
    add_args_to_parser(stop_subparser, DEFAULT_CLI_ARGS['generic_args'])
    add_args_to_parser(isalive_subparser, DEFAULT_CLI_ARGS['generic_args'])
    add_args_to_parser(adhoc_subparser, DEFAULT_CLI_ARGS['generic_args'])
    add_args_to_parser(playbook_subparser, DEFAULT_CLI_ARGS['generic_args'])
    add_args_to_parser(transmit_subparser, DEFAULT_CLI_ARGS['generic_args'])
    add_args_to_parser(worker_subparser, DEFAULT_CLI_ARGS['generic_args'])
    add_args_to_parser(process_subparser, DEFAULT_CLI_ARGS['generic_args'])

    # runner group
    ansible_runner_group_options = (
        "Ansible Runner Options",
        "configuration options for controlling the ansible-runner "
        "runtime environment.",
    )
    base_runner_group = parser.add_argument_group(
        *ansible_runner_group_options)
    run_runner_group = run_subparser.add_argument_group(
        *ansible_runner_group_options)
    start_runner_group = start_subparser.add_argument_group(
        *ansible_runner_group_options)
    stop_runner_group = stop_subparser.add_argument_group(
        *ansible_runner_group_options)
    isalive_runner_group = isalive_subparser.add_argument_group(
        *ansible_runner_group_options)
    transmit_runner_group = transmit_subparser.add_argument_group(
        *ansible_runner_group_options)
    add_args_to_parser(base_runner_group, DEFAULT_CLI_ARGS['runner_group'])
    add_args_to_parser(run_runner_group, DEFAULT_CLI_ARGS['runner_group'])
    add_args_to_parser(start_runner_group, DEFAULT_CLI_ARGS['runner_group'])
    add_args_to_parser(stop_runner_group, DEFAULT_CLI_ARGS['runner_group'])
    add_args_to_parser(isalive_runner_group, DEFAULT_CLI_ARGS['runner_group'])
    add_args_to_parser(transmit_runner_group, DEFAULT_CLI_ARGS['runner_group'])

    # mutually exclusive group
    run_mutually_exclusive_group = run_subparser.add_mutually_exclusive_group()
    start_mutually_exclusive_group = start_subparser.add_mutually_exclusive_group(
    )
    stop_mutually_exclusive_group = stop_subparser.add_mutually_exclusive_group(
    )
    isalive_mutually_exclusive_group = isalive_subparser.add_mutually_exclusive_group(
    )
    transmit_mutually_exclusive_group = transmit_subparser.add_mutually_exclusive_group(
    )
    add_args_to_parser(run_mutually_exclusive_group,
                       DEFAULT_CLI_ARGS['mutually_exclusive_group'])
    add_args_to_parser(start_mutually_exclusive_group,
                       DEFAULT_CLI_ARGS['mutually_exclusive_group'])
    add_args_to_parser(stop_mutually_exclusive_group,
                       DEFAULT_CLI_ARGS['mutually_exclusive_group'])
    add_args_to_parser(isalive_mutually_exclusive_group,
                       DEFAULT_CLI_ARGS['mutually_exclusive_group'])
    add_args_to_parser(transmit_mutually_exclusive_group,
                       DEFAULT_CLI_ARGS['mutually_exclusive_group'])

    # ansible options
    ansible_options = (
        "Ansible Options",
        "control the ansible[-playbook] execution environment",
    )
    run_ansible_group = run_subparser.add_argument_group(*ansible_options)
    start_ansible_group = start_subparser.add_argument_group(*ansible_options)
    stop_ansible_group = stop_subparser.add_argument_group(*ansible_options)
    isalive_ansible_group = isalive_subparser.add_argument_group(
        *ansible_options)
    transmit_ansible_group = transmit_subparser.add_argument_group(
        *ansible_options)
    add_args_to_parser(run_ansible_group, DEFAULT_CLI_ARGS['ansible_group'])
    add_args_to_parser(start_ansible_group, DEFAULT_CLI_ARGS['ansible_group'])
    add_args_to_parser(stop_ansible_group, DEFAULT_CLI_ARGS['ansible_group'])
    add_args_to_parser(isalive_ansible_group,
                       DEFAULT_CLI_ARGS['ansible_group'])
    add_args_to_parser(transmit_ansible_group,
                       DEFAULT_CLI_ARGS['ansible_group'])

    # roles group
    roles_group_options = (
        "Ansible Role Options",
        "configuration options for directly executing Ansible roles",
    )
    run_roles_group = run_subparser.add_argument_group(*roles_group_options)
    start_roles_group = start_subparser.add_argument_group(
        *roles_group_options)
    stop_roles_group = stop_subparser.add_argument_group(*roles_group_options)
    isalive_roles_group = isalive_subparser.add_argument_group(
        *roles_group_options)
    transmit_roles_group = transmit_subparser.add_argument_group(
        *roles_group_options)
    add_args_to_parser(run_roles_group, DEFAULT_CLI_ARGS['roles_group'])
    add_args_to_parser(start_roles_group, DEFAULT_CLI_ARGS['roles_group'])
    add_args_to_parser(stop_roles_group, DEFAULT_CLI_ARGS['roles_group'])
    add_args_to_parser(isalive_roles_group, DEFAULT_CLI_ARGS['roles_group'])
    add_args_to_parser(transmit_roles_group, DEFAULT_CLI_ARGS['roles_group'])

    # modules groups

    modules_group_options = (
        "Ansible Module Options",
        "configuration options for directly executing Ansible modules",
    )
    run_modules_group = run_subparser.add_argument_group(
        *modules_group_options)
    start_modules_group = start_subparser.add_argument_group(
        *modules_group_options)
    stop_modules_group = stop_subparser.add_argument_group(
        *modules_group_options)
    isalive_modules_group = isalive_subparser.add_argument_group(
        *modules_group_options)
    transmit_modules_group = transmit_subparser.add_argument_group(
        *modules_group_options)
    add_args_to_parser(run_modules_group, DEFAULT_CLI_ARGS['modules_group'])
    add_args_to_parser(start_modules_group, DEFAULT_CLI_ARGS['modules_group'])
    add_args_to_parser(stop_modules_group, DEFAULT_CLI_ARGS['modules_group'])
    add_args_to_parser(isalive_modules_group,
                       DEFAULT_CLI_ARGS['modules_group'])
    add_args_to_parser(transmit_modules_group,
                       DEFAULT_CLI_ARGS['modules_group'])

    # playbook options
    playbook_group_options = (
        "Ansible Playbook Options",
        "configuation options for executing Ansible playbooks",
    )
    run_playbook_group = run_subparser.add_argument_group(
        *playbook_group_options)
    start_playbook_group = start_subparser.add_argument_group(
        *playbook_group_options)
    stop_playbook_group = stop_subparser.add_argument_group(
        *playbook_group_options)
    isalive_playbook_group = isalive_subparser.add_argument_group(
        *playbook_group_options)
    transmit_playbook_group = transmit_subparser.add_argument_group(
        *playbook_group_options)
    add_args_to_parser(run_playbook_group, DEFAULT_CLI_ARGS['playbook_group'])
    add_args_to_parser(start_playbook_group,
                       DEFAULT_CLI_ARGS['playbook_group'])
    add_args_to_parser(stop_playbook_group, DEFAULT_CLI_ARGS['playbook_group'])
    add_args_to_parser(isalive_playbook_group,
                       DEFAULT_CLI_ARGS['playbook_group'])
    add_args_to_parser(transmit_playbook_group,
                       DEFAULT_CLI_ARGS['playbook_group'])

    # container group
    container_group_options = (
        "Ansible Container Options",
        "configuation options for executing Ansible playbooks",
    )
    run_container_group = run_subparser.add_argument_group(
        *container_group_options)
    start_container_group = start_subparser.add_argument_group(
        *container_group_options)
    stop_container_group = stop_subparser.add_argument_group(
        *container_group_options)
    isalive_container_group = isalive_subparser.add_argument_group(
        *container_group_options)
    transmit_container_group = transmit_subparser.add_argument_group(
        *container_group_options)
    adhoc_container_group = adhoc_subparser.add_argument_group(
        *container_group_options)
    playbook_container_group = playbook_subparser.add_argument_group(
        *container_group_options)
    add_args_to_parser(run_container_group,
                       DEFAULT_CLI_ARGS['container_group'])
    add_args_to_parser(start_container_group,
                       DEFAULT_CLI_ARGS['container_group'])
    add_args_to_parser(stop_container_group,
                       DEFAULT_CLI_ARGS['container_group'])
    add_args_to_parser(isalive_container_group,
                       DEFAULT_CLI_ARGS['container_group'])
    add_args_to_parser(transmit_container_group,
                       DEFAULT_CLI_ARGS['container_group'])
    add_args_to_parser(adhoc_container_group,
                       DEFAULT_CLI_ARGS['container_group'])
    add_args_to_parser(playbook_container_group,
                       DEFAULT_CLI_ARGS['container_group'])

    if len(sys.argv) == 1:
        parser.print_usage()
        print_common_usage()
        parser.exit(status=0)

    if ('playbook' in sys.argv) or ('adhoc' in sys.argv):
        args, leftover_args = parser.parse_known_args(sys_args)
    else:
        args = parser.parse_args(sys_args)

    vargs = vars(args)

    # FIXME - Probably a more elegant way to handle this.
    # set some state about CLI Exec Env
    cli_execenv_cmd = ""

    if vargs.get('command') in ('adhoc', 'playbook'):
        cli_execenv_cmd = vargs.get('command')

        if not leftover_args:
            parser.exit(
                status=1,
                message=
                "The {} subcommand requires arguments to pass to Ansible inside the container.\n"
                .format(vargs.get('command')))

    if vargs.get('command') in ('worker', 'process', 'adhoc', 'playbook'):
        if not vargs.get('private_data_dir'):
            temp_private_dir = tempfile.mkdtemp()
            vargs['private_data_dir'] = temp_private_dir
            if vargs.get('keep_files', False):
                print("ANSIBLE-RUNNER: keeping temporary data directory: {}".
                      format(temp_private_dir))
            else:

                @atexit.register
                def conditionally_clean_cli_execenv_tempdir():
                    shutil.rmtree(temp_private_dir)

    if vargs.get('command') in ('start', 'run', 'transmit'):
        if vargs.get('hosts') and not (vargs.get('module')
                                       or vargs.get('role')):
            parser.exit(
                status=1,
                message="The --hosts option can only be used with -m or -r\n")
        if not (vargs.get('module')
                or vargs.get('role')) and not vargs.get('playbook'):
            parser.exit(
                status=1,
                message=
                "The -p option must be specified when not using -m or -r\n")

    output.configure()

    # enable or disable debug mode
    output.set_debug('enable' if vargs.get('debug') else 'disable')

    # set the output logfile
    if ('logfile' in args) and vargs.get('logfile'):
        output.set_logfile(vargs.get('logfile'))

    output.debug('starting debug logging')

    # get the absolute path for start since it is a daemon
    vargs['private_data_dir'] = os.path.abspath(vargs.get('private_data_dir'))

    pidfile = os.path.join(vargs.get('private_data_dir'), 'pid')

    try:
        os.makedirs(vargs.get('private_data_dir'), mode=0o700)
    except OSError as exc:
        if exc.errno == errno.EEXIST and os.path.isdir(
                vargs.get('private_data_dir')):
            pass
        else:
            raise

    stderr_path = None
    context = None
    if vargs.get('command') not in ('run', 'transmit', 'worker', 'adhoc',
                                    'playbook'):
        stderr_path = os.path.join(vargs.get('private_data_dir'), 'daemon.log')
        if not os.path.exists(stderr_path):
            os.close(
                os.open(stderr_path, os.O_CREAT, stat.S_IRUSR | stat.S_IWUSR))

    if vargs.get('command') in ('start', 'run', 'transmit', 'worker',
                                'process', 'adhoc', 'playbook'):

        if vargs.get('command') == 'start':
            import daemon
            from daemon.pidfile import TimeoutPIDLockFile
            context = daemon.DaemonContext(pidfile=TimeoutPIDLockFile(pidfile))
        else:
            context = threading.Lock()

        streamer = None
        if vargs.get('command') in ('transmit', 'worker', 'process'):
            streamer = vargs.get('command')

        with context:
            with role_manager(vargs) as vargs:
                run_options = dict(
                    private_data_dir=vargs.get('private_data_dir'),
                    ident=vargs.get('ident'),
                    binary=vargs.get('binary'),
                    playbook=vargs.get('playbook'),
                    module=vargs.get('module'),
                    module_args=vargs.get('module_args'),
                    host_pattern=vargs.get('hosts'),
                    verbosity=vargs.get('v'),
                    quiet=vargs.get('quiet'),
                    rotate_artifacts=vargs.get('rotate_artifacts'),
                    ignore_logging=False,
                    json_mode=vargs.get('json'),
                    omit_event_data=vargs.get('omit_event_data'),
                    only_failed_event_data=vargs.get('only_failed_event_data'),
                    inventory=vargs.get('inventory'),
                    forks=vargs.get('forks'),
                    project_dir=vargs.get('project_dir'),
                    artifact_dir=vargs.get('artifact_dir'),
                    roles_path=[vargs.get('roles_path')]
                    if vargs.get('roles_path') else None,
                    process_isolation=vargs.get('process_isolation'),
                    process_isolation_executable=vargs.get(
                        'process_isolation_executable'),
                    process_isolation_path=vargs.get('process_isolation_path'),
                    process_isolation_hide_paths=vargs.get(
                        'process_isolation_hide_paths'),
                    process_isolation_show_paths=vargs.get(
                        'process_isolation_show_paths'),
                    process_isolation_ro_paths=vargs.get(
                        'process_isolation_ro_paths'),
                    container_image=vargs.get('container_image'),
                    container_volume_mounts=vargs.get(
                        'container_volume_mounts'),
                    container_options=vargs.get('container_options'),
                    directory_isolation_base_path=vargs.get(
                        'directory_isolation_base_path'),
                    resource_profiling=vargs.get('resource_profiling'),
                    resource_profiling_base_cgroup=vargs.get(
                        'resource_profiling_base_cgroup'),
                    resource_profiling_cpu_poll_interval=vargs.get(
                        'resource_profiling_cpu_poll_interval'),
                    resource_profiling_memory_poll_interval=vargs.get(
                        'resource_profiling_memory_poll_interval'),
                    resource_profiling_pid_poll_interval=vargs.get(
                        'resource_profiling_pid_poll_interval'),
                    resource_profiling_results_dir=vargs.get(
                        'resource_profiling_results_dir'),
                    limit=vargs.get('limit'),
                    streamer=streamer,
                    cli_execenv_cmd=cli_execenv_cmd)
                if vargs.get('command') in ('adhoc', 'playbook'):
                    run_options['cmdline'] = sys.argv[sys.argv.
                                                      index(leftover_args[0]):]
                    run_options['process_isolation'] = True
                    run_options['process_isolation_executable'] = vargs.get(
                        'container_runtime')

                try:
                    res = run(**run_options)
                except Exception:
                    exc = traceback.format_exc()
                    if stderr_path:
                        with open(stderr_path, 'w+') as stderr_file:
                            stderr_file.write(exc)
                    else:
                        sys.stderr.write(exc)
                    return 1
            return (res.rc)

    try:
        with open(pidfile, 'r') as f:
            pid = int(f.readline())
    except IOError:
        return (1)

    if vargs.get('command') == 'stop':
        Runner.handle_termination(pid, pidfile=pidfile)
        return (0)

    elif vargs.get('command') == 'is-alive':
        try:
            os.kill(pid, signal.SIG_DFL)
            return (0)
        except OSError:
            return (1)
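For the worker, process, adhoc and playbook commands the code above falls back to a temporary private data directory and registers an atexit hook to remove it unless the user asked to keep files. A minimal stand-alone sketch of that pattern (names are illustrative):

import atexit
import shutil
import tempfile


def make_private_data_dir(keep_files=False):
    temp_private_dir = tempfile.mkdtemp(prefix='runner-')
    if keep_files:
        print('keeping temporary data directory: {}'.format(temp_private_dir))
    else:
        # Remove the directory when the interpreter exits normally.
        atexit.register(shutil.rmtree, temp_private_dir, ignore_errors=True)
    return temp_private_dir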
Example 26
class IMAPWatch:
    def __init__(self,
                 basedir=None,
                 configfile='imapwatch.yml',
                 pidfile='/tmp/imapwatch.pid',
                 logfile='log/imapwatch.log',
                 daemon=False,
                 verbose=None,
                 force=False):

        if basedir:
            # basedir must be a full, absolute path
            __location__ = os.path.realpath(os.path.join(basedir))
        else:
            # assume the configfile and log are in the parent-parent directory of the directory of this file
            __location__ = os.path.realpath(
                os.path.join(os.getcwd(),
                             os.path.dirname(os.path.realpath(__file__)),
                             os.pardir, os.pardir))

        configfile = os.path.join(__location__, configfile)
        with open(configfile, 'r') as config_handle:
            # safe_load avoids constructing arbitrary Python objects from the config file
            self.config = yaml.safe_load(config_handle)
        if not self.config:
            raise SystemExit("No config file found. Exiting.")

        self.pidfile = TimeoutPIDLockFile(pidfile, timeout=1)
        self.logfile = os.path.join(__location__, logfile)
        self.daemon = daemon
        self.verbose = verbose
        self.force = force

        self.stop_event = threading.Event()
        self.threads = []

    def start(self):

        self.setup_logging()

        context = LoggingDaemonContext()
        context.loggers_preserve = [self.logger]
        context.stdout_logger = self.stdout_logger
        context.stderr_logger = self.stderr_logger
        context.pidfile = self.pidfile
        context.signal_map = {
            signal.SIGTERM: self.stop,
            signal.SIGINT: self.stop,
        }

        if self.daemon:
            context.detach_process = True
        else:
            context.detach_process = False
            # TODO should this not be below the else statement?
            context.stdout = sys.stdout
            context.stdin = sys.stdin

        # TODO first acquire and then release so we can go back to the command line
        # then do the same in the DaemonContext
        try:
            with context as c:
                self.logger.info('---------------')
                self.logger.info(
                    f'Starting daemon with pid {self.pidfile.read_pid()}')
                sender = Sender(self.logger, self.config['smtp']['server'],
                                self.config['smtp']['username'],
                                self.config['smtp']['password'],
                                self.config['smtp']['from'])

                self.logger.info("Setting up mailboxes")
                for account in self.config['accounts']:
                    mailboxes = account['mailboxes']
                    for mailbox in mailboxes:
                        action = [
                            a for a in self.config['actions']
                            if a['action'] == mailbox['action']
                        ][0]
                        checker = Checker(self.logger, self.stop_event,
                                          account['server'],
                                          account['username'],
                                          account['password'],
                                          mailbox['mailbox'],
                                          mailbox['check_for'], action, sender,
                                          bool(account['use_ssl']),
                                          int(account['timeout']))
                        checker_thread = CheckerThread(self.logger, checker)
                        self.threads.append(checker_thread)
                        checker_thread.start()

                # we have to do this, otherwise we lose the context and lockfile
                # (after all the threads have been created and detached)
                while not self.stop_event.is_set():
                    time.sleep(1)
        except FileExistsError:
            self.logger.debug('Removed stale lock file')
            self.pidfile.break_lock()
        except AlreadyLocked:
            if not self.force:
                raise SystemExit('Another imapwatch process is already running')
        except LockTimeout:
            raise SystemExit('LockTimeout')
        except NotLocked:
            raise SystemExit('NotLocked')

    def setup_logging(self):

        # configure logging
        logFormatter = logging.Formatter(
            '%(asctime)s %(name)-10.10s [%(process)-5d] [%(levelname)-8.8s] [%(threadName)-11.11s] %(message)s'
        )
        self.logger = logging.getLogger('imapwatch')
        # this shouldn't be necessary? level should be NOTSET standard
        # https://docs.python.org/3/library/logging.html
        self.logger.setLevel(logging.DEBUG)

        # create the filehandler
        self.fileHandler = RotatingFileHandler(
            self.logfile,
            mode='a',
            maxBytes=1048576,
            backupCount=9,
            encoding='UTF-8',
            # if we don't set delay to False, the stream is not opened until we start writing
            # which would prevent getLogFileHandler() from finding the right handle to preserve
            delay=False)
        self.fileHandler.formatter = logFormatter
        self.logger.addHandler(self.fileHandler)

        # get the (already existing) imapclient logger
        self.imapclient_logger = logging.getLogger('imapclient')
        self.imapclient_logger.addHandler(self.fileHandler)

        self.stdout_logger = logging.getLogger('stdout')
        self.stdout_logger.setLevel(logging.DEBUG)
        self.stdout_logger.addHandler(self.fileHandler)

        self.stderr_logger = logging.getLogger('stderr')
        self.stderr_logger.setLevel(logging.DEBUG)
        self.stderr_logger.addHandler(self.fileHandler)

        consoleHandler = logging.StreamHandler()
        consoleHandler.formatter = logFormatter

        if not self.daemon:
            # Add optional ConsoleHandler
            consoleHandler.setLevel('DEBUG')
            self.logger.setLevel(self.verbose)

            self.logger.addHandler(consoleHandler)
            self.stdout_logger.addHandler(consoleHandler)
            self.stderr_logger.addHandler(consoleHandler)

            # TODO add custom level for imapclient logging on the console
            # or in the configfile?
            self.imapclient_logger.addHandler(consoleHandler)

    def stop(self, signum, frame):
        self.logger.debug('Stopping')
        self.stop_event.set()
        # TODO should we use threading.enumerate() to stop threads?
        # https://docs.python.org/3/library/threading.html
        for t in self.threads:
            #self.logger.debug(f'Calling stop() for thread {t.name}')
            t.stop()
            #self.logger.debug(f'Finished stop() for thread {t.name}')
            #self.logger.debug(f'Calling join() for thread {t.name}')
            t.join()
            #self.logger.debug(f'Finished join() for thread {t.name}')
        self.logger.info('Stopped')
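The start() method above distinguishes several lockfile failure modes; those exceptions come from the lockfile package that python-daemon builds on. A small hedged sketch of acquiring and releasing a TimeoutPIDLockFile directly, so the failure modes are easier to see (the path and timeout are illustrative):

from daemon.pidfile import TimeoutPIDLockFile
from lockfile import AlreadyLocked, LockTimeout


def try_acquire(path='/tmp/imapwatch-example.pid', timeout=1):
    pidfile = TimeoutPIDLockFile(path, timeout)
    try:
        pidfile.acquire()  # raises LockTimeout once the timeout expires
    except (AlreadyLocked, LockTimeout):
        return None
    return pidfile


lock = try_acquire()
if lock is not None:
    try:
        pass  # do the work that must not run twice
    finally:
        lock.release()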
Example 27
def webserver(args):
    """Starts Airflow Webserver"""
    print(settings.HEADER)

    access_logfile = args.access_logfile or conf.get('webserver',
                                                     'access_logfile')
    error_logfile = args.error_logfile or conf.get('webserver',
                                                   'error_logfile')
    num_workers = args.workers or conf.get('webserver', 'workers')
    worker_timeout = args.worker_timeout or conf.get(
        'webserver', 'web_server_worker_timeout')
    ssl_cert = args.ssl_cert or conf.get('webserver', 'web_server_ssl_cert')
    ssl_key = args.ssl_key or conf.get('webserver', 'web_server_ssl_key')
    if not ssl_cert and ssl_key:
        raise AirflowException(
            'An SSL certificate must also be provided for use with ' + ssl_key)
    if ssl_cert and not ssl_key:
        raise AirflowException(
            'An SSL key must also be provided for use with ' + ssl_cert)

    if args.debug:
        print(
            f"Starting the web server on port {args.port} and host {args.hostname}."
        )
        app = create_app(testing=conf.getboolean('core', 'unit_test_mode'))
        app.run(
            debug=True,
            use_reloader=not app.config['TESTING'],
            port=args.port,
            host=args.hostname,
            ssl_context=(ssl_cert, ssl_key) if ssl_cert and ssl_key else None,
        )
    else:
        # This pre-warms the cache, and makes possible errors
        # get reported earlier (i.e. before daemonization)
        os.environ['SKIP_DAGS_PARSING'] = 'True'
        app = cached_app(None)
        os.environ.pop('SKIP_DAGS_PARSING')

        pid_file, stdout, stderr, log_file = setup_locations(
            "webserver", args.pid, args.stdout, args.stderr, args.log_file)

        # Check if webserver is already running if not, remove old pidfile
        check_if_pidfile_process_is_running(pid_file=pid_file,
                                            process_name="webserver")

        print(
            textwrap.dedent('''\
                Running the Gunicorn Server with:
                Workers: {num_workers} {workerclass}
                Host: {hostname}:{port}
                Timeout: {worker_timeout}
                Logfiles: {access_logfile} {error_logfile}
                =================================================================\
            '''.format(
                num_workers=num_workers,
                workerclass=args.workerclass,
                hostname=args.hostname,
                port=args.port,
                worker_timeout=worker_timeout,
                access_logfile=access_logfile,
                error_logfile=error_logfile,
            )))

        run_args = [
            'gunicorn',
            '--workers',
            str(num_workers),
            '--worker-class',
            str(args.workerclass),
            '--timeout',
            str(worker_timeout),
            '--bind',
            args.hostname + ':' + str(args.port),
            '--name',
            'airflow-webserver',
            '--pid',
            pid_file,
            '--config',
            'python:airflow.www.gunicorn_config',
        ]

        if args.access_logfile:
            run_args += ['--access-logfile', str(args.access_logfile)]

        if args.error_logfile:
            run_args += ['--error-logfile', str(args.error_logfile)]

        if args.daemon:
            run_args += ['--daemon']

        if ssl_cert:
            run_args += ['--certfile', ssl_cert, '--keyfile', ssl_key]

        run_args += ["airflow.www.app:cached_app()"]

        gunicorn_master_proc = None

        def kill_proc(signum, _):  # pylint: disable=unused-argument
            log.info("Received signal: %s. Closing gunicorn.", signum)
            gunicorn_master_proc.terminate()
            with suppress(TimeoutError):
                gunicorn_master_proc.wait(timeout=30)
            if gunicorn_master_proc.poll() is None:
                # Still running after the grace period: force kill.
                gunicorn_master_proc.kill()
            sys.exit(0)

        def monitor_gunicorn(gunicorn_master_pid: int):
            # Register signal handlers
            signal.signal(signal.SIGINT, kill_proc)
            signal.signal(signal.SIGTERM, kill_proc)

            # These run forever until SIG{INT, TERM, KILL, ...} signal is sent
            GunicornMonitor(
                gunicorn_master_pid=gunicorn_master_pid,
                num_workers_expected=num_workers,
                master_timeout=conf.getint('webserver',
                                           'web_server_master_timeout'),
                worker_refresh_interval=conf.getint('webserver',
                                                    'worker_refresh_interval',
                                                    fallback=30),
                worker_refresh_batch_size=conf.getint(
                    'webserver', 'worker_refresh_batch_size', fallback=1),
                reload_on_plugin_change=conf.getboolean(
                    'webserver', 'reload_on_plugin_change', fallback=False),
            ).start()

        if args.daemon:
            handle = setup_logging(log_file)

            base, ext = os.path.splitext(pid_file)
            with open(stdout, 'w+') as stdout, open(stderr, 'w+') as stderr:
                ctx = daemon.DaemonContext(
                    pidfile=TimeoutPIDLockFile(f"{base}-monitor{ext}", -1),
                    files_preserve=[handle],
                    stdout=stdout,
                    stderr=stderr,
                )
                with ctx:
                    subprocess.Popen(run_args, close_fds=True)

                    # Reading the pid of the gunicorn master as it will be different from
                    # the one of the process spawned above.
                    while True:
                        sleep(0.1)
                        gunicorn_master_proc_pid = read_pid_from_pidfile(
                            pid_file)
                        if gunicorn_master_proc_pid:
                            break

                    # Run Gunicorn monitor
                    gunicorn_master_proc = psutil.Process(
                        gunicorn_master_proc_pid)
                    monitor_gunicorn(gunicorn_master_proc.pid)

        else:
            gunicorn_master_proc = subprocess.Popen(run_args, close_fds=True)
            monitor_gunicorn(gunicorn_master_proc.pid)
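In daemon mode the example above waits for gunicorn's own pidfile to appear before wrapping the master pid in a psutil.Process. A hedged, stand-alone sketch of that wait-for-pidfile step (the helper name and timeout are assumptions, not Airflow's API):

import time

import psutil


def wait_for_gunicorn_master(pid_file, poll_interval=0.1, timeout=30.0):
    """Poll a pidfile until it contains a pid, then return the psutil.Process."""
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        try:
            with open(pid_file) as f:
                text = f.read().strip()
            if text:
                return psutil.Process(int(text))
        except (OSError, ValueError, psutil.NoSuchProcess):
            pass  # pidfile missing, empty, or pid already gone; keep polling
        time.sleep(poll_interval)
    raise TimeoutError('gunicorn master pidfile never appeared: %s' % pid_file)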
Example 28
    private_data_dir = args.private_data_dir
    pidfile = os.path.join(private_data_dir, 'pid')

    if args.command == 'start':
        # create a file to log stderr in case the daemonized process throws
        # an exception before it gets to `pexpect.spawn`
        stderr_path = os.path.join(private_data_dir, 'artifacts', 'daemon.log')
        if not os.path.exists(stderr_path):
            os.mknod(stderr_path, stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR)
        stderr = open(stderr_path, 'w+')

        import daemon
        from daemon.pidfile import TimeoutPIDLockFile
        context = daemon.DaemonContext(
            pidfile=TimeoutPIDLockFile(pidfile),
            stderr=stderr
        )
        with context:
            __run__(private_data_dir)
        sys.exit(0)

    try:
        with open(pidfile, 'r') as f:
            pid = int(f.readline())
    except IOError:
        sys.exit(1)

    if args.command == 'stop':
        try:
            with open(os.path.join(private_data_dir, 'args'), 'r') as args:
Example 29
def main():
    #  Logger
    _logger = logger.Logger('middleware')
    get_logger = _logger.getLogger()

    # Workaround for development
    modpath = os.path.realpath(os.path.join(
        os.path.dirname(os.path.realpath(__file__)),
        '..',
    ))
    if modpath not in sys.path:
        sys.path.insert(0, modpath)

    parser = argparse.ArgumentParser()
    parser.add_argument('restart', nargs='?')
    parser.add_argument('--foreground', '-f', action='store_true')
    parser.add_argument('--debug-level', default='DEBUG', choices=[
        'DEBUG',
        'INFO',
        'WARN',
        'ERROR',
    ])
    parser.add_argument('--log-handler', choices=[
        'console',
        'file',
    ])
    args = parser.parse_args()

    if args.log_handler:
        log_handlers = [args.log_handler]
    else:
        log_handlers = ['console' if args.foreground else 'file']

    pidpath = '/var/run/middlewared.pid'

    if args.restart:
        if os.path.exists(pidpath):
            with open(pidpath, 'r') as f:
                pid = int(f.read().strip())
            os.kill(pid, 15)

    if not args.foreground:
        _logger.configure_logging('file')
        daemonc = DaemonContext(
            pidfile=TimeoutPIDLockFile(pidpath),
            detach_process=True,
            stdout=logger.LoggerStream(get_logger),
            stderr=logger.LoggerStream(get_logger),
        )
        daemonc.open()
    elif 'file' in log_handlers:
        _logger.configure_logging('file')
        sys.stdout = logger.LoggerStream(get_logger)
        sys.stderr = logger.LoggerStream(get_logger)
    elif 'console' in log_handlers:
        _logger.configure_logging('console')
    else:
        _logger.configure_logging('file')

    setproctitle.setproctitle('middlewared')
    # Workaround to tell django to not set up logging on its own
    os.environ['MIDDLEWARED'] = str(os.getpid())

    Middleware().run()
    if not args.foreground:
        daemonc.close()
Example 30
def main():
    args = parse_argv(sys.argv[1:])

    if args.version:
        print('Sendria %s' % __version__)
        sys.exit(0)

    # Do we just want to stop a running daemon?
    if args.stop:
        logger.get().msg(
            'stopping Sendria',
            debug='enabled' if args.debug else 'disabled',
            pidfile=str(args.pidfile) if args.pidfile else None,
        )
        stop(args.pidfile)
        sys.exit(0)

    logger.get().msg(
        'starting Sendria',
        debug='enabled' if args.debug else 'disabled',
        pidfile=str(args.pidfile) if args.pidfile else None,
        db=str(args.db),
        foreground='true' if args.foreground else 'false',
    )

    # Check if the static folder is writable
    if args.autobuild_assets and not os.access(STATIC_DIR, os.W_OK):
        exit_err('autobuilding assets requires write access to %s' %
                 STATIC_DIR)

    if not args.autobuild_assets and (not ASSETS_DIR.exists()
                                      or not list(ASSETS_DIR.glob('*'))):
        exit_err(
            'assets not found. Generate assets using: webassets -m sendria.build_assets build',
            0)

    daemon_kw = {}

    if args.foreground:
        # Do not detach and keep std streams open
        daemon_kw.update({
            'detach_process': False,
            'stdin': sys.stdin,
            'stdout': sys.stdout,
            'stderr': sys.stderr,
        })

    if args.pidfile:
        if args.pidfile.exists():
            pid = read_pidfile(args.pidfile)
            if not pid_exists(pid):
                logger.get().msg(
                    'deleting obsolete PID file (process %s does not exist)' %
                    pid,
                    pid=pid)
                args.pidfile.unlink()
        daemon_kw['pidfile'] = TimeoutPIDLockFile(str(args.pidfile), 5)

    # Unload threading module to avoid error on exit (it's loaded by lockfile)
    if 'threading' in sys.modules:
        del sys.modules['threading']

    context = daemon.DaemonContext(**daemon_kw)
    with context:
        loop = asyncio.get_event_loop()

        run_sendria_servers(loop, args)

        loop.run_forever()

    logger.get().msg('stop signal received')
    loop.close()

    logger.get().msg('terminating')
    sys.exit(0)
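Before handing the pidfile to DaemonContext, the code above removes an obsolete pidfile whose process no longer exists. A small sketch of that cleanup using pathlib and psutil in place of Sendria's read_pidfile/pid_exists helpers (those substitutions are assumptions):

from pathlib import Path

import psutil
from daemon.pidfile import TimeoutPIDLockFile


def prepare_pidfile(pidfile: Path, acquire_timeout=5):
    if pidfile.exists():
        try:
            pid = int(pidfile.read_text().strip())
        except ValueError:
            pid = None
        if pid is None or not psutil.pid_exists(pid):
            # Stale or unreadable pidfile: remove it before daemonizing.
            pidfile.unlink()
    return TimeoutPIDLockFile(str(pidfile), acquire_timeout)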
Example 31
File: run.py Project: MBcom/awx
    args = parser.parse_args()

    private_data_dir = args.private_data_dir
    pidfile = os.path.join(private_data_dir, 'pid')

    if args.command == 'start':
        # create a file to log stderr in case the daemonized process throws
        # an exception before it gets to `pexpect.spawn`
        stderr_path = os.path.join(private_data_dir, 'artifacts', 'daemon.log')
        if not os.path.exists(stderr_path):
            os.mknod(stderr_path, stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR)
        stderr = open(stderr_path, 'w+')

        import daemon
        from daemon.pidfile import TimeoutPIDLockFile
        context = daemon.DaemonContext(pidfile=TimeoutPIDLockFile(pidfile),
                                       stderr=stderr)
        with context:
            __run__(private_data_dir)
        sys.exit(0)

    try:
        with open(pidfile, 'r') as f:
            pid = int(f.readline())
    except IOError:
        sys.exit(1)

    if args.command == 'stop':
        try:
            with open(os.path.join(private_data_dir, 'args'), 'r') as args:
                handle_termination(pid, json.load(args), 'bwrap')
Example 32
def main():
    parser = argparse.ArgumentParser(description='manage ansible execution')

    parser.add_argument('--version', action='version', version=VERSION)

    parser.add_argument('command', choices=['run', 'start', 'stop', 'is-alive'])

    parser.add_argument('private_data_dir',
                        help='Base directory containing Runner metadata (project, inventory, etc.)')

    group = parser.add_mutually_exclusive_group()

    group.add_argument("-p", "--playbook", default=DEFAULT_RUNNER_PLAYBOOK,
                       help="The name of the playbook to execute")

    group.add_argument("-r", "--role", default=DEFAULT_RUNNER_ROLE,
                       help="Invoke an Ansible role directly without a playbook")

    parser.add_argument("--hosts",
                        help="Define the set of hosts to execute against")

    parser.add_argument("-i", "--ident",
                        default=uuid4(),
                        help="An identifier that will be used when generating the"
                             "artifacts directory and can be used to uniquely identify a playbook run")

    parser.add_argument("--roles-path", default=DEFAULT_ROLES_PATH,
                        help="Path to the Ansible roles directory")

    parser.add_argument("--role-vars",
                        help="Variables to pass to the role at runtime")

    parser.add_argument("--role-skip-facts", action="store_true", default=False,
                        help="Disable fact collection when executing a role directly")

    parser.add_argument("--inventory")
    parser.add_argument("-j", "--json", action="store_true",
                        help="Output the json event structure to stdout instead of Ansible output")

    parser.add_argument("-v", action="count",
                        help="Increase the verbosity with multiple v's (up to 5) of the ansible-playbook output")

    args = parser.parse_args()

    pidfile = os.path.join(args.private_data_dir, 'pid')

    try:
        os.makedirs(args.private_data_dir)
    except OSError as exc:
        if exc.errno == errno.EEXIST and os.path.isdir(args.private_data_dir):
            pass
        else:
            raise

    if args.command != 'run':
        stderr_path = os.path.join(args.private_data_dir, 'daemon.log')
        if not os.path.exists(stderr_path):
            os.close(os.open(stderr_path, os.O_CREAT, stat.S_IRUSR | stat.S_IWUSR))
        stderr = open(stderr_path, 'w+')

    if args.command in ('start', 'run'):
        if args.role:

            role = {'name': args.role}
            if args.role_vars:
                role_vars = {}
                for item in shlex.split(args.role_vars):
                    key, value = item.split('=')
                    role_vars[key] = value
                role['vars'] = role_vars

            kwargs = dict(private_data_dir=args.private_data_dir,
                          json_mode=args.json)

            playbook = None
            tmpvars = None

            rc = 255

            try:
                play = [{'hosts': args.hosts if args.hosts is not None else "all",
                         'gather_facts': not args.role_skip_facts,
                         'roles': [role]}]

                path = os.path.abspath(os.path.join(args.private_data_dir, 'project'))
                filename = str(uuid4().hex)

                playbook = dump_artifact(json.dumps(play), path, filename)
                kwargs['playbook'] = playbook
                print('using playbook file %s' % playbook)

                if args.inventory:
                    inventory_file = os.path.abspath(os.path.join(args.private_data_dir, 'inventory', args.inventory))
                    kwargs['inventory'] = inventory_file
                    print('using inventory file %s' % inventory_file)

                envvars = {}

                roles_path = args.roles_path or os.path.join(args.private_data_dir, 'roles')
                roles_path = os.path.abspath(roles_path)
                print('setting ANSIBLE_ROLES_PATH to %s' % roles_path)

                # since envvars will overwrite an existing envvars, capture
                # the content of the current envvars if it exists and
                # restore it once done
                envvars = {}
                curvars = os.path.join(args.private_data_dir, 'env/envvars')
                if os.path.exists(curvars):
                    with open(curvars, 'rb') as f:
                        tmpvars = f.read()
                        envvars = safe_load(tmpvars)
                envvars['ANSIBLE_ROLES_PATH'] = roles_path
                kwargs['envvars'] = envvars

                res = run(**kwargs)
                rc = res.rc

            finally:
                if playbook and os.path.isfile(playbook):
                    os.remove(playbook)

                # if a previous envvars existed in the private_data_dir,
                # restore the original file contents
                if tmpvars:
                    with open(curvars, 'wb') as f:
                        f.write(tmpvars)

            sys.exit(rc)

        elif args.command == 'start':
            import daemon
            from daemon.pidfile import TimeoutPIDLockFile
            context = daemon.DaemonContext(
                pidfile=TimeoutPIDLockFile(pidfile),
                stderr=stderr
            )
        else:
            context = threading.Lock()

        with context:
            run_options = dict(private_data_dir=args.private_data_dir,
                               ident=args.ident,
                               playbook=args.playbook,
                               verbosity=args.v,
                               json_mode=args.json)
            if args.hosts is not None:
                run_options.update(inventory=args.hosts)
            run(**run_options)
            sys.exit(0)

    try:
        with open(pidfile, 'r') as f:
            pid = int(f.readline())
    except IOError:
        sys.exit(1)

    if args.command == 'stop':
        try:
            with open(os.path.join(args.private_data_dir, 'args'), 'r') as args:
                Runner.handle_termination(pid, json.load(args), 'bwrap')
        except IOError:
            Runner.handle_termination(pid, [], 'bwrap')

    elif args.command == 'is-alive':
        try:
            os.kill(pid, signal.SIG_DFL)
            sys.exit(0)
        except OSError:
            sys.exit(1)
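The role branch in the example above synthesizes a one-off play so a role can run without a hand-written playbook, then dumps it into the project directory. A minimal sketch of that play structure, written out with plain json instead of ansible-runner's dump_artifact helper (that substitution, and the file layout, are assumptions):

import json
import os
from uuid import uuid4


def write_role_play(private_data_dir, role_name, hosts='all', gather_facts=True):
    """Write a synthetic single-play playbook that only applies one role."""
    play = [{
        'hosts': hosts,
        'gather_facts': gather_facts,
        'roles': [{'name': role_name}],
    }]
    project_dir = os.path.join(private_data_dir, 'project')
    os.makedirs(project_dir, exist_ok=True)
    playbook_path = os.path.join(project_dir, '%s.json' % uuid4().hex)
    with open(playbook_path, 'w') as f:
        json.dump(play, f)  # JSON is valid YAML, so ansible-playbook accepts it
    return playbook_path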