Example #1
    def local_health_check(self):
        """Only call this method on the instance that this record represents"""
        errors = None
        try:
            # if redis is down for some reason, that means we can't persist
            # playbook event data; we should consider this a zero capacity event
            redis.Redis.from_url(settings.BROKER_URL).ping()
        except redis.ConnectionError:
            errors = _('Failed to connect to Redis')

        self.save_health_data(awx_application_version, get_cpu_count(), get_mem_in_bytes(),
                              update_last_seen=True, errors=errors)
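
The check above reduces to a single Redis PING against the configured broker URL. A minimal standalone sketch of the same pattern, assuming redis-py is installed; the broker_url default and the check_redis helper below are illustrative, not part of the original code:

import redis

def check_redis(broker_url="redis://localhost:6379/0"):
    """Return None if Redis answers PING, or an error string if it is unreachable."""
    try:
        # a failed PING means event data cannot be persisted, i.e. a zero capacity event
        redis.Redis.from_url(broker_url).ping()
        return None
    except redis.ConnectionError:
        return "Failed to connect to Redis"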
Example #2
def main(sys_args=None):
    """Main entry point for ansible-runner executable

    When the ``ansible-runner`` command is executed, this function
    is the main entry point that is called and executed.

    :param sys_args: List of arguments to be parsed by the parser
    :type sys_args: list

    :returns: an exit status code from running the requested command
    :rtype: int
    """

    parser = AnsibleRunnerArgumentParser(
        prog='ansible-runner',
        description="Use 'ansible-runner' (with no arguments) to see basic usage")
    subparser = parser.add_subparsers(
        help="Command to invoke",
        dest='command',
        description="COMMAND PRIVATE_DATA_DIR [ARGS]")
    add_args_to_parser(parser, DEFAULT_CLI_ARGS['generic_args'])
    subparser.required = True

    # positional options
    run_subparser = subparser.add_parser(
        'run', help="Run ansible-runner in the foreground")
    add_args_to_parser(run_subparser, DEFAULT_CLI_ARGS['positional_args'])
    add_args_to_parser(run_subparser, DEFAULT_CLI_ARGS['playbook_group'])
    start_subparser = subparser.add_parser(
        'start', help="Start an ansible-runner process in the background")
    add_args_to_parser(start_subparser, DEFAULT_CLI_ARGS['positional_args'])
    add_args_to_parser(start_subparser, DEFAULT_CLI_ARGS['playbook_group'])
    stop_subparser = subparser.add_parser(
        'stop',
        help="Stop an ansible-runner process that's running in the background")
    add_args_to_parser(stop_subparser, DEFAULT_CLI_ARGS['positional_args'])
    isalive_subparser = subparser.add_parser(
        'is-alive',
        help="Check if an ansible-runner process in the background is still running.")
    add_args_to_parser(isalive_subparser, DEFAULT_CLI_ARGS['positional_args'])

    # streaming commands
    transmit_subparser = subparser.add_parser(
        'transmit', help="Send a job to a remote ansible-runner process")
    add_args_to_parser(transmit_subparser, DEFAULT_CLI_ARGS['positional_args'])

    worker_subparser = subparser.add_parser(
        'worker', help="Execute work streamed from a controlling instance")
    worker_subcommands = worker_subparser.add_subparsers(
        help="Sub-sub command to invoke",
        dest='worker_subcommand',
        description="ansible-runner worker [sub-sub-command]",
    )
    cleanup_command = worker_subcommands.add_parser(
        'cleanup',
        help="Cleanup private_data_dir patterns from prior jobs and supporting temporary folders.",
    )
    cleanup.add_cleanup_args(cleanup_command)

    worker_subparser.add_argument(
        "--private-data-dir",
        help="base directory containing the ansible-runner metadata "
        "(project, inventory, env, etc)",
    )

    worker_subparser.add_argument(
        "--worker-info",
        dest="worker_info",
        action="store_true",
        help="show the execution node's Ansible Runner version along with its memory and CPU capacities"
    )
    worker_subparser.add_argument(
        "--delete",
        dest="delete_directory",
        action="store_true",
        default=False,
        help=("Delete existing folder (and everything in it) in the location specified by --private-data-dir. "
              "The directory will be re-populated when the streamed data is unpacked. "
              "Using this will also ensure that the directory is deleted when the job finishes."))
    process_subparser = subparser.add_parser(
        'process',
        help="Receive the output of remote ansible-runner work and distribute the results")
    add_args_to_parser(process_subparser, DEFAULT_CLI_ARGS['positional_args'])

    # generic args for all subparsers
    add_args_to_parser(run_subparser, DEFAULT_CLI_ARGS['generic_args'])
    add_args_to_parser(start_subparser, DEFAULT_CLI_ARGS['generic_args'])
    add_args_to_parser(stop_subparser, DEFAULT_CLI_ARGS['generic_args'])
    add_args_to_parser(isalive_subparser, DEFAULT_CLI_ARGS['generic_args'])
    add_args_to_parser(transmit_subparser, DEFAULT_CLI_ARGS['generic_args'])
    add_args_to_parser(worker_subparser, DEFAULT_CLI_ARGS['generic_args'])
    add_args_to_parser(process_subparser, DEFAULT_CLI_ARGS['generic_args'])

    # runner group
    ansible_runner_group_options = (
        "Ansible Runner Options",
        "configuration options for controlling the ansible-runner "
        "runtime environment.",
    )
    base_runner_group = parser.add_argument_group(
        *ansible_runner_group_options)
    run_runner_group = run_subparser.add_argument_group(
        *ansible_runner_group_options)
    start_runner_group = start_subparser.add_argument_group(
        *ansible_runner_group_options)
    stop_runner_group = stop_subparser.add_argument_group(
        *ansible_runner_group_options)
    isalive_runner_group = isalive_subparser.add_argument_group(
        *ansible_runner_group_options)
    transmit_runner_group = transmit_subparser.add_argument_group(
        *ansible_runner_group_options)
    add_args_to_parser(base_runner_group, DEFAULT_CLI_ARGS['runner_group'])
    add_args_to_parser(run_runner_group, DEFAULT_CLI_ARGS['runner_group'])
    add_args_to_parser(start_runner_group, DEFAULT_CLI_ARGS['runner_group'])
    add_args_to_parser(stop_runner_group, DEFAULT_CLI_ARGS['runner_group'])
    add_args_to_parser(isalive_runner_group, DEFAULT_CLI_ARGS['runner_group'])
    add_args_to_parser(transmit_runner_group, DEFAULT_CLI_ARGS['runner_group'])

    # mutually exclusive group
    run_mutually_exclusive_group = run_subparser.add_mutually_exclusive_group()
    start_mutually_exclusive_group = start_subparser.add_mutually_exclusive_group()
    stop_mutually_exclusive_group = stop_subparser.add_mutually_exclusive_group()
    isalive_mutually_exclusive_group = isalive_subparser.add_mutually_exclusive_group()
    transmit_mutually_exclusive_group = transmit_subparser.add_mutually_exclusive_group()
    add_args_to_parser(run_mutually_exclusive_group,
                       DEFAULT_CLI_ARGS['mutually_exclusive_group'])
    add_args_to_parser(start_mutually_exclusive_group,
                       DEFAULT_CLI_ARGS['mutually_exclusive_group'])
    add_args_to_parser(stop_mutually_exclusive_group,
                       DEFAULT_CLI_ARGS['mutually_exclusive_group'])
    add_args_to_parser(isalive_mutually_exclusive_group,
                       DEFAULT_CLI_ARGS['mutually_exclusive_group'])
    add_args_to_parser(transmit_mutually_exclusive_group,
                       DEFAULT_CLI_ARGS['mutually_exclusive_group'])

    # ansible options
    ansible_options = (
        "Ansible Options",
        "control the ansible[-playbook] execution environment",
    )
    run_ansible_group = run_subparser.add_argument_group(*ansible_options)
    start_ansible_group = start_subparser.add_argument_group(*ansible_options)
    stop_ansible_group = stop_subparser.add_argument_group(*ansible_options)
    isalive_ansible_group = isalive_subparser.add_argument_group(
        *ansible_options)
    transmit_ansible_group = transmit_subparser.add_argument_group(
        *ansible_options)
    add_args_to_parser(run_ansible_group, DEFAULT_CLI_ARGS['ansible_group'])
    add_args_to_parser(start_ansible_group, DEFAULT_CLI_ARGS['ansible_group'])
    add_args_to_parser(stop_ansible_group, DEFAULT_CLI_ARGS['ansible_group'])
    add_args_to_parser(isalive_ansible_group,
                       DEFAULT_CLI_ARGS['ansible_group'])
    add_args_to_parser(transmit_ansible_group,
                       DEFAULT_CLI_ARGS['ansible_group'])

    # roles group
    roles_group_options = (
        "Ansible Role Options",
        "configuration options for directly executing Ansible roles",
    )
    run_roles_group = run_subparser.add_argument_group(*roles_group_options)
    start_roles_group = start_subparser.add_argument_group(
        *roles_group_options)
    stop_roles_group = stop_subparser.add_argument_group(*roles_group_options)
    isalive_roles_group = isalive_subparser.add_argument_group(
        *roles_group_options)
    transmit_roles_group = transmit_subparser.add_argument_group(
        *roles_group_options)
    add_args_to_parser(run_roles_group, DEFAULT_CLI_ARGS['roles_group'])
    add_args_to_parser(start_roles_group, DEFAULT_CLI_ARGS['roles_group'])
    add_args_to_parser(stop_roles_group, DEFAULT_CLI_ARGS['roles_group'])
    add_args_to_parser(isalive_roles_group, DEFAULT_CLI_ARGS['roles_group'])
    add_args_to_parser(transmit_roles_group, DEFAULT_CLI_ARGS['roles_group'])

    # modules groups

    modules_group_options = (
        "Ansible Module Options",
        "configuration options for directly executing Ansible modules",
    )
    run_modules_group = run_subparser.add_argument_group(
        *modules_group_options)
    start_modules_group = start_subparser.add_argument_group(
        *modules_group_options)
    stop_modules_group = stop_subparser.add_argument_group(
        *modules_group_options)
    isalive_modules_group = isalive_subparser.add_argument_group(
        *modules_group_options)
    transmit_modules_group = transmit_subparser.add_argument_group(
        *modules_group_options)
    add_args_to_parser(run_modules_group, DEFAULT_CLI_ARGS['modules_group'])
    add_args_to_parser(start_modules_group, DEFAULT_CLI_ARGS['modules_group'])
    add_args_to_parser(stop_modules_group, DEFAULT_CLI_ARGS['modules_group'])
    add_args_to_parser(isalive_modules_group,
                       DEFAULT_CLI_ARGS['modules_group'])
    add_args_to_parser(transmit_modules_group,
                       DEFAULT_CLI_ARGS['modules_group'])

    # container group
    container_group_options = (
        "Ansible Container Options",
        "configuation options for executing Ansible playbooks",
    )
    run_container_group = run_subparser.add_argument_group(
        *container_group_options)
    start_container_group = start_subparser.add_argument_group(
        *container_group_options)
    stop_container_group = stop_subparser.add_argument_group(
        *container_group_options)
    isalive_container_group = isalive_subparser.add_argument_group(
        *container_group_options)
    transmit_container_group = transmit_subparser.add_argument_group(
        *container_group_options)
    add_args_to_parser(run_container_group,
                       DEFAULT_CLI_ARGS['container_group'])
    add_args_to_parser(start_container_group,
                       DEFAULT_CLI_ARGS['container_group'])
    add_args_to_parser(stop_container_group,
                       DEFAULT_CLI_ARGS['container_group'])
    add_args_to_parser(isalive_container_group,
                       DEFAULT_CLI_ARGS['container_group'])
    add_args_to_parser(transmit_container_group,
                       DEFAULT_CLI_ARGS['container_group'])

    args = parser.parse_args(sys_args)

    vargs = vars(args)

    if vargs.get('command') == 'worker':
        if vargs.get('worker_subcommand') == 'cleanup':
            cleanup.run_cleanup(vargs)
            parser.exit(0)
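        # --worker-info only reports this node's Runner version, CPU count,
        # memory, and UUID as YAML, then exits without running a job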
        if vargs.get('worker_info'):
            cpu = get_cpu_count()
            mem = get_mem_in_bytes()
            errors = []
            uuid = ensure_uuid()
            if not isinstance(mem, int):
                errors.append(mem)
                mem = None
            if "Could not find" in uuid:
                errors.append(uuid)
                uuid = None
            info = {
                'errors': errors,
                'mem_in_bytes': mem,
                'cpu_count': cpu,
                'runner_version': VERSION,
                'uuid': uuid,
            }
            print(safe_dump(info, default_flow_style=True))
            parser.exit(0)

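        # with --delete, any existing private data dir is wiped and registered
        # for cleanup; without --private-data-dir, a temporary directory is
        # created and registered for cleanup instead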
        private_data_dir = vargs.get('private_data_dir')
        delete_directory = vargs.get('delete_directory', False)
        if private_data_dir and delete_directory:
            shutil.rmtree(private_data_dir, ignore_errors=True)
            register_for_cleanup(private_data_dir)
        elif private_data_dir is None:
            temp_private_dir = tempfile.mkdtemp()
            vargs['private_data_dir'] = temp_private_dir
            register_for_cleanup(temp_private_dir)

    if vargs.get('command') == 'process':
        # the process command is the final destination of artifacts; the user expects private_data_dir to not be cleaned up
        if not vargs.get('private_data_dir'):
            temp_private_dir = tempfile.mkdtemp()
            vargs['private_data_dir'] = temp_private_dir

    if vargs.get('command') in ('start', 'run', 'transmit'):
        if vargs.get('hosts') and not (vargs.get('module')
                                       or vargs.get('role')):
            parser.exit(
                status=1,
                message="The --hosts option can only be used with -m or -r\n")
        if not (vargs.get('module')
                or vargs.get('role')) and not vargs.get('playbook'):
            parser.exit(
                status=1,
                message=
                "The -p option must be specified when not using -m or -r\n")

    output.configure()

    # enable or disable debug mode
    output.set_debug('enable' if vargs.get('debug') else 'disable')

    # set the output logfile
    if ('logfile' in args) and vargs.get('logfile'):
        output.set_logfile(vargs.get('logfile'))

    output.debug('starting debug logging')

    # get the absolute path for start since it is a daemon
    vargs['private_data_dir'] = os.path.abspath(vargs.get('private_data_dir'))

    pidfile = os.path.join(vargs.get('private_data_dir'), 'pid')

    try:
        os.makedirs(vargs.get('private_data_dir'), mode=0o700)
    except OSError as exc:
        if exc.errno == errno.EEXIST and os.path.isdir(
                vargs.get('private_data_dir')):
            pass
        else:
            raise

    stderr_path = None
    context = None
    if vargs.get('command') not in ('run', 'transmit', 'worker'):
        stderr_path = os.path.join(vargs.get('private_data_dir'), 'daemon.log')
        if not os.path.exists(stderr_path):
            os.close(
                os.open(stderr_path, os.O_CREAT, stat.S_IRUSR | stat.S_IWUSR))

    if vargs.get('command') in ('start', 'run', 'transmit', 'worker',
                                'process'):

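        # 'start' detaches into a daemon guarded by a PID lockfile; the other
        # commands run in the foreground, where a plain Lock serves only as a
        # stand-in context manager for the shared `with context:` block below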
        if vargs.get('command') == 'start':
            import daemon
            from daemon.pidfile import TimeoutPIDLockFile
            context = daemon.DaemonContext(pidfile=TimeoutPIDLockFile(pidfile))
        else:
            context = threading.Lock()

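        # transmit/worker/process run in streaming mode; the command name
        # doubles as the streamer role handed to run()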
        streamer = None
        if vargs.get('command') in ('transmit', 'worker', 'process'):
            streamer = vargs.get('command')

        with context:
            with role_manager(vargs) as vargs:
                run_options = dict(
                    private_data_dir=vargs.get('private_data_dir'),
                    ident=vargs.get('ident'),
                    binary=vargs.get('binary'),
                    playbook=vargs.get('playbook'),
                    module=vargs.get('module'),
                    module_args=vargs.get('module_args'),
                    host_pattern=vargs.get('hosts'),
                    verbosity=vargs.get('v'),
                    quiet=vargs.get('quiet'),
                    rotate_artifacts=vargs.get('rotate_artifacts'),
                    ignore_logging=False,
                    json_mode=vargs.get('json'),
                    omit_event_data=vargs.get('omit_event_data'),
                    only_failed_event_data=vargs.get('only_failed_event_data'),
                    inventory=vargs.get('inventory'),
                    forks=vargs.get('forks'),
                    project_dir=vargs.get('project_dir'),
                    artifact_dir=vargs.get('artifact_dir'),
                    roles_path=[vargs.get('roles_path')]
                    if vargs.get('roles_path') else None,
                    process_isolation=vargs.get('process_isolation'),
                    process_isolation_executable=vargs.get(
                        'process_isolation_executable'),
                    process_isolation_path=vargs.get('process_isolation_path'),
                    process_isolation_hide_paths=vargs.get(
                        'process_isolation_hide_paths'),
                    process_isolation_show_paths=vargs.get(
                        'process_isolation_show_paths'),
                    process_isolation_ro_paths=vargs.get(
                        'process_isolation_ro_paths'),
                    container_image=vargs.get('container_image'),
                    container_volume_mounts=vargs.get(
                        'container_volume_mounts'),
                    container_options=vargs.get('container_options'),
                    directory_isolation_base_path=vargs.get(
                        'directory_isolation_base_path'),
                    resource_profiling=vargs.get('resource_profiling'),
                    resource_profiling_base_cgroup=vargs.get(
                        'resource_profiling_base_cgroup'),
                    resource_profiling_cpu_poll_interval=vargs.get(
                        'resource_profiling_cpu_poll_interval'),
                    resource_profiling_memory_poll_interval=vargs.get(
                        'resource_profiling_memory_poll_interval'),
                    resource_profiling_pid_poll_interval=vargs.get(
                        'resource_profiling_pid_poll_interval'),
                    resource_profiling_results_dir=vargs.get(
                        'resource_profiling_results_dir'),
                    cmdline=vargs.get('cmdline'),
                    limit=vargs.get('limit'),
                    streamer=streamer)
                try:
                    res = run(**run_options)
                except Exception:
                    exc = traceback.format_exc()
                    if stderr_path:
                        with open(stderr_path, 'w+') as f:
                            f.write(exc)
                    else:
                        sys.stderr.write(exc)
                    return 1
            return res.rc

    try:
        with open(pidfile, 'r') as f:
            pid = int(f.readline())
    except IOError:
        return 1

    if vargs.get('command') == 'stop':
        Runner.handle_termination(pid, pidfile=pidfile)
        return 0

    elif vargs.get('command') == 'is-alive':
        try:
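            # signal 0 (the value of signal.SIG_DFL) is not delivered; kill()
            # only checks whether a process with this pid still exists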
            os.kill(pid, signal.SIG_DFL)
            return 0
        except OSError:
            return 1
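
main() returns an integer exit status (or raises SystemExit through parser.exit), so the console entry point is expected to hand that value back to the shell. A minimal sketch of that wiring, assuming it sits at the bottom of this same module where sys is already imported:

if __name__ == '__main__':
    # forward main()'s integer return value (or a raised SystemExit) to the shell
    sys.exit(main())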