Example #1
Votes: 0
File: main.py — Project: GunioRobot/fabric
def main():
    """
    Main command-line execution loop.

    Drives one full ``fab`` invocation: parses CLI options into the shared
    ``state.env`` dict, locates and loads the fabfile, services the
    informational flags (``--version``, ``--list``/``--shortlist``,
    ``--display``, and the bare-invocation help screen), then executes each
    requested task once per target host -- serially, or through a
    multiprocessing-backed ``JobQueue`` when a task requires parallelism --
    and once locally when no hosts apply.

    NOTE: this module targets Python 2 (``basestring``,
    ``except ImportError, e`` syntax below).
    """
    try:
        # Parse command line options
        parser, options, arguments = parse_options()

        # Handle regular args vs -- args
        # (optparse convention: largs = positional args before a bare "--",
        # rargs = everything after it; the latter becomes a raw shell
        # command to run remotely, see parse_remainder below.)
        arguments = parser.largs
        remainder_arguments = parser.rargs

        # Update env with any overridden option values
        # NOTE: This needs to remain the first thing that occurs
        # post-parsing, since so many things hinge on the values in env.
        for option in env_options:
            state.env[option.dest] = getattr(options, option.dest)

        # Handle --hosts, --roles, --exclude-hosts (comma separated string =>
        # list)
        for key in ['hosts', 'roles', 'exclude_hosts']:
            if key in state.env and isinstance(state.env[key], basestring):
                state.env[key] = state.env[key].split(',')

        # Handle output control level show/hide
        update_output_levels(show=options.show, hide=options.hide)

        # Handle version number option
        if options.show_version:
            print("Fabric %s" % state.env.version)
            sys.exit(0)

        # Load settings from user settings file, into shared env dict.
        # (Done after option handling so the rcfile path itself may come
        # from the command line.)
        state.env.update(load_settings(state.env.rcfile))

        # Find local fabfile path or abort
        # A missing fabfile is only fatal when there is also no "--"
        # remainder command to run.
        fabfile = find_fabfile()
        if not fabfile and not remainder_arguments:
            abort("""Couldn't find any fabfiles!

Remember that -f can be used to specify fabfile path, and use -h for help.""")

        # Store absolute path to fabfile in case anyone needs it
        state.env.real_fabfile = fabfile

        # Load fabfile (which calls its module-level code, including
        # tweaks to env values) and put its commands in the shared commands
        # dict
        if fabfile:
            docstring, callables, default = load_fabfile(fabfile)
            state.commands.update(callables)

        # Handle case where we were called bare, i.e. just "fab", and print
        # a help message.
        actions = (options.list_commands, options.shortlist, options.display,
            arguments, remainder_arguments, default)
        if not any(actions):
            parser.print_help()
            sys.exit(1)

        # Abort if no commands found
        if not state.commands and not remainder_arguments:
            abort("Fabfile didn't contain any commands!")

        # Now that we're settled on a fabfile, inform user.
        if state.output.debug:
            if fabfile:
                print("Using fabfile '%s'" % fabfile)
            else:
                print("No fabfile loaded -- remainder command only")

        # Shortlist is now just an alias for the "short" list format;
        # it overrides use of --list-format if somebody were to specify both
        if options.shortlist:
            options.list_format = 'short'
            options.list_commands = True

        # List available commands
        if options.list_commands:
            print("\n".join(list_commands(docstring, options.list_format)))
            sys.exit(0)

        # Handle show (command-specific help) option
        # (display_command presumably exits on its own -- no sys.exit here;
        # confirm against its definition.)
        if options.display:
            display_command(options.display)

        # If user didn't specify any commands to run, show help
        if not (arguments or remainder_arguments or default):
            parser.print_help()
            sys.exit(0)  # Or should it exit with error (1)?

        # Parse arguments into commands to run (plus args/kwargs/hosts)
        commands_to_run = parse_arguments(arguments)

        # Parse remainders into a faux "command" to execute
        remainder_command = parse_remainder(remainder_arguments)

        # Figure out if any specified task names are invalid
        # (crawl() resolves dotted task names against the nested commands
        # mapping; None means "not found".)
        unknown_commands = []
        for tup in commands_to_run:
            if crawl(tup[0], state.commands) is None:
                unknown_commands.append(tup[0])

        # Abort if any unknown commands were specified
        if unknown_commands:
            abort("Command(s) not found:\n%s" \
                % indent(unknown_commands))

        # Generate remainder command and insert into commands, commands_to_run
        # The appended tuple matches the 6-field shape unpacked in the
        # execution loop below: (name, args, kwargs, hosts, roles,
        # exclude_hosts).
        if remainder_command:
            r = '<remainder>'
            state.commands[r] = lambda: api.run(remainder_command)
            commands_to_run.append((r, [], {}, [], [], []))

        # Ditto for a default, if found
        if not commands_to_run and default:
            commands_to_run.append((default.name, [], {}, [], [], []))

        if state.output.debug:
            names = ", ".join(x[0] for x in commands_to_run)
            print("Commands to run: %s" % names)

        # Import multiprocessing if needed, erroring out usefully if it can't.
        # Deferred import: serial-only runs never pay for (or require) it.
        if state.env.parallel or _parallel_tasks(commands_to_run):
            try:
                import multiprocessing
            except ImportError, e:
                msg = "At least one task needs to be run in parallel, but the\nmultiprocessing module cannot be imported:"
                msg += "\n\n\t%s\n\n" % e
                msg += "Please make sure the module is installed or that the above ImportError is\nfixed."
                abort(msg)

        # At this point all commands must exist, so execute them in order.
        for name, args, kwargs, cli_hosts, cli_roles, cli_exclude_hosts in commands_to_run:
            # Get callable by itself
            task = crawl(name, state.commands)
            # Set current task name (used for some error messages)
            state.env.command = name
            # Set host list (also copy to env)
            state.env.all_hosts = hosts = get_hosts(
                task, cli_hosts, cli_roles, cli_exclude_hosts)

            # Get pool size for this task
            pool_size = _get_pool_size(task, hosts)
            # Set up job queue in case parallel is needed
            jobs = JobQueue(pool_size)
            if state.output.debug:
                jobs._debug = True

            # If hosts found, execute the function on each host in turn
            for host in hosts:
                # Preserve user
                # (interpret_host_string presumably mutates env.user when the
                # host string carries a user@ prefix -- hence the save/restore
                # around each host; confirm against its definition.)
                prev_user = state.env.user
                # Split host string and apply to env dict
                # NOTE(review): env.host_string is read further down, so this
                # call appears to set it as a side effect -- confirm.
                username, hostname, port = interpret_host_string(host)
                # Log to stdout
                # (tasks exposing a 'return_value' attribute are deliberately
                # not announced; reason not visible from here.)
                if state.output.running and not hasattr(task, 'return_value'):
                    print("[%s] Executing task '%s'" % (host, name))

                # Handle parallel execution
                if requires_parallel(task):
                    # Grab appropriate callable (func or instance method)
                    to_call = task
                    if hasattr(task, 'run') and callable(task.run):
                        to_call = task.run
                    # Wrap in another callable that nukes the child's cached
                    # connection object, if needed, to prevent shared-socket
                    # problems.
                    # NOTE: 'inner' late-binds to_call; that is safe here only
                    # because to_call is identical for every iteration of this
                    # per-task host loop.
                    def inner(*args, **kwargs):
                        key = normalize_to_string(state.env.host_string)
                        state.connections.pop(key, "")
                        to_call(*args, **kwargs)
                    # Stuff into Process wrapper
                    p = multiprocessing.Process(target=inner, args=args,
                        kwargs=kwargs)
                    # Name/id is host string
                    p.name = state.env.host_string
                    # Add to queue
                    jobs.append(p)
                # Handle serial execution
                else:
                    _run_task(task, args, kwargs)

                # Put old user back
                state.env.user = prev_user

            # If running in parallel, block until job queue is emptied
            # (JobQueue presumably evaluates falsy when empty, so this is
            # skipped for purely serial tasks -- confirm.)
            if jobs:
                jobs.close()
                jobs.start()

            # If no hosts found, assume local-only and run once
            if not hosts:
                _run_task(task, args, kwargs)

        # If we got here, no errors occurred, so print a final note.
        if state.output.status:
            print("\nDone.")