Example #1
def _execute(task, host, my_env, args, kwargs, jobs, queue, multiprocessing):
    """
    Primary single-host work body of execute()
    """
    # Log to stdout
    if state.output.running and not hasattr(task, 'return_value'):
        print("[%s] Executing task '%s'" % (host, my_env['command']))
    # Create per-run env with connection settings
    local_env = to_dict(host)
    local_env.update(my_env)
    # Set a few more env flags for parallelism
    if queue is not None:
        local_env.update({'parallel': True, 'linewise': True})
    # Handle parallel execution
    if queue is not None:  # Since queue is only set for parallel
        name = local_env['host_string']

        # Wrap in another callable that:
        # * expands the env it's given to ensure parallel, linewise, etc are
        #   all set correctly and explicitly. Such changes are naturally
        #   insulated from the parent process.
        # * nukes the connection cache to prevent shared-access problems
        # * knows how to send the task's return value back over a Queue
        # * captures exceptions raised by the task
        def inner(args, kwargs, queue, name, env):
            state.env.update(env)

            def submit(result):
                queue.put({'name': name, 'result': result})

            try:
                key = normalize_to_string(state.env.host_string)
                state.connections.pop(key, "")
                submit(task.run(*args, **kwargs))
            except NetworkError, e:
                # Backwards compat test re: whether to use an exception or
                # abort
                if not state.env.use_exceptions_for['network']:
                    func = warn if state.env.skip_bad_hosts else abort
                    from fabric.io import prefixed_output
                    with prefixed_output("[%s]: " % state.env.host_string):
                        error(e.message, func=func, exception=e.wrapped)
                else:
                    raise

            except BaseException, e:  # We really do want to capture everything
                # SystemExit implies use of abort(), which prints its own
                # traceback, host info etc -- so we don't want to double up
                # on that. For everything else, though, we need to make
                # clear what host encountered the exception that will
                # print.
                if e.__class__ is not SystemExit:
                    sys.stderr.write(
                        "!!! Parallel execution exception under host %r:\n" %
                        name)
                    submit(e)
                # Here, anything -- unexpected exceptions, or abort()
                # driven SystemExits -- will bubble up and terminate the
                # child process.
                raise
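The heart of this example is the child-process pattern: run the task, then ship either its return value or the raised exception back to the parent over a multiprocessing.Queue, re-raising so the child exits nonzero on failure. A minimal standalone sketch of that pattern (the names and the sample workload are illustrative, not Fabric's):

import multiprocessing
import sys

def _child(queue, name, func, args, kwargs):
    # Run the work and report its outcome back to the parent.
    try:
        queue.put({'name': name, 'result': func(*args, **kwargs)})
    except BaseException as e:
        # Ship the exception object back, then re-raise so the child
        # process terminates with a nonzero exit status.
        sys.stderr.write("!!! Parallel execution exception under %r:\n" % name)
        queue.put({'name': name, 'result': e})
        raise

if __name__ == '__main__':
    q = multiprocessing.Queue()
    p = multiprocessing.Process(target=_child, name='web1',
                                args=(q, 'web1', pow, (2, 10), {}))
    p.start()
    p.join()
    print(q.get())  # {'name': 'web1', 'result': 1024}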
Example #2
 def setUp(self):
     # Set up default networking for test server
     # We don't actually use this, but if we don't set it, tests ask
     # questions on the command line.
     env.disable_known_hosts = True
     env.update(to_dict('%s@%s:%s' % (USER, HOST, PORT)))
     env.password = PASSWORDS[USER]
     env.use_shell = False
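For reference, `to_dict` turns a `user@host:port` string into the env keys used above. The following is only a rough approximation of Fabric's helper (the real one also normalizes missing pieces against defaults in `env`):

def to_dict(host_string):
    # Split 'user@host:port' into its parts; user and port are optional.
    user, _, hostport = host_string.rpartition('@')
    host, _, port = hostport.partition(':')
    return {
        'user': user or None,
        'host': host,
        'port': port or '22',
        'host_string': host_string,
    }

print(to_dict('deploy@web1.example.com:2222'))
# {'user': 'deploy', 'host': 'web1.example.com', 'port': '2222',
#  'host_string': 'deploy@web1.example.com:2222'}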
Example #3
def _execute(task, host, my_env, args, kwargs, jobs, queue):
    """
    Primary single-host work body of execute()
    """
    # Log to stdout
    if state.output.running and not hasattr(task, 'return_value'):
        print("[%s] Executing task '%s'" % (host, my_env['command']))
    # Create per-run env with connection settings
    local_env = copy(my_env)
    local_env.update(to_dict(host))

    with settings(**local_env):
        if jobs is None or queue is None:
            return task.run(*args, **kwargs)
        else:
            import multiprocessing

            # Stuff into Process wrapper
            kwarg_dict = {
                'task': task,
                'args': args,
                'kwargs': kwargs,
                'env': copy(state.env),
                'queue': queue,
            }
            kwarg_dict['env'].update({'parallel': True, 'linewise': True})

            job_name = '|'.join([task.role, host])
            p = multiprocessing.Process(target=parallel_task_target, kwargs=kwarg_dict, name=job_name)
            # Add to queue
            jobs.append(p)
Example #4
def _execute(task, host, my_env, args, kwargs, jobs, queue, multiprocessing):
    """
    Primary single-host work body of execute()
    """
    # Log to stdout
    if state.output.running and not hasattr(task, 'return_value'):
        print("[%s] Executing task '%s'" % (host, my_env['command']))
    # Create per-run env with connection settings
    local_env = to_dict(host)
    local_env.update(my_env)
    # Set a few more env flags for parallelism
    if queue is not None:
        local_env.update({'parallel': True, 'linewise': True})
    with settings(**local_env):
        # Handle parallel execution
        if queue is not None:  # Since queue is only set for parallel
            name = local_env['host_string']

            # Wrap in another callable that:
            # * nukes the connection cache to prevent shared-access problems
            # * knows how to send the task's return value back over a Queue
            # * captures exceptions raised by the task
            def inner(args, kwargs, queue, name):
                def submit(result):
                    queue.put({'name': name, 'result': result})

                try:
                    key = normalize_to_string(state.env.host_string)
                    state.connections.pop(key, "")
                    submit(task.run(*args, **kwargs))
                except BaseException, e:  # We really do want to capture everything
                    # SystemExit implies use of abort(), which prints its own
                    # traceback, host info etc -- so we don't want to double up
                    # on that. For everything else, though, we need to make
                    # clear what host encountered the exception that will
                    # print.
                    if e.__class__ is not SystemExit:
                        sys.stderr.write(
                            "!!! Parallel execution exception under host %r:\n"
                            % name)
                        submit(e)
                    # Here, anything -- unexpected exceptions, or abort()
                    # driven SystemExits -- will bubble up and terminate the
                    # child process.
                    raise

            # Stuff into Process wrapper
            kwarg_dict = {
                'args': args,
                'kwargs': kwargs,
                'queue': queue,
                'name': name
            }
            p = multiprocessing.Process(target=inner, kwargs=kwarg_dict)
            # Name/id is host string
            p.name = name
            # Add to queue
            jobs.append(p)
        # Handle serial execution
        else:
            return task.run(*args, **kwargs)
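The `settings(**local_env)` block wrapping both branches above temporarily layers values onto the shared env and restores them afterwards. A rough, dict-based sketch of that behaviour (illustrative only, not Fabric's implementation):

from contextlib import contextmanager

env = {'parallel': False}

@contextmanager
def settings(**overrides):
    # Remember the prior values (or their absence), apply the overrides,
    # and restore everything on exit, even if the block raises.
    missing = object()
    previous = {k: env.get(k, missing) for k in overrides}
    env.update(overrides)
    try:
        yield
    finally:
        for key, old in previous.items():
            if old is missing:
                env.pop(key, None)
            else:
                env[key] = old

with settings(parallel=True, host_string='deploy@web1'):
    assert env['parallel'] and env['host_string'] == 'deploy@web1'
assert env['parallel'] is False and 'host_string' not in env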
Example #5
def _execute(task, host, my_env, args, kwargs, jobs, queue, multiprocessing):
    """
    Primary single-host work body of execute()
    """
    # Log to stdout
    if state.output.running and not hasattr(task, 'return_value'):
        print("[%s] Executing task '%s'" % (host, my_env['command']))
    # Create per-run env with connection settings
    local_env = to_dict(host)
    local_env.update(my_env)
    # Set a few more env flags for parallelism
    if queue is not None:
        local_env.update({'parallel': True, 'linewise': True})
    # Handle parallel execution
    if queue is not None: # Since queue is only set for parallel
        name = local_env['host_string']
        # Wrap in another callable that:
        # * expands the env it's given to ensure parallel, linewise, etc are
        #   all set correctly and explicitly. Such changes are naturally
        #   insulated from the parent process.
        # * nukes the connection cache to prevent shared-access problems
        # * knows how to send the task's return value back over a Queue
        # * captures exceptions raised by the task
        def inner(args, kwargs, queue, name, env):
            state.env.update(env)
            def submit(result):
                queue.put({'name': name, 'result': result})
            try:
                # key = normalize_to_string(state.env.host_string)
                # state.connections.pop(key, "")
                state.connections.clear()
                submit(task.run(*args, **kwargs))
            except BaseException, e: # We really do want to capture everything
                # SystemExit implies use of abort(), which prints its own
                # traceback, host info etc -- so we don't want to double up
                # on that. For everything else, though, we need to make
                # clear what host encountered the exception that will
                # print.
                if e.__class__ is not SystemExit:
                    sys.stderr.write("!!! Parallel execution exception under host %r:\n" % name)
                    submit(e)
                # Here, anything -- unexpected exceptions, or abort()
                # driven SystemExits -- will bubble up and terminate the
                # child process.
                raise

        # Stuff into Process wrapper
        kwarg_dict = {
            'args': args,
            'kwargs': kwargs,
            'queue': queue,
            'name': name,
            'env': local_env,
        }
        p = multiprocessing.Process(target=inner, kwargs=kwarg_dict)
        # Name/id is host string
        p.name = name
        # Add to queue
        jobs.append(p)
Example #6
 def env_setup(self):
     # Set up default networking for test server
     env.disable_known_hosts = True
     env.update(to_dict('%s@%s:%s' % (USER, HOST, PORT)))
     env.password = PASSWORDS[USER]
     # Command response mocking is easier without having to account for
     # shell wrapping everywhere.
     env.use_shell = False
Example #7
 def env_setup(self):
     # Set up default networking for test server
     env.disable_known_hosts = True
     env.update(to_dict('%s@%s:%s' % (USER, HOST, PORT)))
     env.password = PASSWORDS[USER]
     # Command response mocking is easier without having to account for
     # shell wrapping everywhere.
     env.use_shell = False
Example #8
 def host_prompting_wrapper(*args, **kwargs):
     while not env.get('host_string', False):
         handle_prompt_abort("the target host connection string")
         host_string = raw_input("No hosts found. Please specify "
                                 "host string for connection [localhost]: ")
         if host_string == '':
             host_string = 'localhost'
         env.update(to_dict(host_string))
     return func(*args, **kwargs)
Example #9
 def host_prompting_wrapper(*args, **kwargs):
     while not env.get('host_string', False):
         handle_prompt_abort("the target host connection string")
         host_string = raw_input("No hosts found. Please specify "
                                 "host string for connection [localhost]: ")
         if host_string == '':
             host_string = 'localhost'
         env.update(to_dict(host_string))
     return func(*args, **kwargs)
Example #10
def _execute(task, host, my_env, args, kwargs, jobs, queue, multiprocessing):
    """
    Primary single-host work body of execute()
    """
    # Log to stdout
    if state.output.running and not hasattr(task, 'return_value'):
        print("[%s] Executing task '%s'" % (host, my_env['command']))
    # Create per-run env with connection settings
    local_env = to_dict(host)
    local_env.update(my_env)
    # Set a few more env flags for parallelism
    if queue is not None:
        local_env.update({'parallel': True, 'linewise': True})
    with settings(**local_env):
        # Handle parallel execution
        if queue is not None:  # Since queue is only set for parallel
            name = local_env['host_string']

            # Wrap in another callable that:
            # * nukes the connection cache to prevent shared-access problems
            # * knows how to send the task's return value back over a Queue
            # * captures exceptions raised by the task
            def inner(args, kwargs, queue, name):
                try:
                    key = normalize_to_string(state.env.host_string)
                    state.connections.pop(key, "")
                    result = task.run(*args, **kwargs)
                except BaseException, e:  # We really do want to capture everything
                    result = e
                    # But still print it out, otherwise users won't know what the
                    # f**k. Especially if the task is run at top level and nobody's
                    # doing anything with the return value.
                    # BUT don't do this if it's a SystemExit as that implies use of
                    # abort(), which does its own printing.
                    if e.__class__ is not SystemExit:
                        print >> sys.stderr, "!!! Parallel execution exception under host %r:" % name
                        sys.excepthook(*sys.exc_info())
                    # Conversely, if it IS SystemExit, we can raise it to ensure a
                    # correct return value.
                    else:
                        raise
                queue.put({'name': name, 'result': result})

            # Stuff into Process wrapper
            kwarg_dict = {
                'args': args,
                'kwargs': kwargs,
                'queue': queue,
                'name': name
            }
            p = multiprocessing.Process(target=inner, kwargs=kwarg_dict)
            # Name/id is host string
            p.name = name
            # Add to queue
            jobs.append(p)
        # Handle serial execution
        else:
            return task.run(*args, **kwargs)
Example #11
def _execute(task, host, my_env, args, kwargs, jobs, queue, multiprocessing):
    """
    Primary single-host work body of execute()
    """
    # Log to stdout
    if state.output.running and not hasattr(task, 'return_value'):
        print("[%s] Executing task '%s'" % (host, my_env['command']))
    # Create per-run env with connection settings
    local_env = to_dict(host)
    local_env.update(my_env)
    # Set a few more env flags for parallelism
    if queue is not None:
        local_env.update({'parallel': True, 'linewise': True})
    with settings(**local_env):
        # Handle parallel execution
        if queue is not None: # Since queue is only set for parallel
            name = local_env['host_string']
            # Wrap in another callable that:
            # * nukes the connection cache to prevent shared-access problems
            # * knows how to send the task's return value back over a Queue
            # * captures exceptions raised by the task
            def inner(args, kwargs, queue, name):
                try:
                    key = normalize_to_string(state.env.host_string)
                    state.connections.pop(key, "")
                    result = task.run(*args, **kwargs)
                except BaseException, e: # We really do want to capture everything
                    result = e
                    # But still print it out, otherwise users won't know what the
                    # f**k. Especially if the task is run at top level and nobody's
                    # doing anything with the return value.
                    # BUT don't do this if it's a SystemExit as that implies use of
                    # abort(), which does its own printing.
                    if e.__class__ is not SystemExit:
                        print >> sys.stderr, "!!! Parallel execution exception under host %r:" % name
                        sys.excepthook(*sys.exc_info())
                    # Conversely, if it IS SystemExit, we can raise it to ensure a
                    # correct return value.
                    else:
                        raise
                queue.put({'name': name, 'result': result})

            # Stuff into Process wrapper
            kwarg_dict = {
                'args': args,
                'kwargs': kwargs,
                'queue': queue,
                'name': name
            }
            p = multiprocessing.Process(target=inner, kwargs=kwarg_dict)
            # Name/id is host string
            p.name = name
            # Add to queue
            jobs.append(p)
        # Handle serial execution
        else:
            return task.run(*args, **kwargs)
Example #12
def _execute(task, host, my_env, args, kwargs, jobs, queue, multiprocessing):
    """
    Primary single-host work body of execute().
    """
    # Log to stdout
    if state.output.running and not hasattr(task, 'return_value'):
        print("[%s] Executing task '%s'" % (host, my_env['command']))
    # Create per-run env with connection settings
    local_env = to_dict(host)
    local_env.update(my_env)
    # Set a few more env flags for parallelism
    if queue is not None:
        local_env.update({'parallel': True, 'linewise': True})
    # Handle parallel execution
    if queue is not None:  # Since queue is only set for parallel
        name = local_env['host_string']

        # Wrap in another callable that:
        # * expands the env it's given to ensure parallel, linewise, etc are
        #   all set correctly and explicitly. Such changes are naturally
        #   insulated from the parent process.
        # * nukes the connection cache to prevent shared-access problems
        # * knows how to send the task's return value back over a Queue
        # * captures exceptions raised by the task
        def inner(args, kwargs, queue, name, env):
            state.env.update(env)

            def submit(result):
                queue.put({'name': name, 'result': result})

            try:
                state.connections.clear()
                submit(task.run(*args, **kwargs))
            except BaseException, e:
                _LOGGER.error(traceback.format_exc())
                submit(e)
                sys.exit(1)

        # Stuff into Process wrapper
        kwarg_dict = {
            'args': args,
            'kwargs': kwargs,
            'queue': queue,
            'name': name,
            'env': local_env,
        }
        p = multiprocessing.Process(target=inner, kwargs=kwarg_dict)
        # Name/id is host string
        p.name = name
        # Add to queue
        jobs.append(p)
Example #13
def _execute(task, host, my_env, args, kwargs, jobs, queue, multiprocessing):
    """
    Primary single-host work body of execute().
    """
    # Log to stdout
    if state.output.running and not hasattr(task, 'return_value'):
        print("[%s] Executing task '%s'" % (host, my_env['command']))
    # Create per-run env with connection settings
    local_env = to_dict(host)
    local_env.update(my_env)
    # Set a few more env flags for parallelism
    if queue is not None:
        local_env.update({'parallel': True, 'linewise': True})
    # Handle parallel execution
    if queue is not None:  # Since queue is only set for parallel
        name = local_env['host_string']

        # Wrap in another callable that:
        # * expands the env it's given to ensure parallel, linewise, etc are
        #   all set correctly and explicitly. Such changes are naturally
        #   insulated from the parent process.
        # * nukes the connection cache to prevent shared-access problems
        # * knows how to send the task's return value back over a Queue
        # * captures exceptions raised by the task
        def inner(args, kwargs, queue, name, env):
            state.env.update(env)

            def submit(result):
                queue.put({'name': name, 'result': result})

            try:
                state.connections.clear()
                submit(task.run(*args, **kwargs))
            except BaseException, e:
                _LOGGER.error(traceback.format_exc())
                submit(e)
                sys.exit(1)

        # Stuff into Process wrapper
        kwarg_dict = {
            'args': args,
            'kwargs': kwargs,
            'queue': queue,
            'name': name,
            'env': local_env,
        }
        p = multiprocessing.Process(target=inner, kwargs=kwarg_dict)
        # Name/id is host string
        p.name = name
        # Add to queue
        jobs.append(p)
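Unlike the earlier examples, this variant does not re-raise: it logs the full traceback and exits the child with a nonzero status, so the parent's exit-code check still sees the failure. A standalone sketch of that reporting style (the logger name is made up):

import logging
import sys
import traceback

_LOGGER = logging.getLogger('parallel.worker')

def run_reporting_failures(func, *args, **kwargs):
    # Run the work; on any failure, record the traceback and exit nonzero
    # instead of letting the exception propagate noisily.
    try:
        return func(*args, **kwargs)
    except BaseException:
        _LOGGER.error(traceback.format_exc())
        sys.exit(1)

if __name__ == '__main__':
    logging.basicConfig(level=logging.ERROR)
    run_reporting_failures(int, 'not a number')  # logs ValueError, exits 1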
Example #14
def _execute(task, host, my_env, args, kwargs, jobs, queue, multiprocessing):
    """
    Primary single-host work body of execute()
    """
    # Log to stdout
    if state.output.running and not hasattr(task, 'return_value'):
        print("[%s] Executing task '%s'" % (host, my_env['command']))
    # Create per-run env with connection settings
    local_env = to_dict(host)
    local_env.update(my_env)
    state.env.update(local_env)
    # Handle parallel execution
    if queue is not None: # Since queue is only set for parallel
        # Set a few more env flags for parallelism
        state.env.parallel = True # triggers some extra aborts, etc
        state.env.linewise = True # to mirror -P behavior
        name = local_env['host_string']
        # Wrap in another callable that:
        # * nukes the connection cache to prevent shared-access problems
        # * knows how to send the task's return value back over a Queue
        # * captures exceptions raised by the task
        def inner(args, kwargs, queue, name):
            key = normalize_to_string(state.env.host_string)
            state.connections.pop(key, "")
            try:
                result = task.run(*args, **kwargs)
            except BaseException, e: # We really do want to capture everything
                result = e
                # But still print it out, otherwise users won't know what the
                # f**k. Especially if the task is run at top level and nobody's
                # doing anything with the return value.
                print >> sys.stderr, "!!! Parallel execution exception under host %r:" % name
                sys.excepthook(*sys.exc_info())
            queue.put({'name': name, 'result': result})

        # Stuff into Process wrapper
        kwarg_dict = {
            'args': args,
            'kwargs': kwargs,
            'queue': queue,
            'name': name
        }
        p = multiprocessing.Process(target=inner, kwargs=kwarg_dict)
        # Name/id is host string
        p.name = name
        # Add to queue
        jobs.append(p)
Example #15
 def setup(self):
     # Clear Fudge mock expectations
     clear_expectations()
     # Copy env, output for restoration in teardown
     self.previous_env = copy.deepcopy(env)
     # Deepcopy doesn't work well on AliasDicts; but they're only one layer
     # deep anyways, so...
     self.previous_output = output.items()
     # Set up default networking for test server
     env.disable_known_hosts = True
     env.update(to_dict('%s@%s:%s' % (USER, HOST, PORT)))
     env.password = PASSWORDS[USER]
     # Command response mocking is easier without having to account for
     # shell wrapping everywhere.
     env.use_shell = False
     # Temporary local file dir
     self.tmpdir = tempfile.mkdtemp()
Example #16
 def setup(self):
     # Clear Fudge mock expectations
     clear_expectations()
     # Copy env, output for restoration in teardown
     self.previous_env = copy.deepcopy(env)
     # Deepcopy doesn't work well on AliasDicts; but they're only one layer
     # deep anyways, so...
     self.previous_output = output.items()
     # Set up default networking for test server
     env.disable_known_hosts = True
     env.update(to_dict('%s@%s:%s' % (USER, HOST, PORT)))
     env.password = PASSWORDS[USER]
     # Command response mocking is easier without having to account for
     # shell wrapping everywhere.
     env.use_shell = False
     # Temporary local file dir
     self.tmpdir = tempfile.mkdtemp()
Example #17
def _execute(task, host, my_env, args, kwargs):
    """
    Primary single-host work body of execute()
    """
    # Log to stdout
    if state.output.running and not hasattr(task, 'return_value'):
        print("[%s] Executing task '%s'" % (host, my_env['command']))
    # Create per-run env with connection settings
    local_env = to_dict(host)
    local_env.update(my_env)
    state.env.update(local_env)
    # Handle parallel execution
    if requires_parallel(task):
        # Set a few more env flags for parallelism
        state.env.parallel = True # triggers some extra aborts, etc
        state.env.linewise = True # to mirror -P behavior
        # Import multiprocessing if needed, erroring out usefully
        # if it can't.
        try:
            import multiprocessing
        except ImportError:
            import traceback
            tb = traceback.format_exc()
            abort(tb + """
At least one task needs to be run in parallel, but the
multiprocessing module cannot be imported (see above
traceback.) Please make sure the module is installed
or that the above ImportError is fixed.""")

        # Wrap in another callable that nukes the child's cached
        # connection object, if needed, to prevent shared-socket
        # problems.
        def inner(*args, **kwargs):
            key = normalize_to_string(state.env.host_string)
            state.connections.pop(key, "")
            task.run(*args, **kwargs)
        # Stuff into Process wrapper
        p = multiprocessing.Process(target=inner, args=args,
            kwargs=kwargs)
        # Name/id is host string
        p.name = local_env['host_string']
        # Add to queue
        jobs.append(p)
    # Handle serial execution
    else:
        task.run(*args, **kwargs)
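The try/except around `import multiprocessing` above is the usual guard for an optional dependency: fail with the original traceback plus a hint about what to do. The same idiom in isolation (the helper name is made up; SystemExit stands in for Fabric's abort()):

import importlib
import traceback

def require_module(name, hint):
    # Import a module by name, or exit with the traceback plus a hint.
    try:
        return importlib.import_module(name)
    except ImportError:
        raise SystemExit(traceback.format_exc() + "\n" + hint)

multiprocessing = require_module(
    'multiprocessing',
    "At least one task needs to be run in parallel, but the multiprocessing\n"
    "module cannot be imported (see above traceback). Please make sure the\n"
    "module is installed or that the above ImportError is fixed.")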
Example #18
def _execute(task, host, my_env, args, kwargs):
    """
    Primary single-host work body of execute()
    """
    # Log to stdout
    if state.output.running and not hasattr(task, 'return_value'):
        print("[%s] Executing task '%s'" % (host, my_env['command']))
    # Create per-run env with connection settings
    local_env = to_dict(host)
    local_env.update(my_env)
    state.env.update(local_env)
    # Handle parallel execution
    if requires_parallel(task):
        # Set a few more env flags for parallelism
        state.env.parallel = True  # triggers some extra aborts, etc
        state.env.linewise = True  # to mirror -P behavior
        # Import multiprocessing if needed, erroring out usefully
        # if it can't.
        try:
            import multiprocessing
        except ImportError:
            import traceback
            tb = traceback.format_exc()
            abort(tb + """
At least one task needs to be run in parallel, but the
multiprocessing module cannot be imported (see above
traceback.) Please make sure the module is installed
or that the above ImportError is fixed.""")

        # Wrap in another callable that nukes the child's cached
        # connection object, if needed, to prevent shared-socket
        # problems.
        def inner(*args, **kwargs):
            key = normalize_to_string(state.env.host_string)
            state.connections.pop(key, "")
            task.run(*args, **kwargs)

        # Stuff into Process wrapper
        p = multiprocessing.Process(target=inner, args=args, kwargs=kwargs)
        # Name/id is host string
        p.name = local_env['host_string']
        # Add to queue
        jobs.append(p)
    # Handle serial execution
    else:
        task.run(*args, **kwargs)
Example #19
def _execute(task, host, my_env, args, kwargs, jobs, queue, multiprocessing):
    """
    Primary single-host work body of execute()
    """
    # Log to stdout
    if state.output.running and not hasattr(task, 'return_value'):
        print("[%s] Executing task '%s'" % (host, my_env['command']))
    # Create per-run env with connection settings
    local_env = to_dict(host)
    local_env.update(my_env)
    state.env.update(local_env)
    # Handle parallel execution
    if queue is not None:  # Since queue is only set for parallel
        # Set a few more env flags for parallelism
        state.env.parallel = True  # triggers some extra aborts, etc
        state.env.linewise = True  # to mirror -P behavior
        name = local_env['host_string']

        # Wrap in another callable that:
        # * nukes the connection cache to prevent shared-access problems
        # * knows how to send the task's return value back over a Queue
        # * captures exceptions raised by the task
        def inner(args, kwargs, queue, name):
            key = normalize_to_string(state.env.host_string)
            state.connections.pop(key, "")
            try:
                result = task.run(*args, **kwargs)
            except BaseException, e:  # We really do want to capture everything
                result = e
            queue.put({'name': name, 'result': result})

        # Stuff into Process wrapper
        kwarg_dict = {
            'args': args,
            'kwargs': kwargs,
            'queue': queue,
            'name': name
        }
        p = multiprocessing.Process(target=inner, kwargs=kwarg_dict)
        # Name/id is host string
        p.name = name
        # Add to queue
        jobs.append(p)
Example #20
def _execute(task, host, my_env, args, kwargs, jobs, queue, multiprocessing):
    """
    Primary single-host work body of execute()
    """
    # Log to stdout
    if state.output.running and not hasattr(task, 'return_value'):
        print("[%s] Executing task '%s'" % (host, my_env['command']))
    # Create per-run env with connection settings
    local_env = to_dict(host)
    local_env.update(my_env)
    state.env.update(local_env)
    # Handle parallel execution
    if queue is not None: # Since queue is only set for parallel
        # Set a few more env flags for parallelism
        state.env.parallel = True # triggers some extra aborts, etc
        state.env.linewise = True # to mirror -P behavior
        # Wrap in another callable that:
        # * nukes the connection cache to prevent shared-access problems
        # * knows how to send the task's return value back over a Queue
        def inner(args, kwargs, queue):
            key = normalize_to_string(state.env.host_string)
            state.connections.pop(key, "")
            result = task.run(*args, **kwargs)
            queue.put(result)

        # Stuff into Process wrapper
        kwarg_dict = {
            'args': args,
            'kwargs': kwargs,
            'queue': queue,
        }
        p = multiprocessing.Process(target=inner, kwargs=kwarg_dict)
        # Name/id is host string
        p.name = local_env['host_string']
        # Add to queue
        jobs.append(p, queue)
    # Handle serial execution
    else:
        return task.run(*args, **kwargs)
Example #21
def execute(task, *args, **kwargs):
    """
    Execute ``task`` (callable or name), honoring host/role decorators, etc.

    ``task`` may be an actual callable object, or it may be a registered task
    name, which is used to look up a callable just as if the name had been
    given on the command line (including :ref:`namespaced tasks <namespaces>`,
    e.g. ``"deploy.migrate"``.

    The task will then be executed once per host in its host list, which is
    (again) assembled in the same manner as CLI-specified tasks: drawing from
    :option:`-H`, :ref:`env.hosts <hosts>`, the `~fabric.decorators.hosts` or
    `~fabric.decorators.roles` decorators, and so forth.

    ``host``, ``hosts``, ``role``, ``roles`` and ``exclude_hosts`` kwargs will
    be stripped out of the final call, and used to set the task's host list, as
    if they had been specified on the command line like e.g. ``fab
    taskname:host=hostname``.

    Any other arguments or keyword arguments will be passed verbatim into
    ``task`` when it is called, so ``execute(mytask, 'arg1', kwarg1='value')``
    will (once per host) invoke ``mytask('arg1', kwarg1='value')``.

    .. seealso::
        :ref:`The execute usage docs <execute>`, for an expanded explanation
        and some examples.

    .. versionadded:: 1.3
    """
    my_env = {}
    # Obtain task
    if not callable(task):
        # Assume string, set env.command to it
        my_env["command"] = task
        task = crawl(task, state.commands)
        if task is None:
            abort("%r is not callable or a valid task name" % (task,))
    # Set env.command if we were given a real function or callable task obj
    else:
        dunder_name = getattr(task, "__name__", None)
        my_env["command"] = getattr(task, "name", dunder_name)
    # Normalize to Task instance
    if not hasattr(task, "run"):
        task = WrappedCallableTask(task)
    # Filter out hosts/roles kwargs
    new_kwargs, hosts, roles, exclude_hosts = parse_kwargs(kwargs)
    # Set up host list
    my_env["all_hosts"] = task.get_hosts(hosts, roles, exclude_hosts, state.env)

    # Get pool size for this task
    pool_size = task.get_pool_size(my_env["all_hosts"], state.env.pool_size)
    # Set up job queue in case parallel is needed
    jobs = JobQueue(pool_size)
    if state.output.debug:
        jobs._debug = True

    # Call on host list
    if my_env["all_hosts"]:
        for host in my_env["all_hosts"]:
            # Log to stdout
            if state.output.running and not hasattr(task, "return_value"):
                print ("[%s] Executing task '%s'" % (host, my_env["command"]))
            # Create per-run env with connection settings
            local_env = to_dict(host)
            local_env.update(my_env)
            state.env.update(local_env)
            # Handle parallel execution
            if requires_parallel(task):
                # Import multiprocessing if needed, erroring out usefully
                # if it can't.
                try:
                    import multiprocessing
                except ImportError, e:
                    msg = "At least one task needs to be run in parallel, but the\nmultiprocessing module cannot be imported:"
                    msg += "\n\n\t%s\n\n" % e
                    msg += "Please make sure the module is installed or that the above ImportError is\nfixed."
                    abort(msg)

                # Wrap in another callable that nukes the child's cached
                # connection object, if needed, to prevent shared-socket
                # problems.
                def inner(*args, **kwargs):
                    key = normalize_to_string(state.env.host_string)
                    state.connections.pop(key, "")
                    task.run(*args, **kwargs)

                # Stuff into Process wrapper
                p = multiprocessing.Process(target=inner, args=args, kwargs=new_kwargs)
                # Name/id is host string
                p.name = local_env["host_string"]
                # Add to queue
                jobs.append(p)
            # Handle serial execution
            else:
                task.run(*args, **new_kwargs)

        # If running in parallel, block until job queue is emptied
        if jobs:
            jobs.close()
            exitcodes = jobs.run()
            # Abort if any children did not exit cleanly (fail-fast).
            # This prevents Fabric from continuing on to any other tasks.
            if any([x != 0 for x in exitcodes]):
                abort("One or more hosts failed while executing task '%s'" % (my_env["command"]))
Example #22
0
 def set_network(self):
     env.update(to_dict('%s@%s:%s' % (USER, HOST, PORT)))
Example #23
File: tasks.py Project: sjmh/fabric
def execute(task, *args, **kwargs):
    """
    Execute ``task`` (callable or name), honoring host/role decorators, etc.

    ``task`` may be an actual callable object, or it may be a registered task
    name, which is used to look up a callable just as if the name had been
    given on the command line (including :ref:`namespaced tasks <namespaces>`,
    e.g. ``"deploy.migrate"``.

    The task will then be executed once per host in its host list, which is
    (again) assembled in the same manner as CLI-specified tasks: drawing from
    :option:`-H`, :ref:`env.hosts <hosts>`, the `~fabric.decorators.hosts` or
    `~fabric.decorators.roles` decorators, and so forth.

    ``host``, ``hosts``, ``role``, ``roles`` and ``exclude_hosts`` kwargs will
    be stripped out of the final call, and used to set the task's host list, as
    if they had been specified on the command line like e.g. ``fab
    taskname:host=hostname``.

    Any other arguments or keyword arguments will be passed verbatim into
    ``task`` when it is called, so ``execute(mytask, 'arg1', kwarg1='value')``
    will (once per host) invoke ``mytask('arg1', kwarg1='value')``.

    .. seealso::
        :ref:`The execute usage docs <execute>`, for an expanded explanation
        and some examples.

    .. versionadded:: 1.3
    """
    my_env = {}
    # Obtain task
    if not callable(task):
        # Assume string, set env.command to it
        my_env['command'] = task
        task = crawl(task, state.commands)
        if task is None:
            abort("%r is not callable or a valid task name" % (task, ))
    # Set env.command if we were given a real function or callable task obj
    else:
        dunder_name = getattr(task, '__name__', None)
        my_env['command'] = getattr(task, 'name', dunder_name)
    # Normalize to Task instance
    if not hasattr(task, 'run'):
        task = WrappedCallableTask(task)
    # Filter out hosts/roles kwargs
    new_kwargs, hosts, roles, exclude_hosts = parse_kwargs(kwargs)
    # Set up host list
    my_env['all_hosts'] = task.get_hosts(hosts, roles, exclude_hosts,
                                         state.env)

    # Get pool size for this task
    pool_size = task.get_pool_size(my_env['all_hosts'], state.env.pool_size)
    # Set up job queue in case parallel is needed
    jobs = JobQueue(pool_size)
    if state.output.debug:
        jobs._debug = True

    # Call on host list
    if my_env['all_hosts']:
        for host in my_env['all_hosts']:
            # Log to stdout
            if state.output.running and not hasattr(task, 'return_value'):
                print("[%s] Executing task '%s'" % (host, my_env['command']))
            # Create per-run env with connection settings
            local_env = to_dict(host)
            local_env.update(my_env)
            state.env.update(local_env)
            # Handle parallel execution
            if requires_parallel(task):
                # Set a few more env flags for parallelism
                state.env.parallel = True  # triggers some extra aborts, etc
                state.env.linewise = True  # to mirror -P behavior
                # Import multiprocessing if needed, erroring out usefully
                # if it can't.
                try:
                    import multiprocessing
                except ImportError:
                    import traceback
                    tb = traceback.format_exc()
                    abort(tb + """
At least one task needs to be run in parallel, but the
multiprocessing module cannot be imported (see above
traceback.) Please make sure the module is installed
or that the above ImportError is fixed.""")

                # Wrap in another callable that nukes the child's cached
                # connection object, if needed, to prevent shared-socket
                # problems.
                def inner(*args, **kwargs):
                    key = normalize_to_string(state.env.host_string)
                    state.connections.pop(key, "")
                    task.run(*args, **kwargs)

                # Stuff into Process wrapper
                p = multiprocessing.Process(target=inner,
                                            args=args,
                                            kwargs=new_kwargs)
                # Name/id is host string
                p.name = local_env['host_string']
                # Add to queue
                jobs.append(p)
            # Handle serial execution
            else:
                task.run(*args, **new_kwargs)

        # If running in parallel, block until job queue is emptied
        if jobs:
            jobs.close()
            exitcodes = jobs.run()
            # Abort if any children did not exit cleanly (fail-fast).
            # This prevents Fabric from continuing on to any other tasks.
            if any([x != 0 for x in exitcodes]):
                abort("One or more hosts failed while executing task '%s'" %
                      (my_env['command']))

    # Or just run once for local-only
    else:
        state.env.update(my_env)
        task.run(*args, **new_kwargs)
Example #24
 def set_network(self):
     env.update(to_dict('%s@%s:%s' % (USER, HOST, PORT)))
Example #25
def _execute(task, host, my_env, args, kwargs, jobs, queue, multiprocessing):
    """
    Primary single-host work body of execute()
    """
    # Log to stdout
    if state.output.running and not hasattr(task, 'return_value'):
        print(("[%s] Executing task '%s'" % (host, my_env['command'])))
    # Create per-run env with connection settings
    local_env = to_dict(host)
    local_env.update(my_env)
    # Set a few more env flags for parallelism
    if queue is not None:
        local_env.update({'parallel': True, 'linewise': True})
    # Handle parallel execution
    if queue is not None:  # Since queue is only set for parallel
        name = local_env['host_string']

        # Wrap in another callable that:
        # * expands the env it's given to ensure parallel, linewise, etc are
        #   all set correctly and explicitly. Such changes are naturally
        #   insulated from the parent process.
        # * nukes the connection cache to prevent shared-access problems
        # * knows how to send the task's return value back over a Queue
        # * captures exceptions raised by the task
        def inner(args, kwargs, queue, name, env):
            state.env.update(env)

            def submit(result):
                queue.put({'name': name, 'result': result})

            try:
                state.connections.clear()
                submit(task.run(*args, **kwargs))
            except BaseException as e:  # We really do want to capture everything
                # SystemExit implies use of abort(), which prints its own
                # traceback, host info etc -- so we don't want to double up
                # on that. For everything else, though, we need to make
                # clear what host encountered the exception that will
                # print.
                if e.__class__ is not SystemExit:
                    if not (isinstance(e, NetworkError)
                            and _is_network_error_ignored()):
                        sys.stderr.write(
                            "!!! Parallel execution exception under host %r:\n"
                            % name)
                    submit(e)
                # Here, anything -- unexpected exceptions, or abort()
                # driven SystemExits -- will bubble up and terminate the
                # child process.
                if not (isinstance(e, NetworkError)
                        and _is_network_error_ignored()):
                    raise

        # Stuff into Process wrapper
        kwarg_dict = {
            'args': args,
            'kwargs': kwargs,
            'queue': queue,
            'name': name,
            'env': local_env,
        }
        p = multiprocessing.Process(target=inner, kwargs=kwarg_dict)
        # Name/id is host string
        p.name = name
        # Add to queue
        jobs.append(p)
    # Handle serial execution
    else:
        if py33:
            with ExitStack() as stack:
                for s in settings(**local_env):
                    stack.enter_context(s)
                    return task.run(*args, **kwargs)
        else:
            with settings(**local_env):
                return task.run(*args, **kwargs)
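In the py33 branch above, contextlib.ExitStack is used to enter whatever context managers `settings` yields. ExitStack composes a variable number of contexts and unwinds them in reverse order; a small generic example:

from contextlib import ExitStack, contextmanager

@contextmanager
def tagged(name):
    print('enter', name)
    try:
        yield name
    finally:
        print('exit', name)

with ExitStack() as stack:
    names = [stack.enter_context(tagged(n)) for n in ('a', 'b', 'c')]
    print('inside with', names)
# Prints enter a/b/c, then "inside with ['a', 'b', 'c']",
# then exit c/b/a as the with-block ends.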
Example #26
def _execute(task, host, my_env, args, kwargs, jobs, queue, multiprocessing):
    """
    Primary single-host work body of execute()
    """
    # Log to stdout
    if state.output.running and not hasattr(task, 'return_value'):
        print("[%s] Executing task '%s'" % (host, my_env['command']))
    # Create per-run env with connection settings
    local_env = to_dict(host)
    local_env.update(my_env)
    # Set a few more env flags for parallelism
    if queue is not None:
        local_env.update({'parallel': True, 'linewise': True})
    # Handle parallel execution
    if queue is not None: # Since queue is only set for parallel
        name = local_env['host_string']
        workers = kwargs['workers']
        worker = workers[name]

        # Wrap in another callable that:
        # * expands the env it's given to ensure parallel, linewise, etc are
        #   all set correctly and explicitly. Such changes are naturally
        #   insulated from the parent process.
        # * nukes the connection cache to prevent shared-access problems
        # * knows how to send the task's return value back over a Queue
        # * captures exceptions raised by the task
        def inner(args, kwargs, queue, name, worker, env):
            # Set up the correct host_string for this process: the env
            # currently has the worker cfarm name as host_string, but it
            # should always be the worker's actual connection name.
            env['host_string'] = worker.connection_name
            state.env.update(env)

            def submit(result):
                queue.put({'name': name, 'result': result})
            try:
                key = normalize_to_string(state.env.host_string)
                state.connections.pop(key, "")

                # Copy kwargs, drop the shared 'workers' mapping, and hand
                # the task just its own worker. This only works because we
                # control the tasks being called.
                my_kwargs = dict(kwargs)
                my_kwargs.pop('workers')
                my_kwargs['worker'] = worker

                submit(task.run(*args, **my_kwargs))
            except BaseException, e: # We really do want to capture everything
                # SystemExit implies use of abort(), which prints its own
                # traceback, host info etc -- so we don't want to double up
                # on that. For everything else, though, we need to make
                # clear what host encountered the exception that will
                # print.
                if e.__class__ is not SystemExit:
                    sys.stderr.write("!!! Parallel execution exception under host %r:\n" % name)
                    submit(e)
                # Here, anything -- unexpected exceptions, or abort()
                # driven SystemExits -- will bubble up and terminate the
                # child process.
                raise

        # Stuff into Process wrapper
        kwarg_dict = {
            'args': args,
            'kwargs': kwargs,
            'queue': queue,
            'name': name,
            'worker' : worker,
            'env': local_env,
        }
        p = multiprocessing.Process(target=inner, kwargs=kwarg_dict)
        # Name/id is host string
        p.name = name
        # Add to queue
        jobs.append(p)
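All of these _execute variants only build and append the child Process; the parent side (seen in the longer execute() listings above) later drains the queue and checks exit codes via JobQueue. A stripped-down standalone sketch of that parent loop, using a plain list and Queue as stand-ins for JobQueue (the task body is hypothetical):

import multiprocessing

def work(queue, name, n):
    # Hypothetical per-host task body; real code would run the Fabric task.
    queue.put({'name': name, 'result': n * n})

if __name__ == '__main__':
    queue = multiprocessing.Queue()
    jobs = []
    for i in range(3):
        name = 'host%d' % i
        p = multiprocessing.Process(target=work, args=(queue, name, i),
                                    name=name)
        jobs.append(p)
        p.start()
    # Drain results before joining so a full pipe can't block the children.
    results = {}
    for _ in jobs:
        item = queue.get()
        results[item['name']] = item['result']
    for p in jobs:
        p.join()
    # Fail fast if any child exited nonzero, mirroring the exit-code check
    # in the longer execute() listings above.
    if any(p.exitcode != 0 for p in jobs):
        raise SystemExit("One or more hosts failed")
    print(results)  # {'host0': 0, 'host1': 1, 'host2': 4}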