Example #1
    def __init__(self, inventory, variable_manager, loader, display, options, passwords, stdout_callback=None):

        self._inventory        = inventory
        self._variable_manager = variable_manager
        self._loader           = loader
        self._display          = display
        self._options          = options
        self._stats            = AggregateStats()
        self.passwords         = passwords
        self._stdout_callback  = stdout_callback

        self._callbacks_loaded = False
        self._callback_plugins = []
        self._start_at_done    = False

        # make sure the module path (if specified) is parsed and
        # added to the module_loader object
        if options.module_path is not None:
            for path in options.module_path.split(os.pathsep):
                module_loader.add_directory(path)

        # a special flag to help us exit cleanly
        self._terminated = False

        # this dictionary is used to keep track of notified handlers
        self._notified_handlers = dict()

        # dictionaries to keep track of failed/unreachable hosts
        self._failed_hosts      = dict()
        self._unreachable_hosts = dict()

        self._final_q = multiprocessing.Queue()

        # create the pool of worker processes, based on the number of forks specified
        try:
            fileno = sys.stdin.fileno()
        except ValueError:
            fileno = None

        # A temporary file (opened pre-fork) used by connection
        # plugins for inter-process locking.
        self._connection_lockfile = tempfile.TemporaryFile()

        #FIXME: should this move to 'run' and get serial and play pattern applied as limiter?
        # Treat "forks" config parameter as max value. Only create number of workers
        # equal to number of hosts in inventory if less than max value.
        num_workers = min(self._options.forks, len(self._inventory.list_hosts()))

        self._workers = []
        for i in range(num_workers):
            main_q = multiprocessing.Queue()
            rslt_q = multiprocessing.Queue()

            prc = WorkerProcess(self, main_q, rslt_q, loader)
            prc.start()

            self._workers.append((prc, main_q, rslt_q))

        self._result_prc = ResultProcess(self._final_q, self._workers)
        self._result_prc.start()
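
A minimal, standalone sketch of the pool-creation pattern in Example #1: one process per worker slot, each with its own task queue and result queue, capped at min(forks, number of hosts). The worker body and the sentinel shutdown below are illustrative assumptions, not Ansible's actual WorkerProcess/ResultProcess classes.

import multiprocessing

def _worker_main(main_q, rslt_q):
    # hypothetical worker loop: pull a task, "run" it, push a result back
    while True:
        task = main_q.get()
        if task is None:  # sentinel: shut down
            break
        rslt_q.put(('ok', task))

def build_worker_pool(forks, hosts):
    # treat "forks" as a maximum; never start more workers than there are hosts
    num_workers = min(forks, len(hosts))
    workers = []
    for _ in range(num_workers):
        main_q = multiprocessing.Queue()
        rslt_q = multiprocessing.Queue()
        prc = multiprocessing.Process(target=_worker_main, args=(main_q, rslt_q))
        prc.start()
        workers.append((prc, main_q, rslt_q))
    return workers

if __name__ == '__main__':
    pool = build_worker_pool(forks=5, hosts=['web1', 'web2'])
    for prc, main_q, rslt_q in pool:
        main_q.put('ping')
        print(rslt_q.get())
        main_q.put(None)   # ask the worker to exit
        prc.join()
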
Example #2
    def _queue_task(self, host, task, task_vars, play_context):
        ''' handles queueing the task up to be sent to a worker '''

        display.debug("entering _queue_task() for %s/%s" %
                      (host.name, task.action))

        # Add a write lock for tasks.
        # Maybe this should be added somewhere further up the call stack but
        # this is the earliest in the code where we have task (1) extracted
        # into its own variable and (2) there's only a single code path
        # leading to the module being run.  This is called by three
        # functions: __init__.py::_do_handler_run(), linear.py::run(), and
        # free.py::run() so we'd have to add to all three to do it there.
        # The next common higher level is __init__.py::run() and that has
        # tasks inside of play_iterator so we'd have to extract them to do it
        # there.

        global action_write_locks
        if task.action not in action_write_locks:
            display.debug('Creating lock for %s' % task.action)
            action_write_locks[task.action] = Lock()

        # and then queue the new task
        try:

            # create a dummy object with plugin loaders set as an easier
            # way to share them with the forked processes
            shared_loader_obj = SharedPluginLoaderObj()

            queued = False
            starting_worker = self._cur_worker
            while True:
                (worker_prc, rslt_q) = self._workers[self._cur_worker]
                if worker_prc is None or not worker_prc.is_alive():
                    worker_prc = WorkerProcess(self._final_q, task_vars, host,
                                               task, play_context,
                                               self._loader,
                                               self._variable_manager,
                                               shared_loader_obj)
                    self._workers[self._cur_worker][0] = worker_prc
                    worker_prc.start()
                    display.debug("worker is %d (out of %d available)" %
                                  (self._cur_worker + 1, len(self._workers)))
                    queued = True
                self._cur_worker += 1
                if self._cur_worker >= len(self._workers):
                    self._cur_worker = 0
                if queued:
                    break
                elif self._cur_worker == starting_worker:
                    time.sleep(0.0001)

            self._pending_results += 1
        except (EOFError, IOError, AssertionError) as e:
            # most likely an abort
            display.debug("got an error while queuing: %s" % e)
            return
        display.debug("exiting _queue_task() for %s/%s" %
                      (host.name, task.action))
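
The loop in Example #2 scans the worker slots round-robin, reuses any slot whose process is dead or unset, and only backs off with a short sleep once it has wrapped all the way around without finding a free slot. A standalone sketch of just that scheduling logic, with the task body stubbed out (RoundRobinPool and run_task are illustrative names, not Ansible's):

import multiprocessing
import time

def run_task(task):
    # placeholder for the work a forked worker would do
    time.sleep(0.1)

class RoundRobinPool(object):

    def __init__(self, num_slots):
        # each slot holds a Process or None; dead or unset slots are reusable
        self._workers = [None] * num_slots
        self._cur_worker = 0

    def queue_task(self, task):
        queued = False
        starting_worker = self._cur_worker
        while True:
            worker_prc = self._workers[self._cur_worker]
            if worker_prc is None or not worker_prc.is_alive():
                worker_prc = multiprocessing.Process(target=run_task, args=(task,))
                self._workers[self._cur_worker] = worker_prc
                worker_prc.start()
                queued = True
            self._cur_worker = (self._cur_worker + 1) % len(self._workers)
            if queued:
                break
            elif self._cur_worker == starting_worker:
                # every slot is busy: wait briefly before rescanning
                time.sleep(0.0001)

if __name__ == '__main__':
    pool = RoundRobinPool(num_slots=2)
    for i in range(5):
        pool.queue_task(i)
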
Example #3
    def __init__(self, inventory, callback, variable_manager, loader, display,
                 options, passwords):

        self._inventory = inventory
        self._variable_manager = variable_manager
        self._loader = loader
        self._display = display
        self._options = options
        self._stats = AggregateStats()
        self.passwords = passwords

        # a special flag to help us exit cleanly
        self._terminated = False

        # this dictionary is used to keep track of notified handlers
        self._notified_handlers = dict()

        # dictionaries to keep track of failed/unreachable hosts
        self._failed_hosts = dict()
        self._unreachable_hosts = dict()

        self._final_q = multiprocessing.Queue()

        # load all available callback plugins
        # FIXME: we need an option to white-list callback plugins
        self._callback_plugins = []
        for callback_plugin in callback_loader.all(class_only=True):
            if hasattr(callback_plugin, 'CALLBACK_VERSION') and callback_plugin.CALLBACK_VERSION >= 2.0:
                self._callback_plugins.append(callback_plugin(self._display))
            else:
                self._callback_plugins.append(callback_plugin())

        # create the pool of worker processes, based on the number of forks specified
        try:
            fileno = sys.stdin.fileno()
        except ValueError:
            fileno = None

        self._workers = []
        for i in range(self._options.forks):
            # duplicate stdin, if possible
            new_stdin = None
            if fileno is not None:
                try:
                    new_stdin = os.fdopen(os.dup(fileno))
                except OSError:
                    # couldn't dupe stdin, most likely because it's
                    # not a valid file descriptor, so we just rely on
                    # using the one that was passed in
                    pass

            main_q = multiprocessing.Queue()
            rslt_q = multiprocessing.Queue()

            prc = WorkerProcess(self, main_q, rslt_q, loader, new_stdin)
            prc.start()

            self._workers.append((prc, main_q, rslt_q))
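
Example #3 instantiates each callback plugin differently depending on whether it declares CALLBACK_VERSION >= 2.0 (newer plugins receive the display object). A standalone sketch of that version gate, using stand-in plugin classes rather than Ansible's callback_loader; the getattr default collapses the hasattr check from the example into one expression:

class LegacyCallback(object):
    # pre-2.0 style plugin: constructed without arguments
    pass

class ModernCallback(object):
    CALLBACK_VERSION = 2.0

    def __init__(self, display):
        self._display = display

def load_callbacks(plugin_classes, display):
    plugins = []
    for cls in plugin_classes:
        if getattr(cls, 'CALLBACK_VERSION', 0) >= 2.0:
            plugins.append(cls(display))
        else:
            plugins.append(cls())
    return plugins

if __name__ == '__main__':
    loaded = load_callbacks([LegacyCallback, ModernCallback], display=object())
    print(loaded)
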
Example #4
    def _queue_thread_main(self):

        # create a dummy object with plugin loaders set as an easier
        # way to share them with the forked processes
        shared_loader_obj = SharedPluginLoaderObj()

        display.debug("queuing thread starting")
        while not self._terminated:
            available_workers = []
            for idx, entry in enumerate(self._workers):
                (worker_prc, _) = entry
                if worker_prc is None or not worker_prc.is_alive():
                    available_workers.append(idx)

            if len(available_workers) == 0:
                time.sleep(0.01)
                continue

            for worker_idx in available_workers:
                try:
                    self._queued_tasks_lock.acquire()
                    (host, task, task_vars,
                     play_context) = self._queued_tasks.pop()
                except IndexError:
                    break
                finally:
                    self._queued_tasks_lock.release()

                if task.action not in action_write_locks.action_write_locks:
                    display.debug('Creating lock for %s' % task.action)
                    action_write_locks.action_write_locks[
                        task.action] = multiprocessing.Lock()

                try:
                    worker_prc = WorkerProcess(
                        self._final_q,
                        self._iterator._play,
                        host,
                        task,
                        task_vars,
                        play_context,
                        self._loader,
                        self._variable_manager,
                        shared_loader_obj,
                    )
                    self._workers[worker_idx][0] = worker_prc
                    worker_prc.start()
                    display.debug("worker is %d (out of %d available)" %
                                  (worker_idx + 1, len(self._workers)))

                except (EOFError, IOError, AssertionError) as e:
                    # most likely an abort
                    display.debug("got an error while queuing: %s" % e)
                    break

        display.debug("queuing thread exiting")
Example #5
    def __init__(self, inventory, variable_manager, loader, display, options, passwords, stdout_callback=None):

        self._inventory        = inventory
        self._variable_manager = variable_manager
        self._loader           = loader
        self._display          = display
        self._options          = options
        self._stats            = AggregateStats()
        self.passwords         = passwords
        self._stdout_callback  = stdout_callback

        self._callbacks_loaded = False
        self._callback_plugins = []
        self._start_at_done    = False

        # make sure the module path (if specified) is parsed and
        # added to the module_loader object
        if options.module_path is not None:
            for path in options.module_path.split(os.pathsep):
                module_loader.add_directory(path)

        # a special flag to help us exit cleanly
        self._terminated = False

        # this dictionary is used to keep track of notified handlers
        self._notified_handlers = dict()

        # dictionaries to keep track of failed/unreachable hosts
        self._failed_hosts      = dict()
        self._unreachable_hosts = dict()

        self._final_q = multiprocessing.Queue()

        # create the pool of worker processes, based on the number of forks specified
        try:
            fileno = sys.stdin.fileno()
        except ValueError:
            fileno = None

        # A temporary file (opened pre-fork) used by connection
        # plugins for inter-process locking.
        self._connection_lockfile = tempfile.TemporaryFile()

        self._workers = []
        for i in range(self._options.forks):
            main_q = multiprocessing.Queue()
            rslt_q = multiprocessing.Queue()

            prc = WorkerProcess(self, main_q, rslt_q, loader)
            prc.start()

            self._workers.append((prc, main_q, rslt_q))

        self._result_prc = ResultProcess(self._final_q, self._workers)
        self._result_prc.start()
Example #6
    def __init__(self, inventory, callback, variable_manager, loader, options):

        self._inventory = inventory
        self._variable_manager = variable_manager
        self._loader = loader
        self._options = options

        # a special flag to help us exit cleanly
        self._terminated = False

        # create and start the multiprocessing manager
        #self._manager = AnsibleManager()
        #self._manager.start()

        # this dictionary is used to keep track of notified handlers
        self._notified_handlers = dict()

        # dictionaries to keep track of failed/unreachable hosts
        self._failed_hosts = dict()
        self._unreachable_hosts = dict()

        self._final_q = multiprocessing.Queue()

        # FIXME: hard-coded the default callback plugin here, which
        #        should be configurable.
        self._callback = callback_loader.get(callback)

        # create the pool of worker processes, based on the number of forks specified
        try:
            fileno = sys.stdin.fileno()
        except ValueError:
            fileno = None

        self._workers = []
        for i in range(self._options.forks):
            # duplicate stdin, if possible
            new_stdin = None
            if fileno is not None:
                try:
                    new_stdin = os.fdopen(os.dup(fileno))
                except OSError:
                    # couldn't dupe stdin, most likely because it's
                    # not a valid file descriptor, so we just rely on
                    # using the one that was passed in
                    pass

            main_q = multiprocessing.Queue()
            rslt_q = multiprocessing.Queue()

            prc = WorkerProcess(self, main_q, rslt_q, loader, new_stdin)
            prc.start()

            self._workers.append((prc, main_q, rslt_q))
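
Examples #3 and #6 duplicate the parent's stdin before starting each worker and silently fall back when that is not possible. A short standalone sketch of just that step (dup_stdin is an assumed helper name; the fallback cases mirror the comments in the examples):

import os
import sys

def dup_stdin():
    # duplicate stdin, if possible, so a worker can get its own copy
    try:
        fileno = sys.stdin.fileno()
    except ValueError:
        # stdin has no real file descriptor (e.g. it was replaced by an in-memory stream)
        return None
    try:
        return os.fdopen(os.dup(fileno))
    except OSError:
        # couldn't dup stdin; callers just keep using the stdin they already have
        return None

if __name__ == '__main__':
    print('duplicated stdin:', dup_stdin())
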
Example #7
    def _queue_task(self, host, task, task_vars, play_context):
        ''' handles queueing the task up to be sent to a worker '''

        display.debug("entering _queue_task() for %s/%s" % (host.name, task.action))

        # Add a write lock for tasks.
        # Maybe this should be added somewhere further up the call stack but
        # this is the earliest in the code where we have task (1) extracted
        # into its own variable and (2) there's only a single code path
        # leading to the module being run.  This is called by three
        # functions: __init__.py::_do_handler_run(), linear.py::run(), and
        # free.py::run() so we'd have to add to all three to do it there.
        # The next common higher level is __init__.py::run() and that has
        # tasks inside of play_iterator so we'd have to extract them to do it
        # there.

        global action_write_locks
        if task.action not in action_write_locks:
            display.debug('Creating lock for %s' % task.action)
            action_write_locks[task.action] = Lock()

        # and then queue the new task
        try:

            # create a dummy object with plugin loaders set as an easier
            # way to share them with the forked processes
            shared_loader_obj = SharedPluginLoaderObj()

            queued = False
            starting_worker = self._cur_worker
            while True:
                (worker_prc, rslt_q) = self._workers[self._cur_worker]
                if worker_prc is None or not worker_prc.is_alive():
                    worker_prc = WorkerProcess(self._final_q, task_vars, host, task, play_context, self._loader, self._variable_manager, shared_loader_obj)
                    self._workers[self._cur_worker][0] = worker_prc
                    worker_prc.start()
                    display.debug("worker is %d (out of %d available)" % (self._cur_worker+1, len(self._workers)))
                    queued = True
                self._cur_worker += 1
                if self._cur_worker >= len(self._workers):
                    self._cur_worker = 0
                if queued:
                    break
                elif self._cur_worker == starting_worker:
                    time.sleep(0.0001)

            self._pending_results += 1
        except (EOFError, IOError, AssertionError) as e:
            # most likely an abort
            display.debug("got an error while queuing: %s" % e)
            return
        display.debug("exiting _queue_task() for %s/%s" % (host.name, task.action))
Example #8
    def __init__(self,
                 inventory,
                 variable_manager,
                 loader,
                 display,
                 options,
                 passwords,
                 stdout_callback=None):

        self._inventory = inventory
        self._variable_manager = variable_manager
        self._loader = loader
        self._display = display
        self._options = options
        self._stats = AggregateStats()
        self.passwords = passwords
        self._stdout_callback = stdout_callback

        self._callbacks_loaded = False
        self._callback_plugins = []

        # a special flag to help us exit cleanly
        self._terminated = False

        # this dictionary is used to keep track of notified handlers
        self._notified_handlers = dict()

        # dictionaries to keep track of failed/unreachable hosts
        self._failed_hosts = dict()
        self._unreachable_hosts = dict()

        self._final_q = multiprocessing.Queue()

        # create the pool of worker processes, based on the number of forks specified
        try:
            fileno = sys.stdin.fileno()
        except ValueError:
            fileno = None

        self._workers = []
        for i in range(self._options.forks):
            main_q = multiprocessing.Queue()
            rslt_q = multiprocessing.Queue()

            prc = WorkerProcess(self, main_q, rslt_q, loader)
            prc.start()

            self._workers.append((prc, main_q, rslt_q))

        self._result_prc = ResultProcess(self._final_q, self._workers)
        self._result_prc.start()
Example #9
    def _initialize_processes(self, num):
        self._workers = []

        for i in range(num):
            main_q = multiprocessing.Queue()
            rslt_q = multiprocessing.Queue()

            prc = WorkerProcess(self, main_q, rslt_q, self._loader)
            prc.start()

            self._workers.append((prc, main_q, rslt_q))

        self._result_prc = ResultProcess(self._final_q, self._workers)
        self._result_prc.start()
Example #10
    def __init__(self, inventory, callback, variable_manager, loader, options):

        self._inventory        = inventory
        self._variable_manager = variable_manager
        self._loader           = loader
        self._options          = options

        # a special flag to help us exit cleanly
        self._terminated = False

        # this dictionary is used to keep track of notified handlers
        self._notified_handlers = dict()

        # dictionaries to keep track of failed/unreachable hosts
        self._failed_hosts      = dict()
        self._unreachable_hosts = dict()

        self._final_q = multiprocessing.Queue()

        # FIXME: hard-coded the default callback plugin here, which
        #        should be configurable.
        self._callback = callback_loader.get(callback)

        # create the pool of worker processes, based on the number of forks specified
        try:
            fileno = sys.stdin.fileno()
        except ValueError:
            fileno = None

        self._workers = []
        for i in range(self._options.forks):
            # duplicate stdin, if possible
            new_stdin = None
            if fileno is not None:
                try:
                    new_stdin = os.fdopen(os.dup(fileno))
                except OSError:
                    # couldn't dupe stdin, most likely because it's
                    # not a valid file descriptor, so we just rely on
                    # using the one that was passed in
                    pass

            main_q = multiprocessing.Queue()
            rslt_q = multiprocessing.Queue()

            prc = WorkerProcess(self, main_q, rslt_q, loader, new_stdin)
            prc.start()

            self._workers.append((prc, main_q, rslt_q))
Example #11
    def __init__(self, inventory, variable_manager, loader, display, options, passwords, stdout_callback=None):

        self._inventory        = inventory
        self._variable_manager = variable_manager
        self._loader           = loader
        self._display          = display
        self._options          = options
        self._stats            = AggregateStats()
        self.passwords         = passwords

        # a special flag to help us exit cleanly
        self._terminated = False

        # this dictionary is used to keep track of notified handlers
        self._notified_handlers = dict()

        # dictionaries to keep track of failed/unreachable hosts
        self._failed_hosts      = dict()
        self._unreachable_hosts = dict()

        self._final_q = multiprocessing.Queue()

        # load callback plugins
        self._callback_plugins = self._load_callbacks(stdout_callback)

        # create the pool of worker processes, based on the number of forks specified
        try:
            fileno = sys.stdin.fileno()
        except ValueError:
            fileno = None

        self._workers = []
        for i in range(self._options.forks):
            main_q = multiprocessing.Queue()
            rslt_q = multiprocessing.Queue()

            prc = WorkerProcess(self, main_q, rslt_q, loader)
            prc.start()

            self._workers.append((prc, main_q, rslt_q))

        self._result_prc = ResultProcess(self._final_q, self._workers)
        self._result_prc.start()
Example #12
    def _queue_task(self, host, task, task_vars, play_context):
        ''' handles queueing the task up to be sent to a worker '''

        display.debug("entering _queue_task() for %s/%s" % (host, task))

        task_vars['hostvars'] = self._tqm.hostvars
        # and then queue the new task
        display.debug("%s - putting task (%s) in queue" % (host, task))
        try:
            display.debug("worker is %d (out of %d available)" %
                          (self._cur_worker + 1, len(self._workers)))

            # create a dummy object with plugin loaders set as an easier
            # way to share them with the forked processes
            shared_loader_obj = SharedPluginLoaderObj()

            queued = False
            while True:
                (worker_prc, main_q, rslt_q) = self._workers[self._cur_worker]
                if worker_prc is None or not worker_prc.is_alive():
                    worker_prc = WorkerProcess(rslt_q, task_vars, host, task,
                                               play_context, self._loader,
                                               self._variable_manager,
                                               shared_loader_obj)
                    self._workers[self._cur_worker][0] = worker_prc
                    worker_prc.start()
                    queued = True
                self._cur_worker += 1
                if self._cur_worker >= len(self._workers):
                    self._cur_worker = 0
                    time.sleep(0.0001)
                if queued:
                    break

            del task_vars
            self._pending_results += 1
        except (EOFError, IOError, AssertionError) as e:
            # most likely an abort
            display.debug("got an error while queuing: %s" % e)
            return
        display.debug("exiting _queue_task() for %s/%s" % (host, task))
Example #13
    def _queue_task(self, host, task, task_vars, play_context):
        ''' handles queueing the task up to be sent to a worker '''

        display.debug("entering _queue_task() for %s/%s" % (host, task))

        task_vars['hostvars'] = self._tqm.hostvars
        # and then queue the new task
        display.debug("%s - putting task (%s) in queue" % (host, task))
        try:
            display.debug("worker is %d (out of %d available)" % (self._cur_worker+1, len(self._workers)))

            # create a dummy object with plugin loaders set as an easier
            # way to share them with the forked processes
            shared_loader_obj = SharedPluginLoaderObj()

            queued = False
            while True:
                (worker_prc, main_q, rslt_q) = self._workers[self._cur_worker]
                if worker_prc is None or not worker_prc.is_alive():
                    worker_prc = WorkerProcess(rslt_q, task_vars, host, task, play_context, self._loader, self._variable_manager, shared_loader_obj)
                    self._workers[self._cur_worker][0] = worker_prc
                    worker_prc.start()
                    queued = True
                self._cur_worker += 1
                if self._cur_worker >= len(self._workers):
                    self._cur_worker = 0
                    time.sleep(0.0001)
                if queued:
                    break

            del task_vars
            self._pending_results += 1
        except (EOFError, IOError, AssertionError) as e:
            # most likely an abort
            display.debug("got an error while queuing: %s" % e)
            return
        display.debug("exiting _queue_task() for %s/%s" % (host, task))