def _default_executor_factory(self):
     max_simultaneous_jobs = self._max_simultaneous_jobs
     if max_simultaneous_jobs <= 0:
         max_workers = tu.get_optimal_thread_count()
     else:
         max_workers = max_simultaneous_jobs
     return futurist.ThreadPoolExecutor(max_workers=max_workers)
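
A minimal sketch of what a helper like get_optimal_thread_count() typically does, assuming behavior along the lines of the taskflow/futurist threading utils (CPU count plus one, with a fallback default); the real implementation may differ:

import multiprocessing


def get_optimal_thread_count(default=2):
    # Assumed behavior: one thread per CPU plus one spare, so a worker is
    # still available while the others block on I/O.
    try:
        return multiprocessing.cpu_count() + 1
    except NotImplementedError:
        # cpu_count() can fail on unusual platforms; fall back to a default.
        return default
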
Example #2
 def start(self):
     if self._create_executor:
         if self._max_workers is not None:
             max_workers = self._max_workers
         else:
             max_workers = threading_utils.get_optimal_thread_count()
         self._executor = futures.ThreadPoolExecutor(max_workers)
Example #3
 def start(self):
     if self._own_executor:
         if self._max_workers is not None:
             max_workers = self._max_workers
         else:
             max_workers = threading_utils.get_optimal_thread_count()
         self._executor = self._create_executor(max_workers=max_workers)
Example #4
 def __init__(self,
              exchange,
              topic,
              tasks,
              executor=None,
              threads_count=None,
              url=None,
              transport=None,
              transport_options=None,
              retry_options=None):
     self._topic = topic
     self._executor = executor
     self._owns_executor = False
     self._threads_count = -1
     if self._executor is None:
         if threads_count is not None:
             self._threads_count = int(threads_count)
         else:
             self._threads_count = tu.get_optimal_thread_count()
         self._executor = futures.ThreadPoolExecutor(self._threads_count)
         self._owns_executor = True
     self._endpoints = self._derive_endpoints(tasks)
     self._exchange = exchange
     self._server = server.Server(topic,
                                  exchange,
                                  self._executor,
                                  self._endpoints,
                                  url=url,
                                  transport=transport,
                                  transport_options=transport_options,
                                  retry_options=retry_options)
Example #5
 def __init__(self, exchange, topic, tasks, executor=None, **kwargs):
     self._topic = topic
     self._executor = executor
     self._threads_count = kwargs.pop("threads_count", tu.get_optimal_thread_count())
     if self._executor is None:
         self._executor = futures.ThreadPoolExecutor(self._threads_count)
     self._endpoints = self._derive_endpoints(tasks)
     self._server = server.Server(topic, exchange, self._executor, self._endpoints, **kwargs)
Example #6
class NonBlockingConductor(impl_executor.ExecutorConductor):
    """Non-blocking conductor that processes job(s) using a thread executor.

    NOTE(harlowja): A custom executor factory can be provided via keyword
                    argument ``executor_factory``, if provided it will be
                    invoked at
                    :py:meth:`~taskflow.conductors.base.Conductor.run` time
                    with one positional argument (this conductor) and it must
                    return a compatible `executor`_ which can be used
                    to submit jobs to. If ``None`` is provided, a thread pool
                    backed executor is selected by default (it will have
                    an equivalent number of workers as this conductor's
                    simultaneous job count).

    .. _executor: https://docs.python.org/dev/library/\
                  concurrent.futures.html#executor-objects
    """

    MAX_SIMULTANEOUS_JOBS = tu.get_optimal_thread_count()
    """
    Default maximum number of jobs that can be in progress at the same time.
    """
    def _default_executor_factory(self):
        max_simultaneous_jobs = self._max_simultaneous_jobs
        if max_simultaneous_jobs <= 0:
            max_workers = tu.get_optimal_thread_count()
        else:
            max_workers = max_simultaneous_jobs
        return futurist.ThreadPoolExecutor(max_workers=max_workers)

    def __init__(self,
                 name,
                 jobboard,
                 persistence=None,
                 engine=None,
                 engine_options=None,
                 wait_timeout=None,
                 log=None,
                 max_simultaneous_jobs=MAX_SIMULTANEOUS_JOBS,
                 executor_factory=None):
        super(NonBlockingConductor,
              self).__init__(name,
                             jobboard,
                             persistence=persistence,
                             engine=engine,
                             engine_options=engine_options,
                             wait_timeout=wait_timeout,
                             log=log,
                             max_simultaneous_jobs=max_simultaneous_jobs)
        if executor_factory is None:
            self._executor_factory = self._default_executor_factory
        else:
            if not six.callable(executor_factory):
                raise ValueError("Provided keyword argument 'executor_factory'"
                                 " must be callable")
            self._executor_factory = executor_factory
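
A hypothetical usage sketch for the conductor above: passing a custom executor_factory callable, which run() invokes with the conductor as its single positional argument. The jobboard object and the worker cap of four are illustrative assumptions, not taken from the taskflow source:

import futurist


def make_executor(conductor):
    # Assumption: cap the pool at four workers instead of deriving the size
    # from the conductor's simultaneous job count.
    return futurist.ThreadPoolExecutor(max_workers=4)


# 'jobboard' is assumed to come from the application's own setup code.
conductor = NonBlockingConductor('my-conductor', jobboard,
                                 executor_factory=make_executor)
conductor.run()
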
Example #7
 def __init__(self, exchange, topic, tasks, executor=None, **kwargs):
     self._topic = topic
     self._executor = executor
     self._threads_count = kwargs.pop('threads_count',
                                      tu.get_optimal_thread_count())
     if self._executor is None:
         self._executor = futures.ThreadPoolExecutor(self._threads_count)
     self._endpoints = self._derive_endpoints(tasks)
     self._server = server.Server(topic, exchange, self._executor,
                                  self._endpoints, **kwargs)
Example #8
 def __init__(self, max_workers=None):
     if max_workers is None:
         max_workers = tu.get_optimal_thread_count()
     super(ProcessPoolExecutor, self).__init__(max_workers=max_workers)
     if self._max_workers <= 0:
         raise ValueError("Max workers must be greater than zero")
     self._gatherer = _Gatherer(
         # Since our submit will use this gatherer we have to reference
         # the parent submit, bound to this instance (which is what we
         # really want to use anyway).
         super(ProcessPoolExecutor, self).submit)
Example #9
 def __init__(self, max_workers=None):
     if max_workers is None:
         max_workers = tu.get_optimal_thread_count()
     super(ThreadPoolExecutor, self).__init__(max_workers=max_workers)
     if self._max_workers <= 0:
         raise ValueError("Max workers must be greater than zero")
     self._gatherer = _Gatherer(
         # Since our submit will use this gatherer we have to reference
         # the parent submit, bound to this instance (which is what we
         # really want to use anyway).
         super(ThreadPoolExecutor, self).submit)
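
The _Gatherer wrapping of the parent submit() above is what lets these executors report statistics about the work they run. A short usage sketch, assuming the futurist library's statistics attribute (the attribute names shown are assumptions):

import futurist

executor = futurist.ThreadPoolExecutor(max_workers=2)
fs = [executor.submit(pow, 2, n) for n in range(8)]
results = [f.result() for f in fs]
executor.shutdown(wait=True)
# Statistics gathered by the wrapped submit calls (names assumed).
print(executor.statistics.executed, executor.statistics.failures)
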
Example #10
 def __init__(self, exchange, topic, tasks, executor=None, **kwargs):
     self._topic = topic
     self._executor = executor
     self._owns_executor = False
     self._threads_count = -1
     if self._executor is None:
         if 'threads_count' in kwargs:
             self._threads_count = int(kwargs.pop('threads_count'))
             if self._threads_count <= 0:
                 raise ValueError("threads_count provided must be > 0")
         else:
             self._threads_count = tu.get_optimal_thread_count()
         self._executor = futures.ThreadPoolExecutor(self._threads_count)
         self._owns_executor = True
     self._endpoints = self._derive_endpoints(tasks)
     self._server = server.Server(topic, exchange, self._executor,
                                  self._endpoints, **kwargs)
Example #11
 def __init__(self, exchange, topic, tasks, executor=None, **kwargs):
     self._topic = topic
     self._executor = executor
     self._owns_executor = False
     self._threads_count = -1
     if self._executor is None:
         if 'threads_count' in kwargs:
             self._threads_count = int(kwargs.pop('threads_count'))
             if self._threads_count <= 0:
                 raise ValueError("threads_count provided must be > 0")
         else:
             self._threads_count = tu.get_optimal_thread_count()
         self._executor = futures.ThreadPoolExecutor(self._threads_count)
         self._owns_executor = True
     self._endpoints = self._derive_endpoints(tasks)
     self._exchange = exchange
     self._server = server.Server(topic, exchange, self._executor,
                                  self._endpoints, **kwargs)
Example #12
    def run(self):
        if self._executor is None:
            # NOTE(harlowja): since no executor was provided we have to create
            # one, and also ensure that we shutdown the one we create to
            # ensure that we don't leak threads.
            thread_count = threading_utils.get_optimal_thread_count()
            self._executor = futures.ThreadPoolExecutor(thread_count)
            owns_executor = True
        else:
            owns_executor = False

        try:
            ActionEngine.run(self)
        finally:
            # Don't forget to shutdown the executor!!
            if owns_executor:
                try:
                    self._executor.shutdown(wait=True)
                finally:
                    self._executor = None
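
The pattern above (create an executor only when none was injected, then shut the locally created one down in a finally block) can be restated generically. A minimal sketch with illustrative names, not taken from the taskflow source:

import contextlib
from concurrent import futures


@contextlib.contextmanager
def maybe_executor(executor=None, thread_count=4):
    if executor is not None:
        # Caller owns the executor; it is not shut down here.
        yield executor
        return
    own = futures.ThreadPoolExecutor(max_workers=thread_count)
    try:
        yield own
    finally:
        # Shut down the executor we created so its threads are not leaked.
        own.shutdown(wait=True)
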
Example #13
 def __init__(self, exchange, topic, tasks,
              executor=None, threads_count=None, url=None,
              transport=None, transport_options=None,
              retry_options=None):
     self._topic = topic
     self._executor = executor
     self._owns_executor = False
     self._threads_count = -1
     if self._executor is None:
         if threads_count is not None:
             self._threads_count = int(threads_count)
         else:
             self._threads_count = tu.get_optimal_thread_count()
         self._executor = futures.ThreadPoolExecutor(self._threads_count)
         self._owns_executor = True
     self._endpoints = self._derive_endpoints(tasks)
     self._exchange = exchange
     self._server = server.Server(topic, exchange, self._executor,
                                  self._endpoints, url=url,
                                  transport=transport,
                                  transport_options=transport_options,
                                  retry_options=retry_options)
Example #14
 def __init__(self, max_workers=None):
     if max_workers is None:
         max_workers = tu.get_optimal_thread_count()
     super(ProcessPoolExecutor, self).__init__(max_workers=max_workers)
     if self._max_workers <= 0:
         raise ValueError("Max workers must be greater than zero")
Example #15
 def start(self):
     if self._own_executor:
         thread_count = threading_utils.get_optimal_thread_count()
         self._executor = futures.ThreadPoolExecutor(thread_count)