Example #1
def start(self):
    if self._create_executor:
        if self._max_workers is not None:
            max_workers = self._max_workers
        else:
            max_workers = threading_utils.get_optimal_thread_count()
        self._executor = futures.ThreadPoolExecutor(max_workers)
Example #2
def __init__(self,
             exchange,
             topic,
             tasks,
             executor=None,
             threads_count=None,
             url=None,
             transport=None,
             transport_options=None,
             retry_options=None):
    self._topic = topic
    self._executor = executor
    self._owns_executor = False
    self._threads_count = -1
    if self._executor is None:
        if threads_count is not None:
            self._threads_count = int(threads_count)
        else:
            self._threads_count = tu.get_optimal_thread_count()
        self._executor = futures.ThreadPoolExecutor(self._threads_count)
        self._owns_executor = True
    self._endpoints = self._derive_endpoints(tasks)
    self._exchange = exchange
    self._server = server.Server(topic,
                                 exchange,
                                 self._executor,
                                 self._endpoints,
                                 url=url,
                                 transport=transport,
                                 transport_options=transport_options,
                                 retry_options=retry_options)
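
The _owns_executor flag above records whether this object created its own thread pool. A minimal teardown sketch under that assumption (the stop() name is hypothetical, not taken from the excerpt):

def stop(self):
    # Hypothetical counterpart to __init__: only shut down the pool
    # if this object created it; externally supplied executors are
    # left for their owners to manage.
    if self._owns_executor:
        self._executor.shutdown(wait=True)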
Example #3
def test_using_common_executor(self):
    flow = utils.TaskNoRequiresNoReturns(name='task1')
    executor = futures.ThreadPoolExecutor(self._EXECUTOR_WORKERS)
    try:
        e1 = self._make_engine(flow, executor=executor)
        e2 = self._make_engine(flow, executor=executor)
        self.assertIs(e1.options['executor'], e2.options['executor'])
    finally:
        executor.shutdown(wait=True)
Example #4
def _executor(self):
    if CONF.taskflow_executor.engine_mode != 'parallel':
        yield None
    else:
        max_workers = CONF.taskflow_executor.max_workers
        if eventlet_utils.EVENTLET_AVAILABLE:
            yield futures.GreenThreadPoolExecutor(max_workers=max_workers)
        else:
            yield futures.ThreadPoolExecutor(max_workers=max_workers)
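
Because this generator yields exactly one value, an executor or None for non-parallel mode, it is presumably wrapped with contextlib.contextmanager. A minimal usage sketch under that assumption (the flow variable is illustrative):

with self._executor() as executor:
    # executor is None when engine_mode is not 'parallel'; the engine
    # then falls back to its own default behavior.
    engine = engines.load(flow,
                          engine=CONF.taskflow_executor.engine_mode,
                          executor=executor)
    engine.run()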
Example #5
def main():
    if len(sys.argv) == 2:
        tbl = []
        with open(sys.argv[1], 'rb') as fh:
            reader = csv.reader(fh)
            for row in reader:
                tbl.append([float(r) if r else 0.0 for r in row])
    else:
        # Make some random table out of thin air...
        tbl = []
        cols = random.randint(1, 100)
        rows = random.randint(1, 100)
        for _i in compat_range(0, rows):
            row = []
            for _j in compat_range(0, cols):
                row.append(random.random())
            tbl.append(row)

    # Generate the work to be done.
    f = make_flow(tbl)

    # Now run it (using the specified executor)...
    if eventlet_utils.EVENTLET_AVAILABLE:
        executor = futures.GreenThreadPoolExecutor(max_workers=5)
    else:
        executor = futures.ThreadPoolExecutor(max_workers=5)
    try:
        e = engines.load(f, engine='parallel', executor=executor)
        for st in e.run_iter():
            print(st)
    finally:
        executor.shutdown()

    # Find the old rows and put them into place...
    #
    # TODO(harlowja): probably easier just to sort instead of search...
    computed_tbl = []
    for i in compat_range(0, len(tbl)):
        for t in f:
            if t.index == i:
                computed_tbl.append(e.storage.get(t.name))

    # Do some basic validation (which causes the return code of this process
    # to be different if things were not as expected...)
    if len(computed_tbl) != len(tbl):
        return 1
    else:
        return 0
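
The excerpt stops after main(); presumably the script ends with the usual entry point guard:

if __name__ == '__main__':
    sys.exit(main())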
Example #6
def _make_executor(self, max_workers):
    return futures.ThreadPoolExecutor(max_workers=max_workers)
Example #7
    def execute(self):
        print("Running '%s' in thread '%s'" % (self.name, tu.get_ident()))
        time.sleep(self._wait_for)


f1 = uf.Flow("f1")
f1.add(DelayedTask("f1-1"))
f1.add(DelayedTask("f1-2"))

f2 = uf.Flow("f2")
f2.add(DelayedTask("f2-1"))
f2.add(DelayedTask("f2-2"))

# Run them all using the same futures (thread-pool based) executor...
with futures.ThreadPoolExecutor() as ex:
    e1 = engines.load(f1, engine='parallel', executor=ex)
    e2 = engines.load(f2, engine='parallel', executor=ex)
    iters = [e1.run_iter(), e2.run_iter()]
    # Iterate over a copy (so we can remove from the source list).
    cloned_iters = list(iters)
    while iters:
        # Run a single 'step' of each iterator, forcing each engine to perform
        # some work, then yield, and repeat until each iterator is consumed
        # and there is no more engine work to be done.
        for it in cloned_iters:
            try:
                six.next(it)
            except StopIteration:
                try:
                    iters.remove(it)
                except ValueError:
                    # Already removed (exhausted iterators keep
                    # raising StopIteration on later passes).
                    pass
Example #8
def test_thread_executor_creation(self):
    with futures.ThreadPoolExecutor(1) as e:
        eng = self._create_engine(executor=e)
        self.assertIsInstance(eng._task_executor,
                              executor.ParallelThreadTaskExecutor)
Example #9
# The composition starts with the conductor and then runs in sequence with
# the chorus running in parallel, but no matter what the 'hello' chorus must
# always run before the 'world' chorus (otherwise the world will fall apart).
song.add(
    PrinterTask("conductor@begin",
                show_name=False,
                inject={'output': "*ding*"}), hi_chorus, world_chorus,
    PrinterTask("conductor@end", show_name=False, inject={'output': "*dong*"}))

# Run in parallel using eventlet green threads...
if eventlet_utils.EVENTLET_AVAILABLE:
    with futures.GreenThreadPoolExecutor() as executor:
        e = engines.load(song, executor=executor, engine='parallel')
        e.run()

# Run in parallel using real threads...
with futures.ThreadPoolExecutor(max_workers=1) as executor:
    e = engines.load(song, executor=executor, engine='parallel')
    e.run()

# Run in parallel using external processes...
with futures.ProcessPoolExecutor(max_workers=1) as executor:
    e = engines.load(song, executor=executor, engine='parallel')
    e.run()

# Run serially (aka, if the workflow could have been run in parallel, it will
# not be when run in this mode)...
e = engines.load(song, engine='serial')
e.run()
Example #10
def _create_executor(self, max_workers=None):
    return futures.ThreadPoolExecutor(max_workers=max_workers)