Example #1
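A test helper that builds a parallel TaskFlow engine, falling back to a GreenExecutor when no executor is supplied.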
    def _make_engine(self, flow, flow_detail=None, executor=None):
        if executor is None:
            executor = eu.GreenExecutor()
        engine_conf = dict(engine='parallel', executor=executor)
        return taskflow.engines.load(flow,
                                     flow_detail=flow_detail,
                                     engine_conf=engine_conf,
                                     backend=self.backend)
Example #2
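Checks that an exception raised inside a submitted callable is captured by the returned future and re-raised when result() is called.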
    def test_exception_transfer(self):
        def blowup():
            raise IOError("Broke!")

        with eu.GreenExecutor(2) as e:
            f = e.submit(blowup)

        self.assertRaises(IOError, f.result)
Example #3
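Checks that every submitted callable runs exactly once. The make_funcs helper it uses is not part of the excerpt; a plausible sketch of it follows the snippet.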
    def test_func_calls(self):
        called = collections.defaultdict(int)

        with eu.GreenExecutor(2) as e:
            for f in self.make_funcs(called, 2):
                e.submit(f)

        self.assertEqual(1, called[0])
        self.assertEqual(1, called[1])
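Examples #3, #4, and #6 all call a make_funcs helper that the excerpts never show. A minimal sketch of what it plausibly looks like on the test base class, assuming it yields callables that record each invocation in the shared called counter:

import functools

def make_funcs(self, called, amount):
    # Hypothetical reconstruction: yield `amount` callables; calling the
    # i-th one bumps called[i], which the tests assert on afterwards.
    def store_call(name):
        called[name] += 1

    for i in range(0, amount):
        yield functools.partial(store_call, name=i)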
Example #4
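Checks that a done-callback fires once the future completes. Two entries end up in called: the submitted function bumps called[0], and the callback bumps called[future].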
    def test_result_callback(self):
        called = collections.defaultdict(int)

        def call_back(future):
            called[future] += 1

        funcs = list(self.make_funcs(called, 1))
        with eu.GreenExecutor(2) as e:
            f = e.submit(funcs[0])
            f.add_done_callback(call_back)

        self.assertEqual(2, len(called))
Example #5
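Checks that return values travel back through futures: fifty callables each return their own index, and result() hands every value back unchanged.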
    def test_result_transfer(self):
        def return_given(given):
            return given

        create_am = 50
        with eu.GreenExecutor(2) as e:
            futures = []
            for i in range(0, create_am):
                futures.append(e.submit(functools.partial(return_given, i)))

        self.assertEqual(create_am, len(futures))
        for i in range(0, create_am):
            result = futures[i].result()
            self.assertEqual(i, result)
Example #6
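Checks cancellation: green threads do not start until they are waited on, so each future can still be cancelled before any work happens, leaving called empty.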
    def test_func_cancellation(self):
        called = collections.defaultdict(int)

        futures = []
        with eu.GreenExecutor(2) as e:
            for func in self.make_funcs(called, 2):
                futures.append(e.submit(func))
            # Green threads don't start executing until we wait on them;
            # since nothing in this test does I/O, none of these futures
            # will have started running yet.
            #
            # If something here made a blocking call, eventlet could swap
            # in one of the executor's green threads, but nothing in this
            # test does.
            for f in futures:
                self.assertFalse(f.running())
                f.cancel()

        self.assertEqual(0, len(called))
        for f in futures:
            self.assertTrue(f.cancelled())
            self.assertTrue(f.done())
Example #7
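Moving from the unit tests to a full example, this excerpt wires a GreenExecutor into a resumable workflow: it parses an optional 'book_id+flow_id' tracking id from the command line, builds a parallel engine configuration, and tries to fetch the matching logbook and flow detail from the persistence backend.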
# Set up the persistence & resumption layer.
backend = get_backend()
try:
    book_id, flow_id = sys.argv[2].split("+", 1)
    if not uuidutils.is_uuid_like(book_id):
        book_id = None
    if not uuidutils.is_uuid_like(flow_id):
        flow_id = None
except (IndexError, ValueError):
    book_id = None
    flow_id = None

# Set up how we want our engine to run: serial, parallel, ...
engine_conf = {
    'engine': 'parallel',
    'executor': e_utils.GreenExecutor(5),
}

# Create/fetch a logbook that will track the workflow's work.
book = None
flow_detail = None
if all([book_id, flow_id]):
    with contextlib.closing(backend.get_connection()) as conn:
        try:
            book = conn.get_logbook(book_id)
            flow_detail = book.find(flow_id)
        except exc.NotFound:
            pass
if book is None and flow_detail is None:
    book = p_utils.temporary_log_book(backend)
    # The source cuts this call off mid-line; the trailing keyword
    # arguments below are a plausible completion based on Example #8.
    engine = engines.load_from_factory(create_flow,
                                       backend=backend,
                                       book=book,
                                       engine_conf=engine_conf)
Example #8
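A variant of the previous excerpt that only plugs in a GreenExecutor when eventlet is actually available, leaving the parallel engine on its default executor otherwise.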
    try:
        book_id, flow_id = sys.argv[2].split("+", 1)
        if not uuidutils.is_uuid_like(book_id):
            book_id = None
        if not uuidutils.is_uuid_like(flow_id):
            flow_id = None
    except (IndexError, ValueError):
        book_id = None
        flow_id = None

    # Set up how we want our engine to run: serial, parallel, ...
    engine_conf = {
        'engine': 'parallel',
    }
    if e_utils.EVENTLET_AVAILABLE:
        engine_conf['executor'] = e_utils.GreenExecutor(5)

    # Create/fetch a logbook that will track the workflow's work.
    book = None
    flow_detail = None
    if all([book_id, flow_id]):
        with contextlib.closing(backend.get_connection()) as conn:
            try:
                book = conn.get_logbook(book_id)
                flow_detail = book.find(flow_id)
            except exc.NotFound:
                pass
    if book is None and flow_detail is None:
        book = p_utils.temporary_log_book(backend)
        # The source cuts this call off mid-line; the trailing keyword
        # arguments below are a plausible completion.
        engine = engines.load_from_factory(create_flow,
                                           backend=backend,
                                           book=book,
                                           engine_conf=engine_conf)
Example #9
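The tail end of the same resumption pattern: either create a fresh logbook and flow detail (printing a tracking id to resume with later) or look the existing flow detail back up, then load and run the engine on a GreenExecutor.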
# (The guarding condition below is reconstructed: this excerpt joins its
# program mid-branch, pairing with the else clause further down.)
if flow_detail is None:
    # Create a logbook alongside a flow detail (where flow and task state
    # is stored). The combination of these two objects' unique ids (UUIDs)
    # allows the users of taskflow to reassociate the workflows that were
    # potentially running (and which may have partially completed) back
    # with taskflow so that those workflows can be resumed (or reverted)
    # after a process/thread/engine has failed in some way.
    logbook = p_utils.temporary_log_book(backend)
    flow_detail = p_utils.create_flow_detail(flow, logbook, backend)
    print("!! Your tracking id is: '%s+%s'" % (logbook.uuid, flow_detail.uuid))
    print("!! Please submit this on later runs for tracking purposes")
else:
    flow_detail = find_flow_detail(backend, book_id, flow_id)

# Annnnd load and run.
engine = engines.load(flow,
                      flow_detail=flow_detail,
                      backend=backend,
                      engine_conf={
                          'engine': 'parallel',
                          'executor': e_utils.GreenExecutor(10),
                      })
engine.run()

# How to use.
#
# 1. $ python me.py "sqlite:////tmp/cinder.db"
# 2. ctrl-c before this finishes
# 3. Find the tracking id (search for 'Your tracking id is')
# 4. $ python me.py "sqlite:////tmp/cinder.db" "$tracking_id"
# 5. Profit!
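Examples #7 through #9 also lean on two helpers, get_backend() and find_flow_detail(), that are never shown. A minimal sketch of plausible implementations, assuming taskflow's persistence backends API and a backend URI passed as the first command-line argument:

import contextlib
import sys

from taskflow.persistence import backends


def get_backend():
    # Hypothetical reconstruction: fetch a persistence backend for the URI
    # given on the command line (falling back to an in-memory sqlite
    # database) and make sure its schema is ready for use.
    try:
        backend_uri = sys.argv[1]
    except IndexError:
        backend_uri = "sqlite://"
    backend = backends.fetch({'connection': backend_uri})
    with contextlib.closing(backend.get_connection()) as conn:
        conn.upgrade()
    return backend


def find_flow_detail(backend, book_id, flow_id):
    # Hypothetical reconstruction: resolve the logbook/flow-detail UUID
    # pair embedded in the tracking id back into a flow detail object.
    with contextlib.closing(backend.get_connection()) as conn:
        lb = conn.get_logbook(book_id)
        return lb.find(flow_id)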