示例#1
0
    def test_retry_detail_save_with_task_failure(self):
        """Persist a retry detail whose results embed a Failure, reload it."""
        book_id = uuidutils.generate_uuid()
        book = models.LogBook(name='lb-%s' % book_id, uuid=book_id)
        flow_det = models.FlowDetail('test', uuid=uuidutils.generate_uuid())
        retry_det = models.RetryDetail("retry-1",
                                       uuid=uuidutils.generate_uuid())
        a_failure = failure.Failure.from_exception(RuntimeError('fail'))
        retry_det.results.append((42, {'some-task': a_failure}))
        book.add(flow_det)
        flow_det.add(retry_det)

        # Persist the whole book/flow/atom hierarchy.
        with contextlib.closing(self._get_connection()) as conn:
            conn.save_logbook(book)
            conn.update_flow_details(flow_det)
            conn.update_atom_details(retry_det)

        # Reload and verify the failure round-tripped intact.
        with contextlib.closing(self._get_connection()) as conn:
            book2 = conn.get_logbook(book_id)
        retry_det2 = book2.find(flow_det.uuid).find(retry_det.uuid)
        self.assertIsInstance(retry_det2, models.RetryDetail)
        loaded_failure = retry_det2.results[0][1].get('some-task')
        self.assertIsInstance(loaded_failure, failure.Failure)
        self.assertTrue(a_failure.matches(loaded_failure))
示例#2
0
    def test_retry_detail_save_intention(self):
        """An updated retry intention should persist across a reload."""
        book_id = uuidutils.generate_uuid()
        book = models.LogBook(name='lb-%s' % book_id, uuid=book_id)
        flow_det = models.FlowDetail('test', uuid=uuidutils.generate_uuid())
        retry_det = models.RetryDetail("retry-1",
                                       uuid=uuidutils.generate_uuid())
        book.add(flow_det)
        flow_det.add(retry_det)

        # Initial save of the book, flow detail and retry detail.
        with contextlib.closing(self._get_connection()) as conn:
            conn.save_logbook(book)
            conn.update_flow_details(flow_det)
            conn.update_atom_details(retry_det)

        # Flip the intention and persist only the atom.
        retry_det.intention = states.REVERT
        with contextlib.closing(self._get_connection()) as conn:
            conn.update_atom_details(retry_det)

        # Reload and confirm the intention change survived.
        with contextlib.closing(self._get_connection()) as conn:
            book2 = conn.get_logbook(book_id)
        retry_det2 = book2.find(flow_det.uuid).find(retry_det.uuid)
        self.assertEqual(retry_det2.intention, states.REVERT)
        self.assertIsInstance(retry_det2, models.RetryDetail)
示例#3
0
 def test_logbook_merge_flow_detail(self):
     """Two saves under the same logbook uuid merge their flow details."""
     book_id = uuidutils.generate_uuid()
     book_name = 'lb-%s' % book_id
     first = models.LogBook(name=book_name, uuid=book_id)
     first.add(models.FlowDetail('test', uuid=uuidutils.generate_uuid()))
     with contextlib.closing(self._get_connection()) as conn:
         conn.save_logbook(first)
     # A second logbook object with the same uuid but a new flow detail.
     second = models.LogBook(name=book_name, uuid=book_id)
     second.add(models.FlowDetail('test2', uuid=uuidutils.generate_uuid()))
     with contextlib.closing(self._get_connection()) as conn:
         conn.save_logbook(second)
     # The stored logbook should now contain both flow details.
     with contextlib.closing(self._get_connection()) as conn:
         merged = conn.get_logbook(book_id)
         self.assertEqual(2, len(merged))
示例#4
0
 def test_logbook_add_task_detail(self):
     """A task detail added to a flow detail is persisted with the book.

     Saves a logbook containing one flow detail that holds one task
     detail, then loads it back and verifies the counts and the task
     detail's name, version and default intention.
     """
     lb_id = uuidutils.generate_uuid()
     lb_name = 'lb-%s' % (lb_id)
     lb = models.LogBook(name=lb_name, uuid=lb_id)
     fd = models.FlowDetail('test', uuid=uuidutils.generate_uuid())
     td = models.TaskDetail("detail-1", uuid=uuidutils.generate_uuid())
     td.version = '4.2'
     fd.add(td)
     lb.add(fd)
     with contextlib.closing(self._get_connection()) as conn:
         conn.save_logbook(lb)
     with contextlib.closing(self._get_connection()) as conn:
         lb2 = conn.get_logbook(lb_id)
         self.assertEqual(1, len(lb2))
         tasks = 0
         # Count tasks on the *loaded* logbook; the original iterated the
         # local ``lb`` (so the store was never checked) and also
         # clobbered the ``fd`` variable still used below.
         for loaded_fd in lb2:
             tasks += len(loaded_fd)
         self.assertEqual(1, tasks)
     with contextlib.closing(self._get_connection()) as conn:
         lb2 = conn.get_logbook(lb_id)
         fd2 = lb2.find(fd.uuid)
         td2 = fd2.find(td.uuid)
         self.assertIsNot(td2, None)
         self.assertEqual(td2.name, 'detail-1')
         self.assertEqual(td2.version, '4.2')
         self.assertEqual(td2.intention, states.EXECUTE)
 def test_flow_name_uuid_and_meta(self):
     """Storage exposes the flow detail's name, uuid and meta."""
     detail = models.FlowDetail(name='test-fd', uuid='aaaa')
     detail.meta = {'a': 1}
     storage = self._get_storage(detail)
     self.assertEqual('test-fd', storage.flow_name)
     self.assertEqual('aaaa', storage.flow_uuid)
     self.assertEqual({'a': 1}, storage.flow_meta)
示例#6
0
    def test_task_detail_meta_update(self):
        """Updating a task detail's meta mapping persists the new value."""
        book_id = uuidutils.generate_uuid()
        book = models.LogBook(name='lb-%s' % book_id, uuid=book_id)
        flow_det = models.FlowDetail('test', uuid=uuidutils.generate_uuid())
        task_det = models.TaskDetail("detail-1",
                                     uuid=uuidutils.generate_uuid())
        task_det.meta = {'test': 42}
        book.add(flow_det)
        flow_det.add(task_det)

        with contextlib.closing(self._get_connection()) as conn:
            conn.save_logbook(book)
            conn.update_flow_details(flow_det)
            conn.update_atom_details(task_det)

        # Mutate the meta and push only the atom update.
        task_det.meta['test'] = 43
        with contextlib.closing(self._get_connection()) as conn:
            conn.update_atom_details(task_det)

        # Reload and verify the new meta value was stored.
        with contextlib.closing(self._get_connection()) as conn:
            book2 = conn.get_logbook(book_id)
        task_det2 = book2.find(flow_det.uuid).find(task_det.uuid)
        self.assertEqual(task_det2.meta.get('test'), 43)
        self.assertIsInstance(task_det2, models.TaskDetail)
示例#7
0
    def test_task_detail_with_failure(self):
        """A captured Failure on a task detail round-trips through storage."""
        book_id = uuidutils.generate_uuid()
        book = models.LogBook(name='lb-%s' % book_id, uuid=book_id)
        flow_det = models.FlowDetail('test', uuid=uuidutils.generate_uuid())
        task_det = models.TaskDetail("detail-1",
                                     uuid=uuidutils.generate_uuid())

        # Capture a real, raised exception (with traceback) as a Failure.
        try:
            raise RuntimeError('Woot!')
        except Exception:
            task_det.failure = failure.Failure()

        book.add(flow_det)
        flow_det.add(task_det)

        with contextlib.closing(self._get_connection()) as conn:
            conn.save_logbook(book)
            conn.update_flow_details(flow_det)
            conn.update_atom_details(task_det)

        # Reload and verify the failure's contents survived.
        with contextlib.closing(self._get_connection()) as conn:
            book2 = conn.get_logbook(book_id)
        task_det2 = book2.find(flow_det.uuid).find(task_det.uuid)
        self.assertEqual(task_det2.failure.exception_str, 'Woot!')
        self.assertIs(task_det2.failure.check(RuntimeError), RuntimeError)
        self.assertEqual(task_det2.failure.traceback_str,
                         task_det.failure.traceback_str)
        self.assertIsInstance(task_det2, models.TaskDetail)
示例#8
0
def taskflow_base_worker(arg):
    """Worker loop: run flows for wrapper objects pulled off the queue.

    ``arg`` is unused here; presumably required by the pool/thread spawn
    API that starts this worker -- TODO confirm.

    Runs forever: each iteration takes one wrapper (bound to ``self``)
    from the module-level ``_taskflow_queue``, builds and runs its flow,
    records success/failure on the wrapper and invokes its callback.
    """
    while True:
        # Blocks until a wrapper is queued (by the async scheduling path).
        self = _taskflow_queue.get()
        try:
            # Build the flow via the factory registered for this category.
            flow = invt_taskflow_factory[self.category]()
            book = models.LogBook('logbook-%s' % (self.category))
            flow_detail = models.FlowDetail('flowdetail-%s' % (self.category),
                                            str(uuid.uuid4()))
            book.add(flow_detail)
            # Persist the logbook before running so engine progress has
            # somewhere to live.
            with contextlib.closing(
                    _taskflow_backend.get_connection()) as conn:
                conn.save_logbook(book)
            self.book_id = book.uuid
            self.flow_id = flow_detail.uuid
            #
            # TODO: optimize the engine execution process later --
            # use a parallel engine instead of a serial one, and maybe
            # share an executor; please refer to
            # https://docs.openstack.org/taskflow/latest/user/examples.html#sharing-a-thread-pool-executor-in-parallel
            self.engine = engines.load(flow,
                                       backend=_taskflow_backend,
                                       flow_detail=flow_detail,
                                       book=book,
                                       store=self.store)
            self.engine.run()
            self.schedule_status = E3TASKFLOW_SCHEDULE_STATUS_SUCCESSFUL
            if self.callback:
                self.callback(self)
        except Exception as e:
            # Record the full traceback on the wrapper for later inspection.
            self.failure = str(traceback.format_exc())
            self.schedule_status = E3TASKFLOW_SCHEDULE_STATUS_FAILED
            if self.callback:
                self.callback(self, e)
        finally:
            # Always push the wrapper's state back out, success or failure.
            self.sync_state()
示例#9
0
File: services.py  Project: yunhaia/poppy
    def submit_task(self, flow_factory, **kwargs):
        """Post a job (and its saved flow-factory details) to the job board.

        Keyword arguments become the job's 'store' and are later fed to
        the engine that runs the flow.
        """
        with self.persistence as persistence:
            board_conf = self.jobboard_backend_conf.copy()
            with self.driver.job_board(board_conf,
                                       persistence=persistence) as board:
                job_id = uuidutils.generate_uuid()
                job_name = '-'.join([flow_factory.__name__, job_id])
                book = models.LogBook(job_name)
                flow_detail = models.FlowDetail(job_name,
                                                uuidutils.generate_uuid())
                # Record which factory (re)creates this job's flow.
                engines.save_factory_details(flow_detail, flow_factory,
                                             (), {})
                book.add(flow_detail)
                persistence.get_connection().save_logbook(book)
                job = board.post(job_name,
                                 book=book,
                                 details={'store': kwargs})
                LOG.info("{0} posted".format(job))
示例#10
0
def run_poster():
    """Post a single 'bottles' job to the jobboard, then exit."""
    print("Starting poster with pid: %s" % ME)
    my_name = "poster-%s" % ME
    persist_backend = persistence_backends.fetch(PERSISTENCE_URI)
    with contextlib.closing(persist_backend):
        # Make sure the persistence schema exists / is up to date.
        with contextlib.closing(persist_backend.get_connection()) as conn:
            conn.upgrade()
        job_backend = job_backends.fetch(my_name,
                                         JB_CONF,
                                         persistence=persist_backend)
        job_backend.connect()
        with contextlib.closing(job_backend):
            # Describe the unit of work in the persistence backend: a
            # logbook, a flow detail, and the factory callable that can
            # later recreate the tasks the work unit needs.
            book = models.LogBook("post-from-%s" % my_name)
            detail = models.FlowDetail("song-from-%s" % my_name,
                                       uuidutils.generate_uuid())
            book.add(detail)
            with contextlib.closing(
                    persist_backend.get_connection()) as conn:
                conn.save_logbook(book)
            engines.save_factory_details(detail,
                                         make_bottles,
                                         [HOW_MANY_BOTTLES], {},
                                         backend=persist_backend)
            # Post, and be done with it!
            posted = job_backend.post("song-from-%s" % my_name, book=book)
            print("Posted: %s" % posted)
            print("Goodbye...")
示例#11
0
    def test_flow_detail_update_not_existing(self):
        """Re-saving a logbook picks up flow details added after the save."""
        book_id = uuidutils.generate_uuid()
        book = models.LogBook(name='lb-%s' % book_id, uuid=book_id)
        first = models.FlowDetail('test', uuid=uuidutils.generate_uuid())
        book.add(first)
        with contextlib.closing(self._get_connection()) as conn:
            conn.save_logbook(book)

        # Add a second flow detail after the initial save.
        second = models.FlowDetail('test-2', uuid=uuidutils.generate_uuid())
        book.add(second)
        with contextlib.closing(self._get_connection()) as conn:
            conn.save_logbook(book)

        # Both flow details should now be present in the store.
        with contextlib.closing(self._get_connection()) as conn:
            book2 = conn.get_logbook(book.uuid)
        self.assertIsNotNone(book2.find(first.uuid))
        self.assertIsNotNone(book2.find(second.uuid))
示例#12
0
def compose_flow_detail(store=None):
    """Build a flow detail (with an attached store) for a logbook.

    :param dict store: store handed to the flow via the injector
    :return obj flow_detail: the composed flow detail
    """
    detail = persistence_models.FlowDetail(
        "flow_from_{}".format(CONDUCTOR_NAME), uuid=str(uuid4()))
    # An absent/empty store is normalized to an empty dict.
    detail.meta.update({'store': store or {}})
    return detail
示例#13
0
 def test_logbook_lazy_fetch(self):
     """Lazy fetching returns a logbook without its flow details."""
     book_id = uuidutils.generate_uuid()
     book = models.LogBook(name='lb-%s' % book_id, uuid=book_id)
     book.add(models.FlowDetail('test', uuid=uuidutils.generate_uuid()))
     with contextlib.closing(self._get_connection()) as conn:
         conn.save_logbook(book)
     with contextlib.closing(self._get_connection()) as conn:
         lazy_book = conn.get_logbook(book_id, lazy=True)
         # The lazy copy carries no children; the local one still does.
         self.assertEqual(0, len(lazy_book))
         self.assertEqual(1, len(book))
示例#14
0
 def test_logbook_add_flow_detail(self):
     """A flow detail added to a logbook is saved and findable by uuid."""
     book_id = uuidutils.generate_uuid()
     book = models.LogBook(name='lb-%s' % book_id, uuid=book_id)
     detail = models.FlowDetail('test', uuid=uuidutils.generate_uuid())
     book.add(detail)
     with contextlib.closing(self._get_connection()) as conn:
         conn.save_logbook(book)
     with contextlib.closing(self._get_connection()) as conn:
         book2 = conn.get_logbook(book_id)
         # Both copies hold one flow detail, and the name round-trips.
         self.assertEqual(1, len(book2))
         self.assertEqual(1, len(book))
         self.assertEqual(detail.name, book2.find(detail.uuid).name)
示例#15
0
 def test_flow_detail_lazy_fetch(self):
     """Lazy fetching returns a flow detail without its atom details."""
     book_id = uuidutils.generate_uuid()
     book = models.LogBook(name='lb-%s' % book_id, uuid=book_id)
     detail = models.FlowDetail('test', uuid=uuidutils.generate_uuid())
     task = models.TaskDetail("detail-1", uuid=uuidutils.generate_uuid())
     task.version = '4.2'
     detail.add(task)
     book.add(detail)
     with contextlib.closing(self._get_connection()) as conn:
         conn.save_logbook(book)
     with contextlib.closing(self._get_connection()) as conn:
         lazy_detail = conn.get_flow_details(detail.uuid, lazy=True)
         # The lazy copy carries no atoms; the local one keeps its task.
         self.assertEqual(0, len(lazy_detail))
         self.assertEqual(1, len(detail))
示例#16
0
def temporary_flow_detail(backend=None):
    """Create a throwaway logbook + flow detail, optionally persisted.

    Mainly useful for tests and other short-lived use cases that need a
    flow detail (and its owning logbook) around for a little while.
    """
    detail_uuid = uuidutils.generate_uuid()
    detail = models.FlowDetail(name='tmp-flow-detail', uuid=detail_uuid)
    book = temporary_log_book(backend)
    book.add(detail)
    if backend is not None:
        with contextlib.closing(backend.get_connection()) as conn:
            conn.save_logbook(book)
    # Look the detail up on the (possibly saved) logbook rather than
    # returning the local object, so the freshest version is handed back.
    return book, book.find(detail_uuid)
示例#17
0
File: client.py  Project: sputnik13/cue
    def post(self,
             flow_factory,
             job_args=None,
             flow_args=None,
             flow_kwargs=None,
             tx_uuid=None):
        """Method for posting a new job to the jobboard

        :param flow_factory: Flow factory function for creating a flow instance
                             that will be executed as part of the job.
        :param job_args: 'store' arguments to be supplied to the engine
                         executing the flow for the job
        :param flow_args: Positional arguments to be passed to the flow factory
                          function
        :param flow_kwargs: Keyword arguments to be passed to the flow factory
                            function
        :param tx_uuid: Transaction UUID which will be injected as 'tx_uuid' in
                        job_args.  A tx_uuid will be generated if one is not
                        provided as an argument.
        :return: A taskflow.job.Job instance that represents the job that was
                 posted.
        """
        if isinstance(job_args, dict) and 'tx_uuid' in job_args:
            raise AttributeError("tx_uuid needs to be provided as an argument "
                                 "to Client.post, not as a member of job_args")

        if tx_uuid is None:
            tx_uuid = uuidutils.generate_uuid()

        job_name = "%s[%s]" % (flow_factory.__name__, tx_uuid)
        book = persistence_models.LogBook(job_name, uuid=tx_uuid)

        # NOTE: flow_factory is effectively required (its __name__ is
        # dereferenced above), so the previous ``if flow_factory is not
        # None`` guard was dead code -- and worse, it left ``flow_detail``
        # unbound on the None path while still being referenced below.
        flow_detail = persistence_models.FlowDetail(
            job_name, str(uuid.uuid4()))
        book.add(flow_detail)

        job_details = {'store': job_args or {}}
        job_details['store'].update({'tx_uuid': tx_uuid})
        job_details['flow_uuid'] = flow_detail.uuid

        self.persistence.get_connection().save_logbook(book)

        engines.save_factory_details(flow_detail, flow_factory, flow_args,
                                     flow_kwargs, self.persistence)

        job = self.jobboard.post(job_name, book, details=job_details)
        return job
示例#18
0
    def make_save_book(persistence,
                       job_id,
                       flow_plugin,
                       plugin_args=(),
                       plugin_kwds=None):
        """Create and persist a logbook/flow-detail pair for a job.

        :param persistence: persistence backend providing get_connection()
        :param job_id: identifier reused for both the book and flow detail
        :param flow_plugin: plugin prepended to the factory's positional args
        :param plugin_args: extra positional args for the workflow factory
        :param plugin_kwds: keyword args for the workflow factory (None
                            means an empty mapping)
        :returns: the saved logbook
        """
        flow_id = book_id = job_id  # Do these need to be different?
        book = models.LogBook(book_id)
        detail = models.FlowDetail(flow_id, uuidutils.generate_uuid())
        book.add(detail)

        factory_args = [flow_plugin] + list(plugin_args)
        # The original used a mutable default ({}) for plugin_kwds, which
        # is shared across calls; normalize None -> {} here instead.
        factory_kwargs = plugin_kwds if plugin_kwds is not None else {}
        engines.save_factory_details(detail, workflow_factory, factory_args,
                                     factory_kwargs)
        with contextlib.closing(persistence.get_connection()) as conn:
            conn.save_logbook(book)
            return book
示例#19
0
    def test_flow_detail_save(self):
        """Flow details cannot be updated before their logbook is saved."""
        book_id = uuidutils.generate_uuid()
        book = models.LogBook(name='lb-%s' % book_id, uuid=book_id)
        detail = models.FlowDetail('test', uuid=uuidutils.generate_uuid())
        book.add(detail)

        # The owning logbook does not exist yet, so neither lookup nor
        # update may succeed (flow details cannot live on their own).
        with contextlib.closing(self._get_connection()) as conn:
            self.assertRaises(exc.NotFound, conn.get_logbook, book_id)
            self.assertRaises(exc.NotFound, conn.update_flow_details, detail)

        # Once the logbook is saved, the flow detail update works.
        with contextlib.closing(self._get_connection()) as conn:
            conn.save_logbook(book)
            conn.update_flow_details(detail)
示例#20
0
 def issue(self, auto_sync=True):
     """Schedule this taskflow wrapper, synchronously or via the queue.

     :param auto_sync: when True, persist wrapper state before the flow
                       runs and again in the ``finally`` block.
     :returns: True when the wrapper was already scheduled; otherwise
               None (implicitly).
     """
     try:
         self.guard.acquire()
         # Already issued (or finished) -- nothing more to do.
         if self.schedule_status != E3TASKFLOW_SCHEDULE_STATUS_UNKNOWN:
             return True
         if self.sync:
             # Synchronous path: create the flow using the registered
             # flow creator and run it inline.
             flow = invt_taskflow_factory[self.category]()
             book = models.LogBook('logbook-%s' % (self.category))
             flow_detail = models.FlowDetail(
                 'flowdetail-%s' % (self.category), str(uuid.uuid4()))
             book.add(flow_detail)
             with contextlib.closing(
                     _taskflow_backend.get_connection()) as conn:
                 conn.save_logbook(book)
             self.book_id = book.uuid
             self.flow_id = flow_detail.uuid
             self.engine = engines.load(flow,
                                        backend=_taskflow_backend,
                                        flow_detail=flow_detail,
                                        book=book,
                                        store=self.store)
             self.schedule_status = E3TASKFLOW_SCHEDULE_STATUS_ISSUED
             # Prior to running the flow, synchronize state in case the
             # tasks need it.
             if auto_sync:
                 self.sync_state()
             self.engine.run()
             self.schedule_status = E3TASKFLOW_SCHEDULE_STATUS_SUCCESSFUL
             if self.callback:
                 self.callback(self)
         else:
             # Asynchronous path: hand the wrapper to the worker queue.
             self.schedule_status = E3TASKFLOW_SCHEDULE_STATUS_ISSUED
             _taskflow_queue.put(self)
     except Exception as e:
         # Keep the full traceback for later inspection.
         self.failure = str(traceback.format_exc())
         self.schedule_status = E3TASKFLOW_SCHEDULE_STATUS_FAILED
         if self.callback:
             self.callback(self, e)
         else:
             # NOTE(review): plain ``raise`` would preserve the original
             # traceback better than ``raise e`` -- confirm before changing.
             raise e
     finally:
         self.guard.release()
         # NOTE(review): with auto_sync the state is synced here *and*
         # before engine.run() above -- the double sync looks intentional
         # but confirm.
         if auto_sync:
             self.sync_state()
示例#21
0
    def test_flow_detail_meta_update(self):
        """Updating a flow detail's meta mapping persists the new value."""
        book_id = uuidutils.generate_uuid()
        book = models.LogBook(name='lb-%s' % book_id, uuid=book_id)
        detail = models.FlowDetail('test', uuid=uuidutils.generate_uuid())
        detail.meta = {'test': 42}
        book.add(detail)

        with contextlib.closing(self._get_connection()) as conn:
            conn.save_logbook(book)
            conn.update_flow_details(detail)

        # Mutate the meta and push only the flow detail update.
        detail.meta['test'] = 43
        with contextlib.closing(self._get_connection()) as conn:
            conn.update_flow_details(detail)
        with contextlib.closing(self._get_connection()) as conn:
            book2 = conn.get_logbook(book_id)
        self.assertEqual(43, book2.find(detail.uuid).meta.get('test'))
示例#22
0
def _ensure_db_initialized(conn, flow):
    """Ensure the logbook and flow detail for ``flow`` exist in the db.

    Upgrades the schema, then creates (and saves) the logbook and/or the
    flow detail only when they are not already present.
    """
    conn.upgrade()

    dirty = False
    try:
        logbook = conn.get_logbook(LOGBOOK_ID)
    except exceptions.NotFound:
        logbook = models.LogBook(LOGBOOK_ID, uuid=LOGBOOK_ID)
        dirty = True

    flow_detail = logbook.find(flow.name)
    if flow_detail is None:
        # The flow's name doubles as the flow detail's uuid here.
        flow_detail = models.FlowDetail(flow.name, flow.name)
        logbook.add(flow_detail)
        dirty = True

    if dirty:
        conn.save_logbook(logbook)

    return logbook, flow_detail
示例#23
0
    def run_poster(self, flow_factory, *args, wait=False, **kwargs):
        """Post a job built from ``flow_factory`` and return its id.

        The required 'store' kwarg becomes the job's store; remaining
        args/kwargs are persisted as the factory's arguments.
        """
        with self.driver.persistence_driver.get_persistence() as persistence:
            with self.driver.job_board(persistence) as job_board:
                job_id = uuidutils.generate_uuid()
                job_name = '-'.join([flow_factory.__name__, job_id])
                book = models.LogBook(job_name)
                detail = models.FlowDetail(job_name, job_id)
                details = {'store': kwargs.pop('store')}
                book.add(detail)
                persistence.get_connection().save_logbook(book)
                engines.save_factory_details(detail,
                                             flow_factory,
                                             args,
                                             kwargs,
                                             backend=persistence)

                job_board.post(job_name, book=book, details=details)
                if wait:
                    self._wait_for_job(job_board)

                return job_id
示例#24
0
    def test_task_detail_retry_type_(self):
        """A retry detail keeps its type and intention across a save."""
        book_id = uuidutils.generate_uuid()
        book = models.LogBook(name='lb-%s' % book_id, uuid=book_id)
        detail = models.FlowDetail('test', uuid=uuidutils.generate_uuid())
        retry = models.RetryDetail("detail-1",
                                   uuid=uuidutils.generate_uuid())
        retry.intention = states.REVERT
        book.add(detail)
        detail.add(retry)

        with contextlib.closing(self._get_connection()) as conn:
            conn.save_logbook(book)
            conn.update_flow_details(detail)
            conn.update_atom_details(retry)

        # Reload and confirm both the intention and the concrete type.
        with contextlib.closing(self._get_connection()) as conn:
            book2 = conn.get_logbook(book_id)
        retry2 = book2.find(detail.uuid).find(retry.uuid)
        self.assertEqual(states.REVERT, retry2.intention)
        self.assertIsInstance(retry2, models.RetryDetail)
示例#25
0
    def test_task_detail_save(self):
        """Task details cannot be updated before their parents are saved."""
        book_id = uuidutils.generate_uuid()
        book = models.LogBook(name='lb-%s' % book_id, uuid=book_id)
        detail = models.FlowDetail('test', uuid=uuidutils.generate_uuid())
        task = models.TaskDetail("detail-1", uuid=uuidutils.generate_uuid())
        book.add(detail)
        detail.add(task)

        # While the owning logbook has never been saved, neither the flow
        # detail nor the task detail may be updated (children cannot
        # exist without their parents).
        with contextlib.closing(self._get_connection()) as conn:
            self.assertRaises(exc.NotFound, conn.update_flow_details, detail)
            self.assertRaises(exc.NotFound, conn.update_atom_details, task)

        # Saving the logbook first makes both updates legal.
        with contextlib.closing(self._get_connection()) as conn:
            conn.save_logbook(book)
            conn.update_flow_details(detail)
            conn.update_atom_details(task)
示例#26
0
def create_flow_detail(flow, book=None, backend=None, meta=None):
    """Creates a flow detail for a flow & adds & saves it in a logbook.

    This will create a flow detail for the given flow using the flow name,
    and add it to the provided logbook and then uses the given backend to save
    the logbook and then returns the created flow detail.

    If no book is provided a temporary one will be created automatically (no
    reference to the logbook will be returned, so this should nearly *always*
    be provided or only used in situations where no logbook is needed, for
    example in tests). If no backend is provided then no saving will occur and
    the created flow detail will not be persisted even if the flow detail was
    added to a given (or temporarily generated) logbook.
    """
    flow_id = uuidutils.generate_uuid()
    flow_name = getattr(flow, 'name', None)
    if flow_name is None:
        # NOTE: Logger.warn is a deprecated alias of Logger.warning (and
        # removed in newer Python versions), so use warning() instead.
        LOG.warning("No name provided for flow %s (id %s)", flow, flow_id)
        flow_name = flow_id

    flow_detail = models.FlowDetail(name=flow_name, uuid=flow_id)
    if meta is not None:
        if flow_detail.meta is None:
            flow_detail.meta = {}
        flow_detail.meta.update(meta)

    if backend is not None and book is None:
        LOG.warning("No logbook provided for flow %s, creating one.", flow)
        book = temporary_log_book(backend)

    if book is not None:
        book.add(flow_detail)
        if backend is not None:
            with contextlib.closing(backend.get_connection()) as conn:
                conn.save_logbook(book)
        # Return the one from the saved logbook instead of the local one so
        # that the freshest version is given back.
        return book.find(flow_id)
    else:
        return flow_detail
示例#27
0
 def make_save_book(saver, review_id):
     """Persist a logbook/flow-detail pair describing future review work."""
     book = models.LogBook("book_%s" % review_id)
     detail = models.FlowDetail("flow_%s" % review_id,
                                uuidutils.generate_uuid())
     book.add(detail)
     # Associate the factory method we want called (in the future) with
     # the book, so the conductor can invoke it to rebuild the workflow
     # objects that represent the work.
     #
     # The (currently empty) args/kwargs could carry parameters telling
     # the factory how to build a workflow unique to this review.
     engines.save_factory_details(detail, create_review_workflow, (), {})
     with contextlib.closing(saver.get_connection()) as conn:
         conn.save_logbook(book)
         return book
示例#28
0
 def test_flow_name_and_uuid(self):
     """Storage exposes the flow detail's name and uuid."""
     detail = models.FlowDetail(name='test-fd', uuid='aaaa')
     storage = self._get_storage(detail)
     self.assertEqual('test-fd', storage.flow_name)
     self.assertEqual('aaaa', storage.flow_uuid)
        return 'ok'


def flow_factory():
    """Build the linear example flow: task, interrupter, task."""
    flow = lf.Flow('resume from backend example')
    flow.add(TestTask(name='first'),
             InterruptTask(name='boom'),
             TestTask(name='second'))
    return flow


# INITIALIZE PERSISTENCE ####################################

with eu.get_backend() as backend:

    # Create a place where the persistence information will be stored.
    book = models.LogBook("example")
    flow_detail = models.FlowDetail("resume from backend example",
                                    uuid=uuidutils.generate_uuid())
    book.add(flow_detail)
    # Save the (still empty) logbook so a later run can resume from it.
    with contextlib.closing(backend.get_connection()) as conn:
        conn.save_logbook(book)

    # CREATE AND RUN THE FLOW: FIRST ATTEMPT ####################

    flow = flow_factory()
    # Bind the flow to the stored flow detail so its progress persists.
    engine = taskflow.engines.load(flow,
                                   flow_detail=flow_detail,
                                   book=book,
                                   backend=backend)

    print_task_states(flow_detail, "At the beginning, there is no state")
    eu.print_wrapped("Running")
    # NOTE(review): InterruptTask presumably stops this first run partway
    # so a later resume can continue it -- confirm against the full example.
    engine.run()
示例#30
0
    def test_get_notification_recovery_workflow_details_raises_keyerror(
            self, mock_get_flows_for_book, mock_get_atoms_for_flow):
        """Progress details are built from persisted flow/atom records.

        Mocks the persistence lookups to return one flow detail and three
        task details (two finished with progress metadata, one still
        running without any), then checks the driver reports progress for
        exactly the two finished atoms.

        NOTE(review): despite the ``raises_keyerror`` name, nothing here
        asserts a KeyError -- presumably the driver is expected to swallow
        the missing 'progress' key of the running atom; confirm intent.
        """
        notification = fakes.create_fake_notification(
            payload={
                'event': 'LIFECYCLE', 'instance_uuid': uuidsentinel.fake_ins,
                'vir_domain_event': 'STOPPED_FAILED'},
            source_host_uuid=uuidsentinel.fake_host,
            notification_uuid=uuidsentinel.fake_notification)

        # The flow detail's uuid intentionally matches the notification's.
        fd = models.FlowDetail('test', uuid=notification.notification_uuid)
        # Atom 1: finished stop-instance task with two progress snapshots.
        atom1 = models.TaskDetail('StopInstanceTask',
                                  uuid=uuidsentinel.atom_id_1)
        atom1.meta = {
            'progress': 1.0,
            'progress_details': {
                'at_progress': 1.0,
                'details': {
                    'progress_details': [
                        {'timestamp': '2019-03-11 05:22:20.329171',
                         'message': 'Stopping instance: '
                                    '87c8ebc3-2a70-49f0-9280-d34662dc203d',
                         'progress': 0.0},
                        {'timestamp': '2019-03-11 05:22:28.902665',
                         'message': "Stopped instance: "
                                    "'87c8ebc3-2a70-49f0-9280-d34662dc203d'",
                         'progress': 1.0}]}}}
        atom1.state = 'SUCCESS'

        # Atom 2: finished confirm-active task, same snapshot structure.
        atom2 = models.TaskDetail('ConfirmInstanceActiveTask',
                                  uuid=uuidsentinel.atom_id_2)
        atom2.meta = {
            'progress': 1.0,
            'progress_details': {
                'at_progress': 1.0,
                'details': {
                    'progress_details': [
                        {'timestamp': '2019-03-11 05:22:29.597303',
                         'message': "Confirming instance "
                                    "'87c8ebc3-2a70-49f0-9280-d34662dc203d' "
                                    "vm_state is ACTIVE",
                         'progress': 0.0},
                        {'timestamp': '2019-03-11 05:22:31.916620',
                         'message': "Confirmed instance "
                                    "'87c8ebc3-2a70-49f0-9280-d34662dc203d'"
                                    " vm_state is ACTIVE", 'progress': 1.0}]
                }}}
        atom2.state = 'SUCCESS'

        # Atom 3: still running, deliberately has no meta/progress keys.
        atom3 = models.TaskDetail('StartInstanceTask',
                                  uuid=uuidsentinel.atom_id_3)
        atom3.state = 'RUNNING'

        # The driver consumes these lazily, so hand it real generators.
        def fd_generator():
            yield fd

        def atom_detail_generator():
            for atom in [atom1, atom2, atom3]:
                yield atom

        flow_details = fd_generator()
        atom_details = atom_detail_generator()
        mock_get_flows_for_book.return_value = flow_details
        mock_get_atoms_for_flow.return_value = atom_details
        driver.PERSISTENCE_BACKEND = 'memory://'

        progress_details = (
            self.taskflow_driver.get_notification_recovery_workflow_details(
                self.ctxt, 'auto', notification))

        # list of NotificationProgressDetails object
        # Only the two SUCCESS atoms should be reported.
        expected_result = []
        expected_result.append((
            fakes.create_fake_notification_progress_details(
                name=atom1.name,
                uuid=atom1.uuid,
                progress=atom1.meta['progress'],
                state=atom1.state,
                progress_details=atom1.meta['progress_details']
                ['details']['progress_details'])))
        expected_result.append((
            fakes.create_fake_notification_progress_details(
                name=atom2.name,
                uuid=atom2.uuid,
                progress=atom2.meta['progress'],
                state=atom2.state,
                progress_details=atom2.meta['progress_details']
                ['details']['progress_details'])))

        self.assertIsNotNone(progress_details)
        mock_get_flows_for_book.assert_called_once()
        mock_get_atoms_for_flow.assert_called_once()

        self.assertObjectList(expected_result, progress_details)