def test_logbook_add_task_detail(self):
    lb_id = uuidutils.generate_uuid()
    lb_name = 'lb-%s' % (lb_id)
    lb = models.LogBook(name=lb_name, uuid=lb_id)
    fd = models.FlowDetail('test', uuid=uuidutils.generate_uuid())
    td = models.TaskDetail("detail-1", uuid=uuidutils.generate_uuid())
    td.version = '4.2'
    fd.add(td)
    lb.add(fd)
    with contextlib.closing(self._get_connection()) as conn:
        conn.save_logbook(lb)
    with contextlib.closing(self._get_connection()) as conn:
        lb2 = conn.get_logbook(lb_id)
        self.assertEqual(1, len(lb2))
        tasks = 0
        # Count the task details in the reloaded book (counting the
        # original would trivially pass).
        for fd2 in lb2:
            tasks += len(fd2)
        self.assertEqual(1, tasks)
    with contextlib.closing(self._get_connection()) as conn:
        lb2 = conn.get_logbook(lb_id)
        fd2 = lb2.find(fd.uuid)
        td2 = fd2.find(td.uuid)
        self.assertIsNotNone(td2)
        self.assertEqual('detail-1', td2.name)
        self.assertEqual('4.2', td2.version)
        self.assertEqual(states.EXECUTE, td2.intention)

def test_logbook_merge_flow_detail(self):
    lb_id = uuidutils.generate_uuid()
    lb_name = 'lb-%s' % (lb_id)
    lb = models.LogBook(name=lb_name, uuid=lb_id)
    fd = models.FlowDetail('test', uuid=uuidutils.generate_uuid())
    lb.add(fd)
    with contextlib.closing(self._get_connection()) as conn:
        conn.save_logbook(lb)
    lb2 = models.LogBook(name=lb_name, uuid=lb_id)
    fd2 = models.FlowDetail('test2', uuid=uuidutils.generate_uuid())
    lb2.add(fd2)
    with contextlib.closing(self._get_connection()) as conn:
        conn.save_logbook(lb2)
    with contextlib.closing(self._get_connection()) as conn:
        lb3 = conn.get_logbook(lb_id)
        self.assertEqual(2, len(lb3))

def test_retry_detail_save_with_task_failure(self):
    lb_id = uuidutils.generate_uuid()
    lb_name = 'lb-%s' % (lb_id)
    lb = models.LogBook(name=lb_name, uuid=lb_id)
    fd = models.FlowDetail('test', uuid=uuidutils.generate_uuid())
    lb.add(fd)
    rd = models.RetryDetail("retry-1", uuid=uuidutils.generate_uuid())
    fail = failure.Failure.from_exception(RuntimeError('fail'))
    rd.results.append((42, {'some-task': fail}))
    fd.add(rd)
    # Save it.
    with contextlib.closing(self._get_connection()) as conn:
        conn.save_logbook(lb)
        conn.update_flow_details(fd)
        conn.update_atom_details(rd)
    # Now read it back.
    with contextlib.closing(self._get_connection()) as conn:
        lb2 = conn.get_logbook(lb_id)
        fd2 = lb2.find(fd.uuid)
        rd2 = fd2.find(rd.uuid)
        self.assertIsInstance(rd2, models.RetryDetail)
        fail2 = rd2.results[0][1].get('some-task')
        self.assertIsInstance(fail2, failure.Failure)
        self.assertTrue(fail.matches(fail2))

def test_retry_detail_save_intention(self):
    lb_id = uuidutils.generate_uuid()
    lb_name = 'lb-%s' % (lb_id)
    lb = models.LogBook(name=lb_name, uuid=lb_id)
    fd = models.FlowDetail('test', uuid=uuidutils.generate_uuid())
    lb.add(fd)
    rd = models.RetryDetail("retry-1", uuid=uuidutils.generate_uuid())
    fd.add(rd)
    # Save it.
    with contextlib.closing(self._get_connection()) as conn:
        conn.save_logbook(lb)
        conn.update_flow_details(fd)
        conn.update_atom_details(rd)
    # Change the intention and save again.
    rd.intention = states.REVERT
    with contextlib.closing(self._get_connection()) as conn:
        conn.update_atom_details(rd)
    # Now read it back.
    with contextlib.closing(self._get_connection()) as conn:
        lb2 = conn.get_logbook(lb_id)
        fd2 = lb2.find(fd.uuid)
        rd2 = fd2.find(rd.uuid)
        self.assertEqual(states.REVERT, rd2.intention)
        self.assertIsInstance(rd2, models.RetryDetail)

def run_poster():
    # This just posts a single job and then ends...
    print("Starting poster with pid: %s" % ME)
    my_name = "poster-%s" % ME
    persist_backend = persistence_backends.fetch(PERSISTENCE_URI)
    with contextlib.closing(persist_backend):
        with contextlib.closing(persist_backend.get_connection()) as conn:
            conn.upgrade()
        job_backend = job_backends.fetch(my_name, JB_CONF,
                                         persistence=persist_backend)
        job_backend.connect()
        with contextlib.closing(job_backend):
            # Create information in the persistence backend about the
            # unit of work we want to complete and the factory that can
            # be called to create the tasks that make up that unit of
            # work.
            lb = models.LogBook("post-from-%s" % my_name)
            fd = models.FlowDetail("song-from-%s" % my_name,
                                   uuidutils.generate_uuid())
            lb.add(fd)
            with contextlib.closing(
                    persist_backend.get_connection()) as conn:
                conn.save_logbook(lb)
            engines.save_factory_details(fd, make_bottles,
                                         [HOW_MANY_BOTTLES], {},
                                         backend=persist_backend)
            # Post, and be done with it!
            jb = job_backend.post("song-from-%s" % my_name, book=lb)
            print("Posted: %s" % jb)
            print("Goodbye...")

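# NOTE: make_bottles (referenced above with args [HOW_MANY_BOTTLES]) is
# defined elsewhere in the real example; this is only a hedged sketch of
# its likely shape. A factory saved via engines.save_factory_details()
# must be importable and, when called with the saved args, return the
# flow to run. The imports, task class and flow layout here are
# illustrative assumptions, not the original code.
from zag import task
from zag.patterns import linear_flow as lf


class VerseTask(task.Task):
    def execute(self):
        # The verse number is encoded in the task name for simplicity.
        print("%s bottles of beer on the wall..." % self.name)


def make_bottles(how_many):
    song = lf.Flow("bottle-song")
    for number in range(how_many, 0, -1):
        song.add(VerseTask(name="verse-%s" % number))
    return song
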
def test_task_detail_meta_update(self):
    lb_id = uuidutils.generate_uuid()
    lb_name = 'lb-%s' % (lb_id)
    lb = models.LogBook(name=lb_name, uuid=lb_id)
    fd = models.FlowDetail('test', uuid=uuidutils.generate_uuid())
    lb.add(fd)
    td = models.TaskDetail("detail-1", uuid=uuidutils.generate_uuid())
    td.meta = {'test': 42}
    fd.add(td)
    with contextlib.closing(self._get_connection()) as conn:
        conn.save_logbook(lb)
        conn.update_flow_details(fd)
        conn.update_atom_details(td)
    td.meta['test'] = 43
    with contextlib.closing(self._get_connection()) as conn:
        conn.update_atom_details(td)
    with contextlib.closing(self._get_connection()) as conn:
        lb2 = conn.get_logbook(lb_id)
        fd2 = lb2.find(fd.uuid)
        td2 = fd2.find(td.uuid)
        self.assertEqual(43, td2.meta.get('test'))
        self.assertIsInstance(td2, models.TaskDetail)

def test_task_detail_with_failure(self):
    lb_id = uuidutils.generate_uuid()
    lb_name = 'lb-%s' % (lb_id)
    lb = models.LogBook(name=lb_name, uuid=lb_id)
    fd = models.FlowDetail('test', uuid=uuidutils.generate_uuid())
    lb.add(fd)
    td = models.TaskDetail("detail-1", uuid=uuidutils.generate_uuid())
    try:
        raise RuntimeError('Woot!')
    except Exception:
        td.failure = failure.Failure()
    fd.add(td)
    with contextlib.closing(self._get_connection()) as conn:
        conn.save_logbook(lb)
        conn.update_flow_details(fd)
        conn.update_atom_details(td)
    # Read the failure back.
    with contextlib.closing(self._get_connection()) as conn:
        lb2 = conn.get_logbook(lb_id)
        fd2 = lb2.find(fd.uuid)
        td2 = fd2.find(td.uuid)
        self.assertEqual('Woot!', td2.failure.exception_str)
        self.assertIs(td2.failure.check(RuntimeError), RuntimeError)
        self.assertEqual(td.failure.traceback_str,
                         td2.failure.traceback_str)
        self.assertIsInstance(td2, models.TaskDetail)

def temporary_log_book(backend=None):
    """Creates a temporary logbook for temporary usage in the given backend.

    Mainly useful for tests and other use cases where a temporary logbook
    is needed for a short period of time.
    """
    book = models.LogBook('tmp')
    if backend is not None:
        with contextlib.closing(backend.get_connection()) as conn:
            conn.save_logbook(book)
    return book

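# NOTE: a minimal usage sketch for temporary_log_book(), assuming the
# in-memory persistence backend ('memory://') can be obtained through the
# same persistence_backends.fetch() helper that run_poster() uses above.
backend = persistence_backends.fetch('memory://')
with contextlib.closing(backend):
    with contextlib.closing(backend.get_connection()) as conn:
        conn.upgrade()
    book = temporary_log_book(backend=backend)
    # The book has already been saved into the backend and is usable
    # immediately; without a backend argument it is never persisted.
    print(book.uuid)
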
def test_logbook_lazy_fetch(self):
    lb_id = uuidutils.generate_uuid()
    lb_name = 'lb-%s' % (lb_id)
    lb = models.LogBook(name=lb_name, uuid=lb_id)
    fd = models.FlowDetail('test', uuid=uuidutils.generate_uuid())
    lb.add(fd)
    with contextlib.closing(self._get_connection()) as conn:
        conn.save_logbook(lb)
    with contextlib.closing(self._get_connection()) as conn:
        lb2 = conn.get_logbook(lb_id, lazy=True)
        self.assertEqual(0, len(lb2))
        self.assertEqual(1, len(lb))

def test_logbook_add_flow_detail(self):
    lb_id = uuidutils.generate_uuid()
    lb_name = 'lb-%s' % (lb_id)
    lb = models.LogBook(name=lb_name, uuid=lb_id)
    fd = models.FlowDetail('test', uuid=uuidutils.generate_uuid())
    lb.add(fd)
    with contextlib.closing(self._get_connection()) as conn:
        conn.save_logbook(lb)
    with contextlib.closing(self._get_connection()) as conn:
        lb2 = conn.get_logbook(lb_id)
        self.assertEqual(1, len(lb2))
        self.assertEqual(1, len(lb))
        self.assertEqual(fd.name, lb2.find(fd.uuid).name)

def test_logbook_delete(self):
    lb_id = uuidutils.generate_uuid()
    lb_name = 'lb-%s' % (lb_id)
    lb = models.LogBook(name=lb_name, uuid=lb_id)
    with contextlib.closing(self._get_connection()) as conn:
        self.assertRaises(exc.NotFound, conn.destroy_logbook, lb_id)
    with contextlib.closing(self._get_connection()) as conn:
        conn.save_logbook(lb)
    with contextlib.closing(self._get_connection()) as conn:
        lb2 = conn.get_logbook(lb_id)
        self.assertIsNotNone(lb2)
    with contextlib.closing(self._get_connection()) as conn:
        conn.destroy_logbook(lb_id)
        self.assertRaises(exc.NotFound, conn.destroy_logbook, lb_id)

def create_logbook(self, job_name, store=None):
    logbook_id = uuidutils.generate_uuid()
    connection = self._persistence.get_connection()
    try:
        book = connection.get_logbook(logbook_id, lazy=True)
    except excp.NotFound:
        book = models.LogBook(job_name, logbook_id)
    flow_detail = models.FlowDetail(job_name, logbook_id)
    flow_detail.meta['store'] = store or {}
    book.add(flow_detail)
    connection.save_logbook(book)
    return flow_detail, book

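# NOTE: a hedged usage sketch for create_logbook() above; 'poster' stands
# in for whatever object carries the _persistence attribute (hypothetical
# here, as is the job name and store content). The 'store' mapping lands
# in the flow detail's meta, where it can later seed an engine's storage.
flow_detail, book = poster.create_logbook(
    'convert-video', store={'input_path': '/tmp/in.avi'})
assert flow_detail.meta['store'] == {'input_path': '/tmp/in.avi'}
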
def test_flow_detail_lazy_fetch(self):
    lb_id = uuidutils.generate_uuid()
    lb_name = 'lb-%s' % (lb_id)
    lb = models.LogBook(name=lb_name, uuid=lb_id)
    fd = models.FlowDetail('test', uuid=uuidutils.generate_uuid())
    td = models.TaskDetail("detail-1", uuid=uuidutils.generate_uuid())
    td.version = '4.2'
    fd.add(td)
    lb.add(fd)
    with contextlib.closing(self._get_connection()) as conn:
        conn.save_logbook(lb)
    with contextlib.closing(self._get_connection()) as conn:
        fd2 = conn.get_flow_details(fd.uuid, lazy=True)
        self.assertEqual(0, len(fd2))
        self.assertEqual(1, len(fd))

def test_flow_detail_update_not_existing(self):
    lb_id = uuidutils.generate_uuid()
    lb_name = 'lb-%s' % (lb_id)
    lb = models.LogBook(name=lb_name, uuid=lb_id)
    fd = models.FlowDetail('test', uuid=uuidutils.generate_uuid())
    lb.add(fd)
    with contextlib.closing(self._get_connection()) as conn:
        conn.save_logbook(lb)
    fd2 = models.FlowDetail('test-2', uuid=uuidutils.generate_uuid())
    lb.add(fd2)
    with contextlib.closing(self._get_connection()) as conn:
        conn.save_logbook(lb)
    with contextlib.closing(self._get_connection()) as conn:
        lb2 = conn.get_logbook(lb.uuid)
        self.assertIsNotNone(lb2.find(fd.uuid))
        self.assertIsNotNone(lb2.find(fd2.uuid))

def test_flow_detail_save(self):
    lb_id = uuidutils.generate_uuid()
    lb_name = 'lb-%s' % (lb_id)
    lb = models.LogBook(name=lb_name, uuid=lb_id)
    fd = models.FlowDetail('test', uuid=uuidutils.generate_uuid())
    lb.add(fd)
    # Ensure we can't save it since its owning logbook hasn't been
    # saved (flow details can not exist on their own without a
    # connection to a logbook).
    with contextlib.closing(self._get_connection()) as conn:
        self.assertRaises(exc.NotFound, conn.get_logbook, lb_id)
        self.assertRaises(exc.NotFound, conn.update_flow_details, fd)
    # Ok now we should be able to save both.
    with contextlib.closing(self._get_connection()) as conn:
        conn.save_logbook(lb)
        conn.update_flow_details(fd)

def test_dir_backend_cache_overfill(self):
    if self.max_cache_size is not None:
        # Ensure cache never goes past the desired max size...
        books_ids_made = []
        with contextlib.closing(self._get_connection()) as conn:
            for i in range(0, int(1.5 * self.max_cache_size)):
                lb_name = 'book-%s' % (i)
                lb_id = uuidutils.generate_uuid()
                lb = models.LogBook(name=lb_name, uuid=lb_id)
                self.assertRaises(exc.NotFound, conn.get_logbook, lb_id)
                conn.save_logbook(lb)
                books_ids_made.append(lb_id)
                self.assertLessEqual(self.backend.file_cache.currsize,
                                     self.max_cache_size)
        # Also ensure that we can still read all created books...
        with contextlib.closing(self._get_connection()) as conn:
            for lb_id in books_ids_made:
                lb = conn.get_logbook(lb_id)
                self.assertIsNotNone(lb)

def test_flow_detail_meta_update(self):
    lb_id = uuidutils.generate_uuid()
    lb_name = 'lb-%s' % (lb_id)
    lb = models.LogBook(name=lb_name, uuid=lb_id)
    fd = models.FlowDetail('test', uuid=uuidutils.generate_uuid())
    fd.meta = {'test': 42}
    lb.add(fd)
    with contextlib.closing(self._get_connection()) as conn:
        conn.save_logbook(lb)
        conn.update_flow_details(fd)
    fd.meta['test'] = 43
    with contextlib.closing(self._get_connection()) as conn:
        conn.update_flow_details(fd)
    with contextlib.closing(self._get_connection()) as conn:
        lb2 = conn.get_logbook(lb_id)
        fd2 = lb2.find(fd.uuid)
        self.assertEqual(43, fd2.meta.get('test'))

def test_logbook_save_retrieve_many(self):
    lb_ids = {}
    for i in range(0, 10):
        lb_id = uuidutils.generate_uuid()
        lb_name = 'lb-%s-%s' % (i, lb_id)
        lb = models.LogBook(name=lb_name, uuid=lb_id)
        lb_ids[lb_id] = True
        # Should not already exist.
        with contextlib.closing(self._get_connection()) as conn:
            self.assertRaises(exc.NotFound, conn.get_logbook, lb_id)
            conn.save_logbook(lb)
    # Now fetch them all.
    with contextlib.closing(self._get_connection()) as conn:
        lbs = conn.get_logbooks()
        for lb in lbs:
            self.assertIn(lb.uuid, lb_ids)
            lb_ids.pop(lb.uuid)
        self.assertEqual(0, len(lb_ids))

def test_logbook_save_retrieve(self):
    lb_id = uuidutils.generate_uuid()
    lb_meta = {'1': 2}
    lb_name = 'lb-%s' % (lb_id)
    lb = models.LogBook(name=lb_name, uuid=lb_id)
    lb.meta = lb_meta
    # Should not already exist.
    with contextlib.closing(self._get_connection()) as conn:
        self.assertRaises(exc.NotFound, conn.get_logbook, lb_id)
        conn.save_logbook(lb)
    # Make sure we can reload it (and all of its attributes are what
    # we expect them to be).
    with contextlib.closing(self._get_connection()) as conn:
        lb = conn.get_logbook(lb_id)
    self.assertEqual(lb_name, lb.name)
    self.assertEqual(0, len(lb))
    self.assertEqual(lb_meta, lb.meta)
    self.assertIsNone(lb.updated_at)
    self.assertIsNotNone(lb.created_at)

def test_task_detail_save(self):
    lb_id = uuidutils.generate_uuid()
    lb_name = 'lb-%s' % (lb_id)
    lb = models.LogBook(name=lb_name, uuid=lb_id)
    fd = models.FlowDetail('test', uuid=uuidutils.generate_uuid())
    lb.add(fd)
    td = models.TaskDetail("detail-1", uuid=uuidutils.generate_uuid())
    fd.add(td)
    # Ensure we can't save it since its owning logbook hasn't been
    # saved (flow details/task details can not exist on their own
    # without their parent existing).
    with contextlib.closing(self._get_connection()) as conn:
        self.assertRaises(exc.NotFound, conn.update_flow_details, fd)
        self.assertRaises(exc.NotFound, conn.update_atom_details, td)
    # Ok now we should be able to save them.
    with contextlib.closing(self._get_connection()) as conn:
        conn.save_logbook(lb)
        conn.update_flow_details(fd)
        conn.update_atom_details(td)

def make_save_book(saver, review_id):
    # Record what we want to happen (sometime in the future).
    book = models.LogBook("book_%s" % review_id)
    detail = models.FlowDetail("flow_%s" % review_id,
                               uuidutils.generate_uuid())
    book.add(detail)
    # Associate the factory method we want to be called (in the future)
    # with the flow detail, so that the conductor will be able to call
    # into that factory to retrieve the workflow objects that represent
    # the work.
    #
    # These args and kwargs *can* be used to save any specific parameters
    # into the factory when it is being called to create the workflow
    # objects (typically used to tell a factory how to create a unique
    # workflow that represents this review).
    factory_args = ()
    factory_kwargs = {}
    engines.save_factory_details(detail, create_review_workflow,
                                 factory_args, factory_kwargs)
    with contextlib.closing(saver.get_connection()) as conn:
        conn.save_logbook(book)
    return book

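# NOTE: a hedged sketch of the consuming side of make_save_book(), not
# part of the original example: because the factory reference was saved
# alongside the flow detail, a conductor (or any later process) can
# rebuild a runnable engine straight from storage. load_from_detail
# mirrors its use elsewhere in these examples; the function name here is
# illustrative.
def run_saved_book(saver, book_uuid, flow_uuid):
    with contextlib.closing(saver.get_connection()) as conn:
        book = conn.get_logbook(book_uuid)
    detail = book.find(flow_uuid)
    engine = engines.load_from_detail(detail, backend=saver)
    engine.run()
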
    persist_path = os.path.join(tempfile.gettempdir(), "persisting.db")
    backend_uri = "sqlite:///%s" % (persist_path)
else:
    persist_path = os.path.join(tempfile.gettempdir(), "persisting")
    backend_uri = "file:///%s" % (persist_path)

if os.path.exists(persist_path):
    blowup = False
else:
    blowup = True

with eu.get_backend(backend_uri) as backend:
    # Make a flow that will blow up if the file didn't exist previously;
    # if it did exist, assume we won't blow up (and therefore this shows
    # the undo and redo that a flow will go through).
    book = models.LogBook("my-test")
    flow = make_flow(blowup=blowup)
    eu.print_wrapped("Running")
    try:
        eng = engines.load(flow, engine='serial',
                           backend=backend, book=book)
        eng.run()
        if not blowup:
            eu.rm_path(persist_path)
    except Exception:
        # NOTE(harlowja): don't exit with a non-zero status code, so that
        # we can print the book contents; avoiding exiting also makes the
        # unit tests (which also run these examples) pass.
        traceback.print_exc(file=sys.stdout)
    eu.print_wrapped("Book contents")
    print(book.pformat())

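# NOTE: a hedged sketch of the make_flow() factory used above (the real
# one is defined elsewhere in the example): when blowup is true the flow
# contains a task whose execute() raises, so the engine reverts and the
# failure lands in the logbook that gets printed. The imports, class
# names and flow layout are illustrative assumptions.
from zag import task
from zag.patterns import linear_flow as lf


class PrintTask(task.Task):
    def execute(self):
        print('executing %s' % self)


class BoomTask(task.Task):
    def execute(self):
        raise RuntimeError('boom')


def make_flow(blowup=False):
    flow = lf.Flow('persist-example')
    flow.add(PrintTask(name='first'))
    if blowup:
        flow.add(BoomTask(name='boom'))
    flow.add(PrintTask(name='second'))
    return flow
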
    # No eventlet installed, just let the default be used instead.
    executor = None

# Create/fetch a logbook that will track the workflow's work.
book = None
flow_detail = None
if all([book_id, flow_id]):
    # Try to find in a prior logbook and flow detail...
    with contextlib.closing(backend.get_connection()) as conn:
        try:
            book = conn.get_logbook(book_id)
            flow_detail = book.find(flow_id)
        except exc.NotFound:
            pass
if book is None and flow_detail is None:
    book = models.LogBook("vm-boot")
    with contextlib.closing(backend.get_connection()) as conn:
        conn.save_logbook(book)
    engine = engines.load_from_factory(create_flow,
                                       backend=backend, book=book,
                                       engine='parallel',
                                       executor=executor)
    print("!! Your tracking id is: '%s+%s'" % (book.uuid,
                                               engine.storage.flow_uuid))
    print("!! Please submit this on later runs for tracking purposes")
else:
    # Attempt to load from a previously partially completed flow.
    engine = engines.load_from_detail(flow_detail, backend=backend,
                                      engine='parallel',
                                      executor=executor)

        print('executing %s' % self)
        return 'ok'


def flow_factory():
    return lf.Flow('resume from backend example').add(
        TestTask(name='first'),
        InterruptTask(name='boom'),
        TestTask(name='second'))


# INITIALIZE PERSISTENCE ####################################

with eu.get_backend() as backend:
    # Create a place where the persistence information will be stored.
    book = models.LogBook("example")
    flow_detail = models.FlowDetail("resume from backend example",
                                    uuid=uuidutils.generate_uuid())
    book.add(flow_detail)
    with contextlib.closing(backend.get_connection()) as conn:
        conn.save_logbook(book)

    # CREATE AND RUN THE FLOW: FIRST ATTEMPT ####################

    flow = flow_factory()
    engine = zag.engines.load(flow, flow_detail=flow_detail,
                              book=book, backend=backend)

    print_task_states(flow_detail, "At the beginning, there is no state")

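# NOTE: print_task_states() used above is defined elsewhere in the real
# example; this is a minimal sketch assuming only what the other snippets
# here already show: a flow detail is iterable and yields atom details
# whose name/state/results were persisted by the engine.
def print_task_states(flow_detail, msg):
    print(msg)
    print("Flow '%s' state: %s" % (flow_detail.name, flow_detail.state))
    for atom_detail in flow_detail:
        print(" %s: %s, result=%s" % (atom_detail.name,
                                      atom_detail.state,
                                      atom_detail.results))
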
try:
    book_id, flow_id = sys.argv[2].split("+", 1)
except (IndexError, ValueError):
    book_id = None
    flow_id = None

if not all([book_id, flow_id]):
    # If no 'tracking id' (think a fedex or ups tracking id) is provided
    # then we create one by creating a logbook (where flow details are
    # stored) and creating a flow detail (where flow and task state is
    # stored). The combination of these 2 objects' unique ids (uuids)
    # allows the users of zag to reassociate the workflows that were
    # potentially running (and which may have partially completed) back
    # with zag so that those workflows can be resumed (or reverted)
    # after a process/thread/engine has failed in some way.
    book = models.LogBook('resume-volume-create')
    flow_detail = models.FlowDetail("root",
                                    uuid=uuidutils.generate_uuid())
    book.add(flow_detail)
    with contextlib.closing(backend.get_connection()) as conn:
        conn.save_logbook(book)
    print("!! Your tracking id is: '%s+%s'" % (book.uuid,
                                               flow_detail.uuid))
    print("!! Please submit this on later runs for tracking purposes")
else:
    flow_detail = find_flow_detail(backend, book_id, flow_id)

# Load and run.
engine = engines.load(flow, flow_detail=flow_detail,
                      backend=backend, engine='serial')
engine.run()

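# NOTE: find_flow_detail() used above is defined elsewhere in the real
# example; a minimal sketch, assuming only the connection/logbook API
# shown in the other snippets here: load the book by its uuid, then look
# the flow detail up inside it.
def find_flow_detail(backend, book_id, flow_id):
    with contextlib.closing(backend.get_connection()) as conn:
        lb = conn.get_logbook(book_id)
        return lb.find(flow_id)
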