def save_logbook(self, book):
    """Save (merge) the given logbook into the in-memory backend.

    Fetches the existing logbook model by uuid (creating one when missing),
    merges the provided book into it, then merges each of the book's flow
    details the same way.

    :param book: logbook to save/merge.
    :returns: the backend's merged logbook model.
    """
    # Get a existing logbook model (or create it if it isn't there).
    try:
        e_lb = self.backend.log_books[book.uuid]
    except KeyError:
        e_lb = logbook.LogBook(book.name, book.uuid,
                               updated_at=book.updated_at,
                               created_at=timeutils.utcnow())
        self.backend.log_books[e_lb.uuid] = e_lb
    else:
        # TODO(harlowja): figure out a better way to set this property
        # without actually setting a 'private' property.
        e_lb._updated_at = timeutils.utcnow()
    p_utils.logbook_merge(e_lb, book, deep_copy=True)
    # Add anything in to the new logbook that isn't already
    # in the existing logbook.
    for flow_detail in book:
        try:
            e_fd = self.backend.flow_details[flow_detail.uuid]
        except KeyError:
            e_fd = logbook.FlowDetail(name=flow_detail.name,
                                      uuid=flow_detail.uuid)
            # Add the backend's copy (not the caller's object) so the
            # stored logbook and the flow detail registry stay consistent
            # and deep_copy isolation is not defeated.
            e_lb.add(e_fd)
            self.backend.flow_details[e_fd.uuid] = e_fd
        p_utils.flow_details_merge(e_fd, flow_detail, deep_copy=True)
        self._save_flowdetail_tasks(e_fd, flow_detail)
    return e_lb
def test_logbook_merge_flow_detail(self):
    """Saving a logbook twice with different flow details merges both."""
    book_id = uuidutils.generate_uuid()
    book_name = 'lb-%s' % (book_id)
    # First save: a book containing a single flow detail.
    book = logbook.LogBook(name=book_name, uuid=book_id)
    book.add(logbook.FlowDetail('test', uuid=uuidutils.generate_uuid()))
    with contextlib.closing(self._get_connection()) as conn:
        conn.save_logbook(book)
    # Second save: same book uuid/name, but a different flow detail.
    book_alt = logbook.LogBook(name=book_name, uuid=book_id)
    book_alt.add(logbook.FlowDetail('test2', uuid=uuidutils.generate_uuid()))
    with contextlib.closing(self._get_connection()) as conn:
        conn.save_logbook(book_alt)
    # The stored logbook should now contain both flow details.
    with contextlib.closing(self._get_connection()) as conn:
        merged = conn.get_logbook(book_id)
        self.assertEqual(2, len(merged))
def post_remote_pipeline_job(pipeline):
    """Post a pipeline job onto the default jobboard for remote execution.

    Upgrades the persistence backend, records a logbook/flow-detail pair
    describing the unit of work (plus the factory able to recreate it),
    posts a job referencing that logbook and returns the posted job.
    """
    ME = os.getpid()
    print("Starting poster with pid: %s" % ME)
    my_name = "poster-%s" % ME
    persist_backend = backend_helper.default_persistence_backend()
    with contextlib.closing(persist_backend):
        with contextlib.closing(persist_backend.get_connection()) as conn:
            # Make sure the backend schema/layout is ready for use.
            conn.upgrade()
        job_backend = backend_helper.default_jobboard_backend(my_name)
        job_backend.connect()
        with contextlib.closing(job_backend):
            # Create information in the persistence backend about the
            # unit of work we want to complete and the factory that
            # can be called to create the tasks that the work unit needs
            # to be done.
            lb = logbook.LogBook("post-from-%s" % my_name)
            fd = logbook.FlowDetail("sample-from-%s" % my_name,
                                    uuidutils.generate_uuid())
            lb.add(fd)
            with contextlib.closing(persist_backend.get_connection()) as conn:
                conn.save_logbook(lb)
            # Record the flow factory + args so a conductor can rebuild it.
            engines.save_factory_details(fd,
                                         pipeline_factory.make_pipeline_flow,
                                         [pipeline.name],
                                         pipeline.kwargs,
                                         backend=persist_backend)
            # Post, and be done with it!
            jb = job_backend.post("sample-job-from-%s" % my_name, book=lb)
            print("Posted: %s" % jb)
            return jb
def test_logbook_add_task_detail(self):
    """A task detail added to a saved logbook is persisted with its data."""
    lb_id = uuidutils.generate_uuid()
    lb_name = 'lb-%s' % (lb_id)
    lb = logbook.LogBook(name=lb_name, uuid=lb_id)
    fd = logbook.FlowDetail('test', uuid=uuidutils.generate_uuid())
    td = logbook.TaskDetail("detail-1", uuid=uuidutils.generate_uuid())
    td.version = '4.2'
    fd.add(td)
    lb.add(fd)
    with contextlib.closing(self._get_connection()) as conn:
        conn.save_logbook(lb)
    with contextlib.closing(self._get_connection()) as conn:
        lb2 = conn.get_logbook(lb_id)
        self.assertEqual(1, len(lb2))
        # Count tasks in the *fetched* logbook; the previous version
        # iterated the local 'lb' (and clobbered the outer 'fd'), so
        # persistence of the task detail was never really checked here.
        tasks = 0
        for fetched_fd in lb2:
            tasks += len(fetched_fd)
        self.assertEqual(1, tasks)
    with contextlib.closing(self._get_connection()) as conn:
        lb2 = conn.get_logbook(lb_id)
        fd2 = lb2.find(fd.uuid)
        td2 = fd2.find(td.uuid)
        self.assertIsNot(td2, None)
        self.assertEqual(td2.name, 'detail-1')
        self.assertEqual(td2.version, '4.2')
def create_flow_detail(flow, book=None, backend=None, meta=None):
    """Creates a flow detail for the given flow and adds it to the provided
    logbook (if provided) and then uses the given backend (if provided) to
    save the logbook then returns the created flow detail.
    """
    flow_id = uuidutils.generate_uuid()
    flow_name = getattr(flow, 'name', None)
    if flow_name is None:
        # Use lazy %-style logger args (not eager string formatting) so the
        # message is only built when the record is actually emitted; this
        # also matches the style of the warning below.
        LOG.warn("No name provided for flow %s (id %s)", flow, flow_id)
        flow_name = flow_id
    flow_detail = logbook.FlowDetail(name=flow_name, uuid=flow_id)
    if meta is not None:
        if flow_detail.meta is None:
            flow_detail.meta = {}
        flow_detail.meta.update(meta)
    if backend is not None and book is None:
        LOG.warn("No logbook provided for flow %s, creating one.", flow)
        book = temporary_log_book(backend)
    if book is not None:
        book.add(flow_detail)
        if backend is not None:
            with contextlib.closing(backend.get_connection()) as conn:
                conn.save_logbook(book)
        # Return the one from the saved logbook instead of the local one so
        # that the freshest version is given back.
        return book.find(flow_id)
    else:
        return flow_detail
def _convert_fd_to_external(fd):
    """Return a detached copy of the given flow detail and its task details."""
    clone = logbook.FlowDetail(fd.name, uuid=fd.uuid)
    clone.meta = fd.meta
    clone.state = fd.state
    for task_detail in fd.taskdetails:
        clone.add(_convert_td_to_external(task_detail))
    return clone
def _update_flow_details(self, fd, txn, create_missing=False):
    """Write the given flow detail into zookeeper, merging with any stored copy.

    Reads the currently stored flow detail node (queueing its creation on
    the transaction when missing and ``create_missing`` is set), merges the
    provided detail into it, queues the data write and then recursively
    updates each of the detail's atom details.

    :param fd: flow detail to persist.
    :param txn: zookeeper transaction the writes are queued on.
    :param create_missing: create the node instead of raising when absent.
    :returns: the merged flow detail.
    :raises exc.NotFound: if the node is absent and creation is disallowed.
    """
    # Determine whether the desired data exists or not
    fd_path = paths.join(self.flow_path, fd.uuid)
    try:
        fd_data, _zstat = self._client.get(fd_path)
    except k_exc.NoNodeError:
        # Not-existent: create or raise exception
        if create_missing:
            txn.create(fd_path)
            e_fd = logbook.FlowDetail(name=fd.name, uuid=fd.uuid)
        else:
            raise exc.NotFound("No flow details found with id: %s" % fd.uuid)
    else:
        # Existent: read it out
        e_fd = logbook.FlowDetail.from_dict(misc.decode_json(fd_data))
    # Update and write it back
    e_fd = e_fd.merge(fd)
    fd_data = e_fd.to_dict()
    txn.set_data(fd_path, misc.binary_encode(jsonutils.dumps(fd_data)))
    for ad in fd:
        ad_path = paths.join(fd_path, ad.uuid)
        # NOTE(harlowja): create an entry in the flow detail path
        # for the provided atom detail so that a reference exists
        # from the flow detail to its atom details.
        if not self._client.exists(ad_path):
            txn.create(ad_path)
        e_fd.add(self._update_atom_details(ad, txn, create_missing=True))
    return e_fd
def test_task_detail_with_failure(self):
    """A captured task failure round-trips through the backend intact."""
    book_id = uuidutils.generate_uuid()
    book = logbook.LogBook(name='lb-%s' % (book_id), uuid=book_id)
    flow_det = logbook.FlowDetail('test', uuid=uuidutils.generate_uuid())
    book.add(flow_det)
    task_det = logbook.TaskDetail("detail-1", uuid=uuidutils.generate_uuid())
    # Capture a real failure (with traceback) onto the task detail.
    try:
        raise RuntimeError('Woot!')
    except Exception:
        task_det.failure = misc.Failure()
    flow_det.add(task_det)
    with contextlib.closing(self._get_connection()) as conn:
        conn.save_logbook(book)
        conn.update_flow_details(flow_det)
        conn.update_task_details(task_det)
    # Read failure back
    with contextlib.closing(self._get_connection()) as conn:
        book2 = conn.get_logbook(book_id)
        task_det2 = book2.find(flow_det.uuid).find(task_det.uuid)
        failure = task_det2.failure
        self.assertEqual(failure.exception_str, 'Woot!')
        self.assertIs(failure.check(RuntimeError), RuntimeError)
        self.assertEqual(failure.traceback_str,
                         task_det.failure.traceback_str)
def test_retry_detail_save_intention(self):
    """Changing a retry detail's intention and re-saving persists it."""
    book_id = uuidutils.generate_uuid()
    book = logbook.LogBook(name='lb-%s' % (book_id), uuid=book_id)
    flow_det = logbook.FlowDetail('test', uuid=uuidutils.generate_uuid())
    book.add(flow_det)
    retry_det = logbook.RetryDetail("retry-1", uuid=uuidutils.generate_uuid())
    flow_det.add(retry_det)
    # save it
    with contextlib.closing(self._get_connection()) as conn:
        conn.save_logbook(book)
        conn.update_flow_details(flow_det)
        conn.update_atom_details(retry_det)
    # change intention and save
    retry_det.intention = states.REVERT
    with contextlib.closing(self._get_connection()) as conn:
        conn.update_atom_details(retry_det)
    # now read it back
    with contextlib.closing(self._get_connection()) as conn:
        book2 = conn.get_logbook(book_id)
        retry_det2 = book2.find(flow_det.uuid).find(retry_det.uuid)
        self.assertEqual(retry_det2.intention, states.REVERT)
        self.assertIsInstance(retry_det2, logbook.RetryDetail)
def run_poster():
    """Post a single job describing the bottle-song flow, then exit."""
    # This just posts a single job and then ends...
    print("Starting poster with pid: %s" % ME)
    my_name = "poster-%s" % ME
    persist_backend = persistence_backends.fetch(PERSISTENCE_URI)
    with contextlib.closing(persist_backend):
        with contextlib.closing(persist_backend.get_connection()) as conn:
            # Make sure the backend schema/layout is ready before using it.
            conn.upgrade()
        job_backend = job_backends.fetch(my_name, JB_CONF,
                                         persistence=persist_backend)
        job_backend.connect()
        with contextlib.closing(job_backend):
            # Create information in the persistence backend about the
            # unit of work we want to complete and the factory that
            # can be called to create the tasks that the work unit needs
            # to be done.
            lb = logbook.LogBook("post-from-%s" % my_name)
            fd = logbook.FlowDetail("song-from-%s" % my_name,
                                    uuidutils.generate_uuid())
            lb.add(fd)
            with contextlib.closing(persist_backend.get_connection()) as conn:
                conn.save_logbook(lb)
            # Record the factory + args needed to rebuild the flow later.
            engines.save_factory_details(fd, make_bottles,
                                         [HOW_MANY_BOTTLES], {},
                                         backend=persist_backend)
            # Post, and be done with it!
            jb = job_backend.post("song-from-%s" % my_name, book=lb)
            print("Posted: %s" % jb)
            print("Goodbye...")
def test_retry_detail_save_with_task_failure(self):
    """A failure stored inside retry results survives a save/load cycle."""
    book_id = uuidutils.generate_uuid()
    book = logbook.LogBook(name='lb-%s' % (book_id), uuid=book_id)
    flow_det = logbook.FlowDetail('test', uuid=uuidutils.generate_uuid())
    book.add(flow_det)
    retry_det = logbook.RetryDetail("retry-1", uuid=uuidutils.generate_uuid())
    fail = misc.Failure.from_exception(RuntimeError('fail'))
    retry_det.results.append((42, {'some-task': fail}))
    flow_det.add(retry_det)
    # save it
    with contextlib.closing(self._get_connection()) as conn:
        conn.save_logbook(book)
        conn.update_flow_details(flow_det)
        conn.update_atom_details(retry_det)
    # now read it back
    with contextlib.closing(self._get_connection()) as conn:
        book2 = conn.get_logbook(book_id)
        retry_det2 = book2.find(flow_det.uuid).find(retry_det.uuid)
        self.assertIsInstance(retry_det2, logbook.RetryDetail)
        fail2 = retry_det2.results[0][1].get('some-task')
        self.assertIsInstance(fail2, misc.Failure)
        self.assertTrue(fail.matches(fail2))
def _convert_fd_to_external(fd):
    """Return a detached copy of the given flow detail and its atom details."""
    clone = logbook.FlowDetail(fd.name, uuid=fd.uuid)
    clone.meta = fd.meta
    clone.state = fd.state
    for atom_det in fd.atomdetails:
        clone.add(_convert_ad_to_external(atom_det))
    return clone
def test_task_detail_meta_update(self):
    """Updating a task detail's meta and re-saving persists the change."""
    book_id = uuidutils.generate_uuid()
    book = logbook.LogBook(name='lb-%s' % (book_id), uuid=book_id)
    flow_det = logbook.FlowDetail('test', uuid=uuidutils.generate_uuid())
    book.add(flow_det)
    task_det = logbook.TaskDetail("detail-1", uuid=uuidutils.generate_uuid())
    task_det.meta = {'test': 42}
    flow_det.add(task_det)
    with contextlib.closing(self._get_connection()) as conn:
        conn.save_logbook(book)
        conn.update_flow_details(flow_det)
        conn.update_atom_details(task_det)
    # Mutate the metadata and push just the atom update.
    task_det.meta['test'] = 43
    with contextlib.closing(self._get_connection()) as conn:
        conn.update_atom_details(task_det)
    with contextlib.closing(self._get_connection()) as conn:
        book2 = conn.get_logbook(book_id)
        task_det2 = book2.find(flow_det.uuid).find(task_det.uuid)
        self.assertEqual(task_det2.meta.get('test'), 43)
        self.assertIsInstance(task_det2, logbook.TaskDetail)
def submit_task(self, flow_factory, **kwargs):
    """submit a task. """
    # Build a logbook + flow detail describing the flow to run, persist
    # them, then post a job on the board carrying kwargs as the 'store'.
    with self.persistence as persistence:
        with self.driver.job_board(
                self.jobboard_backend_conf_worker.copy(),
                persistence=persistence) as board:
            job_id = uuidutils.generate_uuid()
            job_name = '-'.join([flow_factory.__name__, job_id])
            job_logbook = logbook.LogBook(job_name)
            flow_detail = logbook.FlowDetail(job_name,
                                             uuidutils.generate_uuid())
            factory_args = ()
            factory_kwargs = {}
            # Record the factory so a conductor can recreate the flow.
            engines.save_factory_details(flow_detail, flow_factory,
                                         factory_args, factory_kwargs)
            job_logbook.add(flow_detail)
            persistence.get_connection().save_logbook(job_logbook)
            job_details = {
                'store': kwargs
            }
            job = board.post(job_name, book=job_logbook,
                             details=job_details)
            LOG.info("Posted: {0}".format(job))
def test_flow_detail_update_not_existing(self):
    """Re-saving a logbook with a new flow detail stores the new detail."""
    book_id = uuidutils.generate_uuid()
    book = logbook.LogBook(name='lb-%s' % (book_id), uuid=book_id)
    first = logbook.FlowDetail('test', uuid=uuidutils.generate_uuid())
    book.add(first)
    with contextlib.closing(self._get_connection()) as conn:
        conn.save_logbook(book)
    # Add a second (previously unknown) flow detail and save again.
    second = logbook.FlowDetail('test-2', uuid=uuidutils.generate_uuid())
    book.add(second)
    with contextlib.closing(self._get_connection()) as conn:
        conn.save_logbook(book)
    # Both details must now be present in the stored logbook.
    with contextlib.closing(self._get_connection()) as conn:
        fetched = conn.get_logbook(book.uuid)
        self.assertIsNotNone(fetched.find(first.uuid))
        self.assertIsNotNone(fetched.find(second.uuid))
def test_logbook_lazy_fetch(self):
    """A lazy logbook fetch leaves its flow details unloaded."""
    book_id = uuidutils.generate_uuid()
    book = logbook.LogBook(name='lb-%s' % (book_id), uuid=book_id)
    book.add(logbook.FlowDetail('test', uuid=uuidutils.generate_uuid()))
    with contextlib.closing(self._get_connection()) as conn:
        conn.save_logbook(book)
    with contextlib.closing(self._get_connection()) as conn:
        fetched = conn.get_logbook(book_id, lazy=True)
        # Lazy fetch: no flow details loaded, unlike the local copy.
        self.assertEqual(0, len(fetched))
        self.assertEqual(1, len(book))
def test_logbook_add_flow_detail(self):
    """A flow detail added before saving is retrievable afterwards."""
    book_id = uuidutils.generate_uuid()
    book = logbook.LogBook(name='lb-%s' % (book_id), uuid=book_id)
    flow_det = logbook.FlowDetail('test', uuid=uuidutils.generate_uuid())
    book.add(flow_det)
    with contextlib.closing(self._get_connection()) as conn:
        conn.save_logbook(book)
    with contextlib.closing(self._get_connection()) as conn:
        fetched = conn.get_logbook(book_id)
        self.assertEqual(1, len(fetched))
        self.assertEqual(1, len(book))
        self.assertEqual(flow_det.name, fetched.find(flow_det.uuid).name)
def test_flow_detail_lazy_fetch(self):
    """A lazy flow detail fetch leaves its task details unloaded."""
    book_id = uuidutils.generate_uuid()
    book = logbook.LogBook(name='lb-%s' % (book_id), uuid=book_id)
    flow_det = logbook.FlowDetail('test', uuid=uuidutils.generate_uuid())
    task_det = logbook.TaskDetail("detail-1", uuid=uuidutils.generate_uuid())
    task_det.version = '4.2'
    flow_det.add(task_det)
    book.add(flow_det)
    with contextlib.closing(self._get_connection()) as conn:
        conn.save_logbook(book)
    with contextlib.closing(self._get_connection()) as conn:
        fetched = conn.get_flow_details(flow_det.uuid, lazy=True)
        # Lazy fetch: no task details loaded, unlike the local copy.
        self.assertEqual(0, len(fetched))
        self.assertEqual(1, len(flow_det))
def temporary_flow_detail(backend=None):
    """Creates a temporary flow detail and logbook in the given backend.

    Mainly useful for tests and other use cases where a temporary flow
    detail and a temporary logbook is needed for a short-period of time.
    """
    detail_uuid = uuidutils.generate_uuid()
    book = temporary_log_book(backend)
    book.add(logbook.FlowDetail(name='tmp-flow-detail', uuid=detail_uuid))
    if backend is not None:
        with contextlib.closing(backend.get_connection()) as conn:
            conn.save_logbook(book)
    # Return the one from the saved logbook instead of the local one so
    # that the freshest version is given back.
    return book, book.find(detail_uuid)
def test_flow_detail_save(self):
    """Flow details can only be updated once their logbook is saved."""
    book_id = uuidutils.generate_uuid()
    book = logbook.LogBook(name='lb-%s' % (book_id), uuid=book_id)
    flow_det = logbook.FlowDetail('test', uuid=uuidutils.generate_uuid())
    book.add(flow_det)
    # Ensure we can't save it since its owning logbook hasn't been
    # saved (flow details can not exist on their own without a connection
    # to a logbook).
    with contextlib.closing(self._get_connection()) as conn:
        self.assertRaises(exc.NotFound, conn.get_logbook, book_id)
        self.assertRaises(exc.NotFound, conn.update_flow_details, flow_det)
    # Ok now we should be able to save both.
    with contextlib.closing(self._get_connection()) as conn:
        conn.save_logbook(book)
        conn.update_flow_details(flow_det)
def test_flow_detail_meta_update(self):
    """Updating a flow detail's meta and re-saving persists the change."""
    book_id = uuidutils.generate_uuid()
    book = logbook.LogBook(name='lb-%s' % (book_id), uuid=book_id)
    flow_det = logbook.FlowDetail('test', uuid=uuidutils.generate_uuid())
    flow_det.meta = {'test': 42}
    book.add(flow_det)
    with contextlib.closing(self._get_connection()) as conn:
        conn.save_logbook(book)
        conn.update_flow_details(flow_det)
    # Mutate the metadata and push just the flow detail update.
    flow_det.meta['test'] = 43
    with contextlib.closing(self._get_connection()) as conn:
        conn.update_flow_details(flow_det)
    with contextlib.closing(self._get_connection()) as conn:
        fetched = conn.get_logbook(book_id)
        self.assertEqual(fetched.find(flow_det.uuid).meta.get('test'), 43)
def test_task_detail_retry_type_(self):
    """Retry details keep their type and intention across a save/load."""
    book_id = uuidutils.generate_uuid()
    book = logbook.LogBook(name='lb-%s' % (book_id), uuid=book_id)
    flow_det = logbook.FlowDetail('test', uuid=uuidutils.generate_uuid())
    book.add(flow_det)
    retry_det = logbook.RetryDetail("detail-1",
                                    uuid=uuidutils.generate_uuid())
    retry_det.intention = states.REVERT
    flow_det.add(retry_det)
    with contextlib.closing(self._get_connection()) as conn:
        conn.save_logbook(book)
        conn.update_flow_details(flow_det)
        conn.update_atom_details(retry_det)
    with contextlib.closing(self._get_connection()) as conn:
        fetched = conn.get_logbook(book_id)
        retry_det2 = fetched.find(flow_det.uuid).find(retry_det.uuid)
        self.assertEqual(retry_det2.intention, states.REVERT)
        self.assertIsInstance(retry_det2, logbook.RetryDetail)
def test_task_detail_save(self):
    """Task details can only be updated after their parents are saved."""
    book_id = uuidutils.generate_uuid()
    book = logbook.LogBook(name='lb-%s' % (book_id), uuid=book_id)
    flow_det = logbook.FlowDetail('test', uuid=uuidutils.generate_uuid())
    book.add(flow_det)
    task_det = logbook.TaskDetail("detail-1", uuid=uuidutils.generate_uuid())
    flow_det.add(task_det)
    # Ensure we can't save it since its owning logbook hasn't been
    # saved (flow details/task details can not exist on their own without
    # their parent existing).
    with contextlib.closing(self._get_connection()) as conn:
        self.assertRaises(exc.NotFound, conn.update_flow_details, flow_det)
        self.assertRaises(exc.NotFound, conn.update_atom_details, task_det)
    # Ok now we should be able to save them.
    with contextlib.closing(self._get_connection()) as conn:
        conn.save_logbook(book)
        conn.update_flow_details(flow_det)
        conn.update_atom_details(task_det)
def save_logbook(self, book):
    """Save/merge the given logbook (and its flow details) into the backend.

    :param book: logbook to save/merge.
    :returns: the backend's merged logbook model.
    """
    # Fetch the existing stored logbook, making a fresh one on a miss.
    try:
        stored_book = self.backend.log_books[book.uuid]
    except KeyError:
        stored_book = logbook.LogBook(book.name, uuid=book.uuid)
        self.backend.log_books[stored_book.uuid] = stored_book
    stored_book.merge(book, deep_copy=True)
    # Merge in any flow details not already known to the stored logbook.
    for fd in book:
        try:
            stored_fd = self.backend.flow_details[fd.uuid]
        except KeyError:
            stored_fd = logbook.FlowDetail(fd.name, fd.uuid)
            stored_book.add(stored_fd)
            self.backend.flow_details[stored_fd.uuid] = stored_fd
        stored_fd.merge(fd, deep_copy=True)
        self._save_flowdetail_atoms(stored_fd, fd)
    return stored_book
def create_flow_detail(flow, book=None, backend=None, meta=None):
    """Creates a flow detail for a flow & adds & saves it in a logbook.

    This will create a flow detail for the given flow using the flow name,
    and add it to the provided logbook and then uses the given backend to
    save the logbook and then returns the created flow detail.

    If no book is provided a temporary one will be created automatically (no
    reference to the logbook will be returned, so this should nearly *always*
    be provided or only used in situations where no logbook is needed, for
    example in tests). If no backend is provided then no saving will occur and
    the created flow detail will not be persisted even if the flow detail was
    added to a given (or temporarily generated) logbook.
    """
    flow_id = uuidutils.generate_uuid()
    flow_name = getattr(flow, 'name', None)
    if flow_name is None:
        LOG.warn("No name provided for flow %s (id %s)", flow, flow_id)
        flow_name = flow_id
    flow_detail = logbook.FlowDetail(name=flow_name, uuid=flow_id)
    if meta is not None:
        if flow_detail.meta is None:
            flow_detail.meta = {}
        flow_detail.meta.update(meta)
    if book is None and backend is not None:
        LOG.warn("No logbook provided for flow %s, creating one.", flow)
        book = temporary_log_book(backend)
    # Without a logbook there is nothing further to attach or save.
    if book is None:
        return flow_detail
    book.add(flow_detail)
    if backend is not None:
        with contextlib.closing(backend.get_connection()) as conn:
            conn.save_logbook(book)
    # Return the one from the saved logbook instead of the local one so
    # that the freshest version is given back.
    return book.find(flow_id)
def post_remote_pipeline_job_and_wait(pipeline, jobboard_name):
    """Post a pipeline job and wait until it is finished."""
    my_name = POSTER_NAME
    logger.info("Starting poster with name: %s" % my_name)
    persist_backend = backend_helper.default_persistence_backend()
    with contextlib.closing(persist_backend):
        with contextlib.closing(persist_backend.get_connection()) as conn:
            # Make sure the backend schema/layout is ready for use.
            conn.upgrade()
        jobboard = backend_helper.get_jobboard(my_name, jobboard_name)
        jobboard.connect()
        with contextlib.closing(jobboard):
            # Create information in the persistence backend about the
            # unit of work we want to complete and the factory that
            # can be called to create the tasks that the work unit needs
            # to be done.
            lb = logbook.LogBook("post-from-%s" % my_name)
            flow_uuid = uuidutils.generate_uuid()
            fd = logbook.FlowDetail("flow-of-%s" % my_name, flow_uuid)
            lb.add(fd)
            with contextlib.closing(persist_backend.get_connection()) as conn:
                conn.save_logbook(lb)
            # Record the flow factory + args so a conductor can rebuild it.
            engines.save_factory_details(fd,
                                         pipeline_factory.make_pipeline_flow,
                                         [pipeline.name, True],
                                         pipeline.kwargs,
                                         backend=persist_backend)
            # Post, and be done with it!
            jb = jobboard.post("job-from-%s" % my_name, book=lb)
            logger.info('Posted: %s' % jb)
            # TODO(cbao): Move wait until into a separate method.
            # TODO(lukesneeringer): ...and fix the logging.
            state = states.UNCLAIMED
            print('Job status: %s' % state)
            # Poll (once a second) until the job reaches COMPLETE.
            while state != states.COMPLETE:
                if (jb.state != state):
                    state = jb.state
                    print('Job status: %s' % state)
                time.sleep(1)
            return jb
def test_flow_name_and_uuid(self):
    """Storage exposes the flow detail's name and uuid."""
    fd = logbook.FlowDetail(name='test-fd', uuid='aaaa')
    s = storage.Storage(flow_detail=fd)
    # assertEquals is a deprecated alias (removed in newer Python);
    # use assertEqual, matching the sibling test elsewhere in the suite.
    self.assertEqual(s.flow_name, 'test-fd')
    self.assertEqual(s.flow_uuid, 'aaaa')
def _unformat_flow_detail(uuid, fd_data):
    """Rebuild a FlowDetail from its stored dict form."""
    flow_detail = logbook.FlowDetail(name=fd_data['name'], uuid=uuid)
    # state/meta are optional in the stored form; default to None.
    flow_detail.state = fd_data.get('state')
    flow_detail.meta = fd_data.get('meta')
    return flow_detail
def test_flow_name_and_uuid(self):
    """Storage exposes the flow detail's name and uuid."""
    detail = logbook.FlowDetail(name='test-fd', uuid='aaaa')
    store = self._get_storage(detail)
    self.assertEqual(store.flow_name, 'test-fd')
    self.assertEqual(store.flow_uuid, 'aaaa')