def generate_uuid(self):
    backend = task_backend.get_backend()
    book = models.LogBook(r"{}_{}".format(
        self.name,
        datetime.datetime.now().strftime(
            xdatetime.FORMAT_WITH_SECOND_FOR_PATH)))
    with contextlib.closing(backend.get_connection()) as conn:
        conn.save_logbook(book)
    try:
        create_flow = create_flow_for_kvm
        self._engine = engines.load_from_factory(
            create_flow, backend=backend, book=book, engine='serial',
            factory_args=(self.name, self._id, self._debug))
        self._book_uuid = book.uuid
        return {
            'book_id': book.uuid,
            'flow_id': self._engine.storage.flow_uuid
        }
    except Exception as e:
        _logger.error(
            r'TakeoverKVMEntrance generate_uuid failed {}'.format(e))
        _logger.error('TakeoverKVMEntrance {}'.format(
            traceback.format_exc()))
        with contextlib.closing(backend.get_connection()) as conn:
            conn.destroy_logbook(book.uuid)
        raise e
def generate_uuid(self):
    backend = task_backend.get_backend()
    book = models.LogBook(r"{}_{}".format(
        self.name,
        datetime.datetime.now().strftime(
            xdatetime.FORMAT_WITH_SECOND_FOR_PATH)))
    with contextlib.closing(backend.get_connection()) as conn:
        conn.save_logbook(book)
    try:
        self._engine = engines.load_from_factory(
            self._flow_func, backend=backend, book=book, engine='serial',
            factory_args=(self.name, self.task_id))
        self._book_uuid = book.uuid
        return {
            'book_id': book.uuid,
            'flow_id': self._engine.storage.flow_uuid
        }
    except Exception as e:
        _logger.error(r'generate_uuid failed {}'.format(e), exc_info=True)
        with contextlib.closing(backend.get_connection()) as conn:
            conn.destroy_logbook(book.uuid)
        raise e
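The {'book_id': ..., 'flow_id': ...} pair returned above is enough to resume the same flow later. A minimal resume sketch, assuming the same task_backend module as the snippets above (load_engine_from_ids is a hypothetical helper; load_from_detail is the standard TaskFlow call):

def load_engine_from_ids(book_id, flow_id):
    backend = task_backend.get_backend()
    with contextlib.closing(backend.get_connection()) as conn:
        book = conn.get_logbook(book_id)
    flow_detail = book.find(flow_id)
    # load_from_detail rebuilds the engine from the persisted flow details,
    # so previously completed tasks are not re-executed.
    return engines.load_from_detail(flow_detail, backend=backend,
                                    engine='serial')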
def compile_flow(flow_factory, store=None, factory_args=None,
                 factory_kwargs=None):
    """Load an engine with the specified flow data, compile the flow
    object, and return the loaded engine.

    :param obj flow_factory: A function that returns a flow
    :param dict store: The store to post with the flow
    :param list factory_args: The args to pass to the flow factory during
        flow pickup time in the conductor
    :param dict factory_kwargs: The kwargs to pass to the flow factory
        during flow pickup time in the conductor
    :return obj engine: The loaded engine
    """
    engine = engines.load_from_factory(flow_factory,
                                       factory_args=factory_args,
                                       factory_kwargs=factory_kwargs,
                                       store=store)
    engine.compile()
    engine.prepare()
    engine.validate()
    return engine
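A minimal usage sketch of compile_flow, using TaskFlow's standard task and linear_flow modules; make_flow and PrintTask are hypothetical names, not part of the original source:

from taskflow import task
from taskflow.patterns import linear_flow


class PrintTask(task.Task):
    # TaskFlow derives the 'message' requirement from the execute signature,
    # so the store below must supply it.
    def execute(self, message):
        print(message)


def make_flow(name):
    # Hypothetical factory: a one-task linear flow.
    return linear_flow.Flow(name).add(PrintTask())


# Compile, prepare, and validate up front; run when ready.
engine = compile_flow(make_flow, store={'message': 'hello'},
                      factory_args=('demo-flow',))
engine.run()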
executor = futures.GreenThreadPoolExecutor(5)

# Create/fetch a logbook that will track the workflow's work.
book = None
flow_detail = None
if all([book_id, flow_id]):
    with contextlib.closing(backend.get_connection()) as conn:
        try:
            book = conn.get_logbook(book_id)
            flow_detail = book.find(flow_id)
        except exc.NotFound:
            pass
if book is None and flow_detail is None:
    book = p_utils.temporary_log_book(backend)
    engine = engines.load_from_factory(create_flow,
                                       backend=backend, book=book,
                                       engine='parallel',
                                       executor=executor)
    print("!! Your tracking id is: '%s+%s'" % (book.uuid,
                                               engine.storage.flow_uuid))
    print("!! Please submit this on later runs for tracking purposes")
else:
    # Attempt to load from a previously partially completed flow.
    engine = engines.load_from_detail(flow_detail, backend=backend,
                                      engine='parallel', executor=executor)

# Make me my vm please!
eu.print_wrapped('Running')
engine.run()

# How to use.
#
engine_conf['executor'] = e_utils.GreenExecutor(5)

# Create/fetch a logbook that will track the workflow's work.
book = None
flow_detail = None
if all([book_id, flow_id]):
    with contextlib.closing(backend.get_connection()) as conn:
        try:
            book = conn.get_logbook(book_id)
            flow_detail = book.find(flow_id)
        except exc.NotFound:
            pass
if book is None and flow_detail is None:
    book = p_utils.temporary_log_book(backend)
    engine = engines.load_from_factory(create_flow,
                                       backend=backend, book=book,
                                       engine_conf=engine_conf)
    print("!! Your tracking id is: '%s+%s'" % (book.uuid,
                                               engine.storage.flow_uuid))
    print("!! Please submit this on later runs for tracking purposes")
else:
    # Attempt to load from a previously potentially partially
    # completed flow.
    engine = engines.load_from_detail(flow_detail, backend=backend,
                                      engine_conf=engine_conf)

# Make me my vm please!
print_wrapped('Running')
engine.run()

# How to use.
#
def main():
    executor = futures.GreenThreadPoolExecutor(5)
    engine = engines.load_from_factory(create_flow, engine='parallel',
                                       executor=executor)
    engine.run()
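This snippet assumes a create_flow factory defined elsewhere; a hedged sketch of what it might look like (the Echo task is illustrative, not from the original source). Note that GreenThreadPoolExecutor lives in taskflow.types.futures and requires eventlet to be installed:

from taskflow import task
from taskflow.patterns import unordered_flow


class Echo(task.Task):
    def execute(self):
        print('ran %s' % self.name)


def create_flow():
    # Two independent tasks the parallel engine may run concurrently
    # on the green thread pool.
    return unordered_flow.Flow('demo').add(Echo('a'), Echo('b'))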
def generate_and_save(self, host_object, plan_id, is_auto=True,
                      restore_time=None, restore_host_snapshot_id=None):
    # When invoked automatically, the restore point must be analyzed first.
    if restore_time is None and restore_host_snapshot_id is None:
        info = AcquireRestoreInfo(plan_id).get_info(host_object)[0]
        if info['restore_time'] == -1:
            xlogging.raise_and_logging_error(
                'no restore time available',
                r'{}: no restore time'.format(self.name),
                http_status.HTTP_501_NOT_IMPLEMENTED)
        if info['snapshot_id'] == -1:
            xlogging.raise_and_logging_error(
                'the client has no backup data',
                r'host:{}:{}, no snapshot found'.format(
                    info['host_name'], info['host_ident']),
                http_status.HTTP_501_NOT_IMPLEMENTED)
        restore_time = info['restore_time']
        restore_host_snapshot_id = info['snapshot_id']

    task_object = self.generate_task_object(host_object.ident, is_auto,
                                             plan_id)
    self.name += r'{}'.format(task_object.id)
    self._task_id = task_object.id
    self._plan_id = plan_id

    try:
        backend = task_backend.get_backend()
        book = models.LogBook(r"{}_{}".format(
            self.name,
            datetime.datetime.now().strftime(
                xdatetime.FORMAT_WITH_SECOND_FOR_PATH)))
        with contextlib.closing(backend.get_connection()) as conn:
            conn.save_logbook(book)
    except Exception as e:
        _logger.error(r'get_backend failed {}'.format(e), exc_info=True)
        task_object.finish_datetime = timezone.now()
        task_object.save(update_fields=['finish_datetime'])
        raise e

    try:
        self._engine = engines.load_from_factory(
            create_flow, backend=backend, book=book, engine='serial',
            factory_args=(self.name, self._task_id, self._plan_id,
                          book.uuid, host_object.ident, restore_time,
                          restore_host_snapshot_id))
        self._book_uuid = book.uuid
        task_object.task_uuid = json.dumps({
            'book_id': book.uuid,
            'flow_id': self._engine.storage.flow_uuid
        })
        task_object.save(update_fields=['task_uuid'])
        return task_object
    except Exception as e:
        _logger.error(r'generate_uuid failed {}'.format(e), exc_info=True)
        with contextlib.closing(backend.get_connection()) as conn:
            conn.destroy_logbook(book.uuid)
        raise e
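These entrance classes only build and persist the engine; actually running it is a separate step. A hedged sketch of that step, reusing self._engine and self._book_uuid from above (the method name and the destroy-on-finish cleanup policy are assumptions, not from the original source):

def run(self):
    try:
        self._engine.run()
    finally:
        # Drop the logbook once the flow has finished so the persistence
        # backend does not accumulate stale entries. Keep it instead if
        # failed flows should remain resumable.
        backend = task_backend.get_backend()
        with contextlib.closing(backend.get_connection()) as conn:
            conn.destroy_logbook(self._book_uuid)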