示例#1
0
 def run(self):
     """Run the taskflow engine exactly once per logbook uuid.

     Deduplication is keyed on self._book_uuid through the module-level
     _book_ids list, guarded by _book_id_locker. Whatever happens during
     the engine run, the logbook is destroyed and the uuid de-registered
     in the finally clause, and the engine reference is dropped.
     """
     with _book_id_locker:
         if self._book_uuid in _book_ids:
             # Duplicate invocation for the same logbook: refuse to run twice.
             _logger.warning(
                 'FileSyncFlowEntrance book uuid:{} already run'.format(
                     self._book_uuid))
             return
         _book_ids.append(self._book_uuid)
     _logger.info('FileSyncFlowEntrance _book_ids:{}'.format(_book_ids))
     try:
         listener = logging_listener.DynamicLoggingListener(self._engine)
         with listener:
             self._engine.run()
     except Exception as e:
         _logger.error(
             r'FileSyncFlowEntrance run engine {} failed {}'.format(
                 self.name, e),
             exc_info=True)
     finally:
         backend = task_backend.get_backend()
         with contextlib.closing(backend.get_connection()) as conn:
             conn.destroy_logbook(self._book_uuid)
         with _book_id_locker:
             _book_ids.remove(self._book_uuid)
     self._engine = None
示例#2
0
    def generate_uuid(self):
        """Create and persist a new LogBook, then load the flow engine from it.

        The logbook name embeds self.name and a second-resolution timestamp.

        Returns:
            dict with 'book_id' (the logbook uuid) and 'flow_id' (the
            engine storage flow uuid).

        Raises:
            Exception: whatever engines.load_from_factory raised; the
            freshly persisted logbook is destroyed first so no orphan
            logbook is left behind.
        """
        backend = task_backend.get_backend()
        book = models.LogBook(r"{}_{}".format(
            self.name,
            datetime.datetime.now().strftime(
                xdatetime.FORMAT_WITH_SECOND_FOR_PATH)))
        with contextlib.closing(backend.get_connection()) as conn:
            conn.save_logbook(book)

        try:
            self._engine = engines.load_from_factory(
                self._flow_func,
                backend=backend,
                book=book,
                engine='serial',
                factory_args=(self.name, self.task_id))

            self._book_uuid = book.uuid
            return {
                'book_id': book.uuid,
                'flow_id': self._engine.storage.flow_uuid
            }
        except Exception as e:
            _logger.error(r'generate_uuid failed {}'.format(e), exc_info=True)
            # Clean up the logbook persisted above before propagating.
            with contextlib.closing(backend.get_connection()) as conn:
                conn.destroy_logbook(book.uuid)
            # Bare raise keeps the original traceback; `raise e` would
            # re-anchor it at this line.
            raise
示例#3
0
    def generate_uuid(self):
        """Create and persist a new LogBook and load the KVM takeover flow engine.

        The logbook name embeds self.name and a second-resolution timestamp.

        Returns:
            dict with 'book_id' (the logbook uuid) and 'flow_id' (the
            engine storage flow uuid).

        Raises:
            Exception: whatever engines.load_from_factory raised; the
            freshly persisted logbook is destroyed first.
        """
        backend = task_backend.get_backend()
        book = models.LogBook(r"{}_{}".format(
            self.name,
            datetime.datetime.now().strftime(
                xdatetime.FORMAT_WITH_SECOND_FOR_PATH)))
        with contextlib.closing(backend.get_connection()) as conn:
            conn.save_logbook(book)

        try:
            # Pass the KVM flow factory directly; no local alias needed.
            self._engine = engines.load_from_factory(
                create_flow_for_kvm,
                backend=backend,
                book=book,
                engine='serial',
                factory_args=(self.name, self._id, self._debug))

            self._book_uuid = book.uuid
            return {
                'book_id': book.uuid,
                'flow_id': self._engine.storage.flow_uuid
            }
        except Exception as e:
            _logger.error(
                r'TakeoverKVMEntrance generate_uuid failed {}'.format(e))
            _logger.error('TakeoverKVMEntrance {}'.format(
                traceback.format_exc()))
            # Clean up the logbook persisted above before propagating.
            with contextlib.closing(backend.get_connection()) as conn:
                conn.destroy_logbook(book.uuid)
            # Bare raise keeps the original traceback; `raise e` would
            # re-anchor it at this line.
            raise
示例#4
0
 def load_from_uuid(self, task_uuid):
     """Restore the serial engine from an existing logbook/flow pair.

     task_uuid is a mapping carrying 'book_id' and 'flow_id' keys (the
     shape produced by generate_uuid elsewhere in this file).
     """
     book_id = task_uuid['book_id']
     flow_id = task_uuid['flow_id']
     backend = task_backend.get_backend()
     with contextlib.closing(backend.get_connection()) as conn:
         logbook = conn.get_logbook(book_id)
         detail = logbook.find(flow_id)
     self._engine = engines.load_from_detail(
         detail, backend=backend, engine='serial')
     self._book_uuid = logbook.uuid
     self.name += r' load exist uuid {} {}'.format(book_id, flow_id)
示例#5
0
 def run(self):
     """Run the analyze engine; always destroy the logbook and finish the
     strategy afterwards, then drop the engine reference.
     """
     try:
         with logging_listener.DynamicLoggingListener(self._engine):
             self._engine.run()
     except Exception as e:
         _logger.error(r'WebAnalyze run engine {} failed {}'.format(self.name, e), exc_info=True)
     finally:
         with contextlib.closing(task_backend.get_backend().get_connection()) as conn:
             conn.destroy_logbook(self._book_uuid)
         # NOTE(review): if destroy_logbook raises, finish_strategy is
         # skipped — confirm whether that ordering is intentional.
         self.finish_strategy(self._policy_id)
     self._engine = None
示例#6
0
 def run(self):
     """Drive the takeover engine; the logbook is destroyed no matter
     whether the run succeeds, then the engine reference is dropped.
     """
     engine = self._engine
     try:
         with logging_listener.DynamicLoggingListener(engine):
             engine.run()
     except Exception as e:
         _logger.error(
             r'TakeoverKVMEntrance run engine {} failed {}'.format(
                 self.name, e))
         _logger.error('TakeoverKVMEntrance {}'.format(
             traceback.format_exc()))
     finally:
         backend = task_backend.get_backend()
         with contextlib.closing(backend.get_connection()) as conn:
             conn.destroy_logbook(self._book_uuid)
     self._engine = None
示例#7
0
    def generate_and_save(self,
                          host_object,
                          plan_id,
                          is_auto=True,
                          restore_time=None,
                          restore_host_snapshot_id=None):
        """Create a restore task, persist a logbook and load the flow engine.

        When both restore_time and restore_host_snapshot_id are None (the
        automatic path), the restore point is resolved via
        AcquireRestoreInfo; a 501 error is raised when no restore time or
        snapshot is available.

        Returns:
            The saved task object with its task_uuid field populated as a
            JSON document of {'book_id', 'flow_id'}.

        Raises:
            Exception: on backend failure (after marking the task object
            finished) or on engine-load failure (after destroying the
            just-created logbook).
        """
        # Automatic invocation must analyze the restore info first.
        if restore_time is None and restore_host_snapshot_id is None:
            info = AcquireRestoreInfo(plan_id).get_info(host_object)[0]
            if info['restore_time'] == -1:
                # NOTE(review): the debug string has no '{}' placeholder,
                # so .format(self.name) is a no-op — confirm intent.
                xlogging.raise_and_logging_error(
                    '没有可用的还原时间', r'not restore time'.format(self.name),
                    http_status.HTTP_501_NOT_IMPLEMENTED)
            if info['snapshot_id'] == -1:
                xlogging.raise_and_logging_error(
                    '客户端没有备份数据', r'host:{}:{},not snapshot find'.format(
                        info['host_name'], info['host_ident']),
                    http_status.HTTP_501_NOT_IMPLEMENTED)
            restore_time = info['restore_time']
            restore_host_snapshot_id = info['snapshot_id']

        task_object = self.generate_task_object(host_object.ident, is_auto,
                                                plan_id)
        self.name += r'{}'.format(task_object.id)
        self._task_id = task_object.id
        self._plan_id = plan_id

        try:
            backend = task_backend.get_backend()
            book = models.LogBook(r"{}_{}".format(
                self.name,
                datetime.datetime.now().strftime(
                    xdatetime.FORMAT_WITH_SECOND_FOR_PATH)))
            with contextlib.closing(backend.get_connection()) as conn:
                conn.save_logbook(book)
        except Exception as e:
            _logger.error(r'get_backend failed {}'.format(e), exc_info=True)
            # Backend unavailable: close out the task object so it does not
            # linger as an apparently-running task.
            task_object.finish_datetime = timezone.now()
            task_object.save(update_fields=['finish_datetime'])
            # Bare raise keeps the original traceback; `raise e` would
            # re-anchor it at this line.
            raise

        try:
            self._engine = engines.load_from_factory(
                create_flow,
                backend=backend,
                book=book,
                engine='serial',
                factory_args=(self.name, self._task_id, self._plan_id,
                              book.uuid, host_object.ident, restore_time,
                              restore_host_snapshot_id))

            self._book_uuid = book.uuid

            task_object.task_uuid = json.dumps({
                'book_id':
                book.uuid,
                'flow_id':
                self._engine.storage.flow_uuid
            })
            task_object.save(update_fields=['task_uuid'])
            return task_object
        except Exception as e:
            _logger.error(r'generate_uuid failed {}'.format(e), exc_info=True)
            # Destroy the logbook persisted above before propagating.
            with contextlib.closing(backend.get_connection()) as conn:
                conn.destroy_logbook(book.uuid)
            raise