Example #1
 def test_service_disabled_on_create_based_on_flag(self):
     self.flags(enable_new_services=False)
     host = 'foo'
     binary = 'karbor-fake'
     app = service.Service.create(host=host, binary=binary)
     app.start()
     app.stop()
     ref = db.service_get(context.get_admin_context(), app.service_id)
     db.service_destroy(context.get_admin_context(), app.service_id)
     self.assertTrue(ref['disabled'])
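All of the examples on this page follow the same basic pattern: build an elevated admin context with context.get_admin_context() and pass it to a db or objects call. Below is a minimal, hedged sketch of that pattern; the imports follow the karbor module layout used in the examples, and list_enabled_services is a made-up helper name, not karbor code.

    from karbor import context
    from karbor import db


    def list_enabled_services():
        # get_admin_context() builds an elevated admin context, which the db
        # API accepts for service-level queries (see Examples #46 and #53).
        ctxt = context.get_admin_context()
        return [svc for svc in db.service_get_all(ctxt)
                if not svc['disabled']]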
Example #2
 def test_service_disabled_on_create_based_on_flag(self):
     self.flags(enable_new_services=False)
     host = 'foo'
     binary = 'karbor-fake'
     app = service.Service.create(host=host, binary=binary)
     app.start()
     app.stop()
     ref = db.service_get(context.get_admin_context(), app.service_id)
     db.service_destroy(context.get_admin_context(), app.service_id)
     self.assertTrue(ref['disabled'])
Example #3
 def kill(self):
     """Destroy the service object in the datastore."""
     self.stop()
     try:
         db.service_destroy(context.get_admin_context(), self.service_id)
     except exception.NotFound:
         LOG.warning(_LW('Service killed that has no database entry'))
Example #4
    def setUp(self):
        super(ThreadPoolExecutorTestCase, self).setUp()

        self._operation_manager = FakeOperationManager()
        self._executor = thread_pool_executor.ThreadPoolExecutor(
            self._operation_manager)
        self.context = context.get_admin_context()
Example #5
    def _restore_operations(self):
        limit = 100
        marker = None
        filters = {"service_id": self._service_id,
                   "state": [constants.OPERATION_STATE_REGISTERED,
                             constants.OPERATION_STATE_RUNNING]}
        columns_to_join = ['operation']
        ctxt = karbor_context.get_admin_context()
        resume_states = [constants.OPERATION_STATE_RUNNING, ]
        while True:
            states = objects.ScheduledOperationStateList.get_by_filters(
                ctxt, filters, limit, marker, columns_to_join=columns_to_join)
            if not states:
                break

            for state in states:
                operation = state.operation
                if not operation.enabled:
                    continue

                resume = (state.state in resume_states)
                self.trigger_manager.register_operation(
                    operation.trigger_id, operation.id,
                    resume=resume, end_time_for_run=state.end_time_for_run)

                self.user_trust_manager.resume_operation(
                    operation.id, operation.user_id,
                    operation.project_id, state.trust_id)
            if len(states) < limit:
                break
            marker = states[-1].id
Example #6
    def _run_operation(self, operation_id, param):

        self._update_operation_state(
            operation_id,
            {'state': constants.OPERATION_STATE_RUNNING})

        try:
            check_item = [self._CHECK_ITEMS['is_canceled']]
            if self._check_operation(operation_id, check_item):
                return

            try:
                operation = objects.ScheduledOperation.get_by_id(
                    context.get_admin_context(), operation_id)
            except Exception:
                LOG.exception(_LE("Run operation(%s), get operation failed"),
                              operation_id)
                return

            try:
                param['user_id'] = operation.user_id
                param['project_id'] = operation.project_id

                self._operation_manager.run_operation(
                    operation.operation_type,
                    operation.operation_definition,
                    param=param)
            except Exception:
                LOG.exception(_LE("Run operation(%s) failed"), operation_id)

        finally:
            self._update_operation_state(
                operation_id,
                {'state': constants.OPERATION_STATE_REGISTERED})
Example #7
    def test_resume(self, client):
        log = self._create_operation_log(self._operation_db.id)
        client.return_value = self._fake_karbor_client
        now = datetime.utcnow()
        param = {
            'operation_id': self._operation_db.id,
            'triggered_time': now,
            'expect_start_time': now,
            'window_time': 30,
            'run_type': constants.OPERATION_RUN_TYPE_RESUME,
            'user_id': self._operation_db.user_id,
            'project_id': self._operation_db.project_id
        }
        self._operation_class.run(self._operation_db.operation_definition,
                                  param=param)

        logs = objects.ScheduledOperationLogList.get_by_filters(
            context.get_admin_context(), {
                'state': constants.OPERATION_EXE_STATE_SUCCESS,
                'operation_id': self._operation_db.id
            }, 1, None, ['created_at'], ['desc'])

        self.assertIsNotNone(logs)
        log1 = logs.objects[0]
        self.assertEqual(log.id, log1.id)
Example #8
    def test_resume(self, client):
        log = self._create_operation_log(self._operation_db.id)
        client.return_value = self._fake_karbor_client
        now = datetime.utcnow()
        param = {
            'operation_id': self._operation_db.id,
            'triggered_time': now,
            'expect_start_time': now,
            'window_time': 30,
            'run_type': constants.OPERATION_RUN_TYPE_RESUME,
            'user_id': self._operation_db.user_id,
            'project_id': self._operation_db.project_id
        }
        self._operation_class.run(self._operation_db.operation_definition,
                                  param=param)

        logs = objects.ScheduledOperationLogList.get_by_filters(
            context.get_admin_context(),
            {'state': constants.OPERATION_EXE_STATE_SUCCESS,
             'operation_id': self._operation_db.id}, 1,
            None, ['created_at'], ['desc'])

        self.assertIsNotNone(logs)
        log1 = logs.objects[0]
        self.assertEqual(log.id, log1.id)
Example #9
    def _run_operation(self, operation_id, param):

        self._update_operation_state(
            operation_id, {'state': constants.OPERATION_STATE_RUNNING})

        try:
            check_item = [self._CHECK_ITEMS['is_canceled']]
            if self._check_operation(operation_id, check_item):
                return

            try:
                operation = objects.ScheduledOperation.get_by_id(
                    context.get_admin_context(), operation_id)
            except Exception:
                LOG.exception("Run operation(%s), get operation failed",
                              operation_id)
                return

            try:
                param['user_id'] = operation.user_id
                param['project_id'] = operation.project_id

                self._operation_manager.run_operation(
                    operation.operation_type,
                    operation.operation_definition,
                    param=param)
            except Exception:
                LOG.exception("Run operation(%s) failed", operation_id)

        finally:
            self._update_operation_state(
                operation_id, {'state': constants.OPERATION_STATE_REGISTERED})
Example #10
    def setUp(self):
        super(ThreadPoolExecutorTestCase, self).setUp()

        with mock.patch.object(operation_manager.OperationManager, 'do_init'):
            self._executor = thread_pool_executor.ThreadPoolExecutor()
            self._executor._operation_manager = FakeOperationManager()
            self.context = context.get_admin_context()
Example #11
    def _run_operation(self, operation_id, param):

        try:
            try:
                operation = objects.ScheduledOperation.get_by_id(
                    context.get_admin_context(), operation_id)
            except Exception:
                LOG.exception("Run operation(%s), get operation failed",
                              operation_id)
                return

            try:
                param['user_id'] = operation.user_id
                param['project_id'] = operation.project_id
                param['trigger_id'] = operation.trigger_id
                param['scheduled_operation_id'] = operation.id

                self._operation_manager.run_operation(
                    operation.operation_type,
                    operation.operation_definition,
                    param=param)
            except Exception:
                LOG.exception("Run operation(%s) failed", operation_id)

        finally:
            self._update_operation_state(
                operation_id, {'state': constants.OPERATION_STATE_REGISTERED})
Example #12
 def kill(self):
     """Destroy the service object in the datastore."""
     self.stop()
     try:
         db.service_destroy(context.get_admin_context(), self.service_id)
     except exception.NotFound:
         LOG.warning('Service killed that has no database entry')
Example #13
    def test_execute(self, client):
        client.return_value = self._fake_karbor_client
        self._fake_karbor_client.create_all_check_points()
        now = datetime.utcnow()
        param = {
            'operation_id': self._operation_db.id,
            'triggered_time': now,
            'expect_start_time': now,
            'window_time': 30,
            'run_type': constants.OPERATION_RUN_TYPE_EXECUTE,
            'user_id': self._operation_db.user_id,
            'project_id': self._operation_db.project_id
        }
        self._operation.run(self._operation_db.operation_definition,
                            param=param)

        logs = objects.ScheduledOperationLogList.get_by_filters(
            context.get_admin_context(), {
                'state': constants.OPERATION_EXE_DURATION_STATE_SUCCESS,
                'operation_id': self._operation_db.id
            }, 1, None, ['created_at'], ['desc'])
        self.assertIsNotNone(logs)
        log = logs.objects[0]
        self.assertEqual(now, log.triggered_time)
        checkpoints = self._fake_karbor_client.checkpoints.list("123")
        self.assertEqual(2, len(checkpoints))
Example #14
    def test_get_state_and_operation(self):
        ctx = context.get_admin_context()
        service, trigger, operation, state = FakeEnv(ctx).do_init()

        state_obj = self.State_Class.get_by_operation_id(self.context, operation.id, ["operation"])

        self.assertEqual(operation.id, state_obj.operation.id)
Example #15
    def setUp(self):
        super(ThreadPoolExecutorTestCase, self).setUp()

        with mock.patch.object(operation_manager.OperationManager, 'do_init'):
            self._executor = thread_pool_executor.ThreadPoolExecutor()
            self._executor._operation_manager = FakeOperationManager()
            self.context = context.get_admin_context()
Example #16
    def _trigger_execution_delete(cls, execution_id=None, trigger_id=None):
        if execution_id is None and trigger_id is None:
            raise exception.InvalidParameterValue('supply at least one id')

        ctxt = karbor_context.get_admin_context()
        num_deleted = db.trigger_execution_delete(ctxt, execution_id,
                                                  trigger_id)
        return num_deleted > 0
Example #17
    def test_get_state_and_operation(self):
        ctx = context.get_admin_context()
        service, trigger, operation, state = FakeEnv(ctx).do_init()

        state_obj = self.State_Class.get_by_operation_id(
            self.context, operation.id, ['operation'])

        self.assertEqual(operation.id, state_obj.operation.id)
Example #18
 def setUp(self):
     super(ReservationDbTestCase, self).setUp()
     self.ctxt = context.get_admin_context()
     self.project_id = "586cc6ce-e286-40bd-b2b5-dd32694d9944"
     self.resource = "volume_backups"
     self.in_use = 10
     self.reserved = 10
     self.until_refresh = 0
Example #19
 def _create_operation_log(self, operation_id):
     log_info = {
         'operation_id': operation_id,
         'state': constants.OPERATION_EXE_STATE_IN_PROGRESS,
     }
     log = objects.ScheduledOperationLog(context.get_admin_context(),
                                         **log_info)
     log.create()
     return log
Example #20
 def _trigger_execution_new(cls, trigger_id, time):
     # Record the first execution time: we don't know in advance when this
     # trigger will run for the first time.
     ctxt = karbor_context.get_admin_context()
     try:
         db.trigger_execution_create(ctxt, trigger_id, time)
         return True
     except Exception:
         return False
Example #21
 def _create_operation_log(self, operation_id):
     log_info = {
         'operation_id': operation_id,
         'state': constants.OPERATION_EXE_STATE_IN_PROGRESS,
     }
     log = objects.ScheduledOperationLog(context.get_admin_context(),
                                         **log_info)
     log.create()
     return log
Example #22
 def _delete_oldest_operation_log(self, operation_id):
     # delete the oldest logs to keep the number of logs
     # in a reasonable range
     try:
         objects.ScheduledOperationLog.destroy_oldest(
             context.get_admin_context(), operation_id,
             CONF.retained_operation_log_number)
     except Exception:
         pass
Example #23
 def _delete_oldest_operation_log(cls, operation_id):
     # delete the oldest logs to keep the number of logs
     # in a reasonable range
     try:
         objects.ScheduledOperationLog.destroy_oldest(
             context.get_admin_context(), operation_id, CONF.retained_operation_log_number
         )
     except Exception:
         pass
Example #24
    def _get_operation_log(cls, operation_id, operation_state):
        try:
            logs = objects.ScheduledOperationLogList.get_by_filters(
                context.get_admin_context(), {"state": operation_state, "operation_id": operation_id}, limit=2
            )

            return logs.objects
        except Exception:
            pass
Example #25
    def _create_karbor_client(cls, user_id, project_id):
        token = user_trust_manager.UserTrustManager().get_token(user_id, project_id)
        if not token:
            return None
        ctx = context.get_admin_context()
        ctx.auth_token = token
        ctx.project_id = project_id

        karbor_url = cls.KARBOR_ENDPOINT.replace("$(tenant_id)s", project_id)
        return karbor_client.create(ctx, endpoint=karbor_url)
Example #26
    def _create_karbor_client(self, user_id, project_id):
        token = self._user_trust_manager.get_token(user_id, project_id)
        if not token:
            return None
        ctx = context.get_admin_context()
        ctx.auth_token = token
        ctx.project_id = project_id

        karbor_url = self.karbor_endpoint % {"project_id": project_id}
        return karbor_client.create(ctx, endpoint=karbor_url)
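The _create_karbor_client variants on this page differ only in how they expand the endpoint template: Examples #25 and #28 substitute a "$(tenant_id)s" placeholder via str.replace, while Example #26 formats a "%(project_id)s" style template with the % operator. A small, self-contained sketch of both substitution styles (URL and id values are made up):

    project_id = '123'

    legacy_template = "http://127.0.0.1:9090/$(tenant_id)s"
    print(legacy_template.replace("$(tenant_id)s", project_id))
    # -> http://127.0.0.1:9090/123

    percent_template = "http://127.0.0.1:9090/%(project_id)s"
    print(percent_template % {"project_id": project_id})
    # -> http://127.0.0.1:9090/123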
Example #27
    def setUp(self):
        super(OperationEngineManagerTestCase, self).setUp()

        self.manager = service_manager.OperationEngineManager()
        self.manager._service_id = 0
        self.manager._trigger_manager = FakeTriggerManager()
        self.manager._user_trust_manager = FakeUserTrustManager()

        self.ctxt = context.get_admin_context()
        self._trigger = self._create_one_trigger()
        self._operation = self._create_scheduled_operation(self._trigger.id)
Example #28
    def _create_karbor_client(cls, user_id, project_id):
        token = user_trust_manager.UserTrustManager().get_token(
            user_id, project_id)
        if not token:
            return None
        ctx = context.get_admin_context()
        ctx.auth_token = token
        ctx.project_id = project_id

        karbor_url = cls.KARBOR_ENDPOINT.replace("$(tenant_id)s", project_id)
        return karbor_client.create(ctx, endpoint=karbor_url)
Example #29
    def setUp(self):
        super(GreenThreadExecutorTestCase, self).setUp()

        with mock.patch.object(operation_manager.OperationManager, 'do_init'):
            self._executor = green_thread_executor.GreenThreadExecutor()
            self._executor._operation_manager = FakeOperationManager()
            self.context = context.get_admin_context()

        operation = self._create_operation()
        self._create_operation_state(operation.id, 0)
        self._op_id = operation.id
Example #30
    def setUp(self):
        super(OperationEngineManagerTestCase, self).setUp()

        self.manager = service_manager.OperationEngineManager()
        self.manager._service_id = 0
        self.manager._trigger_manager = FakeTriggerManager()
        self.manager._user_trust_manager = FakeUserTrustManager()

        self.ctxt = context.get_admin_context()
        self._trigger = self._create_one_trigger()
        self._operation = self._create_scheduled_operation(self._trigger.id)
Example #31
    def setUp(self):
        super(GreenThreadExecutorTestCase, self).setUp()

        with mock.patch.object(operation_manager.OperationManager, 'do_init'):
            self._executor = green_thread_executor.GreenThreadExecutor()
            self._executor._operation_manager = FakeOperationManager()
            self.context = context.get_admin_context()

        operation = self._create_operation()
        self._create_operation_state(operation.id, 0)
        self._op_id = operation.id
Example #32
    def setUp(self):
        super(GreenThreadExecutorTestCase, self).setUp()

        self._operation_manager = FakeOperationManager()
        self._executor = green_thread_executor.GreenThreadExecutor(
            self._operation_manager)
        self.context = context.get_admin_context()

        operation = self._create_operation()
        self._create_operation_state(operation.id, 0)
        self._op_id = operation.id
Example #33
    def _update_operation_state(self, operation_id, updates):

        ctxt = context.get_admin_context()
        try:
            state_ref = objects.ScheduledOperationState.get_by_operation_id(ctxt, operation_id)
            for item, value in updates.items():
                setattr(state_ref, item, value)
            state_ref.save()
        except Exception:
            LOG.exception(_LE("Execute operation(%s), update state failed"), operation_id)
            return False
        return True
Example #34
    def _get_operation_log(self, operation_id, operation_state):
        try:
            logs = objects.ScheduledOperationLogList.get_by_filters(
                context.get_admin_context(), {
                    'state': operation_state,
                    'operation_id': operation_id
                },
                limit=2)

            return logs.objects
        except Exception:
            pass
Example #35
    def purge(self, age_in_days):
        """Purge deleted rows older than a given age from karbor tables."""
        age_in_days = int(age_in_days)
        if age_in_days <= 0:
            print(_("Must supply a positive, non-zero value for age"))
            sys.exit(1)
        ctxt = context.get_admin_context()

        try:
            db.purge_deleted_rows(ctxt, age_in_days)
        except Exception as e:
            print(_("Purge command failed, check karbor-manage "
                    "logs for more details. %s") % e)
            sys.exit(1)
Example #36
    def _update_operation_state(self, operation_id, updates):

        ctxt = context.get_admin_context()
        try:
            state_ref = objects.ScheduledOperationState.get_by_operation_id(
                ctxt, operation_id)
            for item, value in updates.items():
                setattr(state_ref, item, value)
            state_ref.save()
        except Exception:
            LOG.exception("Execute operation(%s), update state failed",
                          operation_id)
            return False
        return True
Example #37
    def report_state(self):
        """Update the state of this service in the datastore."""
        if not self.manager.is_working():
            # NOTE(dulek): If manager reports a problem we're not sending
            # heartbeats - to indicate that service is actually down.
            LOG.error(
                'Manager for service %(binary)s %(host)s is '
                'reporting problems, not sending heartbeat. '
                'Service will appear "down".', {
                    'binary': self.binary,
                    'host': self.host
                })
            return

        ctxt = context.get_admin_context()
        state_catalog = {}
        try:
            try:
                service_ref = db.service_get(ctxt, self.service_id)
            except exception.NotFound:
                LOG.debug('The service database object disappeared, '
                          'recreating it.')
                self._create_service_ref(ctxt)
                service_ref = db.service_get(ctxt, self.service_id)

            state_catalog['report_count'] = service_ref['report_count'] + 1

            db.service_update(ctxt, self.service_id, state_catalog)

            # TODO(termie): make this pattern be more elegant.
            if getattr(self, 'model_disconnected', False):
                self.model_disconnected = False
                LOG.error('Recovered model server connection!')

        except db_exc.DBConnectionError:
            if not getattr(self, 'model_disconnected', False):
                self.model_disconnected = True
                LOG.exception('model server went away')

        # NOTE(jsbryant) Other DB errors can happen in HA configurations.
        # such errors shouldn't kill this thread, so we handle them here.
        except db_exc.DBError:
            if not getattr(self, 'model_disconnected', False):
                self.model_disconnected = True
                LOG.exception('DBError encountered: ')

        except Exception:
            if not getattr(self, 'model_disconnected', False):
                self.model_disconnected = True
                LOG.exception('Exception encountered: ')
Example #38
    def report_state(self):
        """Update the state of this service in the datastore."""
        if not self.manager.is_working():
            # NOTE(dulek): If manager reports a problem we're not sending
            # heartbeats - to indicate that service is actually down.
            LOG.error(_LE('Manager for service %(binary)s %(host)s is '
                          'reporting problems, not sending heartbeat. '
                          'Service will appear "down".'),
                      {'binary': self.binary,
                       'host': self.host})
            return

        ctxt = context.get_admin_context()
        state_catalog = {}
        try:
            try:
                service_ref = db.service_get(ctxt, self.service_id)
            except exception.NotFound:
                LOG.debug('The service database object disappeared, '
                          'recreating it.')
                self._create_service_ref(ctxt)
                service_ref = db.service_get(ctxt, self.service_id)

            state_catalog['report_count'] = service_ref['report_count'] + 1

            db.service_update(ctxt,
                              self.service_id, state_catalog)

            # TODO(termie): make this pattern be more elegant.
            if getattr(self, 'model_disconnected', False):
                self.model_disconnected = False
                LOG.error(_LE('Recovered model server connection!'))

        except db_exc.DBConnectionError:
            if not getattr(self, 'model_disconnected', False):
                self.model_disconnected = True
                LOG.exception(_LE('model server went away'))

        # NOTE(jsbryant) Other DB errors can happen in HA configurations.
        # such errors shouldn't kill this thread, so we handle them here.
        except db_exc.DBError:
            if not getattr(self, 'model_disconnected', False):
                self.model_disconnected = True
                LOG.exception(_LE('DBError encountered: '))

        except Exception:
            if not getattr(self, 'model_disconnected', False):
                self.model_disconnected = True
                LOG.exception(_LE('Exception encountered: '))
Example #39
    def purge(self, age_in_days):
        """Purge deleted rows older than a given age from karbor tables."""
        age_in_days = int(age_in_days)
        if age_in_days <= 0:
            print(_("Must supply a positive, non-zero value for age"))
            sys.exit(1)
        ctxt = context.get_admin_context()

        try:
            db.purge_deleted_rows(ctxt, age_in_days)
        except Exception as e:
            print(
                _("Purge command failed, check karbor-manage "
                  "logs for more details. %s") % e)
            sys.exit(1)
Example #40
    def test_create_client(self, get_service_endpoint):
        ctx = context.get_admin_context()
        ctx.project_id = '123'

        cfg.CONF.set_default('version', '1', 'karbor_client')

        karbor_url = "http://127.0.0.1:9090"
        sc = karbor_client.create(ctx, endpoint=karbor_url)
        self.assertEqual(karbor_url, sc.http_client.endpoint)

        karbor_url = "http://127.0.0.1:9090/$(project_id)s"
        get_service_endpoint.return_value = karbor_url
        endpoint = karbor_url.replace("$(project_id)s", ctx.project_id)
        sc = karbor_client.create(ctx)
        self.assertEqual(endpoint, sc.http_client.endpoint)
Example #41
    def _restore_triggers(self):
        limit = 100
        marker = None
        filters = {}
        ctxt = karbor_context.get_admin_context()
        while True:
            triggers = objects.TriggerList.get_by_filters(ctxt, filters, limit, marker)
            if not triggers:
                break

            for trigger in triggers:
                self._trigger_manager.add_trigger(trigger.id, trigger.type, trigger.properties)
            if len(triggers) < limit:
                break
            marker = triggers[-1].id
Example #42
    def check_operation_definition(self, operation_definition):
        provider_id = operation_definition.get("provider_id")
        if not provider_id or not uuidutils.is_uuid_like(provider_id):
            reason = _("Provider_id is invalid")
            raise exception.InvalidOperationDefinition(reason=reason)

        plan_id = operation_definition.get("plan_id")
        if not plan_id or not uuidutils.is_uuid_like(plan_id):
            reason = _("Plan_id is invalid")
            raise exception.InvalidOperationDefinition(reason=reason)

        plan = objects.Plan.get_by_id(context.get_admin_context(), plan_id)
        if provider_id != plan.provider_id:
            reason = _("Provider_id is conflict")
            raise exception.InvalidOperationDefinition(reason=reason)
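For reference, a hedged example of an operation_definition dict that would pass the checks in check_operation_definition above, assuming the referenced plan was created under the same provider (both UUID-like ids are made up):

    operation_definition = {
        "provider_id": "2220f8b1-975d-4621-a872-fa9afb43cb6c",
        "plan_id": "9e5475d2-6425-4986-9136-a4f09642297f",
    }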
Example #43
    def test_create_client(self, get_service_endpoint, do_init):
        ctx = context.get_admin_context()
        ctx.project_id = '123'

        cfg.CONF.set_default('version', '1', 'karbor_client')

        karbor_url = "http://127.0.0.1:9090"
        sc = karbor_client.create(ctx, endpoint=karbor_url)
        self.assertEqual(karbor_url, sc.http_client.endpoint)

        karbor_url = "http://127.0.0.1:9090/$(tenant_id)s"
        get_service_endpoint.return_value = karbor_url
        endpoint = karbor_url.replace("$(tenant_id)s", ctx.project_id)
        sc = karbor_client.create(ctx)
        self.assertEqual(endpoint, sc.http_client.endpoint)
Example #44
    def check_operation_definition(cls, operation_definition):
        provider_id = operation_definition.get("provider_id")
        if not provider_id or not uuidutils.is_uuid_like(provider_id):
            reason = _("Provider_id is invalid")
            raise exception.InvalidOperationDefinition(reason=reason)

        plan_id = operation_definition.get("plan_id")
        if not plan_id or not uuidutils.is_uuid_like(plan_id):
            reason = _("Plan_id is invalid")
            raise exception.InvalidOperationDefinition(reason=reason)

        plan = objects.Plan.get_by_id(context.get_admin_context(), plan_id)
        if provider_id != plan.provider_id:
            reason = _("Provider_id is invalid")
            raise exception.InvalidOperationDefinition(reason=reason)
Example #45
    def _restore_triggers(self):
        limit = 100
        marker = None
        filters = {}
        ctxt = karbor_context.get_admin_context()
        while True:
            triggers = objects.TriggerList.get_by_filters(
                ctxt, filters, limit, marker)
            if not triggers:
                break

            for trigger in triggers:
                self.trigger_manager.add_trigger(trigger.id, trigger.type,
                                                 trigger.properties)
            if len(triggers) < limit:
                break
            marker = triggers[-1].id
Example #46
    def list(self):
        """Show a list of all karbor services."""

        ctxt = context.get_admin_context()
        services = db.service_get_all(ctxt)
        print_format = "%-16s %-36s %-10s %-5s %-10s"
        print(
            print_format %
            (_('Binary'), _('Host'), _('Status'), _('State'), _('Updated At')))
        for svc in services:
            alive = utils.service_is_up(svc)
            art = ":-)" if alive else "XXX"
            status = 'enabled'
            if svc['disabled']:
                status = 'disabled'
            print(print_format % (svc['binary'], svc['host'].partition('.')[0],
                                  status, art, svc['updated_at']))
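A hedged illustration of the table this list() method prints, reusing print_format from the code above with made-up service values (karbor-fake/foo are borrowed from Example #1):

    print_format = "%-16s %-36s %-10s %-5s %-10s"
    print(print_format % ('Binary', 'Host', 'Status', 'State', 'Updated At'))
    print(print_format % ('karbor-fake', 'foo', 'enabled', ':-)',
                          '2017-01-01 00:00:00'))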
Example #47
 def _create_operation(self):
     operation_info = {
         'name': 'protect vm',
         'description': 'protect vm resource',
         'operation_type': 'protect',
         'user_id': '123',
         'project_id': '123',
         'trigger_id': '123',
         'operation_definition': {
             'provider_id': '123',
             'plan_id': '123'
         }
     }
     operation = objects.ScheduledOperation(context.get_admin_context(),
                                            **operation_info)
     operation.create()
     return operation
Example #48
 def _create_operation(self):
     operation_info = {
         'name': 'protect vm',
         'description': 'protect vm resource',
         'operation_type': 'protect',
         'user_id': '123',
         'project_id': '123',
         'trigger_id': '123',
         'operation_definition': {
             'provider_id': '123',
             'plan_id': '123'
         }
     }
     operation = objects.ScheduledOperation(context.get_admin_context(),
                                            **operation_info)
     operation.create()
     return operation
Example #49
    def start(self):
        version_string = version.version_string()
        LOG.info('Starting %(topic)s node (version %(version_string)s)',
                 {'topic': self.topic, 'version_string': version_string})
        self.model_disconnected = False
        ctxt = context.get_admin_context()
        try:
            service_ref = db.service_get_by_args(ctxt,
                                                 self.host,
                                                 self.binary)
            self.service_id = service_ref['id']
        except exception.NotFound:
            self._create_service_ref(ctxt)

        self.manager.init_host(service_id=self.service_id)

        LOG.debug("Creating RPC server for service %s", self.topic)

        target = messaging.Target(topic=self.topic, server=self.host)
        endpoints = [self.manager]
        endpoints.extend(self.manager.additional_endpoints)
        serializer = objects_base.KarborObjectSerializer()
        self.rpcserver = rpc.get_server(target, endpoints, serializer)
        self.rpcserver.start()

        self.manager.init_host_with_rpc()

        if self.report_interval:
            pulse = loopingcall.FixedIntervalLoopingCall(
                self.report_state)
            pulse.start(interval=self.report_interval,
                        initial_delay=self.report_interval)
            self.timers.append(pulse)

        if self.periodic_interval:
            if self.periodic_fuzzy_delay:
                initial_delay = random.randint(0, self.periodic_fuzzy_delay)
            else:
                initial_delay = None

            periodic = loopingcall.FixedIntervalLoopingCall(
                self.periodic_tasks)
            periodic.start(interval=self.periodic_interval,
                           initial_delay=initial_delay)
            self.timers.append(periodic)
Example #50
    def start(self):
        version_string = version.version_string()
        LOG.info(_LI('Starting %(topic)s node (version %(version_string)s)'),
                 {'topic': self.topic, 'version_string': version_string})
        self.model_disconnected = False
        ctxt = context.get_admin_context()
        try:
            service_ref = db.service_get_by_args(ctxt,
                                                 self.host,
                                                 self.binary)
            self.service_id = service_ref['id']
        except exception.NotFound:
            self._create_service_ref(ctxt)

        self.manager.init_host(service_id=self.service_id)

        LOG.debug("Creating RPC server for service %s", self.topic)

        target = messaging.Target(topic=self.topic, server=self.host)
        endpoints = [self.manager]
        endpoints.extend(self.manager.additional_endpoints)
        serializer = objects_base.KarborObjectSerializer()
        self.rpcserver = rpc.get_server(target, endpoints, serializer)
        self.rpcserver.start()

        self.manager.init_host_with_rpc()

        if self.report_interval:
            pulse = loopingcall.FixedIntervalLoopingCall(
                self.report_state)
            pulse.start(interval=self.report_interval,
                        initial_delay=self.report_interval)
            self.timers.append(pulse)

        if self.periodic_interval:
            if self.periodic_fuzzy_delay:
                initial_delay = random.randint(0, self.periodic_fuzzy_delay)
            else:
                initial_delay = None

            periodic = loopingcall.FixedIntervalLoopingCall(
                self.periodic_tasks)
            periodic.start(interval=self.periodic_interval,
                           initial_delay=initial_delay)
            self.timers.append(periodic)
Example #51
    def _create_operation_log(cls, param, updated_log_info=None):
        log_info = {
            "operation_id": param["operation_id"],
            "expect_start_time": param["expect_start_time"],
            "triggered_time": param["triggered_time"],
            "actual_start_time": datetime.utcnow(),
            "state": constants.OPERATION_EXE_STATE_IN_PROGRESS,
        }
        if updated_log_info:
            log_info.update(updated_log_info)

        log_ref = objects.ScheduledOperationLog(context.get_admin_context(), **log_info)
        try:
            log_ref.create()
        except Exception:
            LOG.exception(_LE("Execute operation(%s), create log obj failed"), param["operation_id"])
            return
        return log_ref
Example #52
    def setUp(self):
        super(PurgeDeletedTest, self).setUp()
        self.context = context.get_admin_context()
        self.engine = db_api.get_engine()
        self.session = db_api.get_session()
        self.conn = self.engine.connect()
        self.plans = sqlalchemyutils.get_table(
            self.engine, "plans")
        # The resources table has a FK of plans.id
        self.resources = sqlalchemyutils.get_table(
            self.engine, "resources")

        self.uuidstrs = []
        for unused in range(6):
            self.uuidstrs.append(uuid.uuid4().hex)
        # Add 6 rows to table
        for uuidstr in self.uuidstrs:
            ins_stmt = self.plans.insert().values(id=uuidstr)
            self.conn.execute(ins_stmt)
            ins_stmt = self.resources.insert().values(plan_id=uuidstr)
            self.conn.execute(ins_stmt)

        # Set 4 of them deleted, 2 are 60 days ago, 2 are 20 days ago
        old = timeutils.utcnow() - datetime.timedelta(days=20)
        older = timeutils.utcnow() - datetime.timedelta(days=60)
        make_plans_old = self.plans.update().where(
            self.plans.c.id.in_(self.uuidstrs[1:3])).values(
            deleted_at=old)
        make_plans_older = self.plans.update().where(
            self.plans.c.id.in_(self.uuidstrs[4:6])).values(
            deleted_at=older)
        make_resources_old = self.resources.update().where(
            self.resources.c.plan_id.in_(self.uuidstrs[1:3])).values(
            deleted_at=old)
        make_resources_older = self.resources.update().where(
            self.resources.c.plan_id.in_(self.uuidstrs[4:6])).values(
            deleted_at=older)

        self.conn.execute(make_plans_old)
        self.conn.execute(make_plans_older)
        self.conn.execute(make_resources_old)
        self.conn.execute(make_resources_older)
Example #53
    def list(self):
        """Show a list of all karbor services."""

        ctxt = context.get_admin_context()
        services = db.service_get_all(ctxt)
        print_format = "%-16s %-36s %-10s %-5s %-10s"
        print(print_format % (_('Binary'),
                              _('Host'),
                              _('Status'),
                              _('State'),
                              _('Updated At')))
        for svc in services:
            alive = utils.service_is_up(svc)
            art = ":-)" if alive else "XXX"
            status = 'enabled'
            if svc['disabled']:
                status = 'disabled'
            print(print_format % (svc['binary'], svc['host'].partition('.')[0],
                                  status, art,
                                  svc['updated_at']))
Example #54
    def test_run_execute(self):
        now = datetime.utcnow() - timedelta(hours=1)
        param = {
            'operation_id': self._operation_db.id,
            'triggered_time': now,
            'expect_start_time': now,
            'window_time': 30,
            'run_type': constants.OPERATION_RUN_TYPE_EXECUTE,
            'user_id': self._operation_db.user_id,
            'project_id': self._operation_db.project_id
        }
        self._operation_class.run(self._operation_db.operation_definition,
                                  param=param)

        logs = objects.ScheduledOperationLogList.get_by_filters(
            context.get_admin_context(),
            {'state': constants.OPERATION_EXE_STATE_DROPPED_OUT_OF_WINDOW,
             'operation_id': self._operation_db.id}, 1,
            None, ['created_at'], ['desc'])

        self.assertIsNotNone(logs)
        log = logs.objects[0]
        self.assertEqual(now, log.triggered_time)
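Example #54 exercises the execute window: the operation was triggered an hour ago but its window is only 30, so the run is logged as OPERATION_EXE_STATE_DROPPED_OUT_OF_WINDOW. A hedged, self-contained sketch of that check, assuming window_time is expressed in minutes (the unit is not shown on this page):

    from datetime import datetime, timedelta


    def in_window(expect_start_time, window_time_minutes, now=None):
        # The run is only allowed while "now" still falls inside the window
        # that opens at expect_start_time.
        now = now or datetime.utcnow()
        return now <= expect_start_time + timedelta(
            minutes=window_time_minutes)


    expect = datetime.utcnow() - timedelta(hours=1)
    print(in_window(expect, 30))   # False -> dropped out of the window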
Example #55
    def _run_operation(self, operation_id, param):

        self._update_operation_state(operation_id, {"state": constants.OPERATION_STATE_RUNNING})

        try:
            try:
                operation = objects.ScheduledOperation.get_by_id(context.get_admin_context(), operation_id)
            except Exception:
                LOG.exception(_LE("Run operation(%s), get operation failed"), operation_id)
                return

            try:
                param["user_id"] = operation.user_id
                param["project_id"] = operation.project_id

                self._operation_manager.run_operation(
                    operation.operation_type, operation.operation_definition, param=param
                )
            except Exception:
                LOG.exception(_LE("Run operation(%s) failed"), operation_id)

        finally:
            self._update_operation_state(operation_id, {"state": constants.OPERATION_STATE_REGISTERED})
Example #56
    def _restore_operations(self):
        limit = 100
        marker = None
        filters = {
            "service_id": self._service_id,
            "state": [
                constants.OPERATION_STATE_REGISTERED,
                constants.OPERATION_STATE_TRIGGERED,
                constants.OPERATION_STATE_RUNNING,
            ],
        }
        columns_to_join = ["operation"]
        ctxt = karbor_context.get_admin_context()
        resume_states = [constants.OPERATION_STATE_TRIGGERED, constants.OPERATION_STATE_RUNNING]
        while True:
            states = objects.ScheduledOperationStateList.get_by_filters(
                ctxt, filters, limit, marker, columns_to_join=columns_to_join
            )
            if not states:
                break

            for state in states:
                operation = state.operation
                if not operation.enabled:
                    continue

                resume = state.state in resume_states
                self._trigger_manager.register_operation(
                    operation.trigger_id, operation.id, resume=resume, end_time_for_run=state.end_time_for_run
                )

                self._user_trust_manager.resume_operation(
                    operation.id, operation.user_id, operation.project_id, state.trust_id
                )
            if len(states) < limit:
                break
            marker = states[-1].id
Example #57
 def setUp(self):
     super(RestoreDbTestCase, self).setUp()
     self.ctxt = context.get_admin_context()
Example #58
 def setUp(self):
     super(OperationLogTestCase, self).setUp()
     self.ctxt = context.get_admin_context()
Example #59
 def setUp(self):
     super(CheckpointRecordTestCase, self).setUp()
     self.ctxt = context.get_admin_context()
Example #60
 def setUp(self):
     super(TestScheduledOperationStateList, self).setUp()
     self.context = context.get_admin_context()