def test_with_alternate_context(self):
    """Check update_test() uses the context it is handed, not the query one."""
    primary_ctx = watcher_context.RequestContext('foo', 'foo')
    alternate_ctx = watcher_context.RequestContext(
        'bar', project_id='alternate')
    obj = MyObj.query(primary_ctx)
    obj.update_test(alternate_ctx)
    # The remote update must have run under the alternate project.
    self.assertEqual('alternate-context', obj.bar)
    self.assertRemotes()
def setUp(self):
    """Set up API test fixtures: config, policy, messaging, app, context.

    Also patches ``watcher_context.make_context`` so every context built
    during a test carries the fake project/user credentials.
    """
    super(TestCase, self).setUp()
    self.useFixture(conf_fixture.ConfReloadFixture())
    # FIX: the policy fixture used to be installed a second time further
    # down (after building self.context); one installation suffices.
    self.policy = self.useFixture(policy_fixture.PolicyFixture())
    self.messaging_conf = self.useFixture(conffixture.ConfFixture(CONF))
    self.messaging_conf.transport_driver = 'fake'
    cfg.CONF.set_override("auth_type", "admin_token",
                          group='keystone_authtoken', enforce_type=True)
    cfg.CONF.set_override("auth_uri", "http://127.0.0.1/identity",
                          group='keystone_authtoken', enforce_type=True)
    app_config_path = os.path.join(os.path.dirname(__file__), 'config.py')
    self.app = testing.load_test_app(app_config_path)
    self.token_info = {
        'token': {
            'project': {'id': 'fake_project'},
            'user': {'id': 'fake_user'}
        }
    }
    self.context = watcher_context.RequestContext(
        auth_token_info=self.token_info,
        project_id='fake_project',
        user_id='fake_user')

    def make_context(*args, **kwargs):
        # If context hasn't been constructed with token_info
        if not kwargs.get('auth_token_info'):
            kwargs['auth_token_info'] = copy.deepcopy(self.token_info)
        if not kwargs.get('project_id'):
            kwargs['project_id'] = 'fake_project'
        if not kwargs.get('user_id'):
            kwargs['user_id'] = 'fake_user'
        context = watcher_context.RequestContext(*args, **kwargs)
        # Round-trip through a dict to mimic a deserialized context.
        return watcher_context.RequestContext.from_dict(context.to_dict())

    p = mock.patch.object(watcher_context, 'make_context',
                          side_effect=make_context)
    self.mock_make_context = p.start()
    self.addCleanup(p.stop)
    self.useFixture(conf_fixture.ConfFixture(cfg.CONF))
    self._reset_singletons()
    # Snapshot the versioned-object registry so tests can register
    # temporary object classes without leaking into other tests.
    self._base_test_obj_backup = copy.copy(
        objects_base.WatcherObject._obj_classes)
    self.addCleanup(self._restore_obj_registry)
    self.addCleanup(self._reset_singletons)
def setUp(self):
    """Prepare a PurgeCommand, a show-deleted context, fixed dates and data."""
    super(TestPurgeCommand, self).setUp()
    self.cmd = purge.PurgeCommand()
    token_info = {
        'token': {
            'project': {'id': 'fake_project'},
            'user': {'id': 'fake_user'}
        }
    }
    # show_deleted=True so purged (soft-deleted) rows remain visible.
    self.context = watcher_context.RequestContext(
        auth_token_info=token_info,
        project_id='fake_project',
        user_id='fake_user',
        show_deleted=True,
    )
    self.fake_today = '2016-02-24T09:52:05.219414+00:00'
    self.expired_date = '2016-01-24T09:52:05.219414+00:00'
    # Auto-answer 'y' to the interactive purge confirmation prompt.
    self.m_input = mock.Mock()
    self.m_input.return_value = 'y'
    patcher = mock.patch("watcher.db.purge.input", self.m_input)
    patcher.start()
    self.addCleanup(patcher.stop)
    self._id_generator = None
    self._data_setup()
def __init__(self, messaging):
    """Initialize the handler, start it, and keep an admin context around."""
    super(ContinuousAuditHandler, self).__init__(messaging)
    self._scheduler = None
    self.jobs = []
    self._start()
    # Admin context that can also see soft-deleted rows.
    self.context_show_deleted = context.RequestContext(
        is_admin=True, show_deleted=True)
def launch_audits_periodically(self):
    """Add an execute_audit interval job for every active CONTINUOUS audit."""
    audit_context = context.RequestContext(is_admin=True)
    active_states = (objects.audit.State.PENDING,
                     objects.audit.State.ONGOING,
                     objects.audit.State.SUCCEEDED)
    audit_filters = {
        'audit_type': objects.audit.AuditType.CONTINUOUS.value,
        'state__in': active_states,
    }
    audits = objects.Audit.list(audit_context, filters=audit_filters,
                                eager=True)
    # Audits that already have a job scheduled.
    scheduled_uuids = {
        job.args[0].uuid for job in self.scheduler.get_jobs()
        if job.name == 'execute_audit'
    }
    for audit in audits:
        if audit.uuid in scheduled_uuids:
            continue
        job = self.scheduler.add_job(
            self.execute_audit, 'interval',
            args=[audit, audit_context],
            seconds=audit.interval,
            name='execute_audit',
            next_run_time=datetime.datetime.now())
        self.jobs.append({audit.uuid: job})
def setUp(self):
    """Patch context creation so every context acts as the admin tenant."""
    super(AdminRoleTest, self).setUp()
    token_info = {
        'token': {
            'project': {'id': 'admin'},
            'user': {'id': 'admin'}
        }
    }
    self.context = watcher_context.RequestContext(
        auth_token_info=token_info,
        project_id='admin',
        user_id='admin')

    def make_context(*args, **kwargs):
        # Backfill any credential the caller omitted with admin values.
        defaults = {
            'auth_token_info': token_info,
            'project_id': 'admin',
            'user_id': 'admin',
            'roles': ['admin'],
        }
        for key, value in defaults.items():
            if not kwargs.get(key):
                kwargs[key] = copy.deepcopy(value)
        ctx = watcher_context.RequestContext(*args, **kwargs)
        # Round-trip through a dict to mimic a deserialized context.
        return watcher_context.RequestContext.from_dict(ctx.to_dict())

    patcher = mock.patch.object(watcher_context, 'make_context',
                                side_effect=make_context)
    self.mock_make_context = patcher.start()
    self.addCleanup(patcher.stop)
def __init__(self):
    """Initialize schedulers lazily and keep a show-deleted admin context."""
    super(ContinuousAuditHandler, self).__init__()
    # Scheduler that runs the audit jobs themselves.
    self._audit_scheduler = None
    # Scheduler for the periodic task that launches audits.
    self._period_scheduler = None
    self.context_show_deleted = context.RequestContext(
        is_admin=True, show_deleted=True)
def launch_audits_periodically(self):
    """Periodic task: (re)schedule jobs for active CONTINUOUS audits.

    Removes jobs whose audit left the active states, then for each active
    audit either adds a job (new audit) or replaces its job when the
    audit's interval changed. Interval audits restore their next run time
    after a shutdown; non-integer intervals are treated as cron strings.
    """
    audit_context = context.RequestContext(is_admin=True)
    audit_filters = {
        'audit_type': objects.audit.AuditType.CONTINUOUS.value,
        'state__in': (objects.audit.State.PENDING,
                      objects.audit.State.ONGOING,
                      objects.audit.State.SUCCEEDED)
    }
    audits = objects.Audit.list(
        audit_context, filters=audit_filters, eager=True)
    scheduler_job_args = [
        (job.args[0].uuid, job) for job in self.scheduler.get_jobs()
        if job.name == 'execute_audit']
    scheduler_jobs = dict(scheduler_job_args)
    # BUG FIX: the original popped entries from scheduler_jobs while
    # iterating over scheduler_jobs.values(), which raises
    # "RuntimeError: dictionary changed size during iteration" on
    # Python 3. Collect the inactive uuids first, then pop.
    inactive_uuids = [job.args[0].uuid
                      for job in scheduler_jobs.values()
                      if self._is_audit_inactive(job.args[0])]
    for audit_uuid in inactive_uuids:
        scheduler_jobs.pop(audit_uuid)
    for audit in audits:
        existing_job = scheduler_jobs.get(audit.uuid, None)
        # if audit is not presented in scheduled audits yet,
        # just add a new audit job.
        # if audit is already in the job queue, and interval has changed,
        # we need to remove the old job and add a new one.
        if (existing_job is None) or (
                existing_job and
                audit.interval != existing_job.args[0].interval):
            if existing_job:
                self.scheduler.remove_job(existing_job.id)
            # if interval is provided with seconds
            if utils.is_int_like(audit.interval):
                # if audit has already been provided and we need
                # to restore it after shutdown
                if audit.next_run_time is not None:
                    old_run_time = audit.next_run_time
                    current = datetime.datetime.utcnow()
                    if old_run_time < current:
                        # Advance to the next slot on the original grid.
                        delta = datetime.timedelta(
                            seconds=(int(audit.interval) - (
                                current - old_run_time).seconds %
                                int(audit.interval)))
                        audit.next_run_time = current + delta
                    next_run_time = audit.next_run_time
                # if audit is new one
                else:
                    next_run_time = datetime.datetime.utcnow()
                self._add_job('interval', audit, audit_context,
                              seconds=int(audit.interval),
                              next_run_time=next_run_time)
            else:
                audit.next_run_time = self._next_cron_time(audit)
                self._add_job('date', audit, audit_context,
                              run_date=audit.next_run_time)
            audit.save()
def make_context(*args, **kwargs):
    # Backfill any credential the caller omitted with the fake test
    # identity; token_info is deep-copied so tests may mutate it safely.
    if not kwargs.get('auth_token_info'):
        kwargs['auth_token_info'] = copy.deepcopy(self.token_info)
    for key, default in (('project_id', 'fake_project'),
                         ('user_id', 'fake_user')):
        if not kwargs.get(key):
            kwargs[key] = default
    ctx = watcher_context.RequestContext(*args, **kwargs)
    # Round-trip through a dict to mimic a deserialized context.
    return watcher_context.RequestContext.from_dict(ctx.to_dict())
def launch_audits_periodically(self):
    """Periodic task: schedule a job for every active CONTINUOUS audit.

    Audits in PENDING/ONGOING/SUCCEEDED state that do not yet have a
    scheduler job get one, using either an integer seconds interval or a
    cron expression; interval audits restore their next run time after a
    shutdown.
    """
    audit_context = context.RequestContext(is_admin=True)
    audit_filters = {
        'audit_type': objects.audit.AuditType.CONTINUOUS.value,
        'state__in': (objects.audit.State.PENDING,
                      objects.audit.State.ONGOING,
                      objects.audit.State.SUCCEEDED)
    }
    audits = objects.Audit.list(audit_context,
                                filters=audit_filters,
                                eager=True)
    scheduler_job_args = [
        job.args for job in self.scheduler.get_jobs()
        if job.name == 'execute_audit'
    ]
    # BUG FIX: the original called scheduler_job_args.remove() while
    # iterating over the same list, which skips the element that follows
    # each removal. Build a filtered list instead.
    scheduler_job_args = [
        args for args in scheduler_job_args
        if not self._is_audit_inactive(args[0])
    ]
    # Hoisted out of the loop: uuids of audits already scheduled.
    scheduled_uuids = [args[0].uuid for args in scheduler_job_args]
    for audit in audits:
        # if audit is not presented in scheduled audits yet.
        if audit.uuid not in scheduled_uuids:
            # if interval is provided with seconds
            if utils.is_int_like(audit.interval):
                # if audit has already been provided and we need
                # to restore it after shutdown
                if audit.next_run_time is not None:
                    old_run_time = audit.next_run_time
                    current = datetime.datetime.utcnow()
                    if old_run_time < current:
                        # Advance to the next slot on the original grid.
                        delta = datetime.timedelta(
                            seconds=(int(audit.interval) -
                                     (current - old_run_time).seconds %
                                     int(audit.interval)))
                        audit.next_run_time = current + delta
                    next_run_time = audit.next_run_time
                # if audit is new one
                else:
                    next_run_time = datetime.datetime.utcnow()
                self._add_job('interval', audit, audit_context,
                              seconds=int(audit.interval),
                              next_run_time=next_run_time)
            else:
                audit.next_run_time = self._next_cron_time(audit)
                self._add_job('date', audit, audit_context,
                              run_date=audit.next_run_time)
            audit.save()
def set_context(self):
    """Build a RequestContext from the Keystone auth headers of the request."""
    headers = self.request.headers
    creds = {
        'user': headers.get('X-User') or headers.get('X-User-Id'),
        'domain_id': headers.get('X-User-Domain-Id'),
        'domain_name': headers.get('X-User-Domain-Name'),
        'auth_token': headers.get('X-Auth-Token'),
        'roles': headers.get('X-Roles', '').split(','),
    }
    # Admin if either admin-like role was granted by Keystone.
    is_admin = any(role in creds['roles']
                   for role in ('admin', 'administrator'))
    is_public_api = self.request.environ.get('is_public_api', False)
    self.request.context = context.RequestContext(
        is_admin=is_admin, is_public_api=is_public_api, **creds)
def setUp(self):
    """Load the test WSGI app and patch context creation with fake creds."""
    super(TestCase, self).setUp()
    self.app = testing.load_test_app(
        os.path.join(os.path.dirname(__file__), 'config.py'))
    token_info = {
        'token': {
            'project': {'id': 'fake_project'},
            'user': {'id': 'fake_user'}
        }
    }
    self.context = watcher_context.RequestContext(
        auth_token_info=token_info,
        project_id='fake_project',
        user_id='fake_user')

    def make_context(*args, **kwargs):
        # Backfill any credential the caller omitted with the fakes.
        if not kwargs.get('auth_token_info'):
            kwargs['auth_token_info'] = copy.deepcopy(token_info)
        for key, default in (('project_id', 'fake_project'),
                             ('user_id', 'fake_user')):
            if not kwargs.get(key):
                kwargs[key] = default
        ctx = watcher_context.RequestContext(*args, **kwargs)
        # Round-trip through a dict to mimic a deserialized context.
        return watcher_context.RequestContext.from_dict(ctx.to_dict())

    patcher = mock.patch.object(watcher_context, 'make_context',
                                side_effect=make_context)
    self.mock_make_context = patcher.start()
    self.addCleanup(patcher.stop)
    self.useFixture(conf_fixture.ConfFixture(cfg.CONF))
    # Snapshot the versioned-object registry; restored on cleanup.
    self._base_test_obj_backup = copy.copy(
        objects_base.WatcherObject._obj_classes)
    self.addCleanup(self._restore_obj_registry)
def start(self):
    """Start the RPC server and the manager's periodic tasks."""
    super(RPCService, self).start()
    admin_ctx = context.RequestContext('admin', 'admin', is_admin=True)
    rpc_target = messaging.Target(topic=self.topic, server=self.host)
    self.rpcserver = rpc.get_server(
        rpc_target, [self.manager], objects_base.IronicObjectSerializer())
    self.rpcserver.start()
    self.handle_signal()
    self.manager.init_host()
    # Periodic tasks run under the admin context.
    self.tg.add_dynamic_timer(
        self.manager.periodic_tasks,
        periodic_interval_max=cfg.CONF.periodic_interval,
        context=admin_ctx)
    LOG.info(_LI('Created RPC server for service %(service)s on host '
                 '%(host)s.'),
             {'service': self.topic, 'host': self.host})
def test_with_alternate_context(self):
    """Check update_test() uses the context it is handed, not the query one."""
    first_ctx = context.RequestContext('foo', 'foo')
    second_ctx = context.RequestContext(user='******')
    obj = MyObj.query(first_ctx)
    obj.update_test(second_ctx)
    self.assertEqual('alternate-context', obj.bar)
class Service(base.APIBase):
    """API representation of a service.

    This class enforces type checking and value constraints, and converts
    between the internal object model and the API representation of a
    service.
    """

    # Cached status value computed by _set_status.
    _status = None
    # Class-level admin context shared by all Service API objects.
    _context = context.RequestContext(is_admin=True)

    def _get_status(self):
        # Getter side of the 'status' wsproperty.
        return self._status

    def _set_status(self, id):
        # Derive ACTIVE/FAILED from the service's most recent heartbeat:
        # the service is considered down when the last heartbeat is older
        # than CONF.service_down_time seconds.
        service = objects.Service.get(pecan.request.context, id)
        last_heartbeat = (service.last_seen_up or service.updated_at
                          or service.created_at)
        if isinstance(last_heartbeat, six.string_types):
            # NOTE(russellb) If this service came in over rpc via
            # conductor, then the timestamp will be a string and needs to be
            # converted back to a datetime.
            last_heartbeat = timeutils.parse_strtime(last_heartbeat)
        else:
            # Objects have proper UTC timezones, but the timeutils comparison
            # below does not (and will fail)
            last_heartbeat = last_heartbeat.replace(tzinfo=None)
        elapsed = timeutils.delta_seconds(last_heartbeat, timeutils.utcnow())
        is_up = abs(elapsed) <= CONF.service_down_time
        if not is_up:
            # NOTE(review): the two message fragments below concatenate
            # without a space between "...%(lhb)s." and "Elapsed" —
            # probably unintended, but left untouched here.
            LOG.warning(
                'Seems service %(name)s on host %(host)s is down. '
                'Last heartbeat was %(lhb)s.'
                'Elapsed time is %(el)s',
                {'name': service.name,
                 'host': service.host,
                 'lhb': str(last_heartbeat),
                 'el': str(elapsed)})
            self._status = objects.service.ServiceStatus.FAILED
        else:
            self._status = objects.service.ServiceStatus.ACTIVE

    id = wsme.wsattr(int, readonly=True)
    """ID for this service."""

    name = wtypes.text
    """Name of the service."""

    host = wtypes.text
    """Host where service is placed on."""

    last_seen_up = wsme.wsattr(datetime.datetime, readonly=True)
    """Time when Watcher service sent latest heartbeat."""

    # Computed property: reading it returns the cached status, writing the
    # service id triggers the heartbeat evaluation above.
    status = wsme.wsproperty(wtypes.text, _get_status, _set_status,
                             mandatory=True)

    links = wsme.wsattr([link.Link], readonly=True)
    """A list containing a self link."""

    def __init__(self, **kwargs):
        super(Service, self).__init__()
        fields = list(objects.Service.fields) + ['status']
        self.fields = []
        for field in fields:
            self.fields.append(field)
            # 'status' is fed the service id so its setter can compute
            # the up/down state; every other field maps one-to-one.
            setattr(self, field,
                    kwargs.get(field if field != 'status' else 'id',
                               wtypes.Unset))

    @staticmethod
    def _convert_with_links(service, url, expand=True):
        # Attach self/bookmark links; collapse to a summary view when
        # expand is False.
        if not expand:
            service.unset_fields_except(['id', 'name', 'host', 'status'])
        service.links = [
            link.Link.make_link('self', url, 'services', str(service.id)),
            link.Link.make_link('bookmark', url, 'services',
                                str(service.id), bookmark=True)]
        return service

    @classmethod
    def convert_with_links(cls, service, expand=True):
        # Build the API object from a Service DB/RPC object.
        service = Service(**service.as_dict())
        return cls._convert_with_links(service, pecan.request.host_url,
                                       expand)

    @classmethod
    def sample(cls, expand=True):
        # Canned example used for API documentation/sample generation.
        sample = cls(id=1,
                     name='watcher-applier',
                     host='Controller',
                     last_seen_up=datetime.datetime(2016, 1, 1))
        return cls._convert_with_links(sample, 'http://localhost:9322',
                                       expand)
def __init__(self):
    """Initialize the handler with a lazily-created scheduler."""
    super(ContinuousAuditHandler, self).__init__()
    self._scheduler = None
    # Admin context that can also see soft-deleted rows.
    self.context_show_deleted = context.RequestContext(
        is_admin=True, show_deleted=True)
def launch_audits_periodically(self):
    """Periodic task: claim and (re)schedule active CONTINUOUS audits.

    Unclaimed audits (hostname is None) are claimed for this host, jobs
    for audits that left the active states are dropped, and every
    remaining non-expired audit owned by this host gets an execute_audit
    job — either a seconds interval or a cron 'date' job.
    """
    # if audit scheduler stop, restart it
    if not self.scheduler.running:
        self.scheduler.start()
    audit_context = context.RequestContext(is_admin=True)
    audit_filters = {
        'audit_type': objects.audit.AuditType.CONTINUOUS.value,
        'state__in': (objects.audit.State.PENDING,
                      objects.audit.State.ONGOING),
    }
    # First pass: audits not yet claimed by any host.
    audit_filters['hostname'] = None
    unscheduled_audits = objects.Audit.list(
        audit_context, filters=audit_filters, eager=True)
    for audit in unscheduled_audits:
        # If continuous audit doesn't have a hostname yet,
        # Watcher will set current CONF.host value.
        # TODO(alexchadin): Add scheduling of new continuous audits.
        audit.hostname = CONF.host
        audit.save()
    # Map audit uuid -> currently scheduled job.
    scheduler_job_args = [
        (job.args[0].uuid, job) for job in self.scheduler.get_jobs()
        if job.name == 'execute_audit']
    scheduler_jobs = dict(scheduler_job_args)
    # if audit isn't in active states, audit's job should be removed
    # (uuids are collected first so the dict isn't mutated mid-iteration).
    jobs_to_remove = []
    for job in scheduler_jobs.values():
        if self._is_audit_inactive(job.args[0]):
            jobs_to_remove.append(job.args[0].uuid)
    for audit_uuid in jobs_to_remove:
        scheduler_jobs.pop(audit_uuid)
    # Second pass: audits owned by this host.
    audit_filters['hostname'] = CONF.host
    audits = objects.Audit.list(
        audit_context, filters=audit_filters, eager=True)
    for audit in audits:
        if self.check_audit_expired(audit):
            continue
        existing_job = scheduler_jobs.get(audit.uuid, None)
        # if audit is not presented in scheduled audits yet,
        # just add a new audit job.
        # if audit is already in the job queue, and interval has changed,
        # we need to remove the old job and add a new one.
        if (existing_job is None) or (
                existing_job and
                audit.interval != existing_job.args[0].interval):
            if existing_job:
                self.scheduler.remove_job(existing_job.id)
            # if interval is provided with seconds
            if utils.is_int_like(audit.interval):
                # if audit has already been provided and we need
                # to restore it after shutdown
                if audit.next_run_time is not None:
                    old_run_time = audit.next_run_time
                    current = datetime.datetime.utcnow()
                    if old_run_time < current:
                        # Advance to the next slot on the original
                        # interval grid rather than firing immediately.
                        delta = datetime.timedelta(
                            seconds=(int(audit.interval) - (
                                current - old_run_time).seconds %
                                int(audit.interval)))
                        audit.next_run_time = current + delta
                    next_run_time = audit.next_run_time
                # if audit is new one
                else:
                    next_run_time = datetime.datetime.utcnow()
                self._add_job('interval', audit, audit_context,
                              seconds=int(audit.interval),
                              next_run_time=next_run_time)
            else:
                # Non-integer interval: treat as a cron expression.
                audit.next_run_time = self._next_cron_time(audit)
                self._add_job('date', audit, audit_context,
                              run_date=audit.next_run_time)
            audit.hostname = CONF.host
            audit.save()