def detach(self, cluster):
    """Routine to be called when the policy is detached from a cluster.

    :param cluster: The cluster from which the policy is to be detached.
    :returns: When the operation was successful, returns a tuple of
              (True, data) where the data contains references to the
              resources created; otherwise returns a tuple of (False, error)
              where the error contains an error message.
    """
    reason = _('Servergroup resource deletion succeeded.')
    ctx = context.get_admin_context()

    binding = cpo.ClusterPolicy.get(ctx, cluster.id, self.id)
    if not binding or not binding.data:
        return True, reason

    policy_data = self._extract_policy_data(binding.data)
    if not policy_data:
        return True, reason

    group_id = policy_data.get('servergroup_id', None)
    inherited_group = policy_data.get('inherited_group', False)
    if group_id and not inherited_group:
        try:
            nc = self.nova(cluster.user, cluster.project)
            nc.server_group_delete(group_id)
        except Exception as ex:
            msg = _('Failed in deleting servergroup.')
            LOG.exception('%(msg)s: %(ex)s', {'msg': msg, 'ex': ex})
            return False, msg

    return True, reason
def do_create(self, obj):
    """Create a container instance using the given profile.

    :param obj: The node object for this container.
    :returns: ID of the container instance or ``None`` if driver fails.
    :raises: `EResourceCreation`
    """
    name = self.properties[self.NAME]
    if name is None:
        # Fall back to a generated name derived from the node name
        name = '-'.join([obj.name, utils.random_name()])

    params = {
        'image': self.properties[self.IMAGE],
        'name': name,
        'command': self.properties[self.COMMAND],
    }
    try:
        ctx = context.get_admin_context()
        dockerclient = self.docker(obj)
        db_api.node_add_dependents(ctx, self.host.id, obj.id)
        container = dockerclient.container_create(**params)
    except exc.InternalError as ex:
        raise exc.EResourceCreation(type='container',
                                    message=six.text_type(ex))

    self.container_id = container['Id'][:36]
    return self.container_id
def notify(engine_id, method, **kwargs):
    """Send notification to health manager service.

    :param engine_id: dispatcher to notify; broadcast if value is None
    :param method: remote method to call
    """
    timeout = cfg.CONF.engine_life_check_timeout
    client = rpc_messaging.get_rpc_client(version=consts.RPC_API_VERSION)

    if engine_id:
        # Notify specific dispatcher identified by engine_id
        call_context = client.prepare(
            version=consts.RPC_API_VERSION,
            timeout=timeout,
            topic=consts.ENGINE_HEALTH_MGR_TOPIC,
            server=engine_id)
    else:
        # Broadcast to all dispatchers
        call_context = client.prepare(
            version=consts.RPC_API_VERSION,
            timeout=timeout,
            topic=consts.ENGINE_HEALTH_MGR_TOPIC)

    ctx = context.get_admin_context()

    try:
        call_context.call(ctx, method, **kwargs)
        return True
    except oslo_messaging.MessagingTimeout:
        return False
def notify(engine_id, method, **kwargs):
    """Send notification to health manager service.

    Note that the health manager only handles JSON-serializable parameters.

    :param engine_id: dispatcher to notify; broadcast if value is None
    :param method: remote method to call
    """
    timeout = cfg.CONF.engine_life_check_timeout
    client = rpc.get_rpc_client(consts.HEALTH_MANAGER_TOPIC, None)

    if engine_id:
        # Notify specific dispatcher identified by engine_id
        call_context = client.prepare(timeout=timeout, server=engine_id)
    else:
        # Broadcast to all dispatchers
        call_context = client.prepare(timeout=timeout)

    ctx = context.get_admin_context()

    try:
        call_context.call(ctx, method, **kwargs)
        return True
    except messaging.MessagingTimeout:
        return False
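A brief usage sketch of `notify`, showing targeted versus broadcast delivery; the remote method name and keyword arguments below are hypothetical placeholders, not part of the health manager API shown here.

# Usage sketch (hypothetical method name and arguments).
# Targeted call: only the dispatcher identified by engine_id handles it.
delivered = notify('some-engine-uuid', 'some_remote_method',
                   cluster_id='my-cluster')

# Broadcast: passing None for engine_id notifies all dispatchers.
delivered = notify(None, 'some_remote_method', cluster_id='my-cluster')

# notify() returns False when the RPC call times out (MessagingTimeout),
# so callers can decide whether to retry or log the failure.
if not delivered:
    pass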
def docker(self, obj):
    """Construct docker client based on object.

    :param obj: Object for which the client is created. It is expected to
                be None when retrieving an existing client. When creating
                a client, it contains the user and project to be used.
    """
    if self._dockerclient is not None:
        return self._dockerclient

    host_node = self.properties.get(self.HOST_NODE, None)
    host_cluster = self.properties.get(self.HOST_CLUSTER, None)
    ctx = context.get_admin_context()
    self.host = self._get_host(ctx, host_node, host_cluster)

    # TODO(Anyone): Check node.data for per-node host selection
    host_type = self.host.rt['profile'].type_name
    if host_type not in self._VALID_HOST_TYPES:
        msg = _("Type of host node (%s) is not supported") % host_type
        raise exc.InternalError(message=msg)

    host_ip = self._get_host_ip(obj, self.host.physical_id, host_type)
    if host_ip is None:
        msg = _("Unable to determine the IP address of host node")
        raise exc.InternalError(message=msg)

    url = 'tcp://%(ip)s:%(port)d' % {
        'ip': host_ip,
        'port': self.properties[self.PORT]
    }
    self._dockerclient = docker_driver.DockerClient(url)

    return self._dockerclient
def to_dict(self):
    context = senlin_context.get_admin_context()
    profile = db_api.profile_get(context, self.profile_id,
                                 project_safe=False)
    return {
        'id': self.id,
        'name': self.name,
        'profile_id': self.profile_id,
        'user': self.user,
        'project': self.project,
        'domain': self.domain,
        'init_at': utils.isotime(self.init_at),
        'created_at': utils.isotime(self.created_at),
        'updated_at': utils.isotime(self.updated_at),
        'min_size': self.min_size,
        'max_size': self.max_size,
        'desired_capacity': self.desired_capacity,
        'timeout': self.timeout,
        'status': self.status,
        'status_reason': self.status_reason,
        'metadata': self.metadata or {},
        'data': self.data or {},
        'dependents': self.dependents or {},
        'profile_name': profile.name,
        'nodes': db_api.node_ids_by_cluster(context, self.id),
        'policies': db_api.cluster_policy_ids_by_cluster(context, self.id),
    }
def detach(self, cluster):
    """Routine to be called when the policy is detached from a cluster.

    :param cluster: The cluster from which the policy is to be detached.
    :returns: When the operation was successful, returns a tuple of
              (True, data) where the data contains references to the
              resources created; otherwise returns a tuple of (False, error)
              where the error contains an error message.
    """
    reason = _('Servergroup resource deletion succeeded.')
    ctx = context.get_admin_context()

    binding = cpo.ClusterPolicy.get(ctx, cluster.id, self.id)
    if not binding or not binding.data:
        return True, reason

    policy_data = self._extract_policy_data(binding.data)
    if not policy_data:
        return True, reason

    group_id = policy_data.get('servergroup_id', None)
    inherited_group = policy_data.get('inherited_group', False)
    if group_id and not inherited_group:
        try:
            self.nova(cluster).delete_server_group(group_id)
        except Exception as ex:
            msg = _('Failed in deleting servergroup.')
            LOG.exception(_LE('%(msg)s: %(ex)s'),
                          {'msg': msg, 'ex': six.text_type(ex)})
            return False, msg

    return True, reason
def get_manager_engine(cluster_id):
    """Return the ID of the engine managing health for the given cluster."""
    ctx = context.get_admin_context()

    registry = objects.HealthRegistry.get(ctx, cluster_id)
    if not registry:
        return None

    return registry.engine_id
def __init__(self, engine_service, topic, version):
    super(HealthManager, self).__init__()

    self.TG = threadgroup.ThreadGroup()
    self.engine_id = engine_service.engine_id
    self.topic = topic
    self.version = version
    self.ctx = context.get_admin_context()
    self.rpc_client = rpc_client.EngineClient()
    self.rt = {
        'registries': [],
    }
def __init__(self, engine_service, topic, version):
    super(HealthManager, self).__init__()

    self.TG = threadgroup.ThreadGroup(
        thread_pool_size=cfg.CONF.health_manager_thread_pool_size)
    self.engine_id = engine_service.engine_id
    self.topic = topic
    self.version = version
    self.ctx = context.get_admin_context()
    self.rpc_client = rpc_client.EngineClient()
    self.health_registry = RuntimeHealthRegistry(
        ctx=self.ctx, engine_id=self.engine_id, thread_group=self.TG)
def __init__(self, host, topic):
    super(HealthManagerService, self).__init__(
        self.service_name, host, topic,
        threads=CONF.health_manager.threads)
    self.version = consts.RPC_API_VERSION

    self.ctx = context.get_admin_context()

    # The following are initialized here and will be assigned in start()
    # which happens after the fork when spawning multiple worker processes
    self.health_registry = None
    self.server = None
    self.service_id = None
    self.target = None
def _update_zone_info(self, obj, server):
    """Update the actual zone placement data.

    :param obj: The node object associated with this server.
    :param server: The server object returned from creation.
    """
    if server.availability_zone:
        placement = obj.data.get('placement', None)
        if not placement:
            obj.data['placement'] = {'zone': server.availability_zone}
        else:
            obj.data['placement'].setdefault(
                'zone', server.availability_zone)
        # It is safe to use admin context here
        ctx = context.get_admin_context()
        node_obj.Node.update(ctx, obj.id, {'data': obj.data})
def setup(binary, host):
    if cfg.CONF.profiler.enabled:
        _notifier = osprofiler.notifier.create(
            "Messaging", oslo_messaging,
            context.get_admin_context().to_dict(),
            messaging.TRANSPORT, "senlin", binary, host)
        osprofiler.notifier.set(_notifier)
        osprofiler.web.enable(cfg.CONF.profiler.hmac_keys)
        LOG.warning(_LW("OSProfiler is enabled.\n"
                        "This means that any person who knows any of the "
                        "hmac_keys specified in /etc/senlin/senlin.conf "
                        "can trace their requests.\n"
                        "In practice only an operator can read this file, "
                        "so there is no security issue. Note that even if "
                        "any person can trigger the profiler, only an admin "
                        "user can retrieve traces.\n"
                        "To disable OSProfiler set in senlin.conf:\n"
                        "[profiler]\nenabled=false"))
    else:
        osprofiler.web.disable()
def do_delete(self, obj):
    """Delete a container node.

    :param obj: The node object representing the container.
    :returns: `None`
    """
    if not obj.physical_id:
        return

    try:
        self.docker(obj).container_delete(obj.physical_id)
    except exc.InternalError as ex:
        raise exc.EResourceDeletion(type='container', id=obj.physical_id,
                                    message=six.text_type(ex))

    ctx = context.get_admin_context()
    db_api.node_remove_dependents(ctx, self.host.id, obj.id)
    return
def start(self):
    super(HealthManagerService, self).start()

    self.service_id = uuidutils.generate_uuid()

    self.health_registry = health_manager.RuntimeHealthRegistry(
        ctx=self.ctx, engine_id=self.service_id, thread_group=self.tg)

    # create service record
    ctx = senlin_context.get_admin_context()
    service_obj.Service.create(ctx, self.service_id, self.host,
                               self.service_name, self.topic)
    self.tg.add_timer(CONF.periodic_interval, self.service_manage_report)

    self.target = messaging.Target(server=self.service_id,
                                   topic=self.topic,
                                   version=self.version)
    self.server = rpc.get_rpc_server(self.target, self)
    self.server.start()

    self.tg.add_dynamic_timer(self.task, None, cfg.CONF.periodic_interval)
def start(self):
    """Start the engine.

    Note that the engine is an internal server, so we are not using
    versioned objects for parameter passing.
    """
    super(EngineService, self).start()

    self.service_id = uuidutils.generate_uuid()

    self.target = oslo_messaging.Target(server=self.service_id,
                                        topic=self.topic,
                                        version=self.version)
    self.server = messaging.get_rpc_server(self.target, self)
    self.server.start()

    # create service record
    ctx = senlin_context.get_admin_context()
    service_obj.Service.create(ctx, self.service_id, self.host,
                               self.service_name, self.topic)

    self.tg.add_timer(CONF.periodic_interval, self.service_manage_report)
def __init__(self):
    self.ctx = context.get_admin_context()
def test_get_admin_context(self):
    ctx1 = context.get_admin_context()
    self.assertTrue(ctx1.is_admin)
def service_manage_report(self):
    try:
        ctx = senlin_context.get_admin_context()
        service_obj.Service.update(ctx, self.service_id)
    except Exception as ex:
        LOG.error('Error while updating dispatcher service: %s', ex)