def start(self):
    verstr = version.version_string_with_package()
    LOG.info(_LI('Starting %(topic)s node (version %(version)s)'),
             {'topic': self.topic, 'version': verstr})
    self.basic_config_check()
    self.manager.init_host()
    self.model_disconnected = False
    ctxt = context.get_admin_context()
    self.service_ref = objects.Service.get_by_host_and_binary(
        ctxt, self.host, self.binary)
    if self.service_ref:
        _update_service_ref(self.service_ref)
    else:
        try:
            self.service_ref = _create_service_ref(self, ctxt)
        except (exception.ServiceTopicExists,
                exception.ServiceBinaryExists):
            # NOTE(danms): If we race to create a record with a sibling
            # worker, don't fail here.
            self.service_ref = objects.Service.get_by_host_and_binary(
                ctxt, self.host, self.binary)

    self.manager.pre_start_hook()

    if self.backdoor_port is not None:
        self.manager.backdoor_port = self.backdoor_port

    LOG.debug("Creating RPC server for service %s", self.topic)

    target = messaging.Target(topic=self.topic, server=self.host)

    endpoints = [
        self.manager,
        baserpc.BaseRPCAPI(self.manager.service_name, self.backdoor_port)
    ]
    endpoints.extend(self.manager.additional_endpoints)

    serializer = objects_base.NovaObjectSerializer()

    self.rpcserver = rpc.get_server(target, endpoints, serializer)
    self.rpcserver.start()

    self.manager.post_start_hook()

    LOG.debug("Join ServiceGroup membership for this service %s",
              self.topic)
    # Add service to the ServiceGroup membership group.
    self.servicegroup_api.join(self.host, self.topic, self)

    if self.periodic_enable:
        if self.periodic_fuzzy_delay:
            initial_delay = random.randint(0, self.periodic_fuzzy_delay)
        else:
            initial_delay = None

        self.tg.add_dynamic_timer(self.periodic_tasks,
                                  initial_delay=initial_delay,
                                  periodic_interval_max=
                                  self.periodic_interval_max)
def start(self):
    verstr = version.version_string_with_package()
    LOG.audit(_('Starting %(topic)s node (version %(version)s)'),
              {'topic': self.topic, 'version': verstr})
    self.basic_config_check()
    self.manager.init_host()
    self.model_disconnected = False
    ctxt = context.get_admin_context()
    try:
        self.service_ref = self.conductor_api.service_get_by_args(
            ctxt, self.host, self.binary)
        self.service_id = self.service_ref['id']
    except exception.NotFound:
        try:
            self.service_ref = self._create_service_ref(ctxt)
        except (exception.ServiceTopicExists,
                exception.ServiceBinaryExists):
            # NOTE(danms): If we race to create a record with a sibling
            # worker, don't fail here.
            self.service_ref = self.conductor_api.service_get_by_args(
                ctxt, self.host, self.binary)

    self.manager.pre_start_hook()

    if self.backdoor_port is not None:
        self.manager.backdoor_port = self.backdoor_port

    LOG.debug("Creating RPC server for service %s", self.topic)

    # The target specifies which queues this RPC server listens on.
    target = messaging.Target(topic=self.topic, server=self.host)

    endpoints = [
        self.manager,
        baserpc.BaseRPCAPI(self.manager.service_name, self.backdoor_port)
    ]
    endpoints.extend(self.manager.additional_endpoints)

    serializer = objects_base.NovaObjectSerializer()

    # Create and start the RPC server.
    self.rpcserver = rpc.get_server(target, endpoints, serializer)
    self.rpcserver.start()

    # The Manager object is effectively the entry point of the RPC API:
    # each RPC API call is ultimately turned into a call to the
    # corresponding Manager method, which is the final implementation of
    # that RPC API.
    self.manager.post_start_hook()

    LOG.debug("Join ServiceGroup membership for this service %s",
              self.topic)
    # Add service to the ServiceGroup membership group.
    self.servicegroup_api.join(self.host, self.topic, self)

    if self.periodic_enable:
        if self.periodic_fuzzy_delay:
            initial_delay = random.randint(0, self.periodic_fuzzy_delay)
        else:
            initial_delay = None

        self.tg.add_dynamic_timer(self.periodic_tasks,
                                  initial_delay=initial_delay,
                                  periodic_interval_max=
                                  self.periodic_interval_max)
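The comments above describe only the server side. For context, a caller reaches these endpoints through an oslo.messaging RPC client built against the same topic. The sketch below is a minimal, assumed illustration: the 'compute' topic default, the host argument, and the use of the BaseRPCAPI ping method are assumptions for demonstration, not part of the snippet above, and it presumes rpc.init(CONF) has already run (as it does during normal service startup).

import oslo_messaging as messaging
from nova import context as nova_context
from nova import rpc


def ping_service(host, topic='compute'):
    # Illustrative sketch: call the 'ping' endpoint exposed by BaseRPCAPI
    # on the service above. Topic and host values are assumptions.
    target = messaging.Target(topic=topic)
    client = rpc.get_client(target)
    # prepare(server=host) routes the call to the RPC server that was
    # created with messaging.Target(topic=topic, server=host).
    cctxt = client.prepare(server=host)
    ctxt = nova_context.get_admin_context()
    return cctxt.call(ctxt, 'ping', arg='hello')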
def create_rpc_dispatcher(self, backdoor_port=None):
    '''Get the rpc dispatcher for this manager.

    If a manager would like to set an rpc API version, or support more than
    one class as the target of rpc messages, override this method.
    '''
    base_rpc = baserpc.BaseRPCAPI(self.service_name, backdoor_port)
    return rpc_dispatcher.RpcDispatcher([self, base_rpc])
def create_rpc_dispatcher(self, backdoor_port=None, additional_apis=None):
    '''Get the rpc dispatcher for this manager.

    If a manager would like to set an rpc API version, or support more than
    one class as the target of rpc messages, override this method.
    '''
    apis = []
    if additional_apis:
        apis.extend(additional_apis)
    base_rpc = baserpc.BaseRPCAPI(self.service_name, backdoor_port)
    apis.extend([self, base_rpc])
    serializer = objects_base.NovaObjectSerializer()
    return rpc_dispatcher.RpcDispatcher(apis, serializer)
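The docstring above says a manager may override this method to expose more than one class as an RPC target. A minimal, hypothetical sketch of such an override follows, assuming a subclass named MyManager and an illustrative extra endpoint class ExtraAPI (neither name appears in the snippets above):

class MyManager(manager.Manager):

    def create_rpc_dispatcher(self, backdoor_port=None,
                              additional_apis=None):
        # Hypothetical: register ExtraAPI alongside the manager itself and
        # the BaseRPCAPI endpoint added by the base implementation.
        apis = [ExtraAPI()]
        if additional_apis:
            apis.extend(additional_apis)
        return super(MyManager, self).create_rpc_dispatcher(
            backdoor_port, additional_apis=apis)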
def add_host_rpc(self, host):
    """Add an RPC server for the specified host."""
    LOG.info("Creating RPC server for host '%s'", host)
    if host in self.rpcservers:
        LOG.warning("RPC server '%s' already exists.", host)
        return
    target = messaging.Target(topic=self.topic, server=host)
    endpoints = [
        self.manager,
        baserpc.BaseRPCAPI(self.manager.service_name, self.backdoor_port)
    ]
    endpoints.extend(self.manager.additional_endpoints)
    serializer = objects_base.NovaObjectSerializer()
    rpcserver = rpc.get_server(target, endpoints, serializer)
    rpcserver.start()
    self.rpcservers[host] = rpcserver
    LOG.info("Finished creating RPC server for host '%s'.", host)
def start(self): """Start the service. This includes starting an RPC service, initializing periodic tasks, etc. """ # NOTE(melwitt): Clear the cell cache holding database transaction # context manager objects. We do this to ensure we create new internal # oslo.db locks to avoid a situation where a child process receives an # already locked oslo.db lock when it is forked. When a child process # inherits a locked oslo.db lock, database accesses through that # transaction context manager will never be able to acquire the lock # and requests will fail with CellTimeout errors. # See https://bugs.python.org/issue6721 for more information. # With python 3.7, it would be possible for oslo.db to make use of the # os.register_at_fork() method to reinitialize its lock. Until we # require python 3.7 as a mininum version, we must handle the situation # outside of oslo.db. context.CELL_CACHE = {} verstr = version.version_string_with_package() LOG.info('Starting %(topic)s node (version %(version)s)', {'topic': self.topic, 'version': verstr}) self.basic_config_check() self.manager.init_host() self.model_disconnected = False ctxt = context.get_admin_context() self.service_ref = objects.Service.get_by_host_and_binary( ctxt, self.host, self.binary) if self.service_ref: _update_service_ref(self.service_ref) else: try: self.service_ref = _create_service_ref(self, ctxt) except (exception.ServiceTopicExists, exception.ServiceBinaryExists): # NOTE(danms): If we race to create a record with a sibling # worker, don't fail here. self.service_ref = objects.Service.get_by_host_and_binary( ctxt, self.host, self.binary) self.manager.pre_start_hook() if self.backdoor_port is not None: self.manager.backdoor_port = self.backdoor_port LOG.debug("Creating RPC server for service %s", self.topic) target = messaging.Target(topic=self.topic, server=self.host) endpoints = [ self.manager, baserpc.BaseRPCAPI(self.manager.service_name, self.backdoor_port) ] endpoints.extend(self.manager.additional_endpoints) serializer = objects_base.NovaObjectSerializer() self.rpcserver = rpc.get_server(target, endpoints, serializer) self.rpcserver.start() self.manager.post_start_hook() LOG.debug("Join ServiceGroup membership for this service %s", self.topic) # Add service to the ServiceGroup membership group. self.servicegroup_api.join(self.host, self.topic, self) if self.periodic_enable: if self.periodic_fuzzy_delay: initial_delay = random.randint(0, self.periodic_fuzzy_delay) else: initial_delay = None self.tg.add_dynamic_timer(self.periodic_tasks, initial_delay=initial_delay, periodic_interval_max= self.periodic_interval_max)
def _start_compute_worker(self):
    # All subprocesses must redo this re-init job to enable RPC clients.
    rpc.init(CONF)
    objects_base.NovaObject.indirection_api = \
        conductor_rpcapi.ConductorAPI()
    self.__init__(self.host, self.binary, self.topic,
                  self.manager_class_name, self.report_interval,
                  self.periodic_enable, self.periodic_fuzzy_delay,
                  self.periodic_interval_max, db_allowed=False)
    driver_class = self.servicegroup_api._driver_name_class_mapping['db']
    self.servicegroup_api._driver = importutils.import_object(
        driver_class, db_allowed=False)

    # Some subprocesses only start RPC servers to answer requests.
    if self._is_main_worker() is False:
        LOG.debug("Creating RPC server for service %s", self.topic)
        if self.backdoor_port is not None:
            self.manager.backdoor_port = self.backdoor_port
        target = messaging.Target(topic=self.topic, server=self.host)
        endpoints = [
            self.manager,
            baserpc.BaseRPCAPI(self.manager.service_name,
                               self.backdoor_port)
        ]
        endpoints.extend(self.manager.additional_endpoints)
        serializer = objects_base.NovaObjectSerializer()
        self.rpcserver = rpc.get_server(target, endpoints, serializer)
        self.rpcserver.start()
        return

    # The main subprocess does the initialization and periodic jobs.
    verstr = version.version_string_with_package()
    LOG.audit(_('Starting %(topic)s node (version %(version)s)'),
              {'topic': self.topic, 'version': verstr})
    self.basic_config_check()
    self.manager.init_host()
    self.model_disconnected = False
    ctxt = context.get_admin_context()
    try:
        self.service_ref = self.conductor_api.service_get_by_args(
            ctxt, self.host, self.binary)
        self.service_id = self.service_ref['id']
    except exception.NotFound:
        try:
            self.service_ref = self._create_service_ref(ctxt)
        except (exception.ServiceTopicExists,
                exception.ServiceBinaryExists):
            # NOTE(danms): If we race to create a record with a sibling
            # worker, don't fail here.
            self.service_ref = self.conductor_api.service_get_by_args(
                ctxt, self.host, self.binary)

    self.manager.pre_start_hook()
    self.manager.post_start_hook()

    # Add service to the ServiceGroup membership group.
    LOG.debug("Join ServiceGroup membership for this service %s",
              self.topic)
    self.servicegroup_api.join(self.host, self.topic, self)

    if self.periodic_enable:
        if self.periodic_fuzzy_delay:
            initial_delay = random.randint(0, self.periodic_fuzzy_delay)
        else:
            initial_delay = None

        self.tg.add_dynamic_timer(
            self.periodic_tasks,
            initial_delay=initial_delay,
            periodic_interval_max=self.periodic_interval_max)
def start(self):
    verstr = version.version_string_with_package()
    LOG.info(_LI('Starting %(topic)s node (version %(version)s)'),
             {'topic': self.topic, 'version': verstr})
    self.basic_config_check()
    self.manager.init_host()
    self.model_disconnected = False
    ctxt = context.get_admin_context()
    self.service_ref = objects.Service.get_by_host_and_binary(
        ctxt, self.host, self.binary)
    if self.service_ref:
        _update_service_ref(self.service_ref)
    else:
        try:
            self.service_ref = _create_service_ref(self, ctxt)
        except (exception.ServiceTopicExists,
                exception.ServiceBinaryExists):
            # NOTE(danms): If we race to create a record with a sibling
            # worker, don't fail here.
            self.service_ref = objects.Service.get_by_host_and_binary(
                ctxt, self.host, self.binary)

    self.manager.pre_start_hook()

    if self.backdoor_port is not None:
        self.manager.backdoor_port = self.backdoor_port

    LOG.debug("Creating RPC server for service %s", self.topic)

    target = messaging.Target(topic=self.topic, server=self.host)

    endpoints = [
        self.manager,
        baserpc.BaseRPCAPI(self.manager.service_name, self.backdoor_port)
    ]
    endpoints.extend(self.manager.additional_endpoints)

    serializer = objects_base.NovaObjectSerializer()

    self.rpcserver = rpc.get_server(target, endpoints, serializer)
    self.rpcserver.start()

    self.manager.post_start_hook()

    LOG.debug("Join ServiceGroup membership for this service %s",
              self.topic)
    # Add service to the ServiceGroup membership group.
    self.servicegroup_api.join(self.host, self.topic, self)

    # WRS: Write volatile flag file indicating service has started.
    if CONF.service_enabled_flag:
        volatile_dir = '/var/run/nova'
        if os.path.isdir(volatile_dir):
            flag = "{}/.nova_{}_enabled".format(volatile_dir, self.topic)
            try:
                open(flag, 'w').close()
                LOG.info('service %(topic)s ready', {'topic': self.topic})
            except Exception as e:
                LOG.error(
                    'Cannot create file: %(file)s, error=%(error)s',
                    {'file': flag, 'error': e})

    if self.periodic_enable:
        if self.periodic_fuzzy_delay:
            initial_delay = random.randint(0, self.periodic_fuzzy_delay)
        else:
            initial_delay = None

        # WRS - Do not delay start of non-compute services. This improves
        # responsiveness of no-reboot-patching of controllers by making
        # the nova-scheduler weigher patch audit run immediately.
        if 'compute' not in self.binary:
            initial_delay = None

        self.tg.add_dynamic_timer(self.periodic_tasks,
                                  initial_delay=initial_delay,
                                  periodic_interval_max=
                                  self.periodic_interval_max)