def __init__(self, **kwargs):
    """Initialize the RPC server endpoint with conversation support.

    Delegates construction to ProcessRPCServer, then records the RPC
    conversation type and the participant identity for this endpoint.
    """
    ProcessRPCServer.__init__(self, **kwargs)
    self._conv_type = RPCConversationType()
    # Identify the participant by the process name when one is defined,
    # otherwise fall back to this endpoint's receive name.
    participant_name = self._process.name if hasattr(self._process, 'name') else self._recv_name
    self._participant = Participant(participant_name)
def __init__(self, **kwargs):
    """Initialize the RPC server endpoint with conversation support.

    Delegates to ProcessRPCServer.__init__, then sets up the RPC
    conversation type and the participant identity for this endpoint.
    """
    ProcessRPCServer.__init__(self, **kwargs)
    self._conv_type = RPCConversationType()
    # Use the process name as the participant identity when available;
    # otherwise fall back to this endpoint's receive name.
    if hasattr(self._process, "name"):
        self._participant = Participant(self._process.name)
    else:
        self._participant = Participant(self._recv_name)
def test_create_endpoint(self, mockce):
    """create_endpoint must forward the process, to_name and routing_call."""
    server = ProcessRPCServer(process=sentinel.process)
    server.routing_call = sentinel.rcall

    server.create_endpoint(to_name=sentinel.to_name)

    # Base endpoint factory must receive the routing_call along with the
    # original process and destination name.
    mockce.assert_called_once_with(server,
                                   process=sentinel.process,
                                   to_name=sentinel.to_name,
                                   routing_call=sentinel.rcall)
def _spawn_service_process(self, process_id, name, module, cls, config):
    """
    Spawn a process acting as a service worker.
    Attach to service queue with service definition, attach to service pid.

    Creates the service instance, wires two RPC listeners (the shared
    service queue and a private per-process queue), spawns the ION process,
    runs the init/start lifecycle, then starts the listeners and loads any
    existing access policies for the service.
    """
    service_instance = self._create_service_instance(
        process_id, name, module, cls, config)

    # Listen name comes from config override if present, else the service name.
    listen_name = get_safe(config, "process.listen_name") or service_instance.name
    log.debug("Service Process (%s) listen_name: %s", name, listen_name)
    service_instance._proc_listen_name = listen_name

    # Service RPC endpoint (shared service queue; load-balanced across workers)
    rsvc1 = ProcessRPCServer(node=self.container.node,
                             from_name=listen_name,
                             service=service_instance,
                             process=service_instance)

    # Named local RPC endpoint (private queue addressed by process id)
    rsvc2 = ProcessRPCServer(node=self.container.node,
                             from_name=service_instance.id,
                             service=service_instance,
                             process=service_instance)

    # cleanup method to delete process queue on termination
    cleanup = lambda _: self._cleanup_method(service_instance.id, rsvc2)

    # Start an ION process with the right kind of endpoint factory
    proc = self.proc_sup.spawn(name=service_instance.id,
                               service=service_instance,
                               listeners=[rsvc1, rsvc2],
                               proc_name=service_instance._proc_name,
                               cleanup_method=cleanup)
    self.proc_sup.ensure_ready(
        proc, "_spawn_service_process for %s" % ",".join(
            (listen_name, service_instance.id)))

    # map gproc to service_instance so termination can find it later
    self._spawned_proc_to_process[proc.proc] = service_instance

    # set service's reference to process
    service_instance._process = proc

    # Lifecycle: init and start AFTER the process exists but BEFORE
    # listeners consume messages, so the service is fully initialized
    # before the first request is delivered.
    self._service_init(service_instance)
    self._service_start(service_instance)

    proc.start_listeners()

    # look to load any existing policies for this service
    if self._is_policy_management_service_available(
    ) and self.container.governance_controller:
        self.container.governance_controller.update_service_access_policy(
            service_instance._proc_listen_name)

    return service_instance
def _set_service_endpoint(self, service_instance, listen_name):
    """Attach a service RPC listener on listen_name and block until ready.

    Raises exception.ContainerError if the listener does not report
    ready within 10 seconds.
    """
    service_instance.errcause = "setting process service endpoint"

    # Service RPC endpoint bound to the service queue
    endpoint = ProcessRPCServer(node=self.container.node,
                                name=listen_name,
                                service=service_instance,
                                process=service_instance)

    # Start an ION process with the right kind of endpoint factory;
    # proctype comes from config, defaulting to green(let) processes.
    self.proc_sup.spawn((CFG.cc.proctype or 'green', None),
                        listener=endpoint,
                        name=listen_name)

    ready = endpoint.get_ready_event().wait(timeout=10)
    if not ready:
        raise exception.ContainerError('_set_service_endpoint for listen_name: %s did not report ok' % listen_name)

    log.debug("Process %s service listener ready: %s", service_instance.id, listen_name)
def _spawn_service_process(self, process_id, name, module, cls, config):
    """
    Spawn a process acting as a service worker.
    Attach to service queue with service definition, attach to service pid.

    Runs the service init/start lifecycle first, then spawns the ION
    process with two RPC listeners and registers the service in the
    directory.
    """
    service_instance = self._create_service_instance(
        process_id, name, module, cls, config)
    # NOTE: in this variant init/start happen before the process and
    # listeners are created.
    self._service_init(service_instance)
    self._service_start(service_instance)

    # Listen name comes from config override if present, else the service name.
    listen_name = get_safe(config, "process.listen_name") or service_instance.name
    log.debug("Service Process (%s) listen_name: %s", name, listen_name)

    # Service RPC endpoint (shared service queue)
    rsvc1 = ProcessRPCServer(node=self.container.node,
                             from_name=listen_name,
                             service=service_instance,
                             process=service_instance)

    # Named local RPC endpoint (private queue addressed by process id)
    rsvc2 = ProcessRPCServer(node=self.container.node,
                             from_name=service_instance.id,
                             service=service_instance,
                             process=service_instance)

    # Start an ION process with the right kind of endpoint factory
    proc = self.proc_sup.spawn(name=service_instance.id,
                               service=service_instance,
                               listeners=[rsvc1, rsvc2],
                               proc_name=service_instance._proc_name)
    self.proc_sup.ensure_ready(
        proc, "_spawn_service_process for %s" % ",".join(
            (listen_name, service_instance.id)))

    # map gproc to service_instance so termination can find it later
    self._spawned_proc_to_process[proc.proc] = service_instance

    # set service's reference to process
    service_instance._process = proc

    # Directory registration: service entry plus per-process child entry.
    # register_safe tolerates pre-existing entries.
    self.container.directory.register_safe("/Services", listen_name,
                                           interface=service_instance.name)
    self.container.directory.register_safe("/Services/%s" % listen_name,
                                           service_instance.id)

    return service_instance
def _spawn_standalone_process(self, process_id, name, module, cls, config):
    """
    Spawn a process acting as standalone process.
    Attach to service pid.

    Runs the init/start lifecycle first, then spawns the ION process with
    a single private RPC listener and wires any configured stream
    publishers.
    """
    service_instance = self._create_service_instance(
        process_id, name, module, cls, config)
    # NOTE: in this variant init/start happen before the process and
    # listener are created.
    self._service_init(service_instance)
    self._service_start(service_instance)

    # Private RPC endpoint addressed by the process id
    rsvc = ProcessRPCServer(node=self.container.node,
                            from_name=service_instance.id,
                            service=service_instance,
                            process=service_instance)

    proc = self.proc_sup.spawn(name=service_instance.id,
                               service=service_instance,
                               listeners=[rsvc],
                               proc_name=service_instance._proc_name)
    self.proc_sup.ensure_ready(
        proc, "_spawn_standalone_process for %s" % service_instance.id)

    # map gproc to service_instance so termination can find it later
    self._spawned_proc_to_process[proc.proc] = service_instance

    # set service's reference to process
    service_instance._process = proc

    # Add publishers if any are configured under process.publish_streams
    publish_streams = get_safe(config, "process.publish_streams")
    self._set_publisher_endpoints(service_instance, publish_streams)

    return service_instance
def _spawn_agent_process(self, process_id, name, module, cls, config):
    """
    Spawn a process acting as agent process.
    Attach to service pid.

    Validates the instance is a ResourceAgent, sets resource_id from
    config when present, spawns the ION process with a private RPC
    listener plus queue cleanup, runs init/start, then starts listeners
    and loads any existing resource access policies.

    Raises ContainerConfigError if the process class does not extend
    ResourceAgent.
    """
    service_instance = self._create_service_instance(
        process_id, name, module, cls, config)
    if not isinstance(service_instance, ResourceAgent):
        raise ContainerConfigError(
            "Agent process must extend ResourceAgent")

    # Set the resource ID if we get it through the config
    resource_id = get_safe(service_instance.CFG, "agent.resource_id")
    if resource_id:
        service_instance.resource_id = resource_id

    # Private RPC endpoint addressed by the process id
    rsvc = ProcessRPCServer(node=self.container.node,
                            from_name=service_instance.id,
                            service=service_instance,
                            process=service_instance)

    # cleanup method to delete process queue (@TODO: leaks a bit here - should use XOs)
    cleanup = lambda _: self._cleanup_method(service_instance.id, rsvc)

    proc = self.proc_sup.spawn(name=service_instance.id,
                               service=service_instance,
                               listeners=[rsvc],
                               proc_name=service_instance._proc_name,
                               cleanup_method=cleanup)
    self.proc_sup.ensure_ready(
        proc, "_spawn_agent_process for %s" % service_instance.id)

    # map gproc to service_instance so termination can find it later
    self._spawned_proc_to_process[proc.proc] = service_instance

    # set service's reference to process
    service_instance._process = proc

    # Now call the on_init of the agent.
    self._service_init(service_instance)

    if not service_instance.resource_id:
        log.warn("New agent pid=%s has no resource_id set" % process_id)

    self._service_start(service_instance)

    # Only begin consuming requests after init/start completed.
    proc.start_listeners()

    if service_instance.resource_id:
        # look to load any existing policies for this resource
        if self._is_policy_management_service_available(
        ) and self.container.governance_controller:
            self.container.governance_controller.update_resource_access_policy(
                service_instance.resource_id)
    else:
        log.warn("Agent process id=%s does not define resource_id!!" % service_instance.id)

    return service_instance
def _spawn_agent_process(self, process_id, name, module, cls, config):
    """
    Spawn a process acting as agent process.
    Attach to service pid.

    Validates the instance is a ResourceAgent, runs init/start before the
    process is spawned, then registers the agent with its capabilities in
    the directory.

    Raises ContainerConfigError if the process class does not extend
    ResourceAgent.
    """
    service_instance = self._create_service_instance(
        process_id, name, module, cls, config)
    if not isinstance(service_instance, ResourceAgent):
        raise ContainerConfigError(
            "Agent process must extend ResourceAgent")

    # Set the resource ID if we get it through the config
    resource_id = get_safe(service_instance.CFG, "agent.resource_id")
    if resource_id:
        service_instance.resource_id = resource_id

    # Now call the on_init of the agent.
    self._service_init(service_instance)

    if not service_instance.resource_id:
        log.warn("New agent pid=%s has no resource_id set" % process_id)

    self._service_start(service_instance)

    # Private RPC endpoint addressed by the process id
    rsvc = ProcessRPCServer(node=self.container.node,
                            from_name=service_instance.id,
                            service=service_instance,
                            process=service_instance)

    proc = self.proc_sup.spawn(name=service_instance.id,
                               service=service_instance,
                               listeners=[rsvc],
                               proc_name=service_instance._proc_name)
    self.proc_sup.ensure_ready(
        proc, "_spawn_agent_process for %s" % service_instance.id)

    # map gproc to service_instance so termination can find it later
    self._spawned_proc_to_process[proc.proc] = service_instance

    # set service's reference to process
    service_instance._process = proc

    # Directory registration: publish the agent's metadata and capabilities.
    caps = service_instance.get_capabilities()
    self.container.directory.register(
        "/Agents", service_instance.id,
        **dict(name=service_instance._proc_name,
               container=service_instance.container.id,
               resource_id=service_instance.resource_id,
               agent_id=service_instance.agent_id,
               def_id=service_instance.agent_def_id,
               capabilities=caps))

    if not service_instance.resource_id:
        log.warn("Agent process id=%s does not define resource_id!!" % service_instance.id)

    return service_instance
def _spawn_stream_process(self, process_id, name, module, cls, config):
    """
    Spawn a process acting as a data stream process.
    Attach to subscription queue with process function.

    Sets up a stream subscriber on the configured listen name, wires any
    configured publishers, spawns the ION process with the RPC listener
    and the subscriber, runs init/start, then starts both listeners.
    """
    service_instance = self._create_service_instance(
        process_id, name, module, cls, config)

    # Listen name comes from config override if present, else the process name.
    listen_name = get_safe(config, "process.listen_name") or name
    service_instance._proc_listen_name = listen_name

    service_instance.stream_subscriber_registrar = StreamSubscriberRegistrar(
        process=service_instance, container=self.container)
    # Subscriber endpoint consuming from the stream exchange queue
    sub = service_instance.stream_subscriber_registrar.create_subscriber(
        exchange_name=listen_name)

    # Add publishers if any are configured under process.publish_streams
    publish_streams = get_safe(config, "process.publish_streams")
    self._set_publisher_endpoints(service_instance, publish_streams)

    # Private RPC endpoint addressed by the process id
    rsvc = ProcessRPCServer(node=self.container.node,
                            from_name=service_instance.id,
                            service=service_instance,
                            process=service_instance)

    # cleanup method to delete process queue (@TODO: leaks a bit here - should use XOs)
    cleanup = lambda _: self._cleanup_method(service_instance.id, rsvc)

    proc = self.proc_sup.spawn(name=service_instance.id,
                               service=service_instance,
                               listeners=[rsvc, sub],
                               proc_name=service_instance._proc_name,
                               cleanup_method=cleanup)
    self.proc_sup.ensure_ready(
        proc, "_spawn_stream_process for %s" % service_instance._proc_name)

    # map gproc to service_instance so termination can find it later
    self._spawned_proc_to_process[proc.proc] = service_instance

    # set service's reference to process
    service_instance._process = proc

    # Lifecycle: init/start before listeners consume any messages.
    self._service_init(service_instance)
    self._service_start(service_instance)

    proc.start_listeners()

    return service_instance
def _set_service_endpoint(self, service_instance, listen_name):
    """Attach a service RPC listener on listen_name and wait until ready."""
    service_instance.errcause = "setting process service endpoint"

    # Service RPC endpoint bound to the service queue
    listener = ProcessRPCServer(node=self.container.node,
                                from_name=listen_name,
                                service=service_instance,
                                process=service_instance)

    # Start an ION process with the right kind of endpoint factory;
    # proctype comes from config, defaulting to green(let) processes.
    proc = self.proc_sup.spawn((CFG.cc.proctype or 'green', None),
                               listener=listener,
                               name=listen_name,
                               proc_name=service_instance._proc_name)
    self.proc_sup.ensure_ready(
        proc, "_set_service_endpoint for listen_name: %s" % listen_name)

    # map gproc to service_instance so termination can find it later
    self._spawned_proc_to_process[proc.proc] = service_instance

    log.debug("Process %s service listener ready: %s",
              service_instance.id, listen_name)
def _create_listening_endpoint(self, **kwargs):
    """
    Creates a listening endpoint for spawning processes.

    The endpoint type can be overridden via configuration
    (container.messaging.endpoint.proc_listening_type, a dotted
    "module.ClassName" path). Otherwise a ConversationRPCServer is used
    when rpc_conversation_enabled is set, falling back to a plain
    ProcessRPCServer.

    @param  kwargs  forwarded verbatim to the endpoint constructor
    @retval         the constructed listening endpoint
    """
    eptypestr = CFG.get_safe(
        'container.messaging.endpoint.proc_listening_type', None)
    if eptypestr is not None:
        # Dynamically resolve the configured endpoint class.
        # importlib.import_module is the documented idiomatic replacement
        # for __import__(module, fromlist=[cls]).
        import importlib
        module, cls = eptypestr.rsplit('.', 1)
        eptype = getattr(importlib.import_module(module), cls)
        return eptype(**kwargs)

    conv_enabled = CFG.get_safe(
        'container.messaging.endpoint.rpc_conversation_enabled', False)
    if conv_enabled:
        return ConversationRPCServer(**kwargs)
    return ProcessRPCServer(**kwargs)
def _spawn_standalone_process(self, process_id, name, module, cls, config):
    """
    Spawn a process acting as standalone process.
    Attach to service pid.

    Spawns the ION process with a private RPC listener plus queue
    cleanup, runs init/start, wires any configured publishers, then
    starts the listeners.
    """
    service_instance = self._create_service_instance(
        process_id, name, module, cls, config)

    # Private RPC endpoint addressed by the process id
    rsvc = ProcessRPCServer(node=self.container.node,
                            from_name=service_instance.id,
                            service=service_instance,
                            process=service_instance)

    # cleanup method to delete process queue (@TODO: leaks a bit here - should use XOs)
    cleanup = lambda _: self._cleanup_method(service_instance.id, rsvc)

    proc = self.proc_sup.spawn(name=service_instance.id,
                               service=service_instance,
                               listeners=[rsvc],
                               proc_name=service_instance._proc_name,
                               cleanup_method=cleanup)
    self.proc_sup.ensure_ready(
        proc, "_spawn_standalone_process for %s" % service_instance.id)

    # map gproc to service_instance so termination can find it later
    self._spawned_proc_to_process[proc.proc] = service_instance

    # set service's reference to process
    service_instance._process = proc

    # Lifecycle: init/start before listeners consume any messages.
    self._service_init(service_instance)
    self._service_start(service_instance)

    # Add publishers if any are configured under process.publish_streams
    publish_streams = get_safe(config, "process.publish_streams")
    self._set_publisher_endpoints(service_instance, publish_streams)

    proc.start_listeners()

    return service_instance
def start(self):
    """Start the capability container and all of its managers.

    Brings up container capabilities in strict order (each appended to
    self._capabilities so stop() can unwind them): PID file, signal
    handler, datastore manager, directory registration, event/resource/
    state repositories, exchange manager (broker connection), process,
    app, governance and optional sflow managers, and finally the
    CC-Agent RPC endpoint. Publishes a ContainerLifecycleEvent on
    completion.

    Raises ContainerError if already started or if a PID file from
    another container exists in this UNIX process.
    """
    log.debug("Container starting...")
    if self._is_started:
        raise ContainerError("Container already started")

    # Check if this UNIX process already runs a Container.
    self.pidfile = "cc-pid-%d" % os.getpid()
    if os.path.exists(self.pidfile):
        raise ContainerError("Container.on_start(): Container is a singleton per UNIX process. Existing pid file found: %s" % self.pidfile)

    # write out a PID file containing our agent messaging name
    with open(self.pidfile, 'w') as f:
        pid_contents = {'messaging': dict(CFG.server.amqp),
                        'container-agent': self.name,
                        'container-xp': bootstrap.get_sys_name() }
        f.write(msgpack.dumps(pid_contents))
        atexit.register(self._cleanup_pid)
    self._capabilities.append("PID_FILE")

    # set up abnormal termination handler for this container
    def handl(signum, frame):
        try:
            self._cleanup_pid()     # cleanup the pidfile first
            self.quit()             # now try to quit - will not error on second cleanup pidfile call
        finally:
            # restore the original handler and re-raise SIGTERM so the
            # default termination behavior still happens
            signal.signal(signal.SIGTERM, self._normal_signal)
            os.kill(os.getpid(), signal.SIGTERM)
    self._normal_signal = signal.signal(signal.SIGTERM, handl)

    self.datastore_manager.start()
    self._capabilities.append("DATASTORE_MANAGER")

    # Self-register with Directory
    self.directory.register("/Containers", self.id, cc_agent=self.name)
    self.directory.register("/Containers/%s" % self.id, "Processes")
    self._capabilities.append("DIRECTORY")

    # Event repository
    self.event_repository = EventRepository()
    self.event_pub = EventPublisher()
    self._capabilities.append("EVENT_REPOSITORY")

    # Local resource registry
    self.resource_registry = ResourceRegistry()
    self._capabilities.append("RESOURCE_REGISTRY")

    # Persistent objects datastore
    self.datastore_manager.get_datastore("objects", DataStore.DS_PROFILE.OBJECTS)

    # State repository
    self.state_repository = StateRepository()
    self._capabilities.append("STATE_REPOSITORY")

    # Start ExchangeManager, which starts the node (broker connection)
    self.ex_manager.start()
    self._capabilities.append("EXCHANGE_MANAGER")

    self.proc_manager.start()
    self._capabilities.append("PROC_MANAGER")

    self.app_manager.start()
    self._capabilities.append("APP_MANAGER")

    self.governance_controller.start()
    self._capabilities.append("GOVERNANCE_CONTROLLER")

    # sflow monitoring is optional, enabled via config
    if CFG.container.get('sflow', {}).get('enabled', False):
        self.sflow_manager.start()
        self._capabilities.append("SFLOW_MANAGER")

    # Start the CC-Agent API: RPC endpoint on the container agent's name
    rsvc = ProcessRPCServer(node=self.node,
                            from_name=self.name,
                            service=self,
                            process=self)

    # Start an ION process with the right kind of endpoint factory
    proc = self.proc_manager.proc_sup.spawn(name=self.name,
                                            listeners=[rsvc],
                                            service=self)
    self.proc_manager.proc_sup.ensure_ready(proc)
    self._capabilities.append("CONTAINER_AGENT")

    # Announce the container start to interested listeners
    self.event_pub.publish_event(event_type="ContainerLifecycleEvent",
                                 origin=self.id,
                                 origin_type="CapabilityContainer",
                                 sub_type="START",
                                 state=ContainerStateEnum.START)

    self._is_started = True
    self._status = "RUNNING"

    log.info("Container started, OK.")
def create_endpoint(self, **kwargs):
    """Delegate endpoint creation to the ProcessRPCServer base class."""
    endpoint = ProcessRPCServer.create_endpoint(self, **kwargs)
    return endpoint