def build_service_map(self):
    """
    Adds all known service definitions to the service registry.

    Scans direct subclasses of BaseService and registers, per service name:
    the base class, its parsed JSON schema, its first zope interface, and
    (for classes named Base*) the conventionally-named process client and
    simple client classes.

    @todo: May be a bit fragile due to using BaseService.__subclasses__
    """
    for cls in BaseService.__subclasses__():
        assert hasattr(cls, 'name'), 'Service class must define name value. Service class in error: %s' % cls
        if cls.name:
            self.services_by_name[cls.name] = cls
            self.add_servicedef_entry(cls.name, "base", cls)
            try:
                self.add_servicedef_entry(cls.name, "schema", json.loads(cls.SCHEMA_JSON))
            except Exception:
                # Best-effort: a bad schema is logged with traceback and skipped
                log.exception("Cannot parse service schema " + cls.name)
            interfaces = list(implementedBy(cls))
            if interfaces:
                self.add_servicedef_entry(cls.name, "interface", interfaces[0])
            if cls.__name__.startswith("Base"):
                try:
                    # Clients follow the naming convention <Name>ProcessClient / <Name>Client
                    client = "%s.%sProcessClient" % (cls.__module__, cls.__name__[4:])
                    self.add_servicedef_entry(cls.name, "client", named_any(client))
                    sclient = "%s.%sClient" % (cls.__module__, cls.__name__[4:])
                    self.add_servicedef_entry(cls.name, "simple_client", named_any(sclient))
                except Exception:
                    # Fix: was Python 2-only "except Exception, ex" syntax,
                    # inconsistent with the "as ex" form used above
                    log.warning("Cannot find client for service %s" % (cls.name))
def build_service_map(self):
    """
    Adds all known service definitions to the service registry.

    For every direct BaseService subclass with a name, registers base class,
    JSON schema, zope interface, and (for Base* classes) client classes.

    @todo: May be a bit fragile due to using BaseService.__subclasses__
    """
    for cls in BaseService.__subclasses__():
        assert hasattr(cls, 'name'), 'Service class must define name value. Service class in error: %s' % cls
        if cls.name:
            self.services_by_name[cls.name] = cls
            self.add_servicedef_entry(cls.name, "base", cls)
            try:
                self.add_servicedef_entry(cls.name, "schema", json.loads(cls.SCHEMA_JSON))
            except Exception:
                # Schema parsing is best-effort; log and continue
                log.exception("Cannot parse service schema " + cls.name)
            interfaces = list(implementedBy(cls))
            if interfaces:
                self.add_servicedef_entry(cls.name, "interface", interfaces[0])
            if cls.__name__.startswith("Base"):
                try:
                    # Derive client class names from the Base* service class name
                    client = "%s.%sProcessClient" % (cls.__module__, cls.__name__[4:])
                    self.add_servicedef_entry(cls.name, "client", named_any(client))
                    sclient = "%s.%sClient" % (cls.__module__, cls.__name__[4:])
                    self.add_servicedef_entry(cls.name, "simple_client", named_any(sclient))
                except Exception:
                    # Fix: was Python 2-only "except Exception, ex" syntax
                    log.warning("Cannot find client for service %s" % (cls.name))
def load_service_mods(cls, path):
    """
    Recursively imports all modules under the given directory path.

    Packages are descended into; plain modules are imported via named_any.
    Import failures are logged as warnings and do not abort the scan.
    """
    import pkgutil
    # Fix: use str.replace() instead of the long-deprecated string.replace() function
    mod_prefix = path.replace("/", ".")
    for mod_imp, mod_name, is_pkg in pkgutil.iter_modules([path]):
        if is_pkg:
            cls.load_service_mods(path + "/" + mod_name)
        else:
            mod_qual = "%s.%s" % (mod_prefix, mod_name)
            try:
                named_any(mod_qual)
            except Exception as ex:
                # Fix: was Python 2-only "except Exception, ex" syntax
                log.warning("Import module '%s' failed: %s" % (mod_qual, ex))
def load_service_mods(cls, path):
    """
    Recursively imports every module found under the directory `path`.

    Sub-packages recurse; failed module imports are logged and skipped.
    """
    import pkgutil
    # Fix: str.replace() replaces the deprecated string.replace() function
    mod_prefix = path.replace("/", ".")
    for mod_imp, mod_name, is_pkg in pkgutil.iter_modules([path]):
        if is_pkg:
            cls.load_service_mods(path + "/" + mod_name)
        else:
            mod_qual = "%s.%s" % (mod_prefix, mod_name)
            try:
                named_any(mod_qual)
            except Exception as ex:
                # Fix: was Python 2-only "except Exception, ex" syntax
                log.warning("Import module '%s' failed: %s" % (mod_qual, ex))
def initialize_res_lcsms():
    """
    Initializes resource type lifecycle state machines.

    Reads res/config/resource_lifecycle.yml, rebuilds the registry of
    lifecycle workflow definitions, then maps resource types to workflows.
    """
    res_lifecycle = (Config(["res/config/resource_lifecycle.yml"])).data

    # Initialize the set of available resource lifecycle workflows
    lcs_workflow_defs.clear()
    lcsm_defs = res_lifecycle["LifecycleWorkflowDefinitions"]
    for wf in lcsm_defs:
        wfname = wf['name']
        clsname = wf.get('lcsm_class', None)
        if clsname:
            wf_cls = named_any(clsname)
            lcs_workflow_defs[wfname] = wf_cls(**wf)
        else:
            # Derived workflow: clone an already-registered base definition
            based_on = wf.get('based_on', None)
            wf_base = lcs_workflow_defs[based_on]
            lcs_workflow_defs[wfname] = wf_base._clone_with_restrictions(wf)

    lcs_workflows.clear()

    # Initialize the set of resource types with lifecycle
    # Fix: items() instead of Python 2-only iteritems() (works on both 2 and 3)
    for res_type, wf_name in res_lifecycle["LifecycleResourceTypes"].items():
        lcs_workflows[res_type] = lcs_workflow_defs[wf_name]
def _get_object_class(self, objtype): if objtype in self.obj_classes: return self.obj_classes[objtype] obj_class = named_any("interface.objects.%s" % objtype) self.obj_classes[objtype] = obj_class return obj_class
def load_service_mods(cls, path, package=""):
    """
    Recursively imports all modules under a directory path or package.

    Accepts either a module object (each of its __path__ entries is scanned)
    or a directory path string. Failed imports are logged and skipped.
    """
    if isinstance(path, ModuleType):
        # Module object given: recurse into each of its search-path directories
        for entry in path.__path__:
            cls.load_service_mods(entry, path.__name__)
        return

    import pkgutil
    for _importer, modname, ispkg in pkgutil.iter_modules([path]):
        if not ispkg:
            qualified = "%s.%s" % (package, modname)
            try:
                named_any(qualified)
            except Exception as err:
                log.warning("Import module '%s' failed: %s" % (qualified, err))
        else:
            cls.load_service_mods(path + "/" + modname, package + "." + modname)
def initialize_res_lcsms():
    """
    Initializes resource type lifecycle state machines.

    Rebuilds the workflow definition registry from
    res/config/resource_lifecycle.yml and maps resource types to workflows.
    """
    res_lifecycle = (Config(["res/config/resource_lifecycle.yml"])).data

    # Initialize the set of available resource lifecycle workflows
    lcs_workflow_defs.clear()
    lcsm_defs = res_lifecycle["LifecycleWorkflowDefinitions"]
    for wf in lcsm_defs:
        wfname = wf["name"]
        clsname = wf.get("lcsm_class", None)
        if clsname:
            wf_cls = named_any(clsname)
            lcs_workflow_defs[wfname] = wf_cls(**wf)
        else:
            # Derived workflow: clone a previously registered base definition
            based_on = wf.get("based_on", None)
            wf_base = lcs_workflow_defs[based_on]
            lcs_workflow_defs[wfname] = wf_base._clone_with_restrictions(wf)

    lcs_workflows.clear()

    # Initialize the set of resource types with lifecycle
    # Fix: items() instead of Python 2-only iteritems()
    for res_type, wf_name in res_lifecycle["LifecycleResourceTypes"].items():
        lcs_workflows[res_type] = lcs_workflow_defs[wf_name]
def __init__(self, *args, **kwargs): BaseContainerAgent.__init__(self, *args, **kwargs) # Coordinates the container start self._status = INIT self._is_started = False # set container id and cc_agent name (as they are set in base class call) self.id = get_default_container_id() self.name = "cc_agent_%s" % self.id self.start_time = get_ion_ts() bootstrap.container_instance = self Container.instance = self self.container = self # Make self appear as process to service clients self.CCAP = CCAP self.CFG = CFG log.debug("Container (sysname=%s) initializing ..." % bootstrap.get_sys_name()) # Keep track of the overrides from the command-line, so they can trump app/rel file data self.spawn_args = kwargs # Greenlet context-local storage self.context = LocalContextMixin() # Load general capabilities file and augment with specific profile self._load_capabilities() # Start the capabilities start_order = self.cap_profile['start_order'] for cap in start_order: if cap not in self._cap_definitions: raise ContainerError( "CC capability %s not defined in profile" % cap) if cap in self._capabilities or cap in self._cap_instances: raise ContainerError("CC capability %s already initialized" % cap) try: cap_def = self._cap_definitions[cap] log.debug("__init__(): Initializing '%s'" % cap) cap_obj = named_any(cap_def['class'])(container=self) self._cap_instances[cap] = cap_obj if 'depends_on' in cap_def and cap_def['depends_on']: dep_list = cap_def['depends_on'].split(',') for dep in dep_list: dep = dep.strip() if dep not in self._cap_initialized: raise ContainerError( "CC capability %s dependent on non-existing capability %s" % (cap, dep)) if 'field' in cap_def and cap_def['field']: setattr(self, cap_def['field'], cap_obj) self._cap_initialized.append(cap) except Exception as ex: log.error("Container Capability %s init error: %s" % (cap, ex)) raise log.debug("Container initialized, OK.")
def initialize_res_lcsms():
    """
    Initializes default and special resource type state machines
    @todo. Make dynamic later and maybe move out.
    """
    res_lifecycle = (Config(["res/config/resource_lifecycle.yml"])).data

    # Initialize the set of available resource lifecycle workflows
    lcs_workflow_defs.clear()
    lcsm_defs = res_lifecycle["LifecycleWorkflowDefinitions"]
    for wf in lcsm_defs:
        #print "****** FOUND RES WORKFLOW %s" % (wf)
        wfname = wf['name']
        clsname = wf.get('lcsm_class', None)
        if clsname:
            wf_cls = named_any(clsname)
            lcs_workflow_defs[wfname] = wf_cls(**wf)
        else:
            # Derived workflow: clone a previously registered base definition
            based_on = wf.get('based_on', None)
            wf_base = lcs_workflow_defs[based_on]
            lcs_workflow_defs[wfname] = wf_base._clone_with_restrictions(wf)

    lcs_workflows.clear()

    # Initialize the set of resource types with lifecycle
    # Fix: items() instead of Python 2-only iteritems()
    for res_type, wf_name in res_lifecycle["LifecycleResourceTypes"].items():
        lcs_workflows[res_type] = lcs_workflow_defs[wf_name]
def get_records(self, max_count=MAX_RECORDS_PER_GRANULE):
    """
    Returns up to max_count parsed records, resuming from the last position.

    Iterates self._chunks starting at self._chunk_index; each chunk is parsed
    by a device-type-specific parser class looked up in DEVICE_TYPES.
    self._chunk_index and self._record_index persist the resume position
    between calls, so repeated calls page through all records.
    """
    records = []
    for ch_num, chunk in enumerate(self._chunks):
        if ch_num < self._chunk_index:
            # Chunk fully consumed by an earlier call
            continue
        before_records = len(records)
        dev_type = chunk["dev_type"]
        dev_parser = DEVICE_TYPES[dev_type]
        try:
            clss = named_any(dev_parser)
            chunk_parser = clss(chunk)
            new_records = chunk_parser.get_records(max_count=max_count)
            start_idx = 0
            if ch_num == self._chunk_index and self._record_index > 0:
                # Before we stopped in the middle of a chunk
                new_records = new_records[self._record_index :]
                start_idx = self._record_index
            self._chunk_index = ch_num
            if before_records + len(new_records) > max_count:
                # Chunk overflows the budget: take a partial slice and remember
                # where to resume inside this chunk next call
                records.extend(new_records[: max_count - before_records])
                self._record_index = start_idx + (max_count - before_records)
                break
            else:
                records.extend(new_records)
                if len(records) == max_count:
                    # Budget exactly exhausted at a chunk boundary
                    self._chunk_index += 1
                    self._record_index = 0
                    break
        except Exception as ex:
            # Best-effort: a failing chunk is logged and skipped
            log.warn("Error: %s", ex)
    return records
def initialize_res_lcsms():
    """
    Initializes default and special resource type state machines
    @todo. Make dynamic later and maybe move out.
    """
    res_lifecycle = (Config(["res/config/resource_lifecycle.yml"])).data

    # Initialize the set of available resource lifecycle workflows
    lcs_workflow_defs.clear()
    lcsm_defs = res_lifecycle["LifecycleWorkflowDefinitions"]
    for wf in lcsm_defs:
        #print "****** FOUND RES WORKFLOW %s" % (wf)
        wfname = wf['name']
        clsname = wf.get('lcsm_class', None)
        if clsname:
            wf_cls = named_any(clsname)
            lcs_workflow_defs[wfname] = wf_cls(**wf)
        else:
            # Derived workflow: clone a previously registered base definition
            based_on = wf.get('based_on', None)
            wf_base = lcs_workflow_defs[based_on]
            lcs_workflow_defs[wfname] = wf_base._clone_with_restrictions(wf)

    lcs_workflows.clear()

    # Initialize the set of resource types with lifecycle
    # Fix: items() instead of Python 2-only iteritems()
    for res_type, wf_name in res_lifecycle["LifecycleResourceTypes"].items():
        lcs_workflows[res_type] = lcs_workflow_defs[wf_name]
def get_records(self, max_count=MAX_RECORDS_PER_GRANULE):
    """
    Returns up to max_count parsed records, resuming from the last position.

    Walks self._chunks from self._chunk_index, parsing each chunk with its
    device-type-specific parser (DEVICE_TYPES lookup). self._chunk_index and
    self._record_index carry the resume position across calls.
    """
    records = []
    for ch_num, chunk in enumerate(self._chunks):
        if ch_num < self._chunk_index:
            # Chunk already fully consumed in an earlier call
            continue
        before_records = len(records)
        dev_type = chunk["dev_type"]
        dev_parser = DEVICE_TYPES[dev_type]
        try:
            clss = named_any(dev_parser)
            chunk_parser = clss(chunk)
            new_records = chunk_parser.get_records(max_count=max_count)
            start_idx = 0
            if ch_num == self._chunk_index and self._record_index > 0:
                # Before we stopped in the middle of a chunk
                new_records = new_records[self._record_index:]
                start_idx = self._record_index
            self._chunk_index = ch_num
            if before_records + len(new_records) > max_count:
                # Chunk overflows the budget: take a partial slice and remember
                # the in-chunk resume offset for the next call
                records.extend(new_records[:max_count-before_records])
                self._record_index = start_idx + (max_count - before_records)
                break
            else:
                records.extend(new_records)
                if len(records) == max_count:
                    # Budget exactly exhausted at a chunk boundary
                    self._chunk_index += 1
                    self._record_index = 0
                    break
        except Exception as ex:
            # Best-effort: a failing chunk is logged and skipped
            log.warn("Error: %s", ex)
    return records
def on_init(self):
    """
    Sets up event persistence state: intervals, blacklists, queues,
    greenlet bookkeeping and event processing plugins.
    """
    # Time in between event persists
    self.persist_interval = float(self.CFG.get_safe("process.event_persister.persist_interval", 1.0))

    self.persist_blacklist = self.CFG.get_safe("process.event_persister.persist_blacklist", {})

    # Simple blacklist entries consist of exactly one event_type key
    self._event_type_blacklist = [entry['event_type'] for entry in self.persist_blacklist if entry.get('event_type', None) and len(entry) == 1]
    # All other entries are complex expressions (not yet supported)
    self._complex_blacklist = [entry for entry in self.persist_blacklist if not (entry.get('event_type', None) and len(entry) == 1)]
    if self._complex_blacklist:
        log.warn("EventPersister does not yet support complex blacklist expressions: %s", self._complex_blacklist)

    # Time in between view refreshs
    self.refresh_interval = float(self.CFG.get_safe("process.event_persister.refresh_interval", 60.0))

    # Holds received events FIFO in syncronized queue
    self.event_queue = Queue()

    # Temporarily holds list of events to persist while datastore operation are not yet completed
    # This is where events to persist will remain if datastore operation fails occasionally.
    self.events_to_persist = None

    # Number of unsuccessful attempts to persist in a row
    self.failure_count = 0

    # bookkeeping for greenlet
    self._persist_greenlet = None
    self._terminate_persist = Event()  # when set, exits the persister greenlet
    self._refresh_greenlet = None
    self._terminate_refresh = Event()  # when set, exits the refresher greenlet

    # The event subscriber
    self.event_sub = None

    # Registered event process plugins
    self.process_plugins = {}
    for plugin_name, plugin_cls, plugin_args in PROCESS_PLUGINS:
        try:
            # Plugin classes are resolved by dotted name and instantiated with their args
            plugin = named_any(plugin_cls)(**plugin_args)
            self.process_plugins[plugin_name] = plugin
            log.info("Loaded event processing plugin %s (%s)", plugin_name, plugin_cls)
        except Exception as ex:
            log.error("Cannot instantiate event processing plugin %s (%s): %s", plugin_name, plugin_cls, ex)
def get_datastore(cls, datastore_name=None, variant=DS_BASE, config=None, container=None, profile=None, scope=None):
    """
    Factory: returns a new datastore instance of the configured type.

    Server config is taken from `config` (or the container's CFG). The
    implementation class is selected by server type and variant, then
    instantiated with datastore_name, config, profile and scope.
    Raises BadRequest when no usable config or server type is found.
    """
    #log.info("get_datastore(%s, variant=%s, profile=%s, scope=%s, config=%s)", datastore_name, variant, profile, scope, "")
    # Step 1: Get datastore server config
    if not config and container:
        config = container.CFG
    if config:
        if "container" in config:
            # Full system CFG given - extract the server block from it
            server_cfg = cls.get_server_config(config)
        else:
            # A bare server config dict was given directly
            server_cfg = config
            config = None
    if not server_cfg:
        raise BadRequest("No config available to determine datastore")

    # Step 2: Find type specific implementation class
    if config:
        server_types = get_safe(config, "container.datastore.server_types", None)
        if not server_types:
            # Some tests fudge the CFG - make it more lenient
            #raise BadRequest("Server types not configured!")
            variant_store = cls.get_datastore_class(server_cfg, variant=variant)
        else:
            server_type = server_cfg.get("type", "postgresql")
            type_cfg = server_types.get(server_type, None)
            if not type_cfg:
                raise BadRequest("Server type '%s' not configured!" % server_type)
            variant_store = type_cfg.get(variant, cls.DS_BASE)
    else:
        # Fallback in case a server config was given (NOT NICE)
        variant_store = cls.get_datastore_class(server_cfg, variant=variant)

    # Step 3: Instantiate type specific implementation
    store_class = named_any(variant_store)
    profile = profile or DataStore.DS_PROFILE_MAPPING.get(datastore_name, DataStore.DS_PROFILE.BASIC)
    log.debug("get_datastore(%s, profile=%s, scope=%s, variant=%s) -> %s", datastore_name, profile, scope, variant, store_class.__name__)

    store = store_class(datastore_name=datastore_name, config=server_cfg, profile=profile, scope=scope)
    return store
def get_datastore(cls, datastore_name=None, variant=DS_BASE, config=None, container=None, profile=None, scope=None):
    """
    Factory: returns a new datastore instance of the configured type.

    Resolves the server config (from `config` or the container's CFG),
    selects the implementation class by server type and variant, and
    instantiates it. Raises BadRequest on missing config or server type.
    """
    #log.info("get_datastore(%s, variant=%s, profile=%s, scope=%s, config=%s)", datastore_name, variant, profile, scope, "")
    # Step 1: Get datastore server config
    if not config and container:
        config = container.CFG
    if config:
        if "container" in config:
            # Full system CFG given - extract the server block from it
            server_cfg = cls.get_server_config(config)
        else:
            # A bare server config dict was given directly
            server_cfg = config
            config = None
    if not server_cfg:
        raise BadRequest("No config available to determine datastore")

    # Step 2: Find type specific implementation class
    if config:
        server_types = get_safe(config, "container.datastore.server_types", None)
        if not server_types:
            # Some tests fudge the CFG - make it more lenient
            #raise BadRequest("Server types not configured!")
            variant_store = cls.get_datastore_class(server_cfg, variant=variant)
        else:
            server_type = server_cfg.get("type", "postgresql")
            type_cfg = server_types.get(server_type, None)
            if not type_cfg:
                raise BadRequest("Server type '%s' not configured!" % server_type)
            variant_store = type_cfg.get(variant, cls.DS_BASE)
    else:
        # Fallback in case a server config was given (NOT NICE)
        variant_store = cls.get_datastore_class(server_cfg, variant=variant)

    # Step 3: Instantiate type specific implementation
    store_class = named_any(variant_store)
    profile = profile or DataStore.DS_PROFILE_MAPPING.get(datastore_name, DataStore.DS_PROFILE.BASIC)
    log.debug("get_datastore(%s, profile=%s, scope=%s, variant=%s) -> %s", datastore_name, profile, scope, variant, store_class.__name__)

    store = store_class(datastore_name=datastore_name, config=server_cfg, profile=profile, scope=scope)
    return store
def _call_target(self, target, value=None, resource_id=None, res_type=None, cmd_args=None, cmd_kwargs=None): """ Makes a call to a specified function. Function specification can be of varying type. """ try: if not target: return None match = re.match( "(func|serviceop):([\w.]+)\s*\(\s*([\w,$\s]*)\s*\)\s*(?:->\s*([\w\.]+))?\s*$", target) if match: func_type, func_name, func_args, res_path = match.groups() func = None if func_type == "func": if func_name.startswith("self."): func = getattr(self, func_name[5:]) else: func = named_any(func_name) elif func_type == "serviceop": svc_name, svc_op = func_name.split('.', 1) try: svc_client_cls = get_service_registry( ).get_service_by_name(svc_name).client except Exception as ex: log.error("No service client found for service: %s", svc_name) else: svc_client = svc_client_cls(process=self) func = getattr(svc_client, svc_op) if not func: return None args = self._get_call_args(func_args, resource_id, res_type, value, cmd_args) kwargs = {} if not cmd_kwargs else cmd_kwargs func_res = func(*args, **kwargs) log.info("Function %s result: %s", func, func_res) if res_path and isinstance(func_res, dict): func_res = get_safe(func_res, res_path, None) return func_res else: log.error("Unknown call target expression: %s", target) except Exception as ex: log.exception("_call_target exception") return None
def __init__(self, *args, **kwargs): BaseContainerAgent.__init__(self, *args, **kwargs) # Coordinates the container start self._status = INIT self._is_started = False # set container id and cc_agent name (as they are set in base class call) self.id = get_default_container_id() self.name = "cc_agent_%s" % self.id bootstrap.container_instance = self Container.instance = self self.container = self # Make self appear as process to service clients self.CCAP = CCAP self.CFG = CFG log.debug("Container (sysname=%s) initializing ..." % bootstrap.get_sys_name()) # Keep track of the overrides from the command-line, so they can trump app/rel file data self.spawn_args = kwargs # Greenlet context-local storage self.context = LocalContextMixin() # Load general capabilities file and augment with specific profile self._load_capabilities() # Start the capabilities start_order = self.cap_profile['start_order'] for cap in start_order: if cap not in self._cap_definitions: raise ContainerError("CC capability %s not defined in profile" % cap) if cap in self._capabilities or cap in self._cap_instances: raise ContainerError("CC capability %s already initialized" % cap) try: cap_def = self._cap_definitions[cap] log.debug("__init__(): Initializing '%s'" % cap) cap_obj = named_any(cap_def['class'])(container=self) self._cap_instances[cap] = cap_obj if 'depends_on' in cap_def and cap_def['depends_on']: dep_list = cap_def['depends_on'].split(',') for dep in dep_list: dep = dep.strip() if dep not in self._cap_initialized: raise ContainerError("CC capability %s dependent on non-existing capability %s" % (cap, dep)) if 'field' in cap_def and cap_def['field']: setattr(self, cap_def['field'], cap_obj) self._cap_initialized.append(cap) except Exception as ex: log.error("Container Capability %s init error: %s" % (cap, ex)) raise log.debug("Container initialized, OK.")
def spawn_process(self, name=None, module=None, cls=None, config=None):
    """
    Spawn a process within the container. Processes can be of different type.

    The process type comes from config "process.type" or the target class's
    process_type attribute, and is dispatched to the matching _spawn_* helper.
    Returns the id of the spawned process; re-raises spawn errors after logging.
    """
    # Generate a new process id
    # TODO: Ensure it is system-wide unique
    process_id = "%s.%s" % (self.container.id, self.proc_id_pool.get_id())
    log.debug("ProcManager.spawn_process(name=%s, module.cls=%s.%s) as pid=%s", name, module, cls, process_id)

    if config is None:
        config = DictModifier(CFG)
    log.debug("spawn_process() pid=%s config=%s", process_id, config)

    # PROCESS TYPE. Determines basic process context (messaging, service interface)
    # One of: service, stream_process, agent, simple, immediate
    service_cls = named_any("%s.%s" % (module, cls))
    process_type = config.get("process", {}).get("type", None) or getattr(service_cls, "process_type", "service")

    service_instance = None
    try:
        # spawn service by type
        if process_type == "service":
            service_instance = self._spawn_service_process(process_id, name, module, cls, config)
        elif process_type == "stream_process":
            service_instance = self._spawn_stream_process(process_id, name, module, cls, config)
        elif process_type == "agent":
            service_instance = self._spawn_agent_process(process_id, name, module, cls, config)
        elif process_type == "standalone":
            service_instance = self._spawn_standalone_process(process_id, name, module, cls, config)
        elif process_type == "immediate":
            service_instance = self._spawn_immediate_process(process_id, name, module, cls, config)
        elif process_type == "simple":
            service_instance = self._spawn_simple_process(process_id, name, module, cls, config)
        else:
            raise Exception("Unknown process type: %s" % process_type)

        service_instance._proc_type = process_type
        self._register_process(service_instance, name)

        service_instance.errcause = "OK"
        # Fix: success log wrongly said "AppManager.spawn_process"; this is
        # ProcManager (matches the debug log at the top of this method)
        log.info("ProcManager.spawn_process: %s.%s -> pid=%s OK" % (module, cls, process_id))
        return service_instance.id
    except Exception:
        errcause = service_instance.errcause if service_instance else "instantiating service"
        log.exception("Error spawning %s %s process (process_id: %s): %s" % (name, process_type, process_id, errcause))
        raise
def initialize_from_config(self, config):
    """Set up governance components and instantiate all configured interceptors."""
    self.governance_dispatcher = GovernanceDispatcher()
    self.policy_decision_point_manager = PolicyDecisionPointManager(self)

    self.interceptor_order = config.get('interceptor_order', None) or []

    # Instantiate each configured governance interceptor by its dotted class name
    interceptor_defs = config.get('governance_interceptors', None) or {}
    for interceptor_name in interceptor_defs:
        definition = interceptor_defs[interceptor_name]
        self.interceptor_by_name_dict[interceptor_name] = named_any(definition["class"])()
def _call_target(self, target, value=None, resource_id=None, res_type=None, cmd_args=None, cmd_kwargs=None): """ Makes a call to a specified function. Function specification can be of varying type. """ try: if not target: return None match = re.match("(func|serviceop):([\w.]+)\s*\(\s*([\w,$\s]*)\s*\)\s*(?:->\s*([\w\.]+))?\s*$", target) if match: func_type, func_name, func_args, res_path = match.groups() func = None if func_type == "func": if func_name.startswith("self."): func = getattr(self, func_name[5:]) else: func = named_any(func_name) elif func_type == "serviceop": svc_name, svc_op = func_name.split('.', 1) try: svc_client_cls = get_service_registry().get_service_by_name(svc_name).client except Exception as ex: log.error("No service client found for service: %s", svc_name) else: svc_client = svc_client_cls(process=self) func = getattr(svc_client, svc_op) if not func: return None args = self._get_call_args(func_args, resource_id, res_type, value, cmd_args) kwargs = {} if not cmd_kwargs else cmd_kwargs func_res = func(*args, **kwargs) log.info("Function %s result: %s", func, func_res) if res_path and isinstance(func_res, dict): func_res = get_safe(func_res, res_path, None) return func_res else: log.error("Unknown call target expression: %s", target) except Unauthorized as ex: # No need to log as this is not an application error, however, need to pass on the exception because # when called by the Service Gateway, the error message in the exception is required raise ex except Exception as ex: log.exception("_call_target exception") raise ex #Should to pass it back because when called by the Service Gateway, the error message in the exception is required
def _load_StreamDefinition(self, row):
    """Create a StreamDefinition resource from a preload row and register its id."""
    log.info("Loading StreamDefinition")
    res_obj = self._create_object_from_row("StreamDefinition", row, "sdef/")
    log.info("StreamDefinition: %s" % res_obj)

    # The stream container comes from a creator function named in the row
    creator_func = named_any("%s.%s" % (row["StreamContainer_module"], row["StreamContainer_method"]))
    sd_container = creator_func()

    svc_client = self._get_service_client("pubsub_management")
    res_id = svc_client.create_stream_definition(container=sd_container, name=res_obj.name, description=res_obj.description)
    self._register_id(row[self.COL_ID], res_id)
def _call_target(self, target, value=None, resource_id=None, res_type=None, cmd_args=None, cmd_kwargs=None): """ Makes a call to a specified function. Function specification can be of varying type. """ try: if not target: return None match = re.match("(func|serviceop):([\w.]+)\s*\(\s*([\w,$\s]*)\s*\)\s*(?:->\s*([\w\.]+))?\s*$", target) if match: func_type, func_name, func_args, res_path = match.groups() func = None if func_type == "func": if func_name.startswith("self."): func = getattr(self, func_name[5:]) else: func = named_any(func_name) elif func_type == "serviceop": svc_name, svc_op = func_name.split(".", 1) try: svc_client_cls = get_service_registry().get_service_by_name(svc_name).client except Exception as ex: log.error("No service client found for service: %s", svc_name) else: svc_client = svc_client_cls(process=self) func = getattr(svc_client, svc_op) if not func: return None args = self._get_call_args(func_args, resource_id, res_type, value, cmd_args) kwargs = {} if not cmd_kwargs else cmd_kwargs func_res = func(*args, **kwargs) log.info("Function %s result: %s", func, func_res) if res_path and isinstance(func_res, dict): func_res = get_safe(func_res, res_path, None) return func_res else: log.error("Unknown call target expression: %s", target) except Exception as ex: log.exception("_call_target exception") return None
def start_app(self, appdef=None, config=None):
    """
    @brief Start an app from an app definition.
    Note: apps can come in one of 2 variants:
    1 processapp: In-line defined process to be started
    2 regular app: Full app definition
    Start failures are logged via log.exception and swallowed (best-effort).
    """
    log.debug("AppManager.start_app(appdef=%s) ..." % appdef)
    appdef = DotDict(appdef)

    if 'config' in appdef:
        # App-level config is the base; the passed-in config overrides it
        app_cfg = deepcopy(appdef.config)
        if config:
            dict_merge(app_cfg, config, inplace=True)
        config = app_cfg

    if 'processapp' in appdef:
        # Case 1: Appdef contains definition of process to start
        name, module, cls = appdef.processapp
        try:
            pid = self.container.spawn_process(name, module, cls, config)
            appdef._pid = pid
            self.apps.append(appdef)
        except Exception:
            log.exception("Appl %s start from processapp failed" % appdef.name)
    else:
        # Case 2: Appdef contains full app start params
        modpath = appdef.mod
        try:
            mod = named_any(modpath)
            appdef._mod_loaded = mod

            # Start the app
            supid, state = mod.start(self.container, START_PERMANENT, appdef, config)
            appdef._supid = supid
            appdef._state = state

            log.debug("App '%s' started. Root sup-id=%s" % (appdef.name, supid))
            self.apps.append(appdef)
        except Exception:
            log.exception("Appl %s start from appdef failed" % appdef.name)
def _load_StreamDefinition(self, row):
    """Build a StreamDefinition from a preload row, create it via pubsub, register the id."""
    log.info("Loading StreamDefinition")
    res_obj = self._create_object_from_row("StreamDefinition", row, "sdef/")
    log.info("StreamDefinition: %s" % res_obj)

    # Resolve and invoke the creator function named in the row columns
    module_name = row["StreamContainer_module"]
    method_name = row["StreamContainer_method"]
    sd_container = named_any("%s.%s" % (module_name, method_name))()

    pubsub_client = self._get_service_client("pubsub_management")
    res_id = pubsub_client.create_stream_definition(container=sd_container, name=res_obj.name, description=res_obj.description)
    self._register_id(row[self.COL_ID], res_id)
def on_init(self):
    """
    Sets up event persistence state: intervals, blacklists, queues,
    greenlet bookkeeping and event processing plugins.
    """
    # Time in between event persists
    self.persist_interval = float(self.CFG.get_safe("process.event_persister.persist_interval", 1.0))

    self.persist_blacklist = self.CFG.get_safe("process.event_persister.persist_blacklist", {})

    # Simple blacklist entries consist of exactly one event_type key
    self._event_type_blacklist = [entry['event_type'] for entry in self.persist_blacklist if entry.get('event_type', None) and len(entry) == 1]
    # All other entries are complex expressions (not yet supported)
    self._complex_blacklist = [entry for entry in self.persist_blacklist if not (entry.get('event_type', None) and len(entry) == 1)]
    if self._complex_blacklist:
        log.warn("EventPersister does not yet support complex blacklist expressions: %s", self._complex_blacklist)

    # Time in between view refreshs
    self.refresh_interval = float(self.CFG.get_safe("process.event_persister.refresh_interval", 60.0))

    # Holds received events FIFO in syncronized queue
    self.event_queue = Queue()

    # Temporarily holds list of events to persist while datastore operation are not yet completed
    # This is where events to persist will remain if datastore operation fails occasionally.
    self.events_to_persist = None

    # Number of unsuccessful attempts to persist in a row
    self.failure_count = 0

    # bookkeeping for greenlet
    self._persist_greenlet = None
    self._terminate_persist = Event()  # when set, exits the persister greenlet
    self._refresh_greenlet = None
    self._terminate_refresh = Event()  # when set, exits the refresher greenlet

    # The event subscriber
    self.event_sub = None

    # Registered event process plugins
    self.process_plugins = {}
    for plugin_name, plugin_cls, plugin_args in PROCESS_PLUGINS:
        try:
            # Plugin classes are resolved by dotted name and instantiated with their args
            plugin = named_any(plugin_cls)(**plugin_args)
            self.process_plugins[plugin_name]= plugin
            log.info("Loaded event processing plugin %s (%s)", plugin_name, plugin_cls)
        except Exception as ex:
            log.error("Cannot instantiate event processing plugin %s (%s): %s", plugin_name, plugin_cls, ex)
def on_init(self):
    """
    Reads all web/UI server configuration, performs one-time Flask setup
    (secret key, socket.io, service gateway, extensions) and starts the
    server if enabled in config.
    """
    # Retain a pointer to this object for use in routes
    global ui_instance
    ui_instance = self

    # Main references to basic components (if initialized)
    self.http_server = None
    self.socket_io = None
    self.service_gateway = None
    self.oauth = oauth

    # Configuration
    self.server_enabled = self.CFG.get_safe(CFG_PREFIX + ".server.enabled") is True
    self.server_debug = self.CFG.get_safe(CFG_PREFIX + ".server.debug") is True
    # Note: this may be the empty string. Using localhost does not make the server publicly accessible
    self.server_hostname = self.CFG.get_safe(CFG_PREFIX + ".server.hostname", DEFAULT_WEB_SERVER_HOSTNAME)
    self.server_port = self.CFG.get_safe(CFG_PREFIX + ".server.port", DEFAULT_WEB_SERVER_PORT)
    self.server_log_access = self.CFG.get_safe(CFG_PREFIX + ".server.log_access") is True
    self.server_log_errors = self.CFG.get_safe(CFG_PREFIX + ".server.log_errors") is True
    self.server_socket_io = self.CFG.get_safe(CFG_PREFIX + ".server.socket_io") is True
    self.server_secret = self.CFG.get_safe(CFG_PREFIX + ".security.secret") or ""
    self.session_timeout = int(self.CFG.get_safe(CFG_PREFIX + ".security.session_timeout") or DEFAULT_SESSION_TIMEOUT)
    self.extend_session_timeout = self.CFG.get_safe(CFG_PREFIX + ".security.extend_session_timeout") is True
    self.max_session_validity = int(self.CFG.get_safe(CFG_PREFIX + ".security.max_session_validity") or DEFAULT_SESSION_TIMEOUT)
    self.remember_user = self.CFG.get_safe(CFG_PREFIX + ".security.remember_user") is True
    self.set_cors_headers = self.CFG.get_safe(CFG_PREFIX + ".server.set_cors") is True
    self.develop_mode = self.CFG.get_safe(CFG_PREFIX + ".server.develop_mode") is True
    self.oauth_enabled = self.CFG.get_safe(CFG_PREFIX + ".oauth.enabled") is True
    self.oauth_scope = self.CFG.get_safe(CFG_PREFIX + ".oauth.default_scope") or "scioncc"
    self.has_service_gateway = self.CFG.get_safe(CFG_PREFIX + ".service_gateway.enabled") is True
    self.service_gateway_prefix = self.CFG.get_safe(CFG_PREFIX + ".service_gateway.url_prefix", DEFAULT_GATEWAY_PREFIX)
    self.extensions = self.CFG.get_safe(CFG_PREFIX + ".extensions") or []
    self.extension_objs = []

    # TODO: What about https?
    self.base_url = "http://%s:%s" % (self.server_hostname or "localhost", self.server_port)
    self.gateway_base_url = None

    self.idm_client = IdentityManagementServiceProcessClient(process=self)

    # One time setup
    if self.server_enabled:
        app.secret_key = self.server_secret or self.__class__.__name__  # Enables encrypted session cookies

        if self.server_debug:
            app.debug = True

        if self.server_socket_io:
            self.socket_io = SocketIO(app)

        if self.has_service_gateway:
            from ion.services.service_gateway import ServiceGateway, sg_blueprint
            self.gateway_base_url = self.base_url + self.service_gateway_prefix
            self.service_gateway = ServiceGateway(process=self, config=self.CFG, response_class=app.response_class)
            app.register_blueprint(sg_blueprint, url_prefix=self.service_gateway_prefix)

        # Instantiate each configured UI extension by dotted class name
        for ext_cls in self.extensions:
            try:
                cls = named_any(ext_cls)
            except AttributeError as ae:
                # Try to nail down the error
                import importlib
                importlib.import_module(ext_cls.rsplit(".", 1)[0])
                raise
            self.extension_objs.append(cls())
        for ext_obj in self.extension_objs:
            ext_obj.on_init(self, app)
        if self.extensions:
            log.info("UI Server: %s extensions initialized", len(self.extensions))

        # Start the web server
        self.start_service()

        log.info("UI Server: Started server on %s" % self.base_url)
    else:
        log.warn("UI Server: Server disabled in config")
def on_init(self):
    """
    Initialize the UI server process: read server/security/OAuth/gateway settings
    from container config, prepare the Flask app and extensions, and start the
    web server if enabled in config.
    """
    # Retain a pointer to this object for use in routes
    global ui_instance
    ui_instance = self

    # Main references to basic components (if initialized)
    self.http_server = None
    self.socket_io = None
    self.service_gateway = None
    self.oauth = oauth

    # Configuration
    self.server_enabled = self.CFG.get_safe(CFG_PREFIX + ".server.enabled") is True
    self.server_debug = self.CFG.get_safe(CFG_PREFIX + ".server.debug") is True
    # Note: this may be the empty string. Using localhost does not make the server publicly accessible
    self.server_hostname = self.CFG.get_safe(CFG_PREFIX + ".server.hostname", DEFAULT_WEB_SERVER_HOSTNAME)
    self.server_port = self.CFG.get_safe(CFG_PREFIX + ".server.port", DEFAULT_WEB_SERVER_PORT)
    self.server_log_access = self.CFG.get_safe(CFG_PREFIX + ".server.log_access") is True
    self.server_log_errors = self.CFG.get_safe(CFG_PREFIX + ".server.log_errors") is True
    self.server_socket_io = self.CFG.get_safe(CFG_PREFIX + ".server.socket_io") is True
    self.server_secret = self.CFG.get_safe(CFG_PREFIX + ".security.secret") or ""
    self.session_timeout = int(self.CFG.get_safe(CFG_PREFIX + ".security.session_timeout") or DEFAULT_SESSION_TIMEOUT)
    self.extend_session_timeout = self.CFG.get_safe(CFG_PREFIX + ".security.extend_session_timeout") is True
    self.max_session_validity = int(self.CFG.get_safe(CFG_PREFIX + ".security.max_session_validity") or DEFAULT_SESSION_TIMEOUT)
    self.remember_user = self.CFG.get_safe(CFG_PREFIX + ".security.remember_user") is True
    self.set_cors_headers = self.CFG.get_safe(CFG_PREFIX + ".server.set_cors") is True
    self.develop_mode = self.CFG.get_safe(CFG_PREFIX + ".server.develop_mode") is True
    self.oauth_enabled = self.CFG.get_safe(CFG_PREFIX + ".oauth.enabled") is True
    self.oauth_scope = self.CFG.get_safe(CFG_PREFIX + ".oauth.default_scope") or "scioncc"
    self.has_service_gateway = self.CFG.get_safe(CFG_PREFIX + ".service_gateway.enabled") is True
    self.service_gateway_prefix = self.CFG.get_safe(CFG_PREFIX + ".service_gateway.url_prefix", DEFAULT_GATEWAY_PREFIX)
    self.extensions = self.CFG.get_safe(CFG_PREFIX + ".extensions") or []
    self.extension_objs = []

    # TODO: What about https?
    self.base_url = "http://%s:%s" % (self.server_hostname or "localhost", self.server_port)
    self.gateway_base_url = None

    self.idm_client = IdentityManagementServiceProcessClient(process=self)

    # One time setup
    if self.server_enabled:
        app.secret_key = self.server_secret or self.__class__.__name__  # Enables encrypted session cookies

        if self.server_debug:
            app.debug = True

        if self.server_socket_io:
            self.socket_io = SocketIO(app)

        if self.has_service_gateway:
            # Deferred import so the gateway machinery only loads when enabled
            from ion.service.service_gateway import ServiceGateway, sg_blueprint
            self.gateway_base_url = self.base_url + self.service_gateway_prefix
            self.service_gateway = ServiceGateway(process=self, config=self.CFG, response_class=app.response_class)
            app.register_blueprint(sg_blueprint, url_prefix=self.service_gateway_prefix)

        for ext_cls in self.extensions:
            try:
                cls = named_any(ext_cls)
            except AttributeError:
                # Try to nail down the error: importing the module alone surfaces
                # the underlying import problem before re-raising
                import importlib
                importlib.import_module(ext_cls.rsplit(".", 1)[0])
                raise
            self.extension_objs.append(cls())

        for ext_obj in self.extension_objs:
            ext_obj.on_init(self, app)
        if self.extensions:
            log.info("UI Server: %s extensions initialized", len(self.extensions))

        # Start the web server
        self.start_service()

        log.info("UI Server: Started server on %s", self.base_url)

    else:
        log.warn("UI Server: Server disabled in config")
def spawn_process(self, name=None, module=None, cls=None, config=None, process_id=None):
    """
    Spawn a process within the container. Processes can be of different type.

    @param name        Process name (used for registration)
    @param module      Dotted module path of the process class
    @param cls         Class name of the process within module
    @param config      Optional dict/DotDict merged over the system CFG; may carry a
                       process.config_ref reference to enhance the config from a resource
    @param process_id  Optional explicit process id; generated if not given
    @retval Process id of the spawned process, or None on IonProcessError
    @raise BadRequest  for invalid process_id, bad config_ref or unknown process type
    """
    if process_id and not is_valid_identifier(process_id, ws_sub='_'):
        raise BadRequest("Given process_id %s is not a valid identifier" % process_id)

    # Generate a new process id if not provided
    # TODO: Ensure it is system-wide unique
    process_id = process_id or "%s.%s" % (self.container.id, self.proc_id_pool.get_id())
    log.debug("ProcManager.spawn_process(name=%s, module.cls=%s.%s, config=%s) as pid=%s",
              name, module, cls, config, process_id)

    process_cfg = deepcopy(CFG)
    if config:
        # Use provided config. Must be dict or DotDict
        if not isinstance(config, DotDict):
            config = DotDict(config)
        if config.get_safe("process.config_ref"):
            # Use a reference of the form "<type>:<id>/<attribute>" to pull extra
            # config from a registry object before applying the provided config
            config_ref = config.get_safe("process.config_ref")
            log.info("Enhancing new process spawn config from ref=%s", config_ref)
            matches = re.match(r'^([A-Za-z]+):([A-Za-z0-9]+)/(.+)$', config_ref)
            if matches:
                ref_type, ref_id, ref_ext = matches.groups()
                if ref_type == "resources":
                    if self.container.has_capability(self.container.CCAP.RESOURCE_REGISTRY):
                        try:
                            obj = self.container.resource_registry.read(ref_id)
                            if obj and hasattr(obj, ref_ext):
                                ref_config = getattr(obj, ref_ext)
                                if isinstance(ref_config, dict):
                                    dict_merge(process_cfg, ref_config, inplace=True)
                                else:
                                    raise BadRequest("config_ref %s exists but not dict" % config_ref)
                            else:
                                raise BadRequest("config_ref %s - attribute not found" % config_ref)
                        except NotFound:
                            log.warn("config_ref %s - object not found", config_ref)
                            raise
                    else:
                        log.error("Container missing RESOURCE_REGISTRY capability to resolve process config ref %s", config_ref)
                else:
                    raise BadRequest("Unknown reference type in: %s" % config_ref)

        dict_merge(process_cfg, config, inplace=True)
    if self.container.spawn_args:
        # Override config with spawn args
        dict_merge(process_cfg, self.container.spawn_args, inplace=True)

    #log.debug("spawn_process() pid=%s process_cfg=%s", process_id, process_cfg)

    # PROCESS TYPE. Determines basic process context (messaging, service interface)
    # One of the constants defined at the top of this file
    service_cls = named_any("%s.%s" % (module, cls))
    process_type = get_safe(process_cfg, "process.type") or getattr(service_cls, "process_type", "service")

    process_instance = None
    # alert we have a spawning process, but we don't have the instance yet, so give the class instead (more accurate than name)
    self._call_proc_state_changed("%s.%s" % (module, cls), ProcessStateEnum.PENDING)

    try:
        # spawn service by type
        if process_type == SERVICE_PROCESS_TYPE:
            process_instance = self._spawn_service_process(process_id, name, module, cls, process_cfg)
        elif process_type == STREAM_PROCESS_TYPE:
            process_instance = self._spawn_stream_process(process_id, name, module, cls, process_cfg)
        elif process_type == AGENT_PROCESS_TYPE:
            process_instance = self._spawn_agent_process(process_id, name, module, cls, process_cfg)
        elif process_type == STANDALONE_PROCESS_TYPE:
            process_instance = self._spawn_standalone_process(process_id, name, module, cls, process_cfg)
        elif process_type == IMMEDIATE_PROCESS_TYPE:
            process_instance = self._spawn_immediate_process(process_id, name, module, cls, process_cfg)
        elif process_type == SIMPLE_PROCESS_TYPE:
            process_instance = self._spawn_simple_process(process_id, name, module, cls, process_cfg)
        else:
            raise BadRequest("Unknown process type: %s" % process_type)

        process_instance._proc_type = process_type
        self._register_process(process_instance, name)

        process_instance.errcause = "OK"
        log.info("ProcManager.spawn_process: %s.%s -> pid=%s OK", module, cls, process_id)

        if process_type == IMMEDIATE_PROCESS_TYPE:
            log.info('Terminating immediate process: %s', process_instance.id)
            self.terminate_process(process_instance.id)

            # terminate process also triggers TERMINATING/TERMINATED
            self._call_proc_state_changed(process_instance, ProcessStateEnum.EXITED)
        else:
            # Update local policies for the new process
            if self.container.has_capability(self.container.CCAP.GOVERNANCE_CONTROLLER):
                self.container.governance_controller.update_container_policies(process_instance, safe_mode=True)

        return process_instance.id

    except IonProcessError:
        errcause = process_instance.errcause if process_instance else "instantiating process"
        log.exception("Error spawning %s %s process (process_id: %s): %s", name, process_type, process_id, errcause)
        return None

    except Exception:
        errcause = process_instance.errcause if process_instance else "instantiating process"
        log.exception("Error spawning %s %s process (process_id: %s): %s", name, process_type, process_id, errcause)

        # trigger failed notification - catches problems in init/start
        self._call_proc_state_changed(process_instance, ProcessStateEnum.FAILED)

        raise
app_config = DictModifier(app_config, config) if 'processapp' in appdef: # Case 1: Appdef contains definition of process to start name, module, cls = appdef.processapp try: pid = self.container.spawn_process(name, module, cls, app_config) appdef._pid = pid self.apps.append(appdef) except Exception, ex: log.exception("Appl %s start from processapp failed" % appdef.name) else: # Case 2: Appdef contains full app start params modpath = appdef.mod try: mod = named_any(modpath) appdef._mod_loaded = mod # Start the app supid, state = mod.start(self.container, START_PERMANENT, appdef, config) appdef._supid = supid appdef._state = state log.debug("App '%s' started. Root sup-id=%s" % (appdef.name, supid)) self.apps.append(appdef) except Exception, ex: log.exception("Appl %s start from appdef failed" % appdef.name) def stop_app(self, appdef): log.debug("App '%s' stopping" % appdef.name)
def spawn_process(self, name=None, module=None, cls=None, config=None, process_id=None):
    """
    Spawn a process within the container. Processes can be of different type.

    @param name        Process name (used for registration)
    @param module      Dotted module path of the process class
    @param cls         Class name of the process within module
    @param config      Optional dict/DotDict merged over the system CFG
    @param process_id  Optional explicit process id; generated if not given
    @retval Process id of the spawned process, or None on IonProcessError
    @raise BadRequest  for invalid process_id or unknown process type
    """
    if process_id and not is_valid_identifier(process_id, ws_sub='_'):
        raise BadRequest("Given process_id %s is not a valid identifier" % process_id)

    # Generate a new process id if not provided
    # TODO: Ensure it is system-wide unique
    process_id = process_id or "%s.%s" % (self.container.id, self.proc_id_pool.get_id())
    log.debug("ProcManager.spawn_process(name=%s, module.cls=%s.%s, config=%s) as pid=%s",
              name, module, cls, config, process_id)

    process_cfg = deepcopy(CFG)
    if config:
        # Use provided config. Must be dict or DotDict
        if not isinstance(config, DotDict):
            config = DotDict(config)
        dict_merge(process_cfg, config, inplace=True)
    if self.container.spawn_args:
        # Override config with spawn args
        dict_merge(process_cfg, self.container.spawn_args, inplace=True)

    #log.debug("spawn_process() pid=%s process_cfg=%s", process_id, process_cfg)

    # PROCESS TYPE. Determines basic process context (messaging, service interface)
    # One of: service, stream_process, agent, simple, immediate
    service_cls = named_any("%s.%s" % (module, cls))
    process_type = get_safe(process_cfg, "process.type") or getattr(service_cls, "process_type", "service")

    process_instance = None
    # alert we have a spawning process, but we don't have the instance yet, so give the class instead (more accurate than name)
    self._call_proc_state_changed("%s.%s" % (module, cls), ProcessStateEnum.PENDING)

    try:
        # spawn service by type
        if process_type == "service":
            process_instance = self._spawn_service_process(process_id, name, module, cls, process_cfg)
        elif process_type == "stream_process":
            process_instance = self._spawn_stream_process(process_id, name, module, cls, process_cfg)
        elif process_type == "agent":
            process_instance = self._spawn_agent_process(process_id, name, module, cls, process_cfg)
        elif process_type == "standalone":
            process_instance = self._spawn_standalone_process(process_id, name, module, cls, process_cfg)
        elif process_type == "immediate":
            process_instance = self._spawn_immediate_process(process_id, name, module, cls, process_cfg)
        elif process_type == "simple":
            process_instance = self._spawn_simple_process(process_id, name, module, cls, process_cfg)
        else:
            raise BadRequest("Unknown process type: %s" % process_type)

        process_instance._proc_type = process_type
        self._register_process(process_instance, name)

        process_instance.errcause = "OK"
        log.info("ProcManager.spawn_process: %s.%s -> pid=%s OK", module, cls, process_id)

        if process_type == 'immediate':
            log.info('Terminating immediate process: %s', process_instance.id)
            self.terminate_process(process_instance.id)

            # terminate process also triggers TERMINATING/TERMINATED
            self._call_proc_state_changed(process_instance, ProcessStateEnum.EXITED)
        else:
            #Shouldn't be any policies for immediate processes
            self.update_container_policies(process_instance)

        return process_instance.id

    except IonProcessError:
        errcause = process_instance.errcause if process_instance else "instantiating process"
        log.exception("Error spawning %s %s process (process_id: %s): %s", name, process_type, process_id, errcause)
        return None

    except Exception:
        errcause = process_instance.errcause if process_instance else "instantiating process"
        log.exception("Error spawning %s %s process (process_id: %s): %s", name, process_type, process_id, errcause)

        # trigger failed notification - catches problems in init/start
        self._call_proc_state_changed(process_instance, ProcessStateEnum.FAILED)

        raise
def spawn_process(self, name=None, module=None, cls=None, config=None):
    """
    Spawn a process within the container. Processes can be of different type.

    @param name    Process name (used for registration)
    @param module  Dotted module path of the process class
    @param cls     Class name of the process within module
    @param config  Optional dict/DotDict layered over the system CFG
    @retval Process id of the spawned process
    @raise BadRequest for unknown process type
    """
    # Generate a new process id
    # TODO: Ensure it is system-wide unique
    process_id = "%s.%s" % (self.container.id, self.proc_id_pool.get_id())
    log.debug("ProcManager.spawn_process(name=%s, module.cls=%s.%s) as pid=%s", name, module, cls, process_id)

    if not config:
        # Use system CFG. It has the command line args in it
        config = DictModifier(CFG)
    else:
        # Use provided config. Must be dict or DotDict
        if not isinstance(config, DotDict):
            config = DotDict(config)
        config = DictModifier(CFG, config)

    if self.container.spawn_args:
        # Override config with spawn args
        dict_merge(config, self.container.spawn_args, inplace=True)

    log.debug("spawn_process() pid=%s config=%s", process_id, config)

    # PROCESS TYPE. Determines basic process context (messaging, service interface)
    # One of: service, stream_process, agent, simple, immediate
    service_cls = named_any("%s.%s" % (module, cls))
    process_type = get_safe(config, "process.type") or getattr(service_cls, "process_type", "service")

    service_instance = None
    try:
        # spawn service by type
        if process_type == "service":
            service_instance = self._spawn_service_process(process_id, name, module, cls, config)
        elif process_type == "stream_process":
            service_instance = self._spawn_stream_process(process_id, name, module, cls, config)
        elif process_type == "agent":
            service_instance = self._spawn_agent_process(process_id, name, module, cls, config)
        elif process_type == "standalone":
            service_instance = self._spawn_standalone_process(process_id, name, module, cls, config)
        elif process_type == "immediate":
            service_instance = self._spawn_immediate_process(process_id, name, module, cls, config)
        elif process_type == "simple":
            service_instance = self._spawn_simple_process(process_id, name, module, cls, config)
        else:
            raise BadRequest("Unknown process type: %s" % process_type)

        service_instance._proc_type = process_type
        self._register_process(service_instance, name)

        service_instance.errcause = "OK"
        log.info("AppManager.spawn_process: %s.%s -> pid=%s OK", module, cls, process_id)
        return service_instance.id

    except Exception:
        errcause = service_instance.errcause if service_instance else "instantiating service"
        log.exception("Error spawning %s %s process (process_id: %s): %s", name, process_type, process_id, errcause)
        raise
def spawn_process(self, name=None, module=None, cls=None, config=None, process_id=None):
    """
    Spawn a process within the container. Processes can be of different type.
    Dispatches on the process type to the matching _spawn_* helper.
    """
    if process_id and not is_valid_identifier(process_id, ws_sub='_'):
        raise BadRequest("Given process_id %s is not a valid identifier" % process_id)

    # New process id unless the caller supplied one
    # TODO: Ensure it is system-wide unique
    process_id = process_id or "%s.%s" % (self.container.id, self.proc_id_pool.get_id())
    log.debug("ProcManager.spawn_process(name=%s, module.cls=%s.%s) as pid=%s", name, module, cls, process_id)

    if config:
        # Caller-provided config, layered over the system CFG. Must be dict or DotDict
        if not isinstance(config, DotDict):
            config = DotDict(config)
        config = DictModifier(CFG, config)
    else:
        # System CFG alone. It has the command line args in it
        config = DictModifier(CFG)

    if self.container.spawn_args:
        # Spawn args take precedence over everything else
        dict_merge(config, self.container.spawn_args, inplace=True)

    #log.debug("spawn_process() pid=%s config=%s", process_id, config)

    # PROCESS TYPE determines basic process context (messaging, service interface).
    # One of: service, stream_process, agent, simple, immediate
    service_cls = named_any("%s.%s" % (module, cls))
    process_type = get_safe(config, "process.type") or getattr(service_cls, "process_type", "service")

    # Dispatch table from process type to spawn helper
    spawners = {
        "service": self._spawn_service_process,
        "stream_process": self._spawn_stream_process,
        "agent": self._spawn_agent_process,
        "standalone": self._spawn_standalone_process,
        "immediate": self._spawn_immediate_process,
        "simple": self._spawn_simple_process,
    }

    service_instance = None
    try:
        spawner = spawners.get(process_type)
        if spawner is None:
            raise BadRequest("Unknown process type: %s" % process_type)
        service_instance = spawner(process_id, name, module, cls, config)

        service_instance._proc_type = process_type
        self._register_process(service_instance, name)

        service_instance.errcause = "OK"
        log.info("AppManager.spawn_process: %s.%s -> pid=%s OK", module, cls, process_id)

        if process_type == 'immediate':
            # Immediate processes run once and are terminated right away
            log.info('Terminating immediate process: %s', service_instance.id)
            self.terminate_process(service_instance.id)

        return service_instance.id

    except Exception:
        errcause = service_instance.errcause if service_instance else "instantiating service"
        log.exception("Error spawning %s %s process (process_id: %s): %s", name, process_type, process_id, errcause)
        raise
if 'processapp' in appdef: # Case 1: Appdef contains definition of process to start name, module, cls = appdef.processapp try: pid = self.container.spawn_process(name, module, cls, app_config) appdef._pid = pid self.apps.append(appdef) except Exception, ex: log.exception("Appl %s start from processapp failed" % appdef.name) else: # Case 2: Appdef contains full app start params modpath = appdef.mod try: mod = named_any(modpath) appdef._mod_loaded = mod # Start the app supid, state = mod.start(self.container, START_PERMANENT, appdef, config) appdef._supid = supid appdef._state = state log.debug("App '%s' started. Root sup-id=%s" % (appdef.name, supid)) self.apps.append(appdef) except Exception, ex: log.exception("Appl %s start from appdef failed" % appdef.name)
def spawn_process(self, name=None, module=None, cls=None, config=None, process_id=None):
    """
    Spawn a process within the container. Processes can be of different type.

    @param name        Process name (used for registration)
    @param module      Dotted module path of the process class
    @param cls         Class name of the process within module
    @param config      Optional dict/DotDict used to build the process config
    @param process_id  Optional explicit process id; generated if not given
    @retval Process id of the spawned process, or None on IonProcessError
    @raise BadRequest  for invalid process_id or unknown process type
    """
    if process_id and not is_valid_identifier(process_id, ws_sub='_'):
        raise BadRequest("Given process_id %s is not a valid identifier" % process_id)

    # PROCESS ID. Generate a new process id if not provided
    # TODO: Ensure it is system-wide unique
    process_id = process_id or "%s.%s" % (self.container.id, self.proc_id_pool.get_id())
    log.debug("ProcManager.spawn_process(name=%s, module.cls=%s.%s, config=%s) as pid=%s",
              name, module, cls, config, process_id)

    # CONFIG
    process_cfg = self._create_process_config(config)

    try:
        service_cls = named_any("%s.%s" % (module, cls))
    except AttributeError:
        # Try to nail down the error: importing the module alone surfaces the
        # underlying import problem before re-raising
        import importlib
        importlib.import_module(module)
        raise

    # PROCESS TYPE. Determines basic process context (messaging, service interface)
    process_type = get_safe(process_cfg, "process.type") or getattr(service_cls, "process_type", PROCTYPE_SERVICE)

    process_instance = None
    # alert we have a spawning process, but we don't have the instance yet, so give the class instead (more accurate than name)
    # Note: this uses a str as first argument instead of a process instance
    self._call_proc_state_changed("%s.%s" % (module, cls), ProcessStateEnum.PENDING)

    try:
        # Additional attributes to set with the process instance
        proc_attr = {"_proc_type": process_type,
                     "_proc_spawn_cfg": config
                     }

        # SPAWN. Determined by type
        if process_type == PROCTYPE_SERVICE:
            process_instance = self._spawn_service_process(process_id, name, module, cls, process_cfg, proc_attr)
        elif process_type == PROCTYPE_STREAMPROC:
            process_instance = self._spawn_stream_process(process_id, name, module, cls, process_cfg, proc_attr)
        elif process_type == PROCTYPE_AGENT:
            process_instance = self._spawn_agent_process(process_id, name, module, cls, process_cfg, proc_attr)
        elif process_type == PROCTYPE_STANDALONE:
            process_instance = self._spawn_standalone_process(process_id, name, module, cls, process_cfg, proc_attr)
        elif process_type == PROCTYPE_IMMEDIATE:
            process_instance = self._spawn_immediate_process(process_id, name, module, cls, process_cfg, proc_attr)
        elif process_type == PROCTYPE_SIMPLE:
            process_instance = self._spawn_simple_process(process_id, name, module, cls, process_cfg, proc_attr)
        else:
            raise BadRequest("Unknown process type: %s" % process_type)

        # REGISTER.
        self._register_process(process_instance, name)

        process_instance.errcause = "OK"
        log.info("ProcManager.spawn_process: %s.%s -> pid=%s OK", module, cls, process_id)

        if process_type == PROCTYPE_IMMEDIATE:
            log.debug('Terminating immediate process: %s', process_instance.id)
            self.terminate_process(process_instance.id)

            # Terminate process also triggers TERMINATING/TERMINATED
            self._call_proc_state_changed(process_instance, ProcessStateEnum.EXITED)
        else:
            # Update local policies for the new process
            if self.container.has_capability(self.container.CCAP.GOVERNANCE_CONTROLLER):
                self.container.governance_controller.update_process_policies(
                        process_instance, safe_mode=True, force_update=False)

        return process_instance.id

    except IonProcessError:
        errcause = process_instance.errcause if process_instance else "instantiating process"
        log.exception("Error spawning %s %s process (process_id: %s): %s", name, process_type, process_id, errcause)
        return None

    except Exception:
        errcause = process_instance.errcause if process_instance else "instantiating process"
        log.exception("Error spawning %s %s process (process_id: %s): %s", name, process_type, process_id, errcause)

        # trigger failed notification - catches problems in init/start
        self._call_proc_state_changed(process_instance, ProcessStateEnum.FAILED)

        raise
def spawn_process(self, name=None, module=None, cls=None, config=None, process_id=None):
    """
    Spawn a process within the container. Processes can be of different type.

    @param name        Process name (used for registration)
    @param module      Dotted module path of the process class
    @param cls         Class name of the process within module
    @param config      Optional dict/DotDict merged over the system CFG
    @param process_id  Optional explicit process id; generated if not given
    @retval Process id of the spawned process, or None on IonProcessError
    @raise BadRequest  for invalid process_id or unknown process type
    """
    if process_id and not is_valid_identifier(process_id, ws_sub='_'):
        raise BadRequest("Given process_id %s is not a valid identifier" % process_id)

    # Generate a new process id if not provided
    # TODO: Ensure it is system-wide unique
    process_id = process_id or "%s.%s" % (self.container.id, self.proc_id_pool.get_id())
    log.debug("ProcManager.spawn_process(name=%s, module.cls=%s.%s, config=%s) as pid=%s",
              name, module, cls, config, process_id)

    process_cfg = deepcopy(CFG)
    if config:
        # Use provided config. Must be dict or DotDict
        if not isinstance(config, DotDict):
            config = DotDict(config)
        dict_merge(process_cfg, config, inplace=True)
    if self.container.spawn_args:
        # Override config with spawn args
        dict_merge(process_cfg, self.container.spawn_args, inplace=True)

    #log.debug("spawn_process() pid=%s process_cfg=%s", process_id, process_cfg)

    # PROCESS TYPE. Determines basic process context (messaging, service interface)
    # One of: service, stream_process, agent, simple, immediate
    service_cls = named_any("%s.%s" % (module, cls))
    process_type = get_safe(process_cfg, "process.type") or getattr(service_cls, "process_type", "service")

    process_instance = None
    # alert we have a spawning process, but we don't have the instance yet, so give the class instead (more accurate than name)
    self._call_proc_state_changed("%s.%s" % (module, cls), ProcessStateEnum.PENDING)

    try:
        # spawn service by type
        if process_type == "service":
            process_instance = self._spawn_service_process(process_id, name, module, cls, process_cfg)
        elif process_type == "stream_process":
            process_instance = self._spawn_stream_process(process_id, name, module, cls, process_cfg)
        elif process_type == "agent":
            process_instance = self._spawn_agent_process(process_id, name, module, cls, process_cfg)
        elif process_type == "standalone":
            process_instance = self._spawn_standalone_process(process_id, name, module, cls, process_cfg)
        elif process_type == "immediate":
            process_instance = self._spawn_immediate_process(process_id, name, module, cls, process_cfg)
        elif process_type == "simple":
            process_instance = self._spawn_simple_process(process_id, name, module, cls, process_cfg)
        else:
            raise BadRequest("Unknown process type: %s" % process_type)

        process_instance._proc_type = process_type
        self._register_process(process_instance, name)

        process_instance.errcause = "OK"
        log.info("ProcManager.spawn_process: %s.%s -> pid=%s OK", module, cls, process_id)

        if process_type == 'immediate':
            log.info('Terminating immediate process: %s', process_instance.id)
            self.terminate_process(process_instance.id)

            # terminate process also triggers TERMINATING/TERMINATED
            self._call_proc_state_changed(process_instance, ProcessStateEnum.EXITED)
        else:
            #Shouldn't be any policies for immediate processes
            self.update_container_policies(process_instance)

        return process_instance.id

    except IonProcessError:
        errcause = process_instance.errcause if process_instance else "instantiating process"
        log.exception("Error spawning %s %s process (process_id: %s): %s", name, process_type, process_id, errcause)
        return None

    except Exception:
        errcause = process_instance.errcause if process_instance else "instantiating process"
        log.exception("Error spawning %s %s process (process_id: %s): %s", name, process_type, process_id, errcause)

        # trigger failed notification - catches problems in init/start
        self._call_proc_state_changed(process_instance, ProcessStateEnum.FAILED)

        raise