def apply_profile_configuration(system_cfg, bootstrap_config):
    """Merge a container profile's config overrides into system_cfg (in place).

    The profile filename is read from bootstrap_config key "container.profile".
    No-op when no profile is configured.
    """
    profile_name = bootstrap_config.get_safe("container.profile", None)
    if not profile_name:
        return
    # Bare profile names resolve to res/profile/<name>.yml
    if not profile_name.endswith(".yml"):
        profile_name = "res/profile/%s.yml" % profile_name
    from pyon.util.config import Config
    profile_cfg = Config([profile_name]).data
    override = profile_cfg.get_safe("profile.config")
    if isinstance(override, dict) and override:
        from pyon.util.containers import dict_merge
        dict_merge(system_cfg, override, inplace=True)
def read_and_set_config(self):
    """Load CoverageConfig values from res/config/coverage.yml onto self.

    Return contract (unusual): True when the load failed before any value was
    applied (keep defaults), False when it failed part-way (hybrid state),
    and None on a fully successful load.
    """
    one_from_config = False
    try:
        data = Config(["res/config/coverage.yml"]).data['CoverageConfig']
        for k, v in data.iteritems():
            self.__setattr__(k, v)
            # At least one value was successfully taken from the config file
            one_from_config = True
        self.using_default_config = False
        self.config_time = get_current_ntp_time()
    except Exception as ex:
        # NOTE(review): ex.message is Python 2 only
        if one_from_config:
            log.info("Load from config failed with '%s'. Using hybrid default/config file configuration" % ex.message)
            return False
        else:
            log.info("load from config failed with '%s'. Using default config" % ex.message)
            return True
def on_init(self):
    """Load the resource interface definitions and prime internal caches."""
    res_mgmt_cfg = Config(["res/config/resource_management.yml"])
    self.resource_interface = res_mgmt_cfg.data['ResourceInterface']
    self._augment_resource_interface_from_interfaces()
    # Cache of known resource ids, filled lazily
    self.restype_cache = {}
def _load_capabilities(self):
    """Load container capability definitions and apply the configured profile.

    Populates self._cap_definitions, self.cap_profile and the CCAP constant
    registry. Raises ContainerError when the profile file is not a valid
    capability profile.
    """
    self._cap_initialized = []  # List of capability constants initialized in container
    self._capabilities = []     # List of capability constants active in container
    self._cap_instances = {}    # Dict mapping capability->manager instance

    self._cap_definitions = Config(["res/config/container_capabilities.yml"]).data['capabilities']

    profile_filename = CFG.get_safe("container.profile", "development")
    if not profile_filename.endswith(".yml"):
        profile_filename = "res/profile/%s.yml" % profile_filename
    log.info("Loading CC capability profile from file: %s", profile_filename)
    profile_cfg = Config([profile_filename]).data
    # FIX: use .get('type') so a dict lacking the "type" key raises the
    # intended ContainerError instead of an unhandled KeyError.
    if not isinstance(profile_cfg, dict) or profile_cfg.get('type') != "profile" or "profile" not in profile_cfg:
        raise ContainerError("Container capability profile invalid: %s" % profile_filename)
    self.cap_profile = profile_cfg['profile']

    # Profile may override/extend the base capability definitions
    if "capabilities" in self.cap_profile and self.cap_profile['capabilities']:
        dict_merge(self._cap_definitions, self.cap_profile['capabilities'], True)

    # Rebuild the capability constants registry from the definition keys
    CCAP.clear()
    cap_list = self._cap_definitions.keys()
    CCAP.update(zip(cap_list, cap_list))

    if "config" in self.cap_profile and self.cap_profile['config']:
        log.info("Container CFG was changed based on profile: %s", profile_filename)
def initialize_res_lcsms():
    """Initializes resource type lifecycle state machines."""
    res_lifecycle = Config(["res/config/resource_lifecycle.yml"]).data

    # Rebuild the registry of available lifecycle workflow definitions
    lcs_workflow_defs.clear()
    for wf_def in res_lifecycle["LifecycleWorkflowDefinitions"]:
        wf_name = wf_def['name']
        cls_name = wf_def.get('lcsm_class', None)
        if cls_name:
            # Workflow backed by a dedicated LCSM class
            lcs_workflow_defs[wf_name] = named_any(cls_name)(**wf_def)
        else:
            # Workflow derived from an already registered base workflow
            base_wf = lcs_workflow_defs[wf_def.get('based_on', None)]
            lcs_workflow_defs[wf_name] = base_wf._clone_with_restrictions(wf_def)

    # Rebuild the mapping of resource type -> lifecycle workflow
    lcs_workflows.clear()
    for res_type, wf_name in res_lifecycle["LifecycleResourceTypes"].iteritems():
        lcs_workflows[res_type] = lcs_workflow_defs[wf_name]
def initialize_res_lcsms():
    """
    Initializes default and special resource type state machines
    @todo. Make dynamic later and maybe move out.
    """
    res_lifecycle = (Config(["res/config/resource_lifecycle.yml"])).data

    # Initialize the set of available resource lifecycle workflows
    lcs_workflow_defs.clear()
    lcsm_defs = res_lifecycle["LifecycleWorkflowDefinitions"]
    for wf in lcsm_defs:
        #print "****** FOUND RES WORKFLOW %s" % (wf)
        wfname = wf['name']
        clsname = wf.get('lcsm_class', None)
        if clsname:
            # Explicit class given: instantiate it with the whole definition dict
            wf_cls = named_any(clsname)
            lcs_workflow_defs[wfname] = wf_cls(**wf)
        else:
            # No class: clone a previously registered workflow with restrictions.
            # Assumes the base workflow appears earlier in the list — TODO confirm.
            based_on = wf.get('based_on', None)
            wf_base = lcs_workflow_defs[based_on]
            lcs_workflow_defs[wfname] = wf_base._clone_with_restrictions(wf)

    lcs_workflows.clear()

    # Initialize the set of resource types with lifecycle
    for res_type, wf_name in res_lifecycle["LifecycleResourceTypes"].iteritems():
        lcs_workflows[res_type] = lcs_workflow_defs[wf_name]
def setup_ipython(shell_api=None):
    """Start an embedded, gevent-friendly IPython shell in this process.

    shell_api: optional dict of names intended for the shell namespace.
    NOTE(review): targets a legacy IPython (<1.0) module layout
    (IPython.frontend.*, IPython.config.loader) — verify against the
    installed IPython version.
    """
    from IPython.config.loader import Config
    ipython_cfg = Config()
    shell_config = ipython_cfg.InteractiveShellEmbed
    shell_config.prompt_in1 = '><> '
    shell_config.prompt_in2 = '... '
    shell_config.prompt_out = '--> '
    shell_config.confirm_exit = False

    # monkeypatch the ipython inputhook to be gevent-friendly
    import gevent   # should be auto-monkey-patched by pyon already.
    import select
    import sys

    def stdin_ready():
        # Non-blocking poll of stdin
        infds, outfds, erfds = select.select([sys.stdin], [], [], 0)
        if infds:
            return True
        else:
            return False

    def inputhook_gevent():
        # Yield to other greenlets while waiting for console input
        try:
            while not stdin_ready():
                gevent.sleep(0.05)
        except KeyboardInterrupt:
            pass
        return 0

    # install the gevent inputhook
    from IPython.lib.inputhook import inputhook_manager
    inputhook_manager.set_inputhook(inputhook_gevent)
    inputhook_manager._current_gui = 'gevent'

    # First import the embeddable shell class
    from IPython.frontend.terminal.embed import InteractiveShellEmbed

    # Update namespace of interactive shell
    # TODO: Cleanup namespace even further
    # NOTE(review): locals().update() does not affect a function's namespace
    # in CPython — shell_api is effectively not injected here; confirm intent.
    if shell_api is not None:
        locals().update(shell_api)

    # Now create an instance of the embeddable shell. The first argument is a
    # string with options exactly as you would type them if you were starting
    # IPython at the system command line. Any parameters you want to define for
    # configuration can thus be specified here.
    ipshell = InteractiveShellEmbed(config=ipython_cfg, banner1 =\
"""
 ____                           ________  _   __   ____________   ____  ___
/ __ \__  ______  ____         /  _/ __ \/ | / /  / ____/ ____/  / __ \|__ \\
/ /_/ / / / / __ \/ __ \______ / // / / /  |/ /  / /   / /      / /_/ /__/ /
/ ____/ /_/ / /_/ / / / /_____// // /_/ / /|  /  / /___/ /___   / _, _// __/
/_/    \__, /\____/_/ /_/    /___/\____/_/ |_/   \____/\____/  /_/ |_|/____/
      /____/""", exit_msg = 'Leaving ION shell, shutting down container.')

    ipshell('Pyon - ION R2 CC interactive IPython shell. Type ionhelp() for help')
def get_predicate_type_list():
    """Rebuild the global Predicates registry from associations.yml.

    Returns the list of predicate names. Raises Inconsistent when the same
    predicate is declared more than once.
    """
    Predicates.clear()
    association_defs = Config(["res/config/associations.yml"]).data['AssociationDefinitions']
    for assoc_def in association_defs:
        pred_name = assoc_def['predicate']
        if pred_name in Predicates:
            raise Inconsistent('Predicate %s defined multiple times in associations.yml' % pred_name)
        Predicates[pred_name] = assoc_def
    return Predicates.keys()
def _setup_ipython_config():
    """Build and return the IPython config used by the manhole kernel."""
    from IPython.config.loader import Config
    cfg = Config()
    # One connection file per container process, in the current directory
    cfg.KernelApp.connection_file = os.path.join(
        os.path.abspath(os.curdir), "manhole-%s.json" % os.getpid())
    cfg.PromptManager.in_template = '><> '
    cfg.PromptManager.in2_template = '... '
    cfg.PromptManager.out_template = '--> '
    cfg.InteractiveShellEmbed.confirm_exit = False
    #cfg.Application.log_level = 10     # uncomment for debug level ipython logging
    return cfg
def _load_capabilities(self):
    """Load container capability definitions and apply the configured profile.

    Populates self._cap_definitions, self.cap_profile and the CCAP constant
    registry. Raises ContainerError when the profile file is not a valid
    capability profile.
    """
    self._cap_initialized = []  # List of capability constants initialized in container
    self._capabilities = []     # List of capability constants active in container
    self._cap_instances = {}    # Dict mapping capability->manager instance

    self._cap_definitions = Config(["res/config/container_capabilities.yml"]).data['capabilities']

    profile_filename = CFG.get_safe("container.profile", "development")
    if not profile_filename.endswith(".yml"):
        profile_filename = "res/profile/%s.yml" % profile_filename
    log.debug("Loading CC capability profile from file: %s", profile_filename)
    profile_cfg = Config([profile_filename]).data
    # FIX: use .get('type') so a dict lacking the "type" key raises the
    # intended ContainerError instead of an unhandled KeyError.
    if not isinstance(profile_cfg, dict) or profile_cfg.get('type') != "profile" or "profile" not in profile_cfg:
        raise ContainerError("Container capability profile invalid: %s" % profile_filename)
    self.cap_profile = profile_cfg['profile']

    # Profile may override/extend the base capability definitions
    if "capabilities" in self.cap_profile and self.cap_profile['capabilities']:
        dict_merge(self._cap_definitions, self.cap_profile['capabilities'], True)

    # Rebuild the capability constants registry from the definition keys
    CCAP.clear()
    cap_list = self._cap_definitions.keys()
    CCAP.update(zip(cap_list, cap_list))

    if "config" in self.cap_profile and self.cap_profile['config']:
        log.info("Container CFG was changed based on profile: %s", profile_filename)
def lookup_associations(self, classname):
    """Return predicate definitions whose domain or range mentions classname."""
    from pyon.util.config import Config
    from pyon.util.containers import DotDict

    predicates = DotDict()
    predicates.update(Config(["res/config/associations.yml"]).data['PredicateTypes'])

    matches = {}
    for pred_name in predicates:
        pred_def = predicates[pred_name]
        # NOTE: substring match against the stringified domain/range lists
        dom_str = str(pred_def["domain"])
        rng_str = str(pred_def["range"])
        if classname in dom_str or classname in rng_str:
            matches[pred_name] = pred_def
    return matches
def start_app_from_url(self, app_url="", config=None):
    """
    @brief Read the app file and call start_app

    Returns True on success, False when the app file was not found.
    Re-raises any other start failure as ContainerAppError.
    """
    log.debug("AppManager.start_app_from_url(app_url=%s) ...", app_url)
    try:
        app = Config([app_url]).data
        self.start_app(appdef=app, config=config)
        return True
    except ConfigNotFound:
        # Missing app file is a soft failure (removed unused exception binding)
        log.warning("Could not find container app file '%s'", app_url)
    except Exception as ex:
        log.exception("Could not start app file %s", app_url)
        raise ContainerAppError(ex.message)
    return False
def _lookup_associations(self, classname):
    """
    Returns dict of associations for given object type (not base types)
    """
    from pyon.util.config import Config
    from pyon.util.containers import DotDict

    # Lazily build and cache the predicate -> definition map on first use
    if not self._associations:
        self._associations = DotDict()
        assoc_defs = Config(["res/config/associations.yml"]).data['AssociationDefinitions']
        self._associations.update((d['predicate'], d) for d in assoc_defs)

    result = {}
    for pred_name in self._associations:
        assoc = self._associations[pred_name]
        # NOTE: substring match against the stringified domain/range lists
        if classname in str(assoc["domain"]) or classname in str(assoc["range"]):
            result[pred_name] = assoc
    return result
def start_rel_from_url(self, rel_url="", config=None):
    """
    @brief Read the rel file and call start_rel

    Returns True on success, False when the deploy file was not found.
    Re-raises any other start failure as ContainerAppError.
    """
    log.info("AppManager.start_rel_from_url(rel_url=%s) ...", rel_url)
    try:
        rel = Config([rel_url]).data
        self.start_rel(rel, config)
        log.debug("AppManager.start_rel_from_url(rel_url=%s) done, OK.", rel_url)
        return True
    except ConfigNotFound:
        # Missing deploy file is a soft failure (removed unused exception binding)
        log.warning("Could not find container deploy file '%s'", rel_url)
    except Exception as ex:
        log.exception("Could not start container deploy file '%s'", rel_url)
        raise ContainerAppError(ex.message)
    return False
def _build_predicate_list(self):
    """
    create a master dict of dicts of lists in self.predicates_for_subj_obj
    self.predicates_for_subj_obj[RT.SubjectType][RT.ObjectType] = [PRED.typeOfPred1, PRED.typeOfPred2]
    """
    # if no extends are found, just return the base type as a list
    def my_getextends(iontype):
        try:
            return getextends(iontype)
        except KeyError:
            return [iontype]

    # read associations yaml and expand all domain/range pairs
    assoc_defs = Config(["res/config/associations.yml"]).data['AssociationDefinitions']
    for assoc_def in assoc_defs:
        # FIX: the original reused loop variable "ad" for both the association
        # definition and the expanded subject type (shadowing), and shadowed
        # the builtin "range". Renamed for safety; behavior unchanged.
        predicate = assoc_def['predicate']
        domain = assoc_def['domain']
        range_types = assoc_def['range']
        for d in domain:
            for subj_type in my_getextends(d):
                if subj_type not in self.predicates_for_subj_obj:
                    self.predicates_for_subj_obj[subj_type] = {}
                for r in range_types:
                    for obj_type in my_getextends(r):
                        if obj_type not in self.predicates_for_subj_obj[subj_type]:
                            self.predicates_for_subj_obj[subj_type][obj_type] = {}
                        # create as dict for now using keys to prevent duplicates
                        self.predicates_for_subj_obj[subj_type][obj_type][predicate] = ""

    # collapse predicate dicts to lists
    for s, obj_map in self.predicates_for_subj_obj.iteritems():
        for o, preds in obj_map.iteritems():
            self.predicates_for_subj_obj[s][o] = self.predicates_for_subj_obj[s][o].keys()
def read_local_configuration(conf_paths):
    """Load and merge the given local config files; missing files are ignored."""
    from pyon.util.config import Config
    return Config(conf_paths, ignore_not_found=True).data
def prepare_container():
    """
    Walks through pyon initialization in a deterministic way and initializes Container.
    In particular make sure configuration is loaded in correct order and pycc startup arguments
    are considered.

    Returns the Container instance, or None when opts.no_container is set.
    Relies on module-level opts/args/kwargs parsed from the command line.
    """
    import threading
    threading.current_thread().name = "CC-Main"

    # SIDE EFFECT: The import triggers static initializers: Monkey patching, setting pyon defaults
    import pyon
    from pyon.core import bootstrap, config

    # Set global testing flag to False. We are running as capability container. This is NO TEST.
    bootstrap.testing = False

    # Set sysname if provided in startup argument
    if opts.sysname:
        bootstrap.set_sys_name(opts.sysname)
    # Trigger any initializing default logic in get_sys_name
    bootstrap.get_sys_name()

    command_line_config = kwargs

    # This holds the minimal configuration used to bootstrap pycc and pyon and connect to datastores.
    bootstrap_config = None

    # This holds the new CFG object for pyon. Build it up in proper sequence and conditions.
    pyon_config = config.read_standard_configuration()  # Initial pyon.yml + pyon.local.yml

    # Load config override if provided. Supports variants literal and list of paths
    config_override = None
    if opts.config:
        if '{' in opts.config:
            # Variant 1: Dict of config values (parsed safely, not eval'd)
            try:
                eval_value = ast.literal_eval(opts.config)
                config_override = eval_value
            except ValueError:
                raise Exception("Value error in config arg '%s'" % opts.config)
        else:
            # Variant 2: List of paths
            from pyon.util.config import Config
            config_override = Config([opts.config]).data

    # Determine bootstrap_config
    if opts.config_from_directory:
        # Load minimal bootstrap config if option "config_from_directory"
        bootstrap_config = config.read_local_configuration(['res/config/pyon_min_boot.yml'])
        config.apply_local_configuration(bootstrap_config, pyon.DEFAULT_LOCAL_CONFIG_PATHS)
        config.apply_configuration(bootstrap_config, config_override)
        config.apply_configuration(bootstrap_config, command_line_config)
        print "pycc: config_from_directory=True. Minimal bootstrap configuration:", bootstrap_config
    else:
        # Otherwise: Set to standard set of local config files plus command line overrides
        bootstrap_config = deepcopy(pyon_config)
        config.apply_configuration(bootstrap_config, config_override)
        config.apply_configuration(bootstrap_config, command_line_config)

    # Override sysname from config file or command line
    if not opts.sysname and bootstrap_config.get_safe("system.name", None):
        new_sysname = bootstrap_config.get_safe("system.name")
        bootstrap.set_sys_name(new_sysname)

    # Delete sysname datastores if option "force_clean" is set
    if opts.force_clean:
        from pyon.datastore import clear_couch_util
        print "pycc: force_clean=True. DROP DATASTORES for sysname=%s" % bootstrap.get_sys_name()
        clear_couch_util.clear_couch(bootstrap_config, prefix=bootstrap.get_sys_name())
        pyon_config.container.filesystem.force_clean = True

    from pyon.core.interfaces.interfaces import InterfaceAdmin
    iadm = InterfaceAdmin(bootstrap.get_sys_name(), config=bootstrap_config)

    # If auto_bootstrap, load config and interfaces into directory
    # Note: this is idempotent and will not alter anything if this is not the first container to run
    if bootstrap_config.system.auto_bootstrap:
        print "pycc: auto_bootstrap=True."
        stored_config = deepcopy(pyon_config)
        config.apply_configuration(stored_config, config_override)
        config.apply_configuration(stored_config, command_line_config)
        iadm.create_core_datastores()
        iadm.store_config(stored_config)

    # Determine the final pyon_config
    # - Start from standard config already set (pyon.yml + local YML files)
    # - Optionally load config from directory
    if opts.config_from_directory:
        config.apply_remote_config(bootstrap_cfg=bootstrap_config, system_cfg=pyon_config)
        # apply pyon.local.yml again over top
        config.apply_local_configuration(pyon_config, pyon.DEFAULT_LOCAL_CONFIG_PATHS)
    # - Last apply any separate command line config overrides
    config.apply_configuration(pyon_config, config_override)
    config.apply_configuration(pyon_config, command_line_config)

    # Also set the immediate flag, but only if specified - it is an override
    if opts.immediate:
        from pyon.util.containers import dict_merge
        dict_merge(pyon_config, {'system': {'immediate': True}}, True)

    # Bootstrap pyon's core. Load configuration etc.
    bootstrap.bootstrap_pyon(pyon_cfg=pyon_config)

    # Delete any queues/exchanges owned by sysname if option "broker_clean" is set
    if opts.broker_clean:
        print "pycc: broker_clean=True, sysname:", bootstrap.get_sys_name()

        # build connect str
        # NOTE(review): port 55672 is the legacy RabbitMQ management port — confirm
        connect_str = "-q -H %s -P 55672 -u %s -p %s -V %s" % (
            pyon_config.get_safe('server.amqp_priv.host', pyon_config.get_safe('server.amqp.host', 'localhost')),
            pyon_config.get_safe('container.exchange.management.username', 'guest'),
            pyon_config.get_safe('container.exchange.management.password', 'guest'),
            '/')

        from putil.rabbithelper import clean_by_sysname
        deleted_exchanges, deleted_queues = clean_by_sysname(connect_str, bootstrap.get_sys_name())
        print " exchanges deleted (%s): %s" % (len(deleted_exchanges), ",".join(deleted_exchanges))
        print " queues deleted (%s): %s" % (len(deleted_queues), ",".join(deleted_queues))

    # Auto-bootstrap interfaces
    if bootstrap_config.system.auto_bootstrap:
        iadm.store_interfaces(idempotent=True)

    iadm.close()

    if opts.no_container:
        print "pycc: no_container=True. Stopping here."
        return None

    # Create the container instance
    from pyon.container.cc import Container
    container = Container(*args, **command_line_config)

    return container
def _read_logging_config(logging_conf_paths):
    """Load the logging config files into the module-global LOGGING_CFG."""
    global LOGGING_CFG
    from pyon.util.config import Config
    cfg = Config(logging_conf_paths, ignore_not_found=True)
    LOGGING_CFG = cfg.data
class Container(BaseContainerAgent):
    """
    The Capability Container. Its purpose is to spawn/monitor processes and services
    that do the bulk of the work in the ION system. It also manages connections to the Exchange
    and the various forms of datastores in the systems.
    """

    # Singleton static variables
    #node = None
    id = None
    name = None
    pidfile = None
    instance = None

    def __init__(self, *args, **kwargs):
        BaseContainerAgent.__init__(self, *args, **kwargs)

        # Coordinates the container start
        self._status = INIT

        self._is_started = False

        # set container id and cc_agent name (as they are set in base class call)
        self.id = get_default_container_id()
        self.name = "cc_agent_%s" % self.id

        bootstrap.container_instance = self
        Container.instance = self
        self.container = self  # Make self appear as process to service clients
        self.CCAP = CCAP

        log.debug("Container (sysname=%s) initializing ..." % bootstrap.get_sys_name())

        # Keep track of the overrides from the command-line, so they can trump app/rel file data
        self.spawn_args = kwargs

        # Greenlet context-local storage
        self.context = LocalContextMixin()

        # Load general capabilities file and augment with specific profile
        self._load_capabilities()

        # Instantiate all capabilities in profile-defined order; dependency and
        # duplicate checks happen per capability
        start_order = self.cap_profile['start_order']
        for cap in start_order:
            if cap not in self._cap_definitions:
                raise ContainerError("CC capability %s not defined in profile" % cap)
            if cap in self._capabilities or cap in self._cap_instances:
                raise ContainerError("CC capability %s already initialized" % cap)
            try:
                cap_def = self._cap_definitions[cap]
                log.debug("__init__(): Initializing '%s'" % cap)
                cap_obj = named_any(cap_def['class'])(container=self)
                self._cap_instances[cap] = cap_obj
                if 'depends_on' in cap_def and cap_def['depends_on']:
                    dep_list = cap_def['depends_on'].split(',')
                    for dep in dep_list:
                        dep = dep.strip()
                        if dep not in self._cap_initialized:
                            raise ContainerError("CC capability %s dependent on non-existing capability %s" % (cap, dep))
                if 'field' in cap_def and cap_def['field']:
                    # Expose the capability instance as a named attribute on the container
                    setattr(self, cap_def['field'], cap_obj)
                self._cap_initialized.append(cap)
            except Exception as ex:
                log.error("Container Capability %s init error: %s" % (cap, ex))
                raise

        log.debug("Container initialized, OK.")

    def _load_capabilities(self):
        """Load capability definitions and the configured container profile."""
        self._cap_initialized = []  # List of capability constants initialized in container
        self._capabilities = []     # List of capability constants active in container
        self._cap_instances = {}    # Dict mapping capability->manager instance

        self._cap_definitions = Config(["res/config/container_capabilities.yml"]).data['capabilities']

        profile_filename = CFG.get_safe("container.profile", "development")
        if not profile_filename.endswith(".yml"):
            profile_filename = "res/profile/%s.yml" % profile_filename
        log.info("Loading CC capability profile from file: %s", profile_filename)
        profile_cfg = Config([profile_filename]).data
        if not isinstance(profile_cfg, dict) or profile_cfg['type'] != "profile" or not "profile" in profile_cfg:
            raise ContainerError("Container capability profile invalid: %s" % profile_filename)
        self.cap_profile = profile_cfg['profile']

        # Profile may override/extend the base capability definitions
        if "capabilities" in self.cap_profile and self.cap_profile['capabilities']:
            dict_merge(self._cap_definitions, self.cap_profile['capabilities'], True)

        # Rebuild capability constants registry from definition keys
        CCAP.clear()
        cap_list = self._cap_definitions.keys()
        CCAP.update(zip(cap_list, cap_list))

        if "config" in self.cap_profile and self.cap_profile['config']:
            log.info("Container CFG was changed based on profile: %s", profile_filename)

    def start(self):
        """Start all enabled capability instances in profile order."""
        log.debug("Container starting...")
        if self._is_started:
            raise ContainerError("Container already started")

        start_order = self.cap_profile['start_order']
        for cap in start_order:
            if cap not in self._cap_instances:
                continue
            # First find the default enabled value if no CFG key exists
            enabled_default = self._cap_definitions.get_safe("%s.enabled_default" % cap, True)
            # Then find CFG key where enabled flag is (default or override)
            enabled_config = self._cap_definitions.get_safe("%s.enabled_config" % cap, "container.%s.enabled" % cap)
            # Then determine the enabled value
            enabled = CFG.get_safe(enabled_config, enabled_default)
            if enabled:
                log.debug("start(): Starting '%s'" % cap)
                try:
                    cap_obj = self._cap_instances[cap]
                    cap_obj.start()
                    self._capabilities.append(cap)
                except Exception as ex:
                    log.error("Container Capability %s start error: %s" % (cap, ex))
                    raise
            else:
                log.debug("start(): Capability '%s' disabled by config '%s'", cap, enabled_config)

        if self.has_capability(CCAP.EVENT_PUBLISHER):
            self.event_pub.publish_event(event_type="ContainerLifecycleEvent",
                                         origin=self.id, origin_type="CapabilityContainer",
                                         sub_type="START",
                                         state=ContainerStateEnum.START)

        self._is_started = True
        self._status = RUNNING

        log.info("Container (%s) started, OK." , self.id)

    def has_capability(self, capability):
        """
        Returns True if the given capability is in the list of container capabilities,
        i.e. available in this container.
        """
        return capability in self._capabilities

    def _handle_sigusr2(self):#, signum, frame):
        """
        Handles SIGUSR2, prints debugging greenlet information.
        """
        gls = GreenletLeak.get_greenlets()

        allgls = []

        for gl in gls:
            status = GreenletLeak.format_greenlet(gl)

            # build formatted output:
            # Greenlet at 0xdeadbeef
            #     self: <EndpointUnit at 0x1ffcceef>
            #     func: bound, EndpointUnit.some_func
            status[0].insert(0, "%s at %s:" % (gl.__class__.__name__, hex(id(gl))))

            # indent anything in status a second time
            prefmt = [s.replace("\t", "\t\t") for s in status[0]]
            prefmt.append("traceback:")

            for line in status[1]:
                for subline in line.split("\n")[0:2]:
                    prefmt.append(subline)

            glstr = "\n\t".join(prefmt)
            allgls.append(glstr)

        # print it out!
        print >>sys.stderr, "\n\n".join(allgls)
        # Also dump the greenlet report to a per-process file
        with open("gls-%s" % os.getpid(), "w") as f:
            f.write("\n\n".join(allgls))

    @property
    def node(self):
        """
        Returns the active/default Node that should be used for most communication in the system.
        Defers to exchange manager, but only if it has been started, otherwise returns None.
        """
        if self.has_capability(CCAP.EXCHANGE_MANAGER):
            return self.ex_manager.default_node
        return None

    @contextmanager
    def _push_status(self, new_status):
        """
        Temporarily sets the internal status flag.
        Use this as a decorator or in a with-statement before calling a temporary status changing
        method, like start_rel_from_url.
        """
        curstatus = self._status
        self._status = new_status
        try:
            yield
        finally:
            self._status = curstatus

    def serve_forever(self):
        """ Run the container until killed. """
        log.debug("In Container.serve_forever")

        if not self.proc_manager.proc_sup.running:
            self.start()

        # serve forever short-circuits if immediate is on and children len is ok
        num_procs = len(self.proc_manager.proc_sup.children)
        immediate = CFG.system.get('immediate', False)
        if not (immediate and num_procs == 1):  # only spawned greenlet is the CC-Agent

            # print a warning just in case
            if immediate and num_procs != 1:
                log.warn("CFG.system.immediate=True but number of spawned processes is not 1 (%d)", num_procs)

            try:
                # This just waits in this Greenlet for all child processes to complete,
                # which is triggered somewhere else.
                self.proc_manager.proc_sup.join_children()
            except (KeyboardInterrupt, SystemExit) as ex:
                log.info('Received a kill signal, shutting down the container.')

                if hasattr(self, 'gl_parent_watch') and self.gl_parent_watch is not None:
                    self.gl_parent_watch.kill()

            except:
                log.exception('Unhandled error! Forcing container shutdown')
        else:
            log.debug("Container.serve_forever short-circuiting due to CFG.system.immediate")

        self.proc_manager.proc_sup.shutdown(CFG.cc.timeout.shutdown)

    def status(self):
        """ Returns the internal status. """
        return self._status

    def is_running(self):
        """ Returns True if the container is currently running. """
        if self._status == RUNNING:
            return True
        return False

    def is_terminating(self):
        """ Is the container in the process of shutting down or stopped. """
        if self._status == TERMINATING or self._status == TERMINATED:
            return True
        return False

    def _cleanup_pid(self):
        """Best-effort removal of the container pidfile."""
        if self.pidfile:
            log.debug("Cleanup pidfile: %s", self.pidfile)
            try:
                os.remove(self.pidfile)
            except Exception, e:
                log.warn("Pidfile could not be deleted: %s" % str(e))
            self.pidfile = None
class Container(BaseContainerAgent):
    """
    The Capability Container. Its purpose is to spawn/monitor processes and services
    that do the bulk of the work in the ION system. It also manages connections to the Exchange
    and the various forms of datastores in the systems.
    """

    # Class static variables (defaults)
    id = None
    name = None
    pidfile = None
    instance = None
    version = None
    start_time = None

    def __init__(self, *args, **kwargs):
        BaseContainerAgent.__init__(self, *args, **kwargs)

        # Coordinates the container start
        self._status = INIT

        self._is_started = False

        # set container id and cc_agent name (as they are set in base class call)
        self.id = get_default_container_id()
        self.name = "cc_agent_%s" % self.id
        self.start_time = get_ion_ts()

        bootstrap.container_instance = self
        Container.instance = self
        self.container = self  # Make self appear as process to service clients
        self.CCAP = CCAP
        self.CFG = CFG

        log.debug("Container (sysname=%s) initializing ..." % bootstrap.get_sys_name())

        # Keep track of the overrides from the command-line, so they can trump app/rel file data
        self.spawn_args = kwargs

        # Greenlet context-local storage
        self.context = LocalContextMixin()

        # Load general capabilities file and augment with specific profile
        self._load_capabilities()

        # Instantiate all capabilities in profile-defined order; dependency and
        # duplicate checks happen per capability
        start_order = self.cap_profile['start_order']
        for cap in start_order:
            if cap not in self._cap_definitions:
                raise ContainerError("CC capability %s not defined in profile" % cap)
            if cap in self._capabilities or cap in self._cap_instances:
                raise ContainerError("CC capability %s already initialized" % cap)
            try:
                cap_def = self._cap_definitions[cap]
                log.debug("__init__(): Initializing '%s'" % cap)
                cap_obj = named_any(cap_def['class'])(container=self)
                self._cap_instances[cap] = cap_obj
                if 'depends_on' in cap_def and cap_def['depends_on']:
                    dep_list = cap_def['depends_on'].split(',')
                    for dep in dep_list:
                        dep = dep.strip()
                        if dep not in self._cap_initialized:
                            raise ContainerError("CC capability %s dependent on non-existing capability %s" % (cap, dep))
                if 'field' in cap_def and cap_def['field']:
                    # Expose the capability instance as a named attribute on the container
                    setattr(self, cap_def['field'], cap_obj)
                self._cap_initialized.append(cap)
            except Exception as ex:
                log.error("Container Capability %s init error: %s" % (cap, ex))
                raise

        log.debug("Container initialized, OK.")

    def _load_capabilities(self):
        """Load capability definitions and the configured container profile."""
        self._cap_initialized = []  # List of capability constants initialized in container
        self._capabilities = []     # List of capability constants active in container
        self._cap_instances = {}    # Dict mapping capability->manager instance

        self._cap_definitions = Config(["res/config/container_capabilities.yml"]).data['capabilities']

        profile_filename = CFG.get_safe("container.profile", "development")
        if not profile_filename.endswith(".yml"):
            profile_filename = "res/profile/%s.yml" % profile_filename
        log.debug("Loading CC capability profile from file: %s", profile_filename)
        profile_cfg = Config([profile_filename]).data
        if not isinstance(profile_cfg, dict) or profile_cfg['type'] != "profile" or not "profile" in profile_cfg:
            raise ContainerError("Container capability profile invalid: %s" % profile_filename)
        self.cap_profile = profile_cfg['profile']

        # Profile may override/extend the base capability definitions
        if "capabilities" in self.cap_profile and self.cap_profile['capabilities']:
            dict_merge(self._cap_definitions, self.cap_profile['capabilities'], True)

        # Rebuild capability constants registry from definition keys
        CCAP.clear()
        cap_list = self._cap_definitions.keys()
        CCAP.update(zip(cap_list, cap_list))

        if "config" in self.cap_profile and self.cap_profile['config']:
            log.info("Container CFG was changed based on profile: %s", profile_filename)
        # Note: The config update actually happens in pycc.py early on

    def start(self):
        """Start all enabled capability instances in profile order."""
        log.debug("Container starting...")
        if self._is_started:
            raise ContainerError("Container already started")

        start_order = self.cap_profile['start_order']
        for cap in start_order:
            if cap not in self._cap_instances:
                continue
            # First find the default enabled value if no CFG key exists
            enabled_default = self._cap_definitions.get_safe("%s.enabled_default" % cap, True)
            # Then find CFG key where enabled flag is (default or override)
            enabled_config = self._cap_definitions.get_safe("%s.enabled_config" % cap, "container.%s.enabled" % cap)
            # Then determine the enabled value
            enabled = CFG.get_safe(enabled_config, enabled_default)
            if enabled:
                log.debug("start(): Starting '%s'" % cap)
                try:
                    cap_obj = self._cap_instances[cap]
                    cap_obj.start()
                    self._capabilities.append(cap)
                except Exception as ex:
                    log.error("Container Capability %s start error: %s" % (cap, ex))
                    raise
            else:
                log.debug("start(): Capability '%s' disabled by config '%s'", cap, enabled_config)

        if self.has_capability(CCAP.EVENT_PUBLISHER):
            self.event_pub.publish_event(event_type="ContainerLifecycleEvent",
                                         origin=self.id, origin_type="CapabilityContainer",
                                         sub_type="START",
                                         state=ContainerStateEnum.START)

        self._is_started = True
        self._status = RUNNING

        log.info("Container (%s) started, OK.", self.id)

    def has_capability(self, capability):
        """
        Returns True if the given capability is in the list of container capabilities,
        i.e. available in this container.
        """
        return capability in self._capabilities

    @property
    def node(self):
        """
        Returns the active/default Node that should be used for most communication in the system.
        Defers to exchange manager, but only if it has been started, otherwise returns None.
        """
        if self.has_capability(CCAP.EXCHANGE_MANAGER):
            return self.ex_manager.default_node
        return None

    @contextmanager
    def _push_status(self, new_status):
        """
        Temporarily sets the internal status flag.
        Use this as a decorator or in a with-statement before calling a temporary status changing
        method, like start_rel_from_url.
        """
        curstatus = self._status
        self._status = new_status
        try:
            yield
        finally:
            self._status = curstatus

    def serve_forever(self):
        """ Run the container until killed. """
        log.debug("In Container.serve_forever")

        if not self.proc_manager.proc_sup.running:
            self.start()

        # Exit if immediate==True and children len is ok
        num_procs = len(self.proc_manager.proc_sup.children)
        immediate = CFG.system.get('immediate', False)
        if immediate and num_procs == 1:  # only spawned greenlet is the CC-Agent
            log.debug("Container.serve_forever exiting due to CFG.system.immediate")

        else:
            # print a warning just in case
            if immediate and num_procs != 1:
                log.warn("CFG.system.immediate=True but number of spawned processes is not 1 (%d)", num_procs)

            try:
                # This just waits in this Greenlet for all child processes to complete,
                # which is triggered somewhere else.
                self.proc_manager.proc_sup.join_children()
            except (KeyboardInterrupt, SystemExit) as ex:
                if hasattr(self, 'gl_parent_watch') and self.gl_parent_watch is not None:
                    # Remove the greenlet that watches the parent process
                    self.gl_parent_watch.kill()
                # Let the caller handle this
                raise

            except:
                log.exception('Unhandled error! Forcing container shutdown')

    def status(self):
        """ Returns the internal status. """
        return self._status

    def is_running(self):
        """ Returns True if the container is currently running. """
        if self._status == RUNNING:
            return True
        return False

    def is_terminating(self):
        """ Is the container in the process of shutting down or stopped. """
        if self._status == TERMINATING or self._status == TERMINATED:
            return True
        return False

    def _cleanup_pid(self):
        """Best-effort removal of the container pidfile."""
        if self.pidfile:
            log.debug("Cleanup pidfile: %s", self.pidfile)
            try:
                os.remove(self.pidfile)
            except Exception as e:
                log.warn("Pidfile could not be deleted: %s" % str(e))
            self.pidfile = None

    def stop_container(self):
        """Request an asynchronous container stop (delayed via gevent)."""
        log.info("Received request to stop container")
        gl = gevent.spawn_later(0.5, self.stop)

    def stop(self, do_exit=True):
        """Stop all capabilities in reverse start order; optionally exit the process."""
        log.info("=============== Container stopping... ===============")

        self._status = TERMINATING

        if self.has_capability(CCAP.EVENT_PUBLISHER) and self.event_pub is not None:
            try:
                self.event_pub.publish_event(event_type="ContainerLifecycleEvent",
                                             origin=self.id, origin_type="CapabilityContainer",
                                             sub_type="TERMINATE",
                                             state=ContainerStateEnum.TERMINATE)
            except Exception as ex:
                log.exception("Error sending event")

        while self._capabilities:
            capability = self._capabilities.pop()
            #log.debug("stop(): Stopping '%s'" % capability)
            try:
                cap_obj = self._cap_instances[capability]
                cap_obj.stop()
                del self._cap_instances[capability]
            except Exception as ex:
                log.exception("Container stop(): Error stop %s" % capability)

        Container.instance = None
        from pyon.core import bootstrap
        bootstrap.container_instance = None

        self._is_started = False
        self._status = TERMINATED
        log.info("Container stopped (%s).", self.id)

        if do_exit:
            os.kill(os.getpid(), signal.SIGTERM)

    def start_app(self, appdef=None, config=None):
        """Start an app from a definition dict (delegates to app_manager)."""
        with self._push_status("START_APP"):
            return self.app_manager.start_app(appdef=appdef, config=config)

    def start_app_from_url(self, app_url=''):
        """Start an app from an app file URL (delegates to app_manager)."""
        with self._push_status("START_APP_FROM_URL"):
            return self.app_manager.start_app_from_url(app_url=app_url)

    def start_rel(self, rel=None):
        """Start a deploy (rel) definition (delegates to app_manager)."""
        with self._push_status("START_REL"):
            return self.app_manager.start_rel(rel=rel)

    def start_rel_from_url(self, rel_url='', config=None):
        """Start a deploy (rel) file by URL (delegates to app_manager)."""
        with self._push_status("START_REL_FROM_URL"):
            return self.app_manager.start_rel_from_url(rel_url=rel_url, config=config)

    def fail_fast(self, err_msg="", skip_stop=False):
        """
        Container needs to shut down and NOW.
        """
        log.error("Fail Fast: %s", err_msg)
        if not skip_stop:
            self.stop()
        log.error("Fail Fast: killing container")

        traceback.print_exc()

        self._kill_fast()

    def _kill_fast(self):
        # The exit code of the terminated process is set to non-zero
        os.kill(os.getpid(), signal.SIGTERM)
def prepare_container():
    """
    Walks through pyon initialization in a deterministic way and initializes Container.
    In particular make sure configuration is loaded in correct order and
    pycc startup arguments are considered.

    NOTE(review): references `opts`, `args`, `kwargs` and `version` which are not
    defined in this function - presumably names from the enclosing pycc script
    scope; verify against the full module.
    """
    # SIDE EFFECT: The import triggers static initializers: Gevent monkey patching, setting pyon defaults
    import pyon

    import threading
    threading.current_thread().name = "CC-Main"

    import logging
    global log
    log = logging.getLogger('pycc')

    from pyon.core import bootstrap, config
    from pyon.util.containers import get_safe, dict_merge

    # Set global testing flag to False. We are running as capability container, because
    # we started through the pycc program.
    bootstrap.testing = False

    # Set sysname if provided in startup argument
    if opts.sysname:
        bootstrap.set_sys_name(opts.sysname)
    # Trigger any initializing default logic in get_sys_name
    bootstrap.get_sys_name()

    command_line_config = kwargs

    # This holds the minimal configuration used to bootstrap pycc and pyon and connect to datastores.
    bootstrap_config = None

    # This holds the new CFG object for pyon. Build it up in proper sequence and conditions.
    pyon_config = config.read_standard_configuration()  # Initial pyon.yml + pyon.local.yml

    # Load config override if provided. Supports variants literal and list of paths
    config_override = None
    if opts.config:
        if '{' in opts.config:
            # Variant 1: Dict of config values (parsed as a Python literal)
            try:
                eval_value = ast.literal_eval(opts.config)
                config_override = eval_value
            except ValueError:
                raise Exception("Value error in config arg '%s'" % opts.config)
        else:
            # Variant 2: List of paths
            from pyon.util.config import Config
            config_override = Config([opts.config]).data

    # Determine bootstrap_config
    if opts.config_from_directory:
        # Load minimal bootstrap config if option "config_from_directory"
        bootstrap_config = config.read_local_configuration(
            ['res/config/pyon_min_boot.yml'])
        config.apply_local_configuration(bootstrap_config,
                                         pyon.DEFAULT_LOCAL_CONFIG_PATHS)
        config.apply_configuration(bootstrap_config, config_override)
        config.apply_configuration(bootstrap_config, command_line_config)
        log.info(
            "config_from_directory=True. Minimal bootstrap configuration: %s",
            bootstrap_config)
    else:
        # Otherwise: Set to standard set of local config files plus command line overrides
        bootstrap_config = deepcopy(pyon_config)
        config.apply_configuration(bootstrap_config, config_override)
        config.apply_configuration(bootstrap_config, command_line_config)

    # Override sysname from config file or command line
    if not opts.sysname and bootstrap_config.get_safe("system.name", None):
        new_sysname = bootstrap_config.get_safe("system.name")
        bootstrap.set_sys_name(new_sysname)

    # Force_clean - deletes sysname datastores
    if opts.force_clean:
        from pyon.datastore import clear_db_util
        log.info("force_clean=True. DROP DATASTORES for sysname=%s",
                 bootstrap.get_sys_name())
        clear_db_util.clear_db(bootstrap_config,
                               prefix=bootstrap.get_sys_name(),
                               sysname=bootstrap.get_sys_name())

    from pyon.core.interfaces.interfaces import InterfaceAdmin
    iadm = InterfaceAdmin(bootstrap.get_sys_name(), config=bootstrap_config)

    # If auto_store_interfaces: ensure that all datastores exist and directory is prepared, with config
    # WARNING: If multiple containers start concurrently, this may fail
    if get_safe(bootstrap_config, "bootstrap.auto_store_interfaces") is True:
        log.debug("auto_store_interfaces=True.")
        stored_config = deepcopy(pyon_config)
        config.apply_configuration(stored_config, config_override)
        config.apply_configuration(stored_config, command_line_config)
        iadm.create_core_datastores()
        iadm.store_config(stored_config)

    # Determine the final pyon_config:
    # - Start from standard config already set (pyon.yml + local YML files)
    # - Optionally load config from directory
    if opts.config_from_directory:
        config.apply_remote_config(bootstrap_cfg=bootstrap_config,
                                   system_cfg=pyon_config)
    # - Apply container profile specific config
    config.apply_profile_configuration(pyon_config, bootstrap_config)
    # - Reapply pyon.local.yml here again for good measure
    config.apply_local_configuration(pyon_config,
                                     pyon.DEFAULT_LOCAL_CONFIG_PATHS)
    # - Last apply any separate command line config overrides
    config.apply_configuration(pyon_config, config_override)
    config.apply_configuration(pyon_config, command_line_config)
    iadm.set_config(pyon_config)

    # Set the immediate flag when command line override specified
    if opts.immediate:
        dict_merge(pyon_config, {"system": {
            "immediate": True
        }}, inplace=True)

    # Determine system bootmode for bootstrapping actions (unless explicitly specified)
    if not pyon_config.get_safe("bootmode"):
        set_bootmode = get_safe(pyon_config, "bootstrap.set_bootmode")
        if set_bootmode == "auto":
            # "restart" when system data already exists, otherwise leave unset
            # (treated as "initial" downstream).
            if iadm.system_data_exists():
                dict_merge(pyon_config, {"bootmode": "restart"}, inplace=True)
            log.info(
                "System bootmode auto-detection is ON. Determined bootmode=%s",
                pyon_config.get_safe("bootmode", "initial"))
        elif set_bootmode == "secondary":
            dict_merge(pyon_config, {"bootmode": "secondary"}, inplace=True)
            log.info("System bootmode override. Set to bootmode=%s",
                     pyon_config.get_safe("bootmode", ""))
    log.info("System in bootmode=%s",
             pyon_config.get_safe("bootmode", "initial"))

    # Bootstrap the pyon framework's core. Load configuration etc.
    bootstrap.bootstrap_pyon(pyon_cfg=pyon_config)

    # Delete any queues/exchanges owned by sysname if option "broker_clean" is set
    if opts.broker_clean:
        log.info("broker_clean=True, sysname: %s", bootstrap.get_sys_name())
        from putil.rabbitmq.rabbit_util import RabbitManagementUtil
        rabbit_util = RabbitManagementUtil(pyon_config,
                                           sysname=bootstrap.get_sys_name())
        deleted_exchanges, deleted_queues = rabbit_util.clean_by_sysname()
        log.info("Exchanges deleted (%s): %s" %
                 (len(deleted_exchanges), ", ".join(deleted_exchanges)))
        log.info("Queues deleted (%s): %s" %
                 (len(deleted_queues), ", ".join(deleted_queues)))

    if opts.force_clean:
        from pyon.util.file_sys import FileSystem
        FileSystem._clean(pyon_config)

    # If auto_store_interfaces (cont'd): Store interfaces if not yet existing; set up messaging
    if get_safe(bootstrap_config, "bootstrap.auto_store_interfaces") is True:
        iadm.store_interfaces(idempotent=True)
        iadm.declare_core_exchange_resources()

    iadm.close()

    if opts.no_container:
        log.info("no_container=True. Stopping here.")
        return None

    # Create the container instance
    from pyon.container.cc import Container
    container = Container(*args, **command_line_config)
    container.version = version

    return container
def get_compound_associations_list(): CompoundAssociations.clear() CompoundAssociations.update( Config(["res/config/associations.yml"]).data['CompoundAssociations']) return CompoundAssociations.keys()
def prepare_container():
    """
    Walks through pyon initialization in a deterministic way and initializes Container.
    In particular make sure configuration is loaded in correct order and
    pycc startup arguments are considered.

    NOTE(review): legacy Python 2 variant (uses print statements); references
    `opts`, `args` and `kwargs` from the enclosing script scope - verify.
    """
    import threading
    threading.current_thread().name = "CC-Main"

    # SIDE EFFECT: The import triggers static initializers: Monkey patching, setting pyon defaults
    import pyon

    from pyon.core import bootstrap, config

    # Set global testing flag to False. We are running as capability container. This is NO TEST.
    bootstrap.testing = False

    # Set sysname if provided in startup argument
    if opts.sysname:
        bootstrap.set_sys_name(opts.sysname)
    # Trigger any initializing default logic in get_sys_name
    bootstrap.get_sys_name()

    command_line_config = kwargs

    # This holds the minimal configuration used to bootstrap pycc and pyon and connect to datastores.
    bootstrap_config = None

    # This holds the new CFG object for pyon. Build it up in proper sequence and conditions.
    pyon_config = config.read_standard_configuration()

    # Load config override if provided. Supports variants literal and list of paths
    config_override = None
    if opts.config:
        if '{' in opts.config:
            # Variant 1: Dict of config values (parsed as a Python literal)
            try:
                eval_value = ast.literal_eval(opts.config)
                config_override = eval_value
            except ValueError:
                raise Exception("Value error in config arg '%s'" % opts.config)
        else:
            # Variant 2: List of paths
            from pyon.util.config import Config
            config_override = Config([opts.config]).data

    # Determine bootstrap_config
    if opts.config_from_directory:
        # Load minimal bootstrap config if option "config_from_directory"
        bootstrap_config = config.read_local_configuration(
            ['res/config/pyon_min_boot.yml'])
        config.apply_local_configuration(bootstrap_config,
                                         pyon.DEFAULT_LOCAL_CONFIG_PATHS)
        config.apply_configuration(bootstrap_config, config_override)
        config.apply_configuration(bootstrap_config, command_line_config)
        print "pycc: config_from_directory=True. Minimal bootstrap configuration:", bootstrap_config
    else:
        # Otherwise: Set to standard set of local config files plus command line overrides
        bootstrap_config = pyon_config.copy()
        config.apply_configuration(bootstrap_config, config_override)
        config.apply_configuration(bootstrap_config, command_line_config)

    # Override sysname from config file or command line
    if not opts.sysname and bootstrap_config.get_safe("system.name", None):
        new_sysname = bootstrap_config.get_safe("system.name")
        bootstrap.set_sys_name(new_sysname)

    # Delete sysname datastores if option "force_clean" is set
    if opts.force_clean:
        from pyon.datastore import clear_couch_util
        print "pycc: force_clean=True. DROP DATASTORES for sysname=%s" % bootstrap.get_sys_name(
        )
        clear_couch_util.clear_couch(bootstrap_config,
                                     prefix=bootstrap.get_sys_name())

    # If auto_bootstrap, load config and interfaces into directory
    # Note: this is idempotent and will not alter anything if this is not the first container to run
    if bootstrap_config.system.auto_bootstrap:
        print "pycc: auto_bootstrap=True."
        stored_config = pyon_config.copy()
        config.apply_configuration(stored_config, config_override)
        config.apply_configuration(stored_config, command_line_config)
        config.auto_bootstrap_config(bootstrap_config,
                                     system_cfg=stored_config)

    # Determine the final pyon_config
    # - Start from standard config already set (pyon.yml + local YML files)
    # - Optionally load config from directory
    if opts.config_from_directory:
        config.apply_remote_config(pyon_config)
    # - Last apply any separate command line config overrides
    config.apply_configuration(pyon_config, config_override)
    config.apply_configuration(pyon_config, command_line_config)

    # Load logging override config if provided. Supports variants literal and path.
    logging_config_override = None
    if opts.logcfg:
        if '{' in opts.logcfg:
            # Variant 1: Value is dict of config values
            try:
                eval_value = ast.literal_eval(opts.logcfg)
                logging_config_override = eval_value
            except ValueError:
                raise Exception("Value error in logcfg arg '%s'" % opts.logcfg)
        else:
            # Variant 2: Value is path to YAML file containing config values
            pyon.DEFAULT_LOGGING_PATHS.append(opts.logcfg)

    # Also set the immediate flag, but only if specified - it is an override
    if opts.immediate:
        # NOTE(review): dict_merge is not imported in this function (unlike the
        # sibling variant which imports it from pyon.util.containers); presumably
        # a module-level import outside this chunk - verify, else NameError here.
        dict_merge(pyon_config, {'system': {'immediate': True}}, True)

    # Bootstrap pyon's core. Load configuration etc.
    bootstrap.bootstrap_pyon(
        logging_config_override=logging_config_override,
        pyon_cfg=pyon_config)

    # Auto-bootstrap interfaces
    # @WARN: This currently imports ALL modules, executing ALL static initializers as side effect!!!!!!!
    if bootstrap_config.system.auto_bootstrap:
        config.auto_bootstrap_interfaces(bootstrap_config)

    if opts.no_container:
        print "pycc: no_container=True. Stopping here."
        return None

    # Create the container instance
    from pyon.container.cc import Container
    container = Container(*args, **command_line_config)

    return container
class Container(BaseContainerAgent):
    """
    The Capability Container. Its purpose is to spawn/monitor processes and services
    that do the bulk of the work in the ION system. It also manages connections to the Exchange
    and the various forms of datastores in the systems.

    Capabilities are defined in res/config/container_capabilities.yml, augmented
    by a profile YML (CFG container.profile), and initialized/started in the
    profile's start_order.
    """

    # Class static variables (defaults)
    id = None            # unique container id, set in __init__
    name = None          # CC agent name derived from id
    pidfile = None       # pidfile path if one was written
    instance = None      # singleton reference to the running container
    version = None       # version string, assigned externally (e.g. by pycc)
    start_time = None    # ION timestamp of container start

    def __init__(self, *args, **kwargs):
        BaseContainerAgent.__init__(self, *args, **kwargs)

        # Coordinates the container start
        self._status = INIT
        self._is_started = False

        # set container id and cc_agent name (as they are set in base class call)
        self.id = get_default_container_id()
        self.name = "cc_agent_%s" % self.id
        self.start_time = get_ion_ts()

        bootstrap.container_instance = self
        Container.instance = self
        self.container = self  # Make self appear as process to service clients
        self.CCAP = CCAP
        self.CFG = CFG

        log.debug("Container (sysname=%s) initializing ..." % bootstrap.get_sys_name())

        # Keep track of the overrides from the command-line, so they can trump app/rel file data
        self.spawn_args = kwargs

        # Greenlet context-local storage
        self.context = LocalContextMixin()

        # Load general capabilities file and augment with specific profile
        self._load_capabilities()

        # Instantiate the capabilities in profile start_order (start() comes later)
        start_order = self.cap_profile['start_order']
        for cap in start_order:
            if cap not in self._cap_definitions:
                raise ContainerError("CC capability %s not defined in profile" % cap)
            if cap in self._capabilities or cap in self._cap_instances:
                raise ContainerError("CC capability %s already initialized" % cap)
            try:
                cap_def = self._cap_definitions[cap]
                log.debug("__init__(): Initializing '%s'" % cap)
                cap_obj = named_any(cap_def['class'])(container=self)
                self._cap_instances[cap] = cap_obj
                # Enforce that declared dependencies were initialized first
                if 'depends_on' in cap_def and cap_def['depends_on']:
                    dep_list = cap_def['depends_on'].split(',')
                    for dep in dep_list:
                        dep = dep.strip()
                        if dep not in self._cap_initialized:
                            raise ContainerError("CC capability %s dependent on non-existing capability %s" % (
                                cap, dep))
                # Optionally expose the capability instance as a container attribute
                if 'field' in cap_def and cap_def['field']:
                    setattr(self, cap_def['field'], cap_obj)
                self._cap_initialized.append(cap)
            except Exception as ex:
                log.error("Container Capability %s init error: %s" % (cap, ex))
                raise

        log.debug("Container initialized, OK.")

    def _load_capabilities(self):
        """
        Loads capability definitions from container_capabilities.yml, merges the
        capability profile (CFG container.profile) on top and rebuilds CCAP.

        Raises ContainerError if the profile file is not a valid profile document.
        """
        self._cap_initialized = []  # List of capability constants initialized in container
        self._capabilities = []     # List of capability constants active in container
        self._cap_instances = {}    # Dict mapping capability->manager instance
        self._cap_definitions = Config(["res/config/container_capabilities.yml"]).data['capabilities']

        profile_filename = CFG.get_safe("container.profile", "development")
        if not profile_filename.endswith(".yml"):
            profile_filename = "res/profile/%s.yml" % profile_filename
        log.debug("Loading CC capability profile from file: %s", profile_filename)
        profile_cfg = Config([profile_filename]).data
        # FIX: use .get('type') instead of profile_cfg['type'] so a profile dict
        # without a "type" key raises the intended ContainerError, not a KeyError.
        if not isinstance(profile_cfg, dict) or profile_cfg.get('type') != "profile" or "profile" not in profile_cfg:
            raise ContainerError("Container capability profile invalid: %s" % profile_filename)
        self.cap_profile = profile_cfg['profile']

        if "capabilities" in self.cap_profile and self.cap_profile['capabilities']:
            dict_merge(self._cap_definitions, self.cap_profile['capabilities'], True)

        # Rebuild the CCAP constant map so it mirrors the merged definitions
        CCAP.clear()
        cap_list = self._cap_definitions.keys()
        CCAP.update(zip(cap_list, cap_list))

        if "config" in self.cap_profile and self.cap_profile['config']:
            log.info("Container CFG was changed based on profile: %s", profile_filename)
        # Note: The config update actually happens in pycc.py early on

    def start(self):
        """
        Starts all enabled capability instances in profile start_order, then
        publishes a START lifecycle event and marks the container RUNNING.

        Raises ContainerError if already started; re-raises capability errors.
        """
        log.debug("Container starting...")
        if self._is_started:
            raise ContainerError("Container already started")

        start_order = self.cap_profile['start_order']
        for cap in start_order:
            if cap not in self._cap_instances:
                continue
            # First find the default enabled value if no CFG key exists
            enabled_default = self._cap_definitions.get_safe("%s.enabled_default" % cap, True)
            # Then find CFG key where enabled flag is (default or override)
            enabled_config = self._cap_definitions.get_safe("%s.enabled_config" % cap,
                                                            "container.%s.enabled" % cap)
            # Then determine the enabled value
            enabled = CFG.get_safe(enabled_config, enabled_default)
            if enabled:
                log.debug("start(): Starting '%s'" % cap)
                try:
                    cap_obj = self._cap_instances[cap]
                    cap_obj.start()
                    self._capabilities.append(cap)
                except Exception as ex:
                    log.error("Container Capability %s start error: %s" % (cap, ex))
                    raise
            else:
                log.debug("start(): Capability '%s' disabled by config '%s'", cap, enabled_config)

        if self.has_capability(CCAP.EVENT_PUBLISHER):
            self.event_pub.publish_event(event_type="ContainerLifecycleEvent",
                                         origin=self.id,
                                         origin_type="CapabilityContainer",
                                         sub_type="START",
                                         state=ContainerStateEnum.START)

        self._is_started = True
        self._status = RUNNING
        log.info("Container (%s) started, OK.", self.id)

    def has_capability(self, capability):
        """
        Returns True if the given capability is in the list of container capabilities,
        i.e. available in this container.
        """
        return capability in self._capabilities

    @property
    def node(self):
        """
        Returns the active/default Node that should be used for most communication in the system.

        Defers to exchange manager, but only if it has been started, otherwise returns None.
        """
        if self.has_capability(CCAP.EXCHANGE_MANAGER):
            return self.ex_manager.default_node
        return None

    @contextmanager
    def _push_status(self, new_status):
        """
        Temporarily sets the internal status flag. Use this as a decorator or in a with-statement
        before calling a temporary status changing method, like start_rel_from_url.
        """
        curstatus = self._status
        self._status = new_status
        try:
            yield
        finally:
            self._status = curstatus

    def serve_forever(self):
        """
        Run the container until killed.
        """
        log.debug("In Container.serve_forever")

        if not self.proc_manager.proc_sup.running:
            self.start()

        # Exit if immediate==True and children len is ok
        num_procs = len(self.proc_manager.proc_sup.children)
        immediate = CFG.system.get('immediate', False)
        if immediate and num_procs == 1:  # only spawned greenlet is the CC-Agent
            log.debug("Container.serve_forever exiting due to CFG.system.immediate")
        else:
            # print a warning just in case
            if immediate and num_procs != 1:
                log.warn("CFG.system.immediate=True but number of spawned processes is not 1 (%d)", num_procs)
            try:
                # This just waits in this Greenlet for all child processes to complete,
                # which is triggered somewhere else.
                self.proc_manager.proc_sup.join_children()
            except (KeyboardInterrupt, SystemExit) as ex:
                if hasattr(self, 'gl_parent_watch') and self.gl_parent_watch is not None:
                    # Remove the greenlet that watches the parent process
                    self.gl_parent_watch.kill()
                # Let the caller handle this
                raise
            except:
                # Deliberate bare except: log with traceback and fall through so
                # the caller can proceed with container shutdown.
                log.exception('Unhandled error! Forcing container shutdown')

    def status(self):
        """
        Returns the internal status.
        """
        return self._status

    def is_running(self):
        """
        Returns True if the container is fully started (status RUNNING) and not
        terminating or stopped.
        """
        if self._status == RUNNING:
            return True
        return False

    def is_terminating(self):
        """
        Is the container in the process of shutting down or stopped.
        """
        if self._status == TERMINATING or self._status == TERMINATED:
            return True
        return False

    def _cleanup_pid(self):
        # Best-effort pidfile removal; failure is only warned about, never raised.
        if self.pidfile:
            log.debug("Cleanup pidfile: %s", self.pidfile)
            try:
                os.remove(self.pidfile)
            except Exception as e:
                log.warn("Pidfile could not be deleted: %s" % str(e))
            self.pidfile = None

    def stop_container(self):
        """
        Schedules container shutdown slightly deferred (0.5s), so the requester
        (e.g. an RPC caller) can still receive a reply first.
        """
        log.info("Received request to stop container")
        gl = gevent.spawn_later(0.5, self.stop)

    def stop(self, do_exit=True):
        """
        Stops the container: publishes a TERMINATE event (best effort), stops all
        capabilities in reverse start order, clears singleton references, and
        finally terminates the process via SIGTERM unless do_exit is False.
        """
        log.info("=============== Container stopping... ===============")
        self._status = TERMINATING

        if self.has_capability(CCAP.EVENT_PUBLISHER) and self.event_pub is not None:
            try:
                self.event_pub.publish_event(event_type="ContainerLifecycleEvent",
                                             origin=self.id,
                                             origin_type="CapabilityContainer",
                                             sub_type="TERMINATE",
                                             state=ContainerStateEnum.TERMINATE)
            except Exception as ex:
                log.exception("Error sending event")

        # Stop capabilities in reverse order of start (LIFO via pop()); errors
        # in one capability's stop() are logged and do not abort shutdown.
        while self._capabilities:
            capability = self._capabilities.pop()
            #log.debug("stop(): Stopping '%s'" % capability)
            try:
                cap_obj = self._cap_instances[capability]
                cap_obj.stop()
                del self._cap_instances[capability]
            except Exception as ex:
                log.exception("Container stop(): Error stop %s" % capability)

        Container.instance = None
        from pyon.core import bootstrap
        bootstrap.container_instance = None

        self._is_started = False
        self._status = TERMINATED
        log.info("Container stopped (%s).", self.id)

        if do_exit:
            os.kill(os.getpid(), signal.SIGTERM)

    def start_app(self, appdef=None, config=None):
        with self._push_status("START_APP"):
            return self.app_manager.start_app(appdef=appdef, config=config)

    def start_app_from_url(self, app_url=''):
        with self._push_status("START_APP_FROM_URL"):
            return self.app_manager.start_app_from_url(app_url=app_url)

    def start_rel(self, rel=None):
        with self._push_status("START_REL"):
            return self.app_manager.start_rel(rel=rel)

    def start_rel_from_url(self, rel_url='', config=None):
        with self._push_status("START_REL_FROM_URL"):
            return self.app_manager.start_rel_from_url(rel_url=rel_url, config=config)

    def fail_fast(self, err_msg="", skip_stop=False):
        """
        Container needs to shut down and NOW.
        """
        log.error("Fail Fast: %s", err_msg)
        if not skip_stop:
            self.stop()
        log.error("Fail Fast: killing container")
        traceback.print_exc()
        self._kill_fast()

    def _kill_fast(self):
        # The exit code of the terminated process is set to non-zero
        os.kill(os.getpid(), signal.SIGTERM)
def get_predicate_type_list(): Predicates.clear() Predicates.update( Config(["res/config/associations.yml"]).data['PredicateTypes']) return Predicates.keys()
def main():
    """
    Store configuration and interfaces into the datastore
    How to run this from command line:
        bin/store_interfaces  -s system name [ -of filename | -sf filename | -fc true|false]
        -of   Load object definition file
        -sf   Load service definition file
        -fc   Force clean the database

     Example:
        Load all object and service definitions
        bin/python bin/store_interfaces  -s mysysname
        Load all object and service definitions with force clean the database
        bin/python bin/store_interfaces  -s mysysname -fc
        Load object definition from a file
        bin/python bin/store_interfaces  -s mysysname -of obj/data/core/org.yml
        Load service definition from a file
        bin/python bin/store_interfaces  -s mysysname -sf obj/services/core/datastore_service.yml
    """
    parser = argparse.ArgumentParser()
    # NOTE(review): default=[] for a type=str option - the empty list is falsy so
    # the "if options.config" branch below is simply skipped by default; confirm
    # this is intentional vs default="".
    parser.add_argument(
        '-c', '--config', type=str,
        help='Additional config files to load or dict config content.',
        default=[])
    parser.add_argument('-fc', '--force_clean', action='store_true',
                        help='Force clean.')
    parser.add_argument("-of", "--object", dest="fobject",
                        help="Load object definition from a file")
    parser.add_argument("-s", "--sysname", dest="sysname",
                        help="System name")
    parser.add_argument("-sf", "--service", dest="fservice",
                        help="Load service definition from a file")

    options, extra = parser.parse_known_args()
    # Remaining args are parsed into positional args and key=value config overrides
    args, command_line_config = parse_args(extra)

    log.info("Storing SciON config and interfaces, with options: %s", str(options))

    from pyon.core import log as logutil
    logutil.configure_logging(logutil.DEFAULT_LOGGING_PATHS)

    # -------------------------------------------------------------------------
    # Store config and interfaces

    # Set global testing flag to False. We are running as standalone script. This is NO TEST.
    bootstrap.testing = False

    # Set sysname if provided in startup argument
    if options.sysname:
        bootstrap.set_sys_name(options.sysname)

    # Load config override if provided. Supports variants literal and list of paths
    config_override = None
    if options.config:
        if '{' in options.config:
            # Variant 1: Dict of config values (parsed as a Python literal)
            try:
                eval_value = ast.literal_eval(options.config)
                config_override = eval_value
            except ValueError:
                raise Exception("Value error in config arg '%s'" % options.config)
        else:
            # Variant 2: List of paths
            from pyon.util.config import Config
            config_override = Config([options.config]).data

    # bootstrap_config - Used for running this store_interfaces script
    bootstrap_config = config.read_local_configuration(
        ['res/config/pyon_min_boot.yml'])
    config.apply_local_configuration(bootstrap_config,
                                     pyon.DEFAULT_LOCAL_CONFIG_PATHS)
    if config_override:
        config.apply_configuration(bootstrap_config, config_override)
    config.apply_configuration(bootstrap_config, command_line_config)

    # Override sysname from config file or command line
    if not options.sysname and bootstrap_config.get_safe("system.name", None):
        new_sysname = bootstrap_config.get_safe("system.name")
        bootstrap.set_sys_name(new_sysname)

    # Delete sysname datastores if option "force_clean" is set
    if options.force_clean:
        from pyon.datastore import clear_db_util
        from pyon.util.file_sys import FileSystem
        log.info("force_clean=True. DROP DATASTORES for sysname=%s" %
                 bootstrap.get_sys_name())
        pyon_config = config.read_standard_configuration()  # Initial pyon.yml + pyon.local.yml
        clear_db_util.clear_db(bootstrap_config,
                               prefix=bootstrap.get_sys_name(),
                               sysname=bootstrap.get_sys_name())
        FileSystem._clean(pyon_config)

    # ion_config - Holds the new CFG object for the system (independent of this tool's config)
    ion_config = config.read_standard_configuration()
    if config_override:
        config.apply_configuration(ion_config, config_override)
    config.apply_configuration(ion_config, command_line_config)

    # -------------------------------------------------------------------------
    # Store config and interfaces

    iadm = InterfaceAdmin(bootstrap.get_sys_name(), config=bootstrap_config)

    # Make sure core datastores exist
    iadm.create_core_datastores()

    # Store system CFG properties
    iadm.store_config(ion_config)

    # Store system interfaces
    iadm.store_interfaces(options.fobject, options.fservice)

    iadm.close()
def get_compound_associations_list(): """Parses the associations.yml file for compound associations for the extended resource framework""" CompoundAssociations.clear() CompoundAssociations.update( Config(["res/config/associations.yml"]).data['CompoundAssociations']) return CompoundAssociations.keys()