def _dump_datastore(cls, outpath_base, ds_name, clear_dir=True):
        if not DatastoreManager.exists(ds_name):
            log.warn("Datastore does not exist: %s" % ds_name)
            return
        ds = DatastoreManager.get_datastore_instance(ds_name)

        if not os.path.exists(outpath_base):
            os.makedirs(outpath_base)

        outpath = "%s/%s" % (outpath_base, ds_name)
        if not os.path.exists(outpath):
            os.makedirs(outpath)
        if clear_dir:
            for f in os.listdir(outpath):
                os.remove(os.path.join(outpath, f))

        objs = ds.find_by_view("_all_docs",
                               None,
                               id_only=False,
                               convert_doc=False)
        numwrites = 0
        for obj_id, obj_key, obj in objs:
            # Some object ids have slashes; make the filename filesystem-safe
            fn = obj_id.replace("/", "_")
            with open("%s/%s.yml" % (outpath, fn), 'w') as f:
                yaml.dump(obj, f, default_flow_style=False)
                numwrites += 1
        log.info("Wrote %s objects to %s" % (numwrites, outpath))
Example #2
    def clear_datastore(cls, ds_name=None, prefix=None):
        if CFG.system.mockdb:
            log.warn("Cannot clear MockDB")
            return

        generic_ds = DatastoreManager.get_datastore_instance("")

        if ds_name:
            # First interpret ds_name as unqualified name
            if DatastoreManager.exists(ds_name, scoped=False):
                generic_ds.delete_datastore(ds_name)
                return
            # Next, interpret as logical (scoped) name
            if DatastoreManager.exists(ds_name, scoped=True):
                generic_ds.delete_datastore(ds_name)
            else:
                log.warn("Datastore does not exist: %s" % ds_name)
        elif prefix:
            db_list = generic_ds.list_datastores()
            cleared, ignored = 0, 0
            for db_name in db_list:
                if db_name.startswith(prefix):
                    generic_ds.delete_datastore(db_name)
                    log.debug("Cleared couch datastore '%s'" % db_name)
                    cleared += 1
                else:
                    ignored += 1
            log.info("Cleared %d couch datastores, ignored %d" % (cleared, ignored))
        else:
            log.warn("Cannot clear datastore without prefix or datastore name")
    def _load_datastore(cls, path=None, ds_name=None, ignore_errors=True):
        if not DatastoreManager.exists(ds_name):
            log.warn("Datastore does not exist: %s" % ds_name)
        ds = DatastoreManager.get_datastore_instance(ds_name)
        objects = []
        for fn in os.listdir(path):
            fp = os.path.join(path, fn)
            try:
                with open(fp, 'r') as f:
                    yaml_text = f.read()
                obj = yaml.safe_load(yaml_text)
                if "_rev" in obj:
                    del obj["_rev"]
                objects.append(obj)
            except Exception as ex:
                if ignore_errors:
                    log.warn("load error id=%s err=%s" % (fn, str(ex)))
                else:
                    raise ex

        if objects:
            try:
                res = ds.create_doc_mult(objects, allow_ids=True)
                log.info("DatastoreLoader: Loaded %s objects into %s" % (len(res), ds_name))
            except Exception as ex:
                if ignore_errors:
                    log.warn("load error id=%s err=%s" % (fn, str(ex)))
                else:
                    raise ex
    def clear_datastore(cls, ds_name=None, prefix=None):
        if CFG.system.mockdb:
            log.warn("Cannot clear MockDB")
            return

        generic_ds = DatastoreManager.get_datastore_instance("")

        if ds_name:
            # First interpret ds_name as unqualified name
            if DatastoreManager.exists(ds_name, scoped=False):
                generic_ds.delete_datastore(ds_name)
                return
            # Next, interpret as logical (scoped) name
            if DatastoreManager.exists(ds_name, scoped=True):
                generic_ds.delete_datastore(ds_name)
            else:
                log.warn("Datastore does not exist: %s" % ds_name)
        elif prefix:
            db_list = generic_ds.list_datastores()
            cleared, ignored = 0, 0
            for db_name in db_list:
                if db_name.startswith(prefix):
                    generic_ds.delete_datastore(db_name)
                    log.debug("Cleared couch datastore '%s'" % db_name)
                    cleared += 1
                else:
                    ignored += 1
            log.info("Cleared %d couch datastores, ignored %d" %
                     (cleared, ignored))
        else:
            log.warn("Cannot clear datastore without prefix or datastore name")
    def _load_datastore(cls, path=None, ds_name=None, ignore_errors=True):
        if not DatastoreManager.exists(ds_name):
            log.warn("Datastore does not exist: %s" % ds_name)
        ds = DatastoreManager.get_datastore_instance(ds_name)
        objects = []
        for fn in os.listdir(path):
            fp = os.path.join(path, fn)
            try:
                with open(fp, 'r') as f:
                    yaml_text = f.read()
                obj = yaml.safe_load(yaml_text)
                if "_rev" in obj:
                    del obj["_rev"]
                objects.append(obj)
            except Exception as ex:
                if ignore_errors:
                    log.warn("load error id=%s err=%s" % (fn, str(ex)))
                else:
                    raise ex

        if objects:
            try:
                res = ds.create_doc_mult(objects, allow_ids=True)
                log.info("DatastoreLoader: Loaded %s objects into %s" %
                         (len(res), ds_name))
            except Exception as ex:
                if ignore_errors:
                    log.warn("load error id=%s err=%s" % (fn, str(ex)))
                else:
                    raise ex
Example #6
    def _dump_datastore(cls, outpath_base, ds_name, clear_dir=True):
        if not DatastoreManager.exists(ds_name):
            log.warn("Datastore does not exist: %s" % ds_name)
            return
        ds = DatastoreManager.get_datastore_instance(ds_name)

        if not os.path.exists(outpath_base):
            os.makedirs(outpath_base)

        outpath = "%s/%s" % (outpath_base, ds_name)
        if not os.path.exists(outpath):
            os.makedirs(outpath)
        if clear_dir:
            for f in os.listdir(outpath):
                os.remove(os.path.join(outpath, f))

        objs = ds.find_by_view("_all_docs", None, id_only=False, convert_doc=False)
        numwrites = 0
        for obj_id, obj_key, obj in objs:
            # Some object ids have slashes; make the filename filesystem-safe
            fn = obj_id.replace("/", "_")
            with open("%s/%s.yml" % (outpath, fn), "w") as f:
                yaml.dump(obj, f, default_flow_style=False)
                numwrites += 1
        log.info("Wrote %s objects to %s" % (numwrites, outpath))
Example #7
    def __init__(self, *args, **kwargs):
        BaseContainerAgent.__init__(self, *args, **kwargs)

        self._is_started = False

        self._capabilities = []

        # set container id and cc_agent name (as they are set in base class call)
        self.id = get_default_container_id()
        self.name = "cc_agent_%s" % self.id

        Container.instance = self

        from pyon.core import bootstrap
        bootstrap.container_instance = self

        log.debug("Container (sysname=%s) initializing ..." % bootstrap.get_sys_name())

        # DatastoreManager - controls access to Datastores (both mock and couch backed)
        self.datastore_manager = DatastoreManager()

        self.datastore_manager.start()
        self._capabilities.append("DATASTORE_MANAGER")

        # Keep track of the overrides from the command-line, so they can trump app/rel file data
        self.spawn_args = kwargs

        # Instantiate Directory and self-register
        # Has the additional side effect of either
        # bootstrapping the configuration into the
        # directory or read the configuration based
        # in the value of the auto_bootstrap setting
        self.directory = Directory()

        # Create this Container's specific ExchangeManager instance
        self.ex_manager = ExchangeManager(self)

        # Create this Container's specific ProcManager instance
        self.proc_manager = ProcManager(self)

        # Create this Container's specific AppManager instance
        self.app_manager = AppManager(self)

        # File System - Interface to the OS File System, using correct path names and setups
        self.file_system = FileSystem(CFG)

        # Governance Controller - manages the governance related interceptors
        self.governance_controller = GovernanceController(self)

        # sFlow manager - controls sFlow stat emission
        self.sflow_manager = SFlowManager(self)

        # Coordinates the container start
        self._status = "INIT"

        # protection for when the container itself is used as a Process for clients
        self.container = self

        log.debug("Container initialized, OK.")
Example #8
    def __init__(self, *args, **kwargs):
        BaseContainerAgent.__init__(self, *args, **kwargs)

        self._is_started = False

        # set id and name (as they are set in base class call)
        self.id = string.replace('%s_%d' % (os.uname()[1], os.getpid()), ".",
                                 "_")
        self.name = "cc_agent_%s" % self.id

        Container.instance = self

        # TODO: Bug: Replacing the CFG instance does not work because references to it are already public. Update it in place instead.
        dict_merge(CFG, kwargs, inplace=True)
        from pyon.core import bootstrap
        bootstrap.container_instance = self
        bootstrap.assert_configuration(CFG)
        log.debug("Container (sysname=%s) initializing ..." %
                  bootstrap.get_sys_name())

        # Keep track of the overrides from the command-line, so they can trump app/rel file data
        self.spawn_args = kwargs

        # Load object and service registry etc.
        bootstrap_pyon()

        # Create this Container's specific ExchangeManager instance
        self.ex_manager = ExchangeManager(self)

        # Create this Container's specific ProcManager instance
        self.proc_manager = ProcManager(self)

        # Create this Container's specific AppManager instance
        self.app_manager = AppManager(self)

        # DatastoreManager - controls access to Datastores (both mock and couch backed)
        self.datastore_manager = DatastoreManager()

        # File System - Interface to the OS File System, using correct path names and setups
        self.file_system = FileSystem(CFG)

        # Governance Controller - manages the governance related interceptors
        self.governance_controller = GovernanceController(self)

        # sFlow manager - controls sFlow stat emission
        self.sflow_manager = SFlowManager(self)

        # Coordinates the container start
        self._is_started = False
        self._capabilities = []
        self._status = "INIT"

        # protection for when the container itself is used as a Process for clients
        self.container = self

        log.debug("Container initialized, OK.")
Example #9
    def test_event_repo(self):
        dsm = DatastoreManager()
        ds = dsm.get_datastore("events")
        ds.delete_datastore()
        ds.create_datastore()

        event_repo = EventRepository(dsm)
        event_repo1 = EventRepository(dsm)

        event1 = Event(origin="resource1")
        event_id, _ = event_repo.put_event(event1)

        event1r = event_repo.get_event(event_id)
        self.assertEquals(event1.origin, event1r.origin)

        ts = 1328680477138
        events2 = []
        for i in xrange(5):
            ev = Event(origin="resource2", ts_created=str(ts + i))
            event_id, _ = event_repo.put_event(ev)
            events2.append((ev, event_id))

        events_r = event_repo.find_events(origin='resource2')
        self.assertEquals(len(events_r), 5)

        events_r = event_repo.find_events(origin='resource2', descending=True)
        self.assertEquals(len(events_r), 5)

        events_r = event_repo.find_events(origin='resource2', limit=3)
        self.assertEquals(len(events_r), 3)

        events_r = event_repo.find_events(origin='resource2',
                                          start_ts=str(ts + 3))
        self.assertEquals(len(events_r), 2)

        events_r = event_repo.find_events(origin='resource2',
                                          end_ts=str(ts + 2))
        self.assertEquals(len(events_r), 3)

        events_r = event_repo.find_events(origin='resource2',
                                          start_ts=str(ts + 3),
                                          end_ts=str(ts + 4))
        self.assertEquals(len(events_r), 2)

        events_r = event_repo.find_events(start_ts=str(ts + 3),
                                          end_ts=str(ts + 4))
        self.assertEquals(len(events_r), 2)

        event3 = ResourceLifecycleEvent(origin="resource3")
        event_id, _ = event_repo.put_event(event3)

        events_r = event_repo.find_events(event_type="ResourceLifecycleEvent")
        self.assertEquals(len(events_r), 1)
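
One subtlety in the test above: `ts_created` is stored and queried as a string, so the range filters rely on lexicographic comparison. That matches numeric order only while the digit strings have equal length, which holds for millisecond epoch timestamps of the same era. A quick illustration of the assumption:

ts = 1328680477138
stamps = [str(ts + i) for i in range(5)]

# Equal-length digit strings sort exactly like the numbers they encode
assert sorted(stamps) == stamps
assert str(ts + 2) < str(ts + 3)

# With mixed lengths the equivalence breaks: "9" sorts after "10"
assert "9" > "10"
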
Example #10
    def test_conv_repo(self):
        dsm = DatastoreManager()
        ds = dsm.get_datastore("conversations")
        ds.delete_datastore()
        ds.create_datastore()

        conv_repo = ConvRepository(dsm)

        conv1 = ConversationMessage(sender='sender', recipient='receiver', conversation_id='1', protocol='rpc', headers={'nofield':'novalue'})
        conv_id, _ = conv_repo.put_conv(conv1)

        conv1r = conv_repo.conv_store.read(conv_id)
        self.assertEquals(conv1.sender, conv1r.sender)
Example #11
    def _load_datastore(cls, path=None, ds_name=None, ignore_errors=True):
        if not DatastoreManager.exists(ds_name):
            log.warn("Datastore does not exist: %s" % ds_name)
        ds = DatastoreManager.get_datastore_instance(ds_name)
        for fn in os.listdir(path):
            fp = os.path.join(path, fn)
            try:
                cls._read_and_create_obj(ds, fp)
            except Exception as ex:
                if ignore_errors:
                    log.warn("load error id=%s err=%s" % (fn, str(ex)))
                else:
                    raise ex
Example #12
    def test_event_repo(self):
        dsm = DatastoreManager()
        ds = dsm.get_datastore("events")
        ds.delete_datastore()
        ds.create_datastore()

        event_repo = EventRepository(dsm)
        event_repo1 = EventRepository(dsm)

        event1 = Event(origin="resource1")
        event_id, _ = event_repo.put_event(event1)

        event1r = event_repo.get_event(event_id)
        self.assertEquals(event1.origin, event1r.origin)

        ts = 1328680477138
        events2 = []
        for i in xrange(5):
            ev = Event(origin="resource2", ts_created=str(ts + i))
            event_id, _ = event_repo.put_event(ev)
            events2.append((ev,event_id))

        events_r = event_repo.find_events(origin='resource2')
        self.assertEquals(len(events_r), 5)

        events_r = event_repo.find_events(origin='resource2', descending=True)
        self.assertEquals(len(events_r), 5)

        events_r = event_repo.find_events(origin='resource2', limit=3)
        self.assertEquals(len(events_r), 3)

        events_r = event_repo.find_events(origin='resource2', start_ts=str(ts+3))
        self.assertEquals(len(events_r), 2)

        events_r = event_repo.find_events(origin='resource2', end_ts=str(ts+2))
        self.assertEquals(len(events_r), 3)

        events_r = event_repo.find_events(origin='resource2', start_ts=str(ts+3), end_ts=str(ts+4))
        self.assertEquals(len(events_r), 2)

        events_r = event_repo.find_events(start_ts=str(ts+3), end_ts=str(ts+4))
        self.assertEquals(len(events_r), 2)


        event3 = ResourceLifecycleEvent(origin="resource3")
        event_id, _ = event_repo.put_event(event3)

        events_r = event_repo.find_events(event_type="ResourceLifecycleEvent")
        self.assertEquals(len(events_r), 1)
Example #13
    def __init__(self):
        bootstrap_pyon()
        dsm = DatastoreManager()
        self.datastore = dsm.get_datastore(ds_name='coverage')
        if self.datastore is None:
            raise RuntimeError("Unable to load datastore for coverage")
        else:
            self.entity_table_name = self.datastore._get_datastore_name()
            log.trace("Got datastore: %s type %s" % (self.datastore._get_datastore_name(), str(type(self.datastore))))
        self.span_store = dsm.get_datastore(ds_name='coverage_spans')
        if self.span_store is None:
            raise RuntimeError("Unable to load datastore for coverage_spans")
        else:
            self.span_table_name = self.span_store._get_datastore_name()
            log.trace("Got datastore: %s type %s", self.span_store._get_datastore_name(), type(self.span_store))
    def dump_datastore(cls, path=None, ds_name=None, clear_dir=True):
        """
        Dumps CouchDB datastores into a directory as YML files.
        @param ds_name Logical name (such as "resources") of an ION datastore
        @param path Directory to put dumped datastores into (defaults to
                    "res/preload/local/dump_[timestamp]")
        @param clear_dir if True, delete contents of datastore dump dirs
        """
        if CFG.system.mockdb:
            log.warn("Cannot dump from MockDB")
            return
        if not path:
            dtstr = datetime.datetime.today().strftime('%Y%m%d_%H%M%S')
            path = "res/preload/local/dump_%s" % dtstr
        if ds_name:
            if DatastoreManager.exists(ds_name):
                cls._dump_datastore(path, ds_name, clear_dir)
            else:
                log.warn("Datastore does not exist: %s" % ds_name)
        else:
            ds_list = [
                'resources', 'objects', 'state', 'events', 'directory',
                'scidata'
            ]
            for ds in ds_list:
                cls._dump_datastore(path, ds, clear_dir)
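
A hypothetical invocation of the admin classmethods above (the hosting utility class is not shown in these excerpts; `DatastoreAdmin` is an assumed name):

# Dump the "resources" datastore, then restore it from the dump
DatastoreAdmin.dump_datastore(path="res/preload/local/dump_manual",
                              ds_name="resources")
DatastoreAdmin._load_datastore(path="res/preload/local/dump_manual/resources",
                               ds_name="resources")
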
Example #15
    def test_state(self):
        dsm = DatastoreManager()
        state_repo = StateRepository(dsm)
        state_repo.start()
        state_repo1 = StateRepository(dsm)

        state1 = {'key':'value1'}
        state_repo.put_state("id1", state1)

        state2, state_obj2 = state_repo.get_state("id1")
        self.assertEquals(state1, state2)
        self.assertEquals(state_obj2.state, state2)
        self.assertTrue(state_obj2.ts)
        self.assertTrue(state_obj2._rev)
        self.assertEquals(state_obj2._id, "id1")

        state3 = {'key':'value2', 'key2': {}}
        state_repo.put_state("id1", state3)

        state4, state_obj4 = state_repo.get_state("id1")
        self.assertEquals(state3, state4)

        # Test persisting with a prior state object to save a read
        state5 = {'key':'value5', 'key2': {}}
        state_repo.put_state("id1", state5, state_obj=state_obj4)

        state6, state_obj6 = state_repo.get_state("id1")
        self.assertEquals(state5, state6)

        # Test that using an old state object will not screw up
        state7 = {'key':'value7', 'key2': {}}
        state_repo.put_state("id1", state7, state_obj=state_obj4)
Example #16
    def test_directory(self):
        dsm = DatastoreManager()
        directory = Directory(dsm)

        self.addCleanup(directory.dir_store.delete_datastore)

        root = directory.lookup("/")
        self.assert_(root is not None)

        self.assertEquals(directory.register("/","temp"), None)

        # Create a node
        root = directory.lookup("/temp")
        self.assertEquals(root, {} )

        # The create case
        entry_old = directory.register("/temp", "entry1", foo="awesome")
        self.assertEquals(entry_old, None)
        entry_new = directory.lookup("/temp/entry1")
        self.assertEquals(entry_new, {"foo":"awesome"})

        # The update case
        entry_old = directory.register("/temp", "entry1", foo="ingenious")
        self.assertEquals(entry_old, {"foo":"awesome"})

        # The delete case
        entry_old = directory.unregister("/temp", "entry1")
        self.assertEquals(entry_old, {"foo":"ingenious"})
        entry_new = directory.lookup("/temp/entry1")
        self.assertEquals(entry_new, None)

        directory.register("/BranchA", "X", resource_id="rid1")
        directory.register("/BranchA", "Y", resource_id="rid2")
        directory.register("/BranchA", "Z", resource_id="rid3")
        directory.register("/BranchA/X", "a", resource_id="rid4")
        directory.register("/BranchA/X", "b", resource_id="rid5")
        directory.register("/BranchB", "k", resource_id="rid6")
        directory.register("/BranchB", "l", resource_id="rid7")
        directory.register("/BranchB/k", "m", resource_id="rid7")

        res_list = directory.find_by_value("/", attribute="resource_id", value="rid3")
        self.assertEquals(len(res_list), 1)
        self.assertEquals(res_list[0][0], "ION/BranchA/Z")

        res_list = directory.find_by_value("/", attribute="resource_id", value="rid34")
        self.assertEquals(len(res_list), 0)

        res_list = directory.find_by_value("/", attribute="resource_id", value="rid7")
        self.assertEquals(len(res_list), 2)

        res_list = directory.find_by_value("/BranchB", attribute="resource_id", value="rid7")
        self.assertEquals(len(res_list), 2)

        res_list = directory.find_by_value("/Branch", attribute="resource_id", value="rid7")
        self.assertEquals(len(res_list), 2)

        res_list = directory.find_by_value("/BranchB/k", attribute="resource_id", value="rid7")
        self.assertEquals(len(res_list), 1)

        directory.close()
Example #17
    def create_resources_snapshot(self, persist=False, filename=None):
        ds = DatastoreManager.get_datastore_instance(DataStore.DS_RESOURCES, DataStore.DS_PROFILE.RESOURCES)
        all_objs = ds.find_docs_by_view("_all_docs", None, id_only=False)

        log.info("Found %s objects in datastore resources", len(all_objs))

        resources = {}
        associations = {}
        snapshot = dict(resources=resources, associations=associations)

        for obj_id, key, obj in all_objs:
            if obj_id.startswith("_design"):
                continue
            if not isinstance(obj, dict):
                raise Inconsistent("Object of bad type found: %s" % type(obj))
            obj_type = obj.get("type_", None)
            if obj_type == "Association":
                associations[obj_id] = obj.get("ts", None)
            elif obj_type:
                resources[obj_id] = obj.get("ts_updated", None)
            else:
                raise Inconsistent("Object with no type_ found: %s" % obj)

        if persist:
            dtstr = datetime.datetime.today().strftime('%Y%m%d_%H%M%S')
            path = filename or "interface/rrsnapshot_%s.json" % dtstr
            snapshot_json = json.dumps(snapshot)
            with open(path, "w") as f:
                #yaml.dump(snapshot, f, default_flow_style=False)
                f.write(snapshot_json)

        log.debug("Created resource registry snapshot. %s resources, %s associations", len(resources), len(associations))

        return snapshot
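
Because the snapshot maps every resource and association id to a timestamp, two snapshots are cheap to diff. A small helper sketch (hypothetical, not part of the excerpted code):

import json

def diff_snapshots(path_a, path_b):
    """Return resource ids added, removed, or re-timestamped between snapshots."""
    with open(path_a) as f:
        res_a = json.load(f)["resources"]
    with open(path_b) as f:
        res_b = json.load(f)["resources"]
    added = set(res_b) - set(res_a)
    removed = set(res_a) - set(res_b)
    changed = set(rid for rid in set(res_a) & set(res_b)
                  if res_a[rid] != res_b[rid])
    return added, removed, changed
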
    def on_start(self):

        #---------------------------------------------------------------------------------------------------
        # Get the event Repository
        #---------------------------------------------------------------------------------------------------

        self.event_repo = self.container.instance.event_repository

        self.smtp_client = setting_up_smtp_client()

        self.ION_NOTIFICATION_EMAIL_ADDRESS = '*****@*****.**'

        #---------------------------------------------------------------------------------------------------
        # Create an event processor
        #---------------------------------------------------------------------------------------------------

        self.event_processor = EmailEventProcessor(self.smtp_client)

        #---------------------------------------------------------------------------------------------------
        # load event originators, types, and table
        #---------------------------------------------------------------------------------------------------

        self.event_types = CFG.event.types
        self.event_table = {}

        #---------------------------------------------------------------------------------------------------
        # Get the clients
        #---------------------------------------------------------------------------------------------------

        self.discovery = DiscoveryServiceClient()
        self.process_dispatcher = ProcessDispatcherServiceClient()
        self.datastore_manager = DatastoreManager()

        self.event_publisher = EventPublisher()
        self.scheduler_service = SchedulerService()
Example #19
    def delete_ui(self):
        resource_types = [
            'UIInternalResourceType',
            'UIInformationLevel',
            'UIScreenLabel',
            'UIAttribute',
            'UIBlock',
            'UIGroup',
            'UIRepresentation',
            'UIResourceType',
            'UIView',
            'UIBlockAttribute',
            'UIBlockRepresentation',
            'UIGroupBlock',
            'UIViewGroup']

        res_ids = []

        for restype in resource_types:
            res_is_list, _ = self.container.resource_registry.find_resources(restype, id_only=True)
            res_ids.extend(res_is_list)
            log.debug("Found %s resources of type %s" % (len(res_is_list), restype))

        ds = DatastoreManager.get_datastore_instance("resources")
        docs = ds.read_doc_mult(res_ids)

        for doc in docs:
            doc['_deleted'] = True

        ds.create_doc_mult(docs, allow_ids=True)
Example #20
    def __init__(self, *args, **kwargs):
        BaseContainerAgent.__init__(self, *args, **kwargs)

        # set id and name (as they are set in base class call)
        self.id = string.replace('%s_%d' % (os.uname()[1], os.getpid()), ".", "_")
        self.name = "cc_agent_%s" % self.id

        Container.instance = self

        # TODO: Bug: Replacing the CFG instance does not work because references to it are already public. Update it in place instead.
        dict_merge(CFG, kwargs)
        from pyon.core import bootstrap
        bootstrap.sys_name = CFG.system.name or bootstrap.sys_name
        log.debug("Container (sysname=%s) initializing ..." % bootstrap.sys_name)

        # Keep track of the overrides from the command-line, so they can trump app/rel file data
        self.spawn_args = DictModifier(CFG, kwargs)

        # Load object and service registry etc.
        bootstrap_pyon()

        # Create this Container's specific ExchangeManager instance
        self.ex_manager = ExchangeManager(self)

        # Create this Container's specific ProcManager instance
        self.proc_manager = ProcManager(self)

        # Create this Container's specific AppManager instance
        self.app_manager = AppManager(self)

        # DatastoreManager - controls access to Datastores (both mock and couch backed)
        self.datastore_manager = DatastoreManager()
        
        log.debug("Container initialized, OK.")
Example #21
    def __init__(self, *args, **kwargs):
        BaseContainerAgent.__init__(self, *args, **kwargs)

        self._is_started = False

        # set id and name (as they are set in base class call)
        self.id = string.replace('%s_%d' % (os.uname()[1], os.getpid()), ".", "_")
        self.name = "cc_agent_%s" % self.id

        Container.instance = self

        # TODO: Bug: Replacing the CFG instance does not work because references to it are already public. Update it in place instead.
        dict_merge(CFG, kwargs, inplace=True)
        from pyon.core import bootstrap
        bootstrap.container_instance = self
        bootstrap.assert_configuration(CFG)
        log.debug("Container (sysname=%s) initializing ..." % bootstrap.get_sys_name())

        # Keep track of the overrides from the command-line, so they can trump app/rel file data
        self.spawn_args = kwargs

        # Load object and service registry etc.
        bootstrap_pyon()

        # Create this Container's specific ExchangeManager instance
        self.ex_manager = ExchangeManager(self)

        # Create this Container's specific ProcManager instance
        self.proc_manager = ProcManager(self)

        # Create this Container's specific AppManager instance
        self.app_manager = AppManager(self)

        # DatastoreManager - controls access to Datastores (both mock and couch backed)
        self.datastore_manager = DatastoreManager()

        # File System - Interface to the OS File System, using correct path names and setups
        self.file_system = FileSystem(CFG)

        # Governance Controller - manages the governance related interceptors
        self.governance_controller = GovernanceController(self)

        # sFlow manager - controls sFlow stat emission
        self.sflow_manager = SFlowManager(self)

        # Coordinates the container start
        self._is_started = False
        self._capabilities = []
        self._status = "INIT"

        # protection for when the container itself is used as a Process for clients
        self.container = self

        log.debug("Container initialized, OK.")
Example #22
    def __init__(self, *args, **kwargs):
        BaseContainerAgent.__init__(self, *args, **kwargs)

        self._is_started = False
        # set container id and cc_agent name (as they are set in base class call)
        self.id = get_default_container_id()
        self.name = "cc_agent_%s" % self.id
        self._capabilities = []

        from pyon.core import bootstrap
        bootstrap.container_instance = self
        Container.instance = self

        log.debug("Container (sysname=%s) initializing ..." % bootstrap.get_sys_name())

        # Keep track of the overrides from the command-line, so they can trump app/rel file data
        self.spawn_args = kwargs

        # DatastoreManager - controls access to Datastores (both mock and couch backed)
        self.datastore_manager = DatastoreManager()

        # TODO: Do not start a capability here. Symmetric start/stop
        self.datastore_manager.start()
        self._capabilities.append("DATASTORE_MANAGER")

        # Instantiate Directory
        self.directory = Directory()

        # Create this Container's specific ExchangeManager instance
        self.ex_manager = ExchangeManager(self)

        # Create this Container's specific ProcManager instance
        self.proc_manager = ProcManager(self)

        # Create this Container's specific AppManager instance
        self.app_manager = AppManager(self)

        # File System - Interface to the OS File System, using correct path names and setups
        self.file_system = FileSystem(CFG)

        # Governance Controller - manages the governance related interceptors
        self.governance_controller = GovernanceController(self)

        # sFlow manager - controls sFlow stat emission
        self.sflow_manager = SFlowManager(self)

        # Coordinates the container start
        self._status = "INIT"

        # protection for when the container itself is used as a Process for clients
        self.container = self

        log.debug("Container initialized, OK.")
    def get_blame_objects(cls):
        ds_list = ['resources', 'objects', 'state', 'events', 'directory', 'scidata']
        blame_objs = {}
        for ds_name in ds_list:
            ds = DatastoreManager.get_datastore_instance(ds_name)
            ret_objs = ds.find_by_view("_all_docs", None, id_only=False, convert_doc=False)
            objs = []
            for obj_id, obj_key, obj in ret_objs:
                if "blame_" in obj:
                    objs.append(obj)
            blame_objs[ds_name] = objs
        return blame_objs
    def delete_ooi_assets(self):
        res_ids = []

        ooi_asset_types = ['InstrumentModel',
                           'PlatformModel',
                           'Observatory',
                           'Subsite',
                           'PlatformSite',
                           'InstrumentSite',
                           'InstrumentAgent',
                           'InstrumentAgentInstance',
                           'InstrumentDevice',
                           'PlatformAgent',
                           'PlatformAgentInstance',
                           'PlatformDevice',
                           'Deployment',
                           'DataProduct'
        ]

        self.resource_ds = DatastoreManager.get_datastore_instance(DataStore.DS_RESOURCES, DataStore.DS_PROFILE.RESOURCES)

        del_objs = {}
        del_assocs = {}
        all_objs = self.resource_ds.find_by_view("_all_docs", None, id_only=False, convert_doc=False)
        for obj_id, key, obj in all_objs:
            if obj_id.startswith("_design") or not isinstance(obj, dict):
                continue
            obj_type = obj.get("type_", None)
            if obj_type and obj_type in ooi_asset_types:
                del_objs[obj_id] = obj
        for obj_id, key, obj in all_objs:
            if obj_id.startswith("_design") or not isinstance(obj, dict):
                continue
            obj_type = obj.get("type_", None)
            if obj_type == "Association":
                if obj['o'] in del_objs or obj['s'] in del_objs:
                    del_assocs[obj_id] = obj
        for doc in del_objs.values():
            doc_id, doc_rev = doc['_id'], doc['_rev']
            doc.clear()
            doc.update(dict(_id=doc_id, _rev=doc_rev, _deleted=True))
        for doc in del_assocs.values():
            doc_id, doc_rev = doc['_id'], doc['_rev']
            doc.clear()
            doc.update(dict(_id=doc_id, _rev=doc_rev, _deleted=True))

        self.resource_ds.update_doc_mult(del_objs.values())
        self.resource_ds.update_doc_mult(del_assocs.values())

        log.info("Deleted %s OOI resources and %s associations", len(del_objs), len(del_assocs))
Example #25
    def test_event_repo(self):
        if bootstrap.CFG.system.mockdb:
            raise SkipTest("only works with CouchDB views")

        dsm = DatastoreManager()

        event_repo = EventRepository(dsm)
        event_repo1 = EventRepository(dsm)

        event1 = Event(origin="resource1")
        event_id, _ = event_repo.put_event(event1)

        event1r = event_repo.get_event(event_id)
        self.assertEquals(event1.origin, event1r.origin)

        ts = 1328680477138
        events2 = []
        for i in xrange(5):
            ev = Event(origin="resource2", ts_created=str(ts + i))
            event_id, _ = event_repo.put_event(ev)
            events2.append((ev,event_id))

        events_r = event_repo.find_events(origin='resource2')
        self.assertEquals(len(events_r), 5)

        events_r = event_repo.find_events(origin='resource2', descending=True)
        self.assertEquals(len(events_r), 5)

        events_r = event_repo.find_events(origin='resource2', limit=3)
        self.assertEquals(len(events_r), 3)

        events_r = event_repo.find_events(origin='resource2', start_ts=str(ts+3))
        self.assertEquals(len(events_r), 2)

        events_r = event_repo.find_events(origin='resource2', end_ts=str(ts+2))
        self.assertEquals(len(events_r), 3)

        events_r = event_repo.find_events(origin='resource2', start_ts=str(ts+3), end_ts=str(ts+4))
        self.assertEquals(len(events_r), 2)

        events_r = event_repo.find_events(start_ts=str(ts+3), end_ts=str(ts+4))
        self.assertEquals(len(events_r), 2)


        event3 = ResourceLifecycleEvent(origin="resource3")
        event_id, _ = event_repo.put_event(event3)

        events_r = event_repo.find_events(event_type="ResourceLifecycleEvent")
        self.assertEquals(len(events_r), 1)
Example #26
    def test_state(self):
        dsm = DatastoreManager()
        state_repo = StateRepository(dsm)
        state_repo1 = StateRepository(dsm)

        state1 = {'key': 'value1'}
        state_repo.put_state("id1", state1)

        state2 = state_repo.get_state("id1")
        self.assertEquals(state1, state2)

        state3 = {'key': 'value2', 'key2': {}}
        state_repo.put_state("id1", state3)

        state4 = state_repo.get_state("id1")
        self.assertEquals(state3, state4)
Example #27
    def delete_ui(self):
        res_ids = []
        for restype in self.UI_RESOURCE_TYPES:
            res_is_list, _ = self.container.resource_registry.find_resources(restype, id_only=True)
            res_ids.extend(res_is_list)
            #log.debug("Found %s resources of type %s" % (len(res_is_list), restype))

        ds = DatastoreManager.get_datastore_instance("resources")
        docs = ds.read_doc_mult(res_ids)

        for doc in docs:
            doc['_deleted'] = True

        # TODO: Also delete associations

        ds.update_doc_mult(docs)
        log.info("Deleted %s UI resources and associations", len(docs))
Example #28
    def delete_ui(self):
        res_ids = []
        for restype in self.UI_RESOURCE_TYPES:
            res_is_list, _ = self.container.resource_registry.find_resources(
                restype, id_only=True)
            res_ids.extend(res_is_list)
            #log.debug("Found %s resources of type %s" % (len(res_is_list), restype))

        ds = DatastoreManager.get_datastore_instance("resources")
        docs = ds.read_doc_mult(res_ids)

        for doc in docs:
            doc['_deleted'] = True

        # TODO: Also delete associations

        ds.update_doc_mult(docs)
        log.info("Deleted %s UI resources and associations", len(docs))
    def get_blame_objects(cls):
        ds_list = [
            'resources', 'objects', 'state', 'events', 'directory', 'scidata'
        ]
        blame_objs = {}
        for ds_name in ds_list:
            ret_objs = []
            try:
                ds = DatastoreManager.get_datastore_instance(ds_name)
                ret_objs = ds.find_by_view("_all_docs",
                                           None,
                                           id_only=False,
                                           convert_doc=False)
            except BadRequest:
                continue
            objs = []
            for obj_id, obj_key, obj in ret_objs:
                if "blame_" in obj:
                    objs.append(obj)
            blame_objs[ds_name] = objs
        return blame_objs
Example #30
    def dump_resources_as_xlsx(self, filename=None):
        self._clear()
        ds = DatastoreManager.get_datastore_instance(DataStore.DS_RESOURCES, DataStore.DS_PROFILE.RESOURCES)
        all_objs = ds.find_docs_by_view("_all_docs", None, id_only=False)

        log.info("Found %s objects in datastore resources", len(all_objs))

        self._analyze_objects(all_objs)

        self._wb = xlwt.Workbook()
        self._worksheets = {}

        self._dump_observatories()

        self._dump_network()

        for restype in sorted(self._res_by_type.keys()):
            self._dump_resource_type(restype)

        dtstr = datetime.datetime.today().strftime('%Y%m%d_%H%M%S')
        path = filename or "interface/resources_%s.xls" % dtstr
        self._wb.save(path)
Example #31
    def dump_resources_as_xlsx(self, filename=None):
        self._clear()
        ds = DatastoreManager.get_datastore_instance(
            DataStore.DS_RESOURCES, DataStore.DS_PROFILE.RESOURCES)
        all_objs = ds.find_docs_by_view("_all_docs", None, id_only=False)

        log.info("Found %s objects in datastore resources", len(all_objs))

        self._analyze_objects(all_objs)

        self._wb = xlwt.Workbook()
        self._worksheets = {}

        self._dump_observatories()

        self._dump_network()

        for restype in sorted(self._res_by_type.keys()):
            self._dump_resource_type(restype)

        dtstr = datetime.datetime.today().strftime('%Y%m%d_%H%M%S')
        path = filename or "interface/resources_%s.xls" % dtstr
        self._wb.save(path)
Example #32
    def dump_datastore(cls, path=None, ds_name=None, clear_dir=True):
        """
        Dumps CouchDB datastores into a directory as YML files.
        @param ds_name Logical name (such as "resources") of an ION datastore
        @param path Directory to put dumped datastores into (defaults to
                    "res/preload/local/dump_[timestamp]")
        @param clear_dir if True, delete contents of datastore dump dirs
        """
        if CFG.system.mockdb:
            log.warn("Cannot dump from MockDB")
            return
        if not path:
            dtstr = datetime.datetime.today().strftime("%Y%m%d_%H%M%S")
            path = "res/preload/local/dump_%s" % dtstr
        if ds_name:
            if DatastoreManager.exists(ds_name):
                cls._dump_datastore(path, ds_name, clear_dir)
            else:
                log.warn("Datastore does not exist: %s" % ds_name)
        else:
            ds_list = ["resources", "objects", "state", "events", "directory", "scidata"]
            for ds in ds_list:
                cls._dump_datastore(path, ds, clear_dir)
Example #33
    def create_resources_snapshot(self, persist=False, filename=None):
        ds = DatastoreManager.get_datastore_instance(
            DataStore.DS_RESOURCES, DataStore.DS_PROFILE.RESOURCES)
        all_objs = ds.find_docs_by_view("_all_docs", None, id_only=False)

        log.info("Found %s objects in datastore resources", len(all_objs))

        resources = {}
        associations = {}
        snapshot = dict(resources=resources, associations=associations)

        for obj_id, key, obj in all_objs:
            if obj_id.startswith("_design"):
                continue
            if not isinstance(obj, dict):
                raise Inconsistent("Object of bad type found: %s" % type(obj))
            obj_type = obj.get("type_", None)
            if obj_type == "Association":
                associations[obj_id] = obj.get("ts", None)
            elif obj_type:
                resources[obj_id] = obj.get("ts_updated", None)
            else:
                raise Inconsistent("Object with no type_ found: %s" % obj)

        if persist:
            dtstr = datetime.datetime.today().strftime('%Y%m%d_%H%M%S')
            path = filename or "interface/rrsnapshot_%s.json" % dtstr
            snapshot_json = json.dumps(snapshot)
            with open(path, "w") as f:
                #yaml.dump(snapshot, f, default_flow_style=False)
                f.write(snapshot_json)

        log.debug(
            "Created resource registry snapshot. %s resources, %s associations",
            len(resources), len(associations))

        return snapshot
Example #34
    def test_directory_lock(self):
        dsm = DatastoreManager()
        ds = dsm.get_datastore("resources", "DIRECTORY")
        ds.delete_datastore()
        ds.create_datastore()

        self.patch_cfg('pyon.ion.directory.CFG',
                       {'service': {
                           'directory': {
                               'publish_events': False
                           }
                       }})

        directory = Directory(datastore_manager=dsm)
        directory.start()

        lock1 = directory.acquire_lock("LOCK1",
                                       lock_info=dict(process="proc1"))
        self.assertEquals(lock1, True)

        lock2 = directory.acquire_lock("LOCK1",
                                       lock_info=dict(process="proc2"))
        self.assertEquals(lock2, False)

        with self.assertRaises(BadRequest):
            directory.acquire_lock("LOCK/SOME")

        with self.assertRaises(BadRequest):
            directory.release_lock("LOCK/SOME")

        with self.assertRaises(NotFound):
            directory.release_lock("LOCK2")

        directory.release_lock("LOCK1")

        lock1 = directory.acquire_lock("LOCK1",
                                       lock_info=dict(process="proc3"))
        self.assertEquals(lock1, True)

        # TEST: With lock holders

        lock5 = directory.acquire_lock("LOCK5", lock_holder="proc1")
        self.assertEquals(lock5, True)

        lock5 = directory.acquire_lock("LOCK5", lock_holder="proc1")
        self.assertEquals(lock5, True)

        lock5 = directory.acquire_lock("LOCK5", lock_holder="proc2")
        self.assertEquals(lock5, False)

        directory.release_lock("LOCK5")

        # TEST: Timeout
        lock5 = directory.acquire_lock("LOCK5",
                                       lock_holder="proc1",
                                       timeout=0.1)
        self.assertEquals(lock5, True)

        lock5 = directory.acquire_lock("LOCK5", lock_holder="proc2")
        self.assertEquals(lock5, False)

        res = directory.is_locked("LOCK5")
        self.assertEquals(res, True)

        gevent.sleep(0.15)

        res = directory.is_locked("LOCK5")
        self.assertEquals(res, False)

        lock5 = directory.acquire_lock("LOCK5",
                                       lock_holder="proc2",
                                       timeout=0.1)
        self.assertEquals(lock5, True)

        gevent.sleep(0.15)

        # TEST: Holder self renew
        lock5 = directory.acquire_lock("LOCK5",
                                       lock_holder="proc2",
                                       timeout=0.1)
        self.assertEquals(lock5, True)

        directory.stop()
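
The test pins down the lock semantics: the first acquirer wins, the same holder may re-acquire (self renew), and a timed lock silently expires. A minimal in-memory model of those rules (a sketch, not the Directory implementation):

import time

class ExpiringLocks(object):
    def __init__(self):
        self._locks = {}  # name -> (holder, expiry timestamp or None)

    def acquire(self, name, holder, timeout=None):
        entry = self._locks.get(name)
        if entry is not None:
            cur_holder, expires = entry
            still_valid = expires is None or time.time() < expires
            if still_valid and cur_holder != holder:
                return False  # held by someone else and not yet expired
        expires_at = time.time() + timeout if timeout else None
        self._locks[name] = (holder, expires_at)
        return True

    def release(self, name):
        self._locks.pop(name, None)

locks = ExpiringLocks()
assert locks.acquire("LOCK1", "proc1")
assert not locks.acquire("LOCK1", "proc2")   # first acquirer wins
assert locks.acquire("LOCK1", "proc1")       # holder self-renews

assert locks.acquire("LOCK5", "proc2", timeout=0.1)
time.sleep(0.15)
assert locks.acquire("LOCK5", "proc3")       # timed lock has expired
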
Example #35
class Container(BaseContainerAgent):
    """
    The Capability Container. Its purpose is to spawn/monitor processes and services
    that do the bulk of the work in the ION system. It also manages connections to the Exchange
    and the various forms of datastores in the systems.
    """

    # Singleton static variables
    #node        = None
    id          = None
    name        = None
    pidfile     = None
    instance    = None

    def __init__(self, *args, **kwargs):
        BaseContainerAgent.__init__(self, *args, **kwargs)

        self._is_started = False
        # set container id and cc_agent name (as they are set in base class call)
        self.id = get_default_container_id()
        self.name = "cc_agent_%s" % self.id
        self._capabilities = []

        bootstrap.container_instance = self
        Container.instance = self

        log.debug("Container (sysname=%s) initializing ..." % bootstrap.get_sys_name())

        # Keep track of the overrides from the command-line, so they can trump app/rel file data
        self.spawn_args = kwargs

        # DatastoreManager - controls access to Datastores (both mock and couch backed)
        self.datastore_manager = DatastoreManager()

        # TODO: Do not start a capability here. Symmetric start/stop
        self.datastore_manager.start()
        self._capabilities.append("DATASTORE_MANAGER")

        # Instantiate Directory
        self.directory = Directory()

        # internal router
        self.local_router = None

        # Create this Container's specific ExchangeManager instance
        self.ex_manager = ExchangeManager(self)

        # Create this Container's specific ProcManager instance
        self.proc_manager = ProcManager(self)

        # Create this Container's specific AppManager instance
        self.app_manager = AppManager(self)

        # File System - Interface to the OS File System, using correct path names and setups
        self.file_system = FileSystem(CFG)

        # Governance Controller - manages the governance related interceptors
        self.governance_controller = GovernanceController(self)

        # sFlow manager - controls sFlow stat emission
        self.sflow_manager = SFlowManager(self)

        # Coordinates the container start
        self._status = "INIT"

        # protection for when the container itself is used as a Process for clients
        self.container = self

        # publisher, initialized in start()
        self.event_pub = None

        # context-local storage
        self.context = LocalContextMixin()

        log.debug("Container initialized, OK.")

    def start(self):
        log.debug("Container starting...")
        if self._is_started:
            raise ContainerError("Container already started")

        # Check if this UNIX process already runs a Container.
        self.pidfile = "cc-pid-%d" % os.getpid()
        if os.path.exists(self.pidfile):
            raise ContainerError("Container.on_start(): Container is a singleton per UNIX process. Existing pid file found: %s" % self.pidfile)

        # write out a PID file containing our agent messaging name
        with open(self.pidfile, 'w') as f:
            pid_contents = {'messaging': dict(CFG.server.amqp),
                            'container-agent': self.name,
                            'container-xp': bootstrap.get_sys_name()}
            f.write(msgpack.dumps(pid_contents))
            atexit.register(self._cleanup_pid)
            self._capabilities.append("PID_FILE")

        # set up abnormal termination handler for this container
        def handl(signum, frame):
            try:
                self._cleanup_pid()     # cleanup the pidfile first
                self.quit()             # now try to quit - will not error on second cleanup pidfile call
            finally:
                signal.signal(signal.SIGTERM, self._normal_signal)
                os.kill(os.getpid(), signal.SIGTERM)
        self._normal_signal = signal.signal(signal.SIGTERM, handl)

        # set up greenlet debugging signal handler
        gevent.signal(signal.SIGUSR2, self._handle_sigusr2)

        self.datastore_manager.start()
        self._capabilities.append("DATASTORE_MANAGER")

        self._capabilities.append("DIRECTORY")

        # Event repository
        self.event_repository = EventRepository()
        self.event_pub = EventPublisher()
        self._capabilities.append("EVENT_REPOSITORY")

        # Local resource registry
        self.resource_registry = ResourceRegistry()
        self._capabilities.append("RESOURCE_REGISTRY")

        # Persistent objects
        self.datastore_manager.get_datastore("objects", DataStore.DS_PROFILE.OBJECTS)

        # State repository
        self.state_repository = StateRepository()
        self._capabilities.append("STATE_REPOSITORY")

        # internal router for local transports
        self.local_router = LocalRouter(bootstrap.get_sys_name())
        self.local_router.start()
        self.local_router.ready.wait(timeout=2)
        self._capabilities.append("LOCAL_ROUTER")

        # Start ExchangeManager, which starts the node (broker connection)
        self.ex_manager.start()
        self._capabilities.append("EXCHANGE_MANAGER")

        self.proc_manager.start()
        self._capabilities.append("PROC_MANAGER")

        self.app_manager.start()
        self._capabilities.append("APP_MANAGER")

        self.governance_controller.start()
        self._capabilities.append("GOVERNANCE_CONTROLLER")

        if CFG.get_safe('container.sflow.enabled', False):
            self.sflow_manager.start()
            self._capabilities.append("SFLOW_MANAGER")

        # Start the CC-Agent API
        rsvc = ProcessRPCServer(node=self.node, from_name=self.name, service=self, process=self)

        cleanup = lambda _: self.proc_manager._cleanup_method(self.name, rsvc)

        # Start an ION process with the right kind of endpoint factory
        proc = self.proc_manager.proc_sup.spawn(name=self.name, listeners=[rsvc], service=self, cleanup_method=cleanup)
        self.proc_manager.proc_sup.ensure_ready(proc)
        proc.start_listeners()
        self._capabilities.append("CONTAINER_AGENT")

        self.event_pub.publish_event(event_type="ContainerLifecycleEvent",
                                     origin=self.id, origin_type="CapabilityContainer",
                                     sub_type="START",
                                     state=ContainerStateEnum.START)

        self._is_started    = True
        self._status        = "RUNNING"

        log.info("Container (%s) started, OK." , self.id)

    def _handle_sigusr2(self):  # (signum, frame) args not needed with gevent.signal
        """
        Handles SIGUSR2, prints debugging greenlet information.
        """
        gls = GreenletLeak.get_greenlets()

        allgls = []

        for gl in gls:
            status = GreenletLeak.format_greenlet(gl)

            # build formatted output:
            # Greenlet at 0xdeadbeef
            #     self: <EndpointUnit at 0x1ffcceef>
            #     func: bound, EndpointUnit.some_func

            status[0].insert(0, "%s at %s:" % (gl.__class__.__name__, hex(id(gl))))
            # indent anything in status a second time
            prefmt = [s.replace("\t", "\t\t") for s in status[0]]
            prefmt.append("traceback:")

            for line in status[1]:
                for subline in line.split("\n")[0:2]:
                    prefmt.append(subline)

            glstr = "\n\t".join(prefmt)

            allgls.append(glstr)

        # print it out!
        print >>sys.stderr, "\n\n".join(allgls)
        with open("gls-%s" % os.getpid(), "w") as f:
            f.write("\n\n".join(allgls))


    @property
    def node(self):
        """
        Returns the active/default Node that should be used for most communication in the system.

        Defers to exchange manager, but only if it has been started, otherwise returns None.
        """
        if "EXCHANGE_MANAGER" in self._capabilities:
            return self.ex_manager.default_node

        return None

    @contextmanager
    def _push_status(self, new_status):
        """
        Temporarily sets the internal status flag.
        Use this as a decorator or in a with-statement before calling a temporary status changing
        method, like start_rel_from_url.
        """
        curstatus = self._status
        self._status = new_status
        try:
            yield
        finally:
            self._status = curstatus
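
    # Usage sketch: flip the status for the duration of a call and get the
    # old value back even if it raises, e.g.:
    #
    #     with self._push_status("START_REL"):
    #         self.start_rel_from_url(rel_url)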

    def serve_forever(self):
        """ Run the container until killed. """
        log.debug("In Container.serve_forever")

        if not self.proc_manager.proc_sup.running:
            self.start()

        # serve forever short-circuits if immediate is on and children len is ok
        num_procs = len(self.proc_manager.proc_sup.children)
        immediate = CFG.system.get('immediate', False)
        if not (immediate and num_procs == 1):  # only spawned greenlet is the CC-Agent

            # print a warning just in case
            if immediate and num_procs != 1:
                log.warn("CFG.system.immediate=True but number of spawned processes is not 1 (%d)", num_procs)

            try:
                # This just waits in this Greenlet for all child processes to complete,
                # which is triggered somewhere else.
                self.proc_manager.proc_sup.join_children()
            except (KeyboardInterrupt, SystemExit) as ex:
                log.info('Received a kill signal, shutting down the container.')

                if hasattr(self, 'gl_parent_watch') and self.gl_parent_watch is not None:
                    self.gl_parent_watch.kill()

            except:
                log.exception('Unhandled error! Forcing container shutdown')
        else:
            log.debug("Container.serve_forever short-circuiting due to CFG.system.immediate")

        self.proc_manager.proc_sup.shutdown(CFG.cc.timeout.shutdown)

    def status(self):
        """
        Returns the internal status.
        """
        return self._status

    def _cleanup_pid(self):
        if self.pidfile:
            log.debug("Cleanup pidfile: %s", self.pidfile)
            try:
                os.remove(self.pidfile)
            except Exception as e:
                log.warn("Pidfile could not be deleted: %s" % str(e))
            self.pidfile = None
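
start() above enforces one Container per UNIX process with a pid file plus an atexit cleanup hook that tolerates being called twice. The guard pattern in isolation (a sketch; the real pid file holds msgpack'd messaging details, simplified to plain text here):

import atexit
import os

class PidfileGuard(object):
    # One instance per UNIX process, enforced via a pid file

    def __init__(self):
        self.pidfile = "cc-pid-%d" % os.getpid()

    def start(self):
        if os.path.exists(self.pidfile):
            raise RuntimeError("already running: %s" % self.pidfile)
        with open(self.pidfile, "w") as f:
            f.write(str(os.getpid()))
        atexit.register(self._cleanup)

    def _cleanup(self):
        # safe to call more than once
        if self.pidfile and os.path.exists(self.pidfile):
            os.remove(self.pidfile)
        self.pidfile = None
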
Example #36
class Container(BaseContainerAgent):
    """
    The Capability Container. Its purpose is to spawn/monitor processes and services
    that do the bulk of the work in the ION system.
    """

    # Singleton static variables
    node        = None
    id          = None
    name        = None
    pidfile     = None
    instance    = None

    def __init__(self, *args, **kwargs):
        BaseContainerAgent.__init__(self, *args, **kwargs)

        self._is_started = False

        # set id and name (as they are set in base class call)
        self.id = string.replace('%s_%d' % (os.uname()[1], os.getpid()), ".", "_")
        self.name = "cc_agent_%s" % self.id

        Container.instance = self

        # TODO: Bug: Replacing the CFG instance does not work because references to it are already public. Update it in place instead.
        dict_merge(CFG, kwargs, inplace=True)
        from pyon.core import bootstrap
        bootstrap.container_instance = self
        bootstrap.assert_configuration(CFG)
        bootstrap.sys_name = CFG.system.name or bootstrap.sys_name
        log.debug("Container (sysname=%s) initializing ..." % bootstrap.sys_name)

        # Keep track of the overrides from the command-line, so they can trump app/rel file data
        self.spawn_args = kwargs

        # Load object and service registry etc.
        bootstrap_pyon()

        # Create this Container's specific ExchangeManager instance
        self.ex_manager = ExchangeManager(self)

        # Create this Container's specific ProcManager instance
        self.proc_manager = ProcManager(self)

        # Create this Container's specific AppManager instance
        self.app_manager = AppManager(self)

        # DatastoreManager - controls access to Datastores (both mock and couch backed)
        self.datastore_manager = DatastoreManager()

        # File System - Interface to the OS File System, using correct path names and setups
        self.file_system = FileSystem(CFG)

        # Coordinates the container start
        self._is_started = False
        self._capabilities = []

        log.debug("Container initialized, OK.")


    def start(self):
        log.debug("Container starting...")
        if self._is_started:
            raise ContainerError("Container already started")

        # Check if this UNIX process already runs a Container.
        self.pidfile = "cc-pid-%d" % os.getpid()
        if os.path.exists(self.pidfile):
            raise ContainerError("Container.on_start(): Container is a singleton per UNIX process. Existing pid file found: %s" % self.pidfile)

        # write out a PID file containing our agent messaging name
        with open(self.pidfile, 'w') as f:
            from pyon.core.bootstrap import get_sys_name
            pid_contents = {'messaging': dict(CFG.server.amqp),
                            'container-agent': self.name,
                            'container-xp': get_sys_name() }
            f.write(msgpack.dumps(pid_contents))
            atexit.register(self._cleanup_pid)
            self._capabilities.append("PID_FILE")

        # set up abnormal termination handler for this container
        def handl(signum, frame):
            try:
                self._cleanup_pid()     # cleanup the pidfile first
                self.quit()             # now try to quit - will not error on second cleanup pidfile call
            finally:
                signal.signal(signal.SIGTERM, self._normal_signal)
                os.kill(os.getpid(), signal.SIGTERM)
        self._normal_signal = signal.signal(signal.SIGTERM, handl)

        self._capabilities.append("EXCHANGE_CONNECTION")

        self.datastore_manager.start()
        self._capabilities.append("DATASTORE_MANAGER")

        # Instantiate Directory and self-register
        self.directory = Directory()
        self.directory.register("/Containers", self.id, cc_agent=self.name)
        self._capabilities.append("DIRECTORY")

        # Create other repositories to make sure they are there and clean if needed
        self.datastore_manager.get_datastore("resources", DataStore.DS_PROFILE.RESOURCES)

        self.datastore_manager.get_datastore("objects", DataStore.DS_PROFILE.OBJECTS)

        self.state_repository = StateRepository()
        self._capabilities.append("STATE_REPOSITORY")

        self.event_repository = EventRepository()
        self._capabilities.append("EVENT_REPOSITORY")

        # Start ExchangeManager. In particular establish broker connection
        self.ex_manager.start()

        # TODO: Move this in ExchangeManager - but there is an error
        self.node, self.ioloop = messaging.make_node() # TODO: shortcut hack
        self._capabilities.append("EXCHANGE_MANAGER")

        self.proc_manager.start()
        self._capabilities.append("PROC_MANAGER")

        self.app_manager.start()
        self._capabilities.append("APP_MANAGER")

        # Start the CC-Agent API
        rsvc = ProcessRPCServer(node=self.node, name=self.name, service=self, process=self)

        # Start an ION process with the right kind of endpoint factory
        proc = self.proc_manager.proc_sup.spawn((CFG.cc.proctype or 'green', None), listener=rsvc)
        self.proc_manager.proc_sup.ensure_ready(proc)
        self._capabilities.append("CONTAINER_AGENT")

        self._is_started = True

        log.info("Container started, OK.")

    def serve_forever(self):
        """ Run the container until killed. """
        log.debug("In Container.serve_forever")
        
        if not self.proc_manager.proc_sup.running:
            self.start()
            
        try:
            # This just waits in this Greenlet for all child processes to complete,
            # which is triggered somewhere else.
            self.proc_manager.proc_sup.join_children()
        except (KeyboardInterrupt, SystemExit) as ex:
            log.info('Received a kill signal, shutting down the container.')
        except:
            log.exception('Unhandled error! Forcing container shutdown')

        self.proc_manager.proc_sup.shutdown(CFG.cc.timeout.shutdown)
            
    def _cleanup_pid(self):
        if self.pidfile:
            log.debug("Cleanup pidfile: %s", self.pidfile)
            try:
                os.remove(self.pidfile)
            except Exception as e:
                log.warn("Pidfile could not be deleted: %s" % str(e))
            self.pidfile = None
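
    # A minimal companion sketch (hypothetical helper, not part of the source):
    # reads back the pid file written by start() above, assuming only that it
    # was produced with msgpack.dumps as shown.
    @staticmethod
    def _read_pidfile(path):
        import msgpack
        with open(path, 'rb') as f:
            return msgpack.loads(f.read())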
Example #37
    def __init__(self):
        from pyon.core.bootstrap import container_instance
        self.container = container_instance
        self.rr = self.container.resource_registry
        self.rr_store = DatastoreManager.get_datastore_instance("resources")
        self.timer = ooi.timer.Timer()
Example #38
class Container(BaseContainerAgent):
    """
    The Capability Container. Its purpose is to spawn/monitor processes and services
    that do the bulk of the work in the ION system. It also manages connections to the Exchange
    and the various forms of datastores in the systems.
    """

    # Singleton static variables
    #node        = None
    id          = None
    name        = None
    pidfile     = None
    instance    = None

    def __init__(self, *args, **kwargs):
        BaseContainerAgent.__init__(self, *args, **kwargs)

        self._is_started = False

        self._capabilities = []

        # set container id and cc_agent name (as they are set in base class call)
        self.id = get_default_container_id()
        self.name = "cc_agent_%s" % self.id

        Container.instance = self

        from pyon.core import bootstrap
        bootstrap.container_instance = self

        log.debug("Container (sysname=%s) initializing ..." % bootstrap.get_sys_name())

        # DatastoreManager - controls access to Datastores (both mock and couch backed)
        self.datastore_manager = DatastoreManager()

        self.datastore_manager.start()
        self._capabilities.append("DATASTORE_MANAGER")

        # Keep track of the overrides from the command-line, so they can trump app/rel file data
        self.spawn_args = kwargs

        # Instantiate Directory and self-register
        # Side effect: either bootstraps the configuration into the directory
        # or reads the configuration from it, depending on the value of the
        # auto_bootstrap setting
        self.directory = Directory()

        # Create this Container's specific ExchangeManager instance
        self.ex_manager = ExchangeManager(self)

        # Create this Container's specific ProcManager instance
        self.proc_manager = ProcManager(self)

        # Create this Container's specific AppManager instance
        self.app_manager = AppManager(self)

        # File System - Interface to the OS File System, using correct path names and setups
        self.file_system = FileSystem(CFG)

        # Governance Controller - manages the governance related interceptors
        self.governance_controller = GovernanceController(self)

        # sFlow manager - controls sFlow stat emission
        self.sflow_manager = SFlowManager(self)

        # Coordinates the container start
        self._status = "INIT"

        # protection for when the container itself is used as a Process for clients
        self.container = self

        log.debug("Container initialized, OK.")

    def start(self):
        log.debug("Container starting...")
        if self._is_started:
            raise ContainerError("Container already started")

        # Check if this UNIX process already runs a Container.
        self.pidfile = "cc-pid-%d" % os.getpid()
        if os.path.exists(self.pidfile):
            raise ContainerError("Container.on_start(): Container is a singleton per UNIX process. Existing pid file found: %s" % self.pidfile)

        # write out a PID file containing our agent messaging name
        with open(self.pidfile, 'w') as f:
            pid_contents = {'messaging': dict(CFG.server.amqp),
                            'container-agent': self.name,
                            'container-xp': bootstrap.get_sys_name() }
            f.write(msgpack.dumps(pid_contents))
            atexit.register(self._cleanup_pid)
            self._capabilities.append("PID_FILE")

        # set up abnormal termination handler for this container
        def handl(signum, frame):
            try:
                self._cleanup_pid()     # cleanup the pidfile first
                self.quit()             # now try to quit - will not error on second cleanup pidfile call
            finally:
                signal.signal(signal.SIGTERM, self._normal_signal)
                os.kill(os.getpid(), signal.SIGTERM)
        self._normal_signal = signal.signal(signal.SIGTERM, handl)

        self.datastore_manager.start()
        self._capabilities.append("DATASTORE_MANAGER")

        # Self-register with Directory
        self.directory.register("/Containers", self.id, cc_agent=self.name)
        self.directory.register("/Containers/%s" % self.id, "Processes")
        self._capabilities.append("DIRECTORY")

        # Event repository
        self.event_repository = EventRepository()
        self.event_pub = EventPublisher()

        self._capabilities.append("EVENT_REPOSITORY")

        # Local resource registry
        self.resource_registry = ResourceRegistry()
        self._capabilities.append("RESOURCE_REGISTRY")

        # Persistent objects
        self.datastore_manager.get_datastore("objects", DataStore.DS_PROFILE.OBJECTS)

        # State repository
        self.state_repository = StateRepository()
        self._capabilities.append("STATE_REPOSITORY")

        # Start ExchangeManager, which starts the node (broker connection)
        self.ex_manager.start()
        self._capabilities.append("EXCHANGE_MANAGER")

        self.proc_manager.start()
        self._capabilities.append("PROC_MANAGER")

        self.app_manager.start()
        self._capabilities.append("APP_MANAGER")

        self.governance_controller.start()
        self._capabilities.append("GOVERNANCE_CONTROLLER")

        if CFG.container.get('sflow', {}).get('enabled', False):
            self.sflow_manager.start()
            self._capabilities.append("SFLOW_MANAGER")

        # Start the CC-Agent API
        rsvc = ProcessRPCServer(node=self.node, from_name=self.name, service=self, process=self)

        # Start an ION process with the right kind of endpoint factory
        proc = self.proc_manager.proc_sup.spawn(name=self.name, listeners=[rsvc], service=self)
        self.proc_manager.proc_sup.ensure_ready(proc)
        self._capabilities.append("CONTAINER_AGENT")

        self.event_pub.publish_event(event_type="ContainerLifecycleEvent",
                                     origin=self.id, origin_type="CapabilityContainer",
                                     sub_type="START",
                                     state=ContainerStateEnum.START)

        self._is_started    = True
        self._status        = "RUNNING"

        log.info("Container started, OK.")

    @property
    def node(self):
        """
        Returns the active/default Node that should be used for most communication in the system.

        Defers to the exchange manager, but only if it has been started; otherwise returns None.
        """
        if "EXCHANGE_MANAGER" in self._capabilities:
            return self.ex_manager.default_node

        return None

    @contextmanager
    def _push_status(self, new_status):
        """
        Temporarily sets the internal status flag.
        Use this as a decorator or in a with-statement before calling a temporary status changing
        method, like start_rel_from_url.
        """
        curstatus = self._status
        self._status = new_status
        try:
            yield
        finally:
            self._status = curstatus

    def serve_forever(self):
        """ Run the container until killed. """
        log.debug("In Container.serve_forever")
        
        if not self.proc_manager.proc_sup.running:
            self.start()

        # serve_forever short-circuits if CFG.system.immediate is set and the only spawned child is the CC-Agent
        num_procs = len(self.proc_manager.proc_sup.children)
        immediate = CFG.system.get('immediate', False)
        if not (immediate and num_procs == 1):  # only spawned greenlet is the CC-Agent

            # print a warning just in case
            if immediate and num_procs != 1:
                log.warn("CFG.system.immediate=True but number of spawned processes is not 1 (%d)", num_procs)

            try:
                # This just waits in this Greenlet for all child processes to complete,
                # which is triggered somewhere else.
                self.proc_manager.proc_sup.join_children()
            except (KeyboardInterrupt, SystemExit) as ex:
                log.info('Received a kill signal, shutting down the container.')
                watch_parent = CFG.system.get('watch_parent', None)
                if watch_parent:
                    watch_parent.kill()
            except:
                log.exception('Unhandled error! Forcing container shutdown')
        else:
            log.debug("Container.serve_forever short-circuiting due to CFG.system.immediate")

        self.proc_manager.proc_sup.shutdown(CFG.cc.timeout.shutdown)

    def status(self):
        """
        Returns the internal status.
        """
        return self._status
            
    def _cleanup_pid(self):
        if self.pidfile:
            log.debug("Cleanup pidfile: %s", self.pidfile)
            try:
                os.remove(self.pidfile)
            except Exception as e:
                log.warn("Pidfile could not be deleted: %s" % str(e))
            self.pidfile = None
Example #39
    def test_directory(self):
        dsm = DatastoreManager()
        ds = dsm.get_datastore("resources", "DIRECTORY")
        ds.delete_datastore()
        ds.create_datastore()

        self.patch_cfg('pyon.ion.directory.CFG',
                       {'service': {
                           'directory': {
                               'publish_events': False
                           }
                       }})

        directory = Directory(datastore_manager=dsm)
        directory.start()

        #self.addCleanup(directory.dir_store.delete_datastore)

        objs = directory.dir_store.list_objects()

        root = directory.lookup("/DIR")
        self.assert_(root is not None)

        entry = directory.lookup("/temp")
        self.assert_(entry is None)

        entry_old = directory.register("/", "temp")
        self.assertEquals(entry_old, None)

        # Create a node
        entry = directory.lookup("/temp")
        self.assertEquals(entry, {})

        # The create case
        entry_old = directory.register("/temp", "entry1", foo="awesome")
        self.assertEquals(entry_old, None)
        entry_new = directory.lookup("/temp/entry1")
        self.assertEquals(entry_new, {"foo": "awesome"})

        # The update case
        entry_old = directory.register("/temp", "entry1", foo="ingenious")
        self.assertEquals(entry_old, {"foo": "awesome"})

        # The delete case
        entry_old = directory.unregister("/temp", "entry1")
        self.assertEquals(entry_old, {"foo": "ingenious"})
        entry_new = directory.lookup("/temp/entry1")
        self.assertEquals(entry_new, None)

        directory.register("/BranchA", "X", resource_id="rid1")
        directory.register("/BranchA", "Y", resource_id="rid2")
        directory.register("/BranchA", "Z", resource_id="rid3")
        directory.register("/BranchA/X", "a", resource_id="rid4")
        directory.register("/BranchA/X", "b", resource_id="rid5")
        directory.register("/BranchB", "k", resource_id="rid6")
        directory.register("/BranchB", "l", resource_id="rid7")
        directory.register("/BranchB/k", "m", resource_id="rid7")
        directory.register("/BranchB/k", "X")

        res_list = directory.find_by_value("/",
                                           attribute="resource_id",
                                           value="rid3")
        self.assertEquals(len(res_list), 1)
        self.assertEquals(res_list[0].org, "ION")
        self.assertEquals(res_list[0].parent, "/BranchA")
        self.assertEquals(res_list[0].key, "Z")

        res_list = directory.find_by_value("/",
                                           attribute="resource_id",
                                           value="rid34")
        self.assertEquals(len(res_list), 0)

        res_list = directory.find_by_value("/",
                                           attribute="resource_id",
                                           value="rid7")
        self.assertEquals(len(res_list), 2)

        res_list = directory.find_by_value("/BranchB",
                                           attribute="resource_id",
                                           value="rid7")
        self.assertEquals(len(res_list), 2)

        res_list = directory.find_by_value("/Branch",
                                           attribute="resource_id",
                                           value="rid7")
        self.assertEquals(len(res_list), 2)

        res_list = directory.find_by_value("/BranchB/k",
                                           attribute="resource_id",
                                           value="rid7")
        self.assertEquals(len(res_list), 1)

        res_list = directory.find_child_entries("/BranchB/k/m")
        self.assertEquals(len(res_list), 0)

        res_list = directory.find_child_entries("/BranchB")
        self.assertEquals(len(res_list), 2)

        res_list = directory.find_child_entries("/BranchB/k/m",
                                                direct_only=False)
        self.assertEquals(len(res_list), 0)

        res_list = directory.find_child_entries("/BranchB", direct_only=False)
        self.assertEquals(len(res_list), 4)

        res_list = directory.find_by_key("X")
        self.assertEquals(len(res_list), 2)

        res_list = directory.find_by_key("X", parent="/BranchB")
        self.assertEquals(len(res_list), 1)

        entry_list = directory.lookup_mult("/BranchA", ["X", "Z"])
        self.assertEquals(len(entry_list), 2)
        self.assertEquals(entry_list[0]["resource_id"], "rid1")
        self.assertEquals(entry_list[1]["resource_id"], "rid3")

        entry_list = directory.lookup_mult("/BranchA", ["Y", "FOO"])
        self.assertEquals(len(entry_list), 2)
        self.assertEquals(entry_list[0]["resource_id"], "rid2")
        self.assertEquals(entry_list[1], None)

        # Test prevent duplicate entries
        directory.register("/some", "dupentry", foo="ingenious")
        de = directory.lookup("/some/dupentry", return_entry=True)
        de1_attrs = de.__dict__.copy()
        del de1_attrs["_id"]
        del de1_attrs["_rev"]
        del de1_attrs["type_"]
        de1 = DirEntry(**de1_attrs)
        with self.assertRaises(BadRequest) as ex:
            de_id1, _ = directory.dir_store.create(de1)
        # Assert outside the with-block: create() raises, so code after it
        # inside the block would never run.
        self.assertTrue(ex.exception.message.startswith("DirEntry already exists"))

        res_list = directory.find_by_key("dupentry", parent="/some")
        self.assertEquals(1, len(res_list))
Example #40
    def __init__(self, *args, **kwargs):
        BaseContainerAgent.__init__(self, *args, **kwargs)

        self._is_started = False
        # set container id and cc_agent name (as they are set in base class call)
        self.id = get_default_container_id()
        self.name = "cc_agent_%s" % self.id
        self._capabilities = []

        bootstrap.container_instance = self
        Container.instance = self

        log.debug("Container (sysname=%s) initializing ..." %
                  bootstrap.get_sys_name())

        # Keep track of the overrides from the command-line, so they can trump app/rel file data
        self.spawn_args = kwargs

        # DatastoreManager - controls access to Datastores (both mock and couch backed)
        self.datastore_manager = DatastoreManager()

        # TODO: Do not start a capability here. Symmetric start/stop
        self.datastore_manager.start()
        self._capabilities.append("DATASTORE_MANAGER")

        # Instantiate Directory
        self.directory = Directory()

        # internal router
        self.local_router = None

        # Create this Container's specific ExchangeManager instance
        self.ex_manager = ExchangeManager(self)

        # Create this Container's specific ProcManager instance
        self.proc_manager = ProcManager(self)

        # Create this Container's specific AppManager instance
        self.app_manager = AppManager(self)

        # File System - Interface to the OS File System, using correct path names and setups
        self.file_system = FileSystem(CFG)

        # Governance Controller - manages the governance related interceptors
        self.governance_controller = GovernanceController(self)

        # sFlow manager - controls sFlow stat emission
        self.sflow_manager = SFlowManager(self)

        # Coordinates the container start
        self._status = "INIT"

        # protection for when the container itself is used as a Process for clients
        self.container = self

        # publisher, initialized in start()
        self.event_pub = None

        # context-local storage
        self.context = LocalContextMixin()

        log.debug("Container initialized, OK.")
Example #41
    def test_directory(self):
        dsm = DatastoreManager()
        ds = dsm.get_datastore("resources")
        ds.delete_datastore()
        ds.create_datastore()

        directory = Directory(datastore_manager=dsm)
        directory.start()

        #self.addCleanup(directory.dir_store.delete_datastore)

        objs = directory.dir_store.list_objects()
        self.assert_("_design/directory" in objs)

        root = directory.lookup("/DIR")
        self.assert_(root is not None)

        entry = directory.lookup("/temp")
        self.assert_(entry is None)

        entry_old = directory.register("/","temp")
        self.assertEquals(entry_old, None)

        # Create a node
        entry = directory.lookup("/temp")
        self.assertEquals(entry, {} )

        # The create case
        entry_old = directory.register("/temp", "entry1", foo="awesome")
        self.assertEquals(entry_old, None)
        entry_new = directory.lookup("/temp/entry1")
        self.assertEquals(entry_new, {"foo":"awesome"})

        # The update case
        entry_old = directory.register("/temp", "entry1", foo="ingenious")
        self.assertEquals(entry_old, {"foo":"awesome"})

        # The delete case
        entry_old = directory.unregister("/temp", "entry1")
        self.assertEquals(entry_old, {"foo":"ingenious"})
        entry_new = directory.lookup("/temp/entry1")
        self.assertEquals(entry_new, None)

        directory.register("/BranchA", "X", resource_id="rid1")
        directory.register("/BranchA", "Y", resource_id="rid2")
        directory.register("/BranchA", "Z", resource_id="rid3")
        directory.register("/BranchA/X", "a", resource_id="rid4")
        directory.register("/BranchA/X", "b", resource_id="rid5")
        directory.register("/BranchB", "k", resource_id="rid6")
        directory.register("/BranchB", "l", resource_id="rid7")
        directory.register("/BranchB/k", "m", resource_id="rid7")
        directory.register("/BranchB/k", "X")

        res_list = directory.find_by_value("/", attribute="resource_id", value="rid3")
        self.assertEquals(len(res_list), 1)
        self.assertEquals(res_list[0].org, "ION")
        self.assertEquals(res_list[0].parent, "/BranchA")
        self.assertEquals(res_list[0].key, "Z")

        res_list = directory.find_by_value("/", attribute="resource_id", value="rid34")
        self.assertEquals(len(res_list), 0)

        res_list = directory.find_by_value("/", attribute="resource_id", value="rid7")
        self.assertEquals(len(res_list), 2)

        res_list = directory.find_by_value("/BranchB", attribute="resource_id", value="rid7")
        self.assertEquals(len(res_list), 2)

        res_list = directory.find_by_value("/Branch", attribute="resource_id", value="rid7")
        self.assertEquals(len(res_list), 2)

        res_list = directory.find_by_value("/BranchB/k", attribute="resource_id", value="rid7")
        self.assertEquals(len(res_list), 1)

        res_list = directory.find_child_entries("/BranchB/k/m")
        self.assertEquals(len(res_list), 0)

        res_list = directory.find_child_entries("/BranchB")
        self.assertEquals(len(res_list), 2)

        res_list = directory.find_child_entries("/BranchB/k/m", direct_only=False)
        self.assertEquals(len(res_list), 0)

        res_list = directory.find_child_entries("/BranchB", direct_only=False)
        self.assertEquals(len(res_list), 4)

        res_list = directory.find_by_key("X")
        self.assertEquals(len(res_list), 2)

        res_list = directory.find_by_key("X", parent="/BranchB")
        self.assertEquals(len(res_list), 1)

        directory.stop()
Example #42
class Container(BaseContainerAgent):
    """
    The Capability Container. Its purpose is to spawn/monitor processes and services
    that do the bulk of the work in the ION system. It also manages connections to the Exchange
    and the various forms of datastores in the systems.
    """

    # Singleton static variables
    #node        = None
    id = None
    name = None
    pidfile = None
    instance = None

    def __init__(self, *args, **kwargs):
        BaseContainerAgent.__init__(self, *args, **kwargs)

        self._is_started = False
        # set container id and cc_agent name (as they are set in base class call)
        self.id = get_default_container_id()
        self.name = "cc_agent_%s" % self.id
        self._capabilities = []

        bootstrap.container_instance = self
        Container.instance = self

        log.debug("Container (sysname=%s) initializing ..." %
                  bootstrap.get_sys_name())

        # Keep track of the overrides from the command-line, so they can trump app/rel file data
        self.spawn_args = kwargs

        # DatastoreManager - controls access to Datastores (both mock and couch backed)
        self.datastore_manager = DatastoreManager()

        # TODO: Do not start a capability here. Symmetric start/stop
        self.datastore_manager.start()
        self._capabilities.append("DATASTORE_MANAGER")

        # Instantiate Directory
        self.directory = Directory()

        # internal router
        self.local_router = None

        # Create this Container's specific ExchangeManager instance
        self.ex_manager = ExchangeManager(self)

        # Create this Container's specific ProcManager instance
        self.proc_manager = ProcManager(self)

        # Create this Container's specific AppManager instance
        self.app_manager = AppManager(self)

        # File System - Interface to the OS File System, using correct path names and setups
        self.file_system = FileSystem(CFG)

        # Governance Controller - manages the governance related interceptors
        self.governance_controller = GovernanceController(self)

        # sFlow manager - controls sFlow stat emission
        self.sflow_manager = SFlowManager(self)

        # Coordinates the container start
        self._status = "INIT"

        # protection for when the container itself is used as a Process for clients
        self.container = self

        # publisher, initialized in start()
        self.event_pub = None

        # context-local storage
        self.context = LocalContextMixin()

        log.debug("Container initialized, OK.")

    def start(self):
        log.debug("Container starting...")
        if self._is_started:
            raise ContainerError("Container already started")

        # Check if this UNIX process already runs a Container.
        self.pidfile = "cc-pid-%d" % os.getpid()
        if os.path.exists(self.pidfile):
            raise ContainerError(
                "Container.on_start(): Container is a singleton per UNIX process. Existing pid file found: %s"
                % self.pidfile)

        # write out a PID file containing our agent messaging name
        with open(self.pidfile, 'w') as f:
            pid_contents = {
                'messaging': dict(CFG.server.amqp),
                'container-agent': self.name,
                'container-xp': bootstrap.get_sys_name()
            }
            f.write(msgpack.dumps(pid_contents))
            atexit.register(self._cleanup_pid)
            self._capabilities.append("PID_FILE")

        # set up abnormal termination handler for this container
        def handl(signum, frame):
            try:
                self._cleanup_pid()  # cleanup the pidfile first
                self.quit()  # now try to quit - will not error on second cleanup pidfile call
            finally:
                signal.signal(signal.SIGTERM, self._normal_signal)
                os.kill(os.getpid(), signal.SIGTERM)

        self._normal_signal = signal.signal(signal.SIGTERM, handl)

        # set up greenlet debugging signal handler
        gevent.signal(signal.SIGUSR2, self._handle_sigusr2)

        self.datastore_manager.start()
        self._capabilities.append("DATASTORE_MANAGER")

        self._capabilities.append("DIRECTORY")

        # Event repository
        self.event_repository = EventRepository()
        self.event_pub = EventPublisher()
        self._capabilities.append("EVENT_REPOSITORY")

        # Local resource registry
        self.resource_registry = ResourceRegistry()
        self._capabilities.append("RESOURCE_REGISTRY")

        # Persistent objects
        self.datastore_manager.get_datastore("objects",
                                             DataStore.DS_PROFILE.OBJECTS)

        # State repository
        self.state_repository = StateRepository()
        self._capabilities.append("STATE_REPOSITORY")

        # internal router for local transports
        self.local_router = LocalRouter(bootstrap.get_sys_name())
        self.local_router.start()
        self.local_router.ready.wait(timeout=2)
        self._capabilities.append("LOCAL_ROUTER")

        # Start ExchangeManager, which starts the node (broker connection)
        self.ex_manager.start()
        self._capabilities.append("EXCHANGE_MANAGER")

        self.proc_manager.start()
        self._capabilities.append("PROC_MANAGER")

        self.app_manager.start()
        self._capabilities.append("APP_MANAGER")

        self.governance_controller.start()
        self._capabilities.append("GOVERNANCE_CONTROLLER")

        if CFG.get_safe('container.sflow.enabled', False):
            self.sflow_manager.start()
            self._capabilities.append("SFLOW_MANAGER")

        # Start the CC-Agent API
        rsvc = ProcessRPCServer(node=self.node,
                                from_name=self.name,
                                service=self,
                                process=self)

        cleanup = lambda _: self.proc_manager._cleanup_method(self.name, rsvc)

        # Start an ION process with the right kind of endpoint factory
        proc = self.proc_manager.proc_sup.spawn(name=self.name,
                                                listeners=[rsvc],
                                                service=self,
                                                cleanup_method=cleanup)
        self.proc_manager.proc_sup.ensure_ready(proc)
        proc.start_listeners()
        self._capabilities.append("CONTAINER_AGENT")

        self.event_pub.publish_event(event_type="ContainerLifecycleEvent",
                                     origin=self.id,
                                     origin_type="CapabilityContainer",
                                     sub_type="START",
                                     state=ContainerStateEnum.START)

        self._is_started = True
        self._status = "RUNNING"

        log.info("Container (%s) started, OK.", self.id)

    def _handle_sigusr2(self):  #, signum, frame):
        """
        Handles SIGUSR2, prints debugging greenlet information.
        """
        gls = GreenletLeak.get_greenlets()

        allgls = []

        for gl in gls:
            status = GreenletLeak.format_greenlet(gl)

            # build formatted output:
            # Greenlet at 0xdeadbeef
            #     self: <EndpointUnit at 0x1ffcceef>
            #     func: bound, EndpointUnit.some_func

            status[0].insert(
                0, "%s at %s:" % (gl.__class__.__name__, hex(id(gl))))
            # indent anything in status a second time
            prefmt = [s.replace("\t", "\t\t") for s in status[0]]
            prefmt.append("traceback:")

            for line in status[1]:
                for subline in line.split("\n")[0:2]:
                    prefmt.append(subline)

            glstr = "\n\t".join(prefmt)

            allgls.append(glstr)

        # print it out!
        print >> sys.stderr, "\n\n".join(allgls)
        with open("gls-%s" % os.getpid(), "w") as f:
            f.write("\n\n".join(allgls))
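
    # Usage note (hedged): start() binds this handler to SIGUSR2 via
    # gevent.signal, so on a POSIX host a running container can be asked to
    # dump its greenlets with:
    #
    #     kill -USR2 <container-pid>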

    @property
    def node(self):
        """
        Returns the active/default Node that should be used for most communication in the system.

        Defers to the exchange manager, but only if it has been started; otherwise returns None.
        """
        if "EXCHANGE_MANAGER" in self._capabilities:
            return self.ex_manager.default_node

        return None

    @contextmanager
    def _push_status(self, new_status):
        """
        Temporarily sets the internal status flag.
        Use this as a decorator or in a with-statement before calling a temporary status changing
        method, like start_rel_from_url.
        """
        curstatus = self._status
        self._status = new_status
        try:
            yield
        finally:
            self._status = curstatus

    def serve_forever(self):
        """ Run the container until killed. """
        log.debug("In Container.serve_forever")

        if not self.proc_manager.proc_sup.running:
            self.start()

        # serve_forever short-circuits if CFG.system.immediate is set and the only spawned child is the CC-Agent
        num_procs = len(self.proc_manager.proc_sup.children)
        immediate = CFG.system.get('immediate', False)
        if not (immediate and num_procs == 1):  # only spawned greenlet is the CC-Agent

            # print a warning just in case
            if immediate and num_procs != 1:
                log.warn(
                    "CFG.system.immediate=True but number of spawned processes is not 1 (%d)",
                    num_procs)

            try:
                # This just waits in this Greenlet for all child processes to complete,
                # which is triggered somewhere else.
                self.proc_manager.proc_sup.join_children()
            except (KeyboardInterrupt, SystemExit) as ex:
                log.info(
                    'Received a kill signal, shutting down the container.')

                if hasattr(self, 'gl_parent_watch') and self.gl_parent_watch is not None:
                    self.gl_parent_watch.kill()

            except:
                log.exception('Unhandled error! Forcing container shutdown')
        else:
            log.debug(
                "Container.serve_forever short-circuiting due to CFG.system.immediate"
            )

        self.proc_manager.proc_sup.shutdown(CFG.cc.timeout.shutdown)

    def status(self):
        """
        Returns the internal status.
        """
        return self._status

    def _cleanup_pid(self):
        if self.pidfile:
            log.debug("Cleanup pidfile: %s", self.pidfile)
            try:
                os.remove(self.pidfile)
            except Exception as e:
                log.warn("Pidfile could not be deleted: %s" % str(e))
            self.pidfile = None
Example #43
                if obj_list:
                    spec_obj = obj_list[0]
                    spec_obj.spec = specs
                    self.container.resource_registry.update(spec_obj)
                else:
                    spec_obj = IonObject('UISpec', name="ION UI Specs", spec=specs)
                    res_id = self.container.resource_registry.create(spec_obj)
                spec_size = len(json.dumps(spec_obj.spec))
                log.info("Wrote UISpec object, size=%s", spec_size)

                if specs_path:
                    self.export_ui_specs(specs_path, specs=specs)
            else:
                # Write the full set of UIResource objects
                self._finalize_uirefs()
                ds = DatastoreManager.get_datastore_instance("resources")
                res = ds.create_mult(self.ui_obj_by_id.values(), allow_ids=True)
                log.info("Stored %s UI resource objects into resource registry" % (len(res)))
                res = ds.create_mult(self.ui_assocs, allow_ids=True)
                log.info("Stored %s UI resource associations into resource registry" % (len(res)))
        except Exception as ex:
            log.exception("Store in resource registry error err=%s" % (str(ex)))


    def _get_ui_files(self, path):
        dirurl = path or self.DEFAULT_UISPEC_LOCATION
        if not dirurl.endswith("/"):
            dirurl += "/"
        log.info("Accessing UI specs URL: %s", dirurl)
        dirpage = requests.get(dirurl).text
        csvfiles = re.findall(r'(?:href|HREF)="([-%/\w]+\.csv)"', dirpage)
Example #44
    def test_event_persist(self):
        events = [{'_id': '778dcc0811bd4b518ffd1ef873f3f457',
                   'base_types': ['Event'],
                   'description': 'Event to deliver the status of instrument.',
                   'origin': 'instrument_1',
                   'origin_type': 'PlatformDevice',
                   'status': 1,
                   'sub_type': 'input_voltage',
                   'time_stamps': [2.0, 2.0],
                   'ts_created': '1364121284585',
                   'type_': 'DeviceStatusEvent',
                   'valid_values': [-100, 100],
                   'values': [110.0, 111.0]},
                  {'_id': 'b40731684e41418082e1727f3cf61026',
                   'base_types': ['Event'],
                   'description': 'Event to deliver the status of instrument.',
                   'origin': 'instrument_1',
                   'origin_type': 'PlatformDevice',
                   'status': 1,
                   'sub_type': 'input_voltage',
                   'time_stamps': [2.0, 2.0],
                   'ts_created': '1364121284609',
                   'type_': 'DeviceStatusEvent',
                   'valid_values': [-100, 100],
                   'values': [110.0, 111.0]}]

        dsm = DatastoreManager()
        ds = dsm.get_datastore("events", "EVENTS")
        ds.delete_datastore()
        ds.create_datastore()

        event_repo = EventRepository(dsm)

        event1_dict = events[0].copy()
        event1_dict.pop("_id")
        event1_type = event1_dict.pop("type_")
        event1 = IonObject(event1_type, **event1_dict)
        event_repo.put_event(event1)

        events_r = event_repo.find_events(origin=event1_dict["origin"])
        self.assertEquals(len(events_r), 1)
        event1_read = events_r[0][2]
        self.assertEquals(event1_read.time_stamps, event1_dict["time_stamps"])

        event2_dict = events[1].copy()
        event2_id = event2_dict.pop("_id")
        event2_type = event2_dict.pop("type_")
        event2_obj = IonObject(event2_type, **event2_dict)
        event2_obj._id = event2_id
        event_repo.put_event(event2_obj)

        event1_dict = events[0].copy()
        event1_id = event1_dict.pop("_id")
        event1_type = event1_dict.pop("type_")
        event1_obj = IonObject(event1_type, **event1_dict)
        event1_obj._id = event1_id

        event2_dict = events[1].copy()
        event2_id = event2_dict.pop("_id")
        event2_type = event2_dict.pop("type_")
        event2_obj = IonObject(event2_type, **event2_dict)
        event2_obj._id = event2_id

        event_repo.put_events([event1_obj, event2_obj])
        events_r = event_repo.find_events(event_type='DeviceStatusEvent')
        self.assertEquals(len(events_r), 3)
Example #45
    def test_event_persist(self):
        events = [{
            '_id': '778dcc0811bd4b518ffd1ef873f3f457',
            'base_types': ['Event', 'ResourceEvent'],
            'description': 'Event to deliver the status of instrument.',
            'origin': 'instrument_1',
            'origin_type': 'PlatformDevice',
            'sub_type': 'input_voltage',
            'ts_created': '1364121284585',
            'type_': 'ResourceLifecycleEvent'
        }, {
            '_id': 'b40731684e41418082e1727f3cf61026',
            'base_types': ['Event', 'ResourceEvent'],
            'description': 'Event to deliver the status of instrument.',
            'origin': 'instrument_1',
            'origin_type': 'PlatformDevice',
            'sub_type': 'input_voltage',
            'ts_created': '1364121284609',
            'type_': 'ResourceModifiedEvent'
        }]

        dsm = DatastoreManager()
        ds = dsm.get_datastore(DataStore.DS_EVENTS,
                               DataStore.DS_PROFILE.EVENTS)
        ds.delete_datastore()
        ds.create_datastore()

        event_repo = EventRepository(dsm)

        # Store one event without ID
        event1_dict = events[0].copy()
        event1_dict.pop("_id")
        event1_type = event1_dict.pop("type_")
        event1 = IonObject(event1_type, **event1_dict)
        event_repo.put_event(event1)

        events_r = event_repo.find_events(origin=event1_dict["origin"])
        self.assertEquals(len(events_r), 1)
        event1_read = events_r[0][2]

        # Store one event with ID
        event2_dict = events[1].copy()
        event2_id = event2_dict.pop("_id")
        event2_type = event2_dict.pop("type_")
        event2_obj = IonObject(event2_type, **event2_dict)
        event2_obj._id = event2_id
        event_repo.put_event(event2_obj)

        # Store multiple new events with ID set and unset, non-existing
        event1_dict = events[0].copy()
        event1_id = event1_dict.pop("_id")
        event1_type = event1_dict.pop("type_")
        event1_obj = IonObject(event1_type, **event1_dict)
        event1_obj._id = create_unique_event_id()

        event2_dict = events[1].copy()
        event2_id = event2_dict.pop("_id")
        event2_type = event2_dict.pop("type_")
        event2_obj = IonObject(event2_type, **event2_dict)

        event_repo.put_events([event1_obj, event2_obj])
        events_r = event_repo.find_events(event_type='ResourceModifiedEvent')
        self.assertEquals(len(events_r), 2)
Example #46
    def test_directory(self):
        dsm = DatastoreManager()
        ds = dsm.get_datastore("resources", "DIRECTORY")
        ds.delete_datastore()
        ds.create_datastore()

        self.patch_cfg('pyon.ion.directory.CFG', {'service': {'directory': {'publish_events': False}}})

        directory = Directory(datastore_manager=dsm)
        directory.start()

        #self.addCleanup(directory.dir_store.delete_datastore)

        objs = directory.dir_store.list_objects()

        root = directory.lookup("/DIR")
        self.assert_(root is not None)

        entry = directory.lookup("/temp")
        self.assert_(entry is None)

        entry_old = directory.register("/", "temp")
        self.assertEquals(entry_old, None)

        # Create a node
        entry = directory.lookup("/temp")
        self.assertEquals(entry, {} )

        # The create case
        entry_old = directory.register("/temp", "entry1", foo="awesome")
        self.assertEquals(entry_old, None)
        entry_new = directory.lookup("/temp/entry1")
        self.assertEquals(entry_new, {"foo":"awesome"})

        # The update case
        entry_old = directory.register("/temp", "entry1", foo="ingenious")
        self.assertEquals(entry_old, {"foo": "awesome"})

        # The delete case
        entry_old = directory.unregister("/temp", "entry1")
        self.assertEquals(entry_old, {"foo": "ingenious"})
        entry_new = directory.lookup("/temp/entry1")
        self.assertEquals(entry_new, None)

        directory.register("/BranchA", "X", resource_id="rid1")
        directory.register("/BranchA", "Y", resource_id="rid2")
        directory.register("/BranchA", "Z", resource_id="rid3")
        directory.register("/BranchA/X", "a", resource_id="rid4")
        directory.register("/BranchA/X", "b", resource_id="rid5")
        directory.register("/BranchB", "k", resource_id="rid6")
        directory.register("/BranchB", "l", resource_id="rid7")
        directory.register("/BranchB/k", "m", resource_id="rid7")
        directory.register("/BranchB/k", "X")

        res_list = directory.find_by_value("/", attribute="resource_id", value="rid3")
        self.assertEquals(len(res_list), 1)
        self.assertEquals(res_list[0].org, "ION")
        self.assertEquals(res_list[0].parent, "/BranchA")
        self.assertEquals(res_list[0].key, "Z")

        res_list = directory.find_by_value("/", attribute="resource_id", value="rid34")
        self.assertEquals(len(res_list), 0)

        res_list = directory.find_by_value("/", attribute="resource_id", value="rid7")
        self.assertEquals(len(res_list), 2)

        res_list = directory.find_by_value("/BranchB", attribute="resource_id", value="rid7")
        self.assertEquals(len(res_list), 2)

        res_list = directory.find_by_value("/Branch", attribute="resource_id", value="rid7")
        self.assertEquals(len(res_list), 2)

        res_list = directory.find_by_value("/BranchB/k", attribute="resource_id", value="rid7")
        self.assertEquals(len(res_list), 1)

        res_list = directory.find_child_entries("/BranchB/k/m")
        self.assertEquals(len(res_list), 0)

        res_list = directory.find_child_entries("/BranchB")
        self.assertEquals(len(res_list), 2)

        res_list = directory.find_child_entries("/BranchB/k/m", direct_only=False)
        self.assertEquals(len(res_list), 0)

        res_list = directory.find_child_entries("/BranchB", direct_only=False)
        self.assertEquals(len(res_list), 4)

        res_list = directory.find_by_key("X")
        self.assertEquals(len(res_list), 2)

        res_list = directory.find_by_key("X", parent="/BranchB")
        self.assertEquals(len(res_list), 1)

        entry_list = directory.lookup_mult("/BranchA", ["X", "Z"])
        self.assertEquals(len(entry_list), 2)
        self.assertEquals(entry_list[0]["resource_id"], "rid1")
        self.assertEquals(entry_list[1]["resource_id"], "rid3")

        entry_list = directory.lookup_mult("/BranchA", ["Y", "FOO"])
        self.assertEquals(len(entry_list), 2)
        self.assertEquals(entry_list[0]["resource_id"], "rid2")
        self.assertEquals(entry_list[1], None)

        # Test prevent duplicate entries
        directory.register("/some", "dupentry", foo="ingenious")
        de = directory.lookup("/some/dupentry", return_entry=True)
        de1_attrs = de.__dict__.copy()
        del de1_attrs["_id"]
        del de1_attrs["_rev"]
        del de1_attrs["type_"]
        de1 = DirEntry(**de1_attrs)
        with self.assertRaises(BadRequest) as ex:
            de_id1, _ = directory.dir_store.create(de1)
        # Assert outside the with-block: create() raises, so code after it
        # inside the block would never run.
        self.assertTrue(ex.exception.message.startswith("DirEntry already exists"))

        res_list = directory.find_by_key("dupentry", parent="/some")
        self.assertEquals(1, len(res_list))
Example #47
    # Nested benchmark worker: num_read, num_thread and self.res_ids come from
    # the enclosing test method's scope.
    def _proc():
        rr_store = DatastoreManager.get_datastore_instance("resources")
        for i in xrange(int(num_read / num_thread)):
            res_obj = rr_store.read(self.res_ids[random.randint(0, len(self.res_ids) - 1)])
Example #48
    def __init__(self):
        from pyon.core.bootstrap import container_instance
        self.container = container_instance
        self.rr = self.container.resource_registry
        self.rr_store = DatastoreManager.get_datastore_instance("resources")
        self.timer = ooi.timer.Timer()
Example #49
    def bulk_delete(cls, objs):
        for ds_name in objs:
            ds = DatastoreManager.get_datastore_instance(ds_name)
            for obj in objs[ds_name]:
                ds.delete(obj["_id"])
Example #50
    def test_directory_lock(self):
        dsm = DatastoreManager()
        ds = dsm.get_datastore("resources", "DIRECTORY")
        ds.delete_datastore()
        ds.create_datastore()

        self.patch_cfg('pyon.ion.directory.CFG', {'service': {'directory': {'publish_events': False}}})

        directory = Directory(datastore_manager=dsm)
        directory.start()

        lock1 = directory.acquire_lock("LOCK1", lock_info=dict(process="proc1"))
        self.assertEquals(lock1, True)

        lock2 = directory.acquire_lock("LOCK1", lock_info=dict(process="proc2"))
        self.assertEquals(lock2, False)

        with self.assertRaises(BadRequest):
            directory.acquire_lock("LOCK/SOME")

        with self.assertRaises(BadRequest):
            directory.release_lock("LOCK/SOME")

        with self.assertRaises(NotFound):
            directory.release_lock("LOCK2")

        directory.release_lock("LOCK1")

        lock1 = directory.acquire_lock("LOCK1", lock_info=dict(process="proc3"))
        self.assertEquals(lock1, True)

        # TEST: With lock holders

        lock5 = directory.acquire_lock("LOCK5", lock_holder="proc1")
        self.assertEquals(lock5, True)

        lock5 = directory.acquire_lock("LOCK5", lock_holder="proc1")
        self.assertEquals(lock5, True)

        lock5 = directory.acquire_lock("LOCK5", lock_holder="proc2")
        self.assertEquals(lock5, False)

        directory.release_lock("LOCK5")

        # TEST: Timeout
        lock5 = directory.acquire_lock("LOCK5", lock_holder="proc1", timeout=100)
        self.assertEquals(lock5, True)

        lock5 = directory.acquire_lock("LOCK5", lock_holder="proc2")
        self.assertEquals(lock5, False)

        res = directory.is_locked("LOCK5")
        self.assertEquals(res, True)

        gevent.sleep(0.15)

        res = directory.is_locked("LOCK5")
        self.assertEquals(res, False)

        lock5 = directory.acquire_lock("LOCK5", lock_holder="proc2", timeout=100)
        self.assertEquals(lock5, True)

        gevent.sleep(0.15)

        # TEST: Holder self renew
        lock5 = directory.acquire_lock("LOCK5", lock_holder="proc2", timeout=100)
        self.assertEquals(lock5, True)

        directory.stop()
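
    # A minimal sketch built on the lock API exercised above (helper name and
    # error type are assumptions, not from the source): a critical section that
    # always releases its directory lock, even when the work fails.
    def _with_directory_lock(self, directory, name, holder, timeout=10000):
        if not directory.acquire_lock(name, lock_holder=holder, timeout=timeout):
            raise Exception("lock '%s' is held elsewhere" % name)
        try:
            pass  # ... critical work would go here ...
        finally:
            directory.release_lock(name)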
Example #51
            try:
                with open(filename, "rb") as csvfile:
                    reader = self._get_csv_reader(csvfile)
                    for row in reader:
                        catfunc(row)
                        row_do += 1
            except IOError as ioe:
                log.warn("UI category file %s error: %s" %
                         (filename, str(ioe)))

            log.info(
                "Loaded UI category %s: %d rows imported, %d rows skipped" %
                (category, row_do, row_skip))

        try:
            ds = DatastoreManager.get_datastore_instance("resources")
            self._finalize_uirefs(ds)
            res = ds.create_mult(self.ui_obj_by_id.values(), allow_ids=True)
            log.info("Loaded %s UI resource objects into resource registry" %
                     (len(res)))
            res = ds.create_mult(self.ui_assocs)
            log.info(
                "Loaded %s UI resource associations into resource registry" %
                (len(res)))
        except Exception as ex:
            log.exception("load error err=%s" % (str(ex)))

    def _add_ui_object(self, refid, obj):
        while refid in self.ui_objs:
            log.warn("Object duplicate id=%s, obj=%s" % (refid, obj))
            refid = refid + "!"
Example #52
    def test_directory(self):
        dsm = DatastoreManager()
        ds = dsm.get_datastore("resources")
        ds.delete_datastore()
        ds.create_datastore()

        directory = Directory(datastore_manager=dsm)

        #self.addCleanup(directory.dir_store.delete_datastore)

        objs = directory.dir_store.list_objects()
        self.assert_("_design/directory" in objs)

        root = directory.lookup("/DIR")
        self.assert_(root is not None)

        entry = directory.lookup("/temp")
        self.assert_(entry is None)

        entry_old = directory.register("/", "temp")
        self.assertEquals(entry_old, None)

        # Create a node
        entry = directory.lookup("/temp")
        self.assertEquals(entry, {})

        # The create case
        entry_old = directory.register("/temp", "entry1", foo="awesome")
        self.assertEquals(entry_old, None)
        entry_new = directory.lookup("/temp/entry1")
        self.assertEquals(entry_new, {"foo": "awesome"})

        # The update case
        entry_old = directory.register("/temp", "entry1", foo="ingenious")
        self.assertEquals(entry_old, {"foo": "awesome"})

        # The delete case
        entry_old = directory.unregister("/temp", "entry1")
        self.assertEquals(entry_old, {"foo": "ingenious"})
        entry_new = directory.lookup("/temp/entry1")
        self.assertEquals(entry_new, None)

        directory.register("/BranchA", "X", resource_id="rid1")
        directory.register("/BranchA", "Y", resource_id="rid2")
        directory.register("/BranchA", "Z", resource_id="rid3")
        directory.register("/BranchA/X", "a", resource_id="rid4")
        directory.register("/BranchA/X", "b", resource_id="rid5")
        directory.register("/BranchB", "k", resource_id="rid6")
        directory.register("/BranchB", "l", resource_id="rid7")
        directory.register("/BranchB/k", "m", resource_id="rid7")
        directory.register("/BranchB/k", "X")

        res_list = directory.find_by_value("/",
                                           attribute="resource_id",
                                           value="rid3")
        self.assertEquals(len(res_list), 1)
        self.assertEquals(res_list[0].org, "ION")
        self.assertEquals(res_list[0].parent, "/BranchA")
        self.assertEquals(res_list[0].key, "Z")

        res_list = directory.find_by_value("/",
                                           attribute="resource_id",
                                           value="rid34")
        self.assertEquals(len(res_list), 0)

        res_list = directory.find_by_value("/",
                                           attribute="resource_id",
                                           value="rid7")
        self.assertEquals(len(res_list), 2)

        res_list = directory.find_by_value("/BranchB",
                                           attribute="resource_id",
                                           value="rid7")
        self.assertEquals(len(res_list), 2)

        res_list = directory.find_by_value("/Branch",
                                           attribute="resource_id",
                                           value="rid7")
        self.assertEquals(len(res_list), 2)

        res_list = directory.find_by_value("/BranchB/k",
                                           attribute="resource_id",
                                           value="rid7")
        self.assertEquals(len(res_list), 1)

        res_list = directory.find_child_entries("/BranchB/k/m")
        self.assertEquals(len(res_list), 0)

        res_list = directory.find_child_entries("/BranchB")
        self.assertEquals(len(res_list), 2)

        res_list = directory.find_child_entries("/BranchB/k/m",
                                                direct_only=False)
        self.assertEquals(len(res_list), 0)

        res_list = directory.find_child_entries("/BranchB", direct_only=False)
        self.assertEquals(len(res_list), 4)

        res_list = directory.find_by_key("X")
        self.assertEquals(len(res_list), 2)

        res_list = directory.find_by_key("X", parent="/BranchB")
        self.assertEquals(len(res_list), 1)

        directory.close()

class UserNotificationService(BaseUserNotificationService):
    """
    A service that provides users with an API for CRUD operations on notifications.
    """

    def on_start(self):

        #---------------------------------------------------------------------------------------------------
        # Get the event Repository
        #---------------------------------------------------------------------------------------------------

        self.event_repo = self.container.instance.event_repository

        self.smtp_client = setting_up_smtp_client()

        self.ION_NOTIFICATION_EMAIL_ADDRESS = '*****@*****.**'

        #---------------------------------------------------------------------------------------------------
        # Create an event processor
        #---------------------------------------------------------------------------------------------------

        self.event_processor = EmailEventProcessor(self.smtp_client)

        #---------------------------------------------------------------------------------------------------
        # load event originators, types, and table
        #---------------------------------------------------------------------------------------------------

        self.event_types = CFG.event.types
        self.event_table = {}

        #---------------------------------------------------------------------------------------------------
        # Get the clients
        #---------------------------------------------------------------------------------------------------

        self.discovery = DiscoveryServiceClient()
        self.process_dispatcher = ProcessDispatcherServiceClient()
        self.datastore_manager = DatastoreManager()

        self.event_publisher = EventPublisher()
        self.scheduler_service = SchedulerService()

    def on_quit(self):

        pass

    def create_notification(self, notification=None, user_id=''):
        """
        Persists the provided NotificationRequest object for the specified Origin id and
        associates the Notification resource with the user_id string. The returned id is
        the internal id by which the NotificationRequest will be identified in the data store.

        @param notification        NotificationRequest
        @param user_id             str
        @retval notification_id    str
        @throws BadRequest    if object passed has _id or _rev attribute

        """

        if not user_id:
            raise BadRequest("User id not provided.")

        #---------------------------------------------------------------------------------------------------
        # Persist Notification object as a resource if it has not already been persisted
        #---------------------------------------------------------------------------------------------------

        # find all notifications in the system
        notifs, _ = self.clients.resource_registry.find_resources(restype = RT.NotificationRequest)

        # if the notification has already been registered, simply use the old id
        if notification in notifs:
            log.warning("Notification object has already been created in resource registry before for another user. No new id to be generated.")
            notification_id = notification._id
        else:
            # since the notification has not been registered yet, register it and get the id
            notification_id, _ = self.clients.resource_registry.create(notification)

        #-------------------------------------------------------------------------------------------------------------------
        # read the registered notification request object because this has an _id and is more useful
        #-------------------------------------------------------------------------------------------------------------------

        notification = self.clients.resource_registry.read(notification_id)

        #-----------------------------------------------------------------------------------------------------------
        # Create an event processor for user. This sets up callbacks etc.
        # As a side effect this updates the UserInfo object and also the user info and reverse user info dictionaries.
        #-----------------------------------------------------------------------------------------------------------

        user = self.event_processor.add_notification_for_user(notification_request=notification, user_id=user_id)

        #-------------------------------------------------------------------------------------------------------------------
        # Allow the indexes to be updated for ElasticSearch.
        # We publish the event only after this so that the notification workers
        # can reload the user info properly.
        #-------------------------------------------------------------------------------------------------------------------

        # todo: This is to allow time for the indexes to be created before publishing ReloadUserInfoEvent for notification workers.
        # todo: When things are more refined, it would be nice to have an event generated when the
        # indexes are updated, so that a subscriber here could publish the reload user info event
        # upon receiving it.
        time.sleep(4)

        #-------------------------------------------------------------------------------------------------------------------
        # Generate an event that can be picked by a notification worker so that it can update its user_info dictionary
        #-------------------------------------------------------------------------------------------------------------------
        log.debug("(create notification) Publishing ReloadUserInfoEvent for notification_id: %s" % notification_id)

        self.event_publisher.publish_event(event_type="ReloadUserInfoEvent",
                                           origin="UserNotificationService",
                                           description="A notification has been created.",
                                           notification_id=notification_id)

        return notification_id
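
    # A minimal usage sketch (hypothetical client-side code; the client object and
    # the "name" field below are illustrative, not defined in this module):
    #
    #   notification = NotificationRequest(name="my_notification",
    #                                      origin="some_origin",
    #                                      event_type="ResourceLifecycleEvent")
    #   notification_id = uns_client.create_notification(notification=notification,
    #                                                    user_id=user_id)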

    def update_notification(self, notification=None, user_id=''):
        """Updates the provided NotificationRequest object.  Throws NotFound exception if
        an existing version of NotificationRequest is not found.  Throws Conflict if
        the provided NotificationRequest object is not based on the latest persisted
        version of the object.

        @param notification     NotificationRequest
        @throws BadRequest      if object does not have _id or _rev attribute
        @throws NotFound        object with specified id does not exist
        @throws Conflict        object not based on latest persisted object version
        """

        #-------------------------------------------------------------------------------------------------------------------
        # Get the old notification
        #-------------------------------------------------------------------------------------------------------------------

        old_notification = self.clients.resource_registry.read(notification._id)

        #-------------------------------------------------------------------------------------------------------------------
        # Update the notification
        #-------------------------------------------------------------------------------------------------------------------

        self.clients.resource_registry.update(notification)

        #-------------------------------------------------------------------------------------------------------------------
        # Re-read the notification object to make sure we have the newly persisted notification request object
        #-------------------------------------------------------------------------------------------------------------------

        notification_id = notification._id
        notification = self.clients.resource_registry.read(notification_id)

        #------------------------------------------------------------------------------------
        # Update the UserInfo object
        #------------------------------------------------------------------------------------

        user = self.update_user_info_object(user_id, notification, old_notification)

        #------------------------------------------------------------------------------------
        # Update the user_info dictionary maintained by UNS
        #------------------------------------------------------------------------------------

        self.update_user_info_dictionary(user, notification, old_notification)

        #-------------------------------------------------------------------------------------------------------------------
        # Generate an event that can be picked by notification workers so that they can update their user_info dictionary
        #-------------------------------------------------------------------------------------------------------------------
        log.info("(update notification) Publishing ReloadUserInfoEvent for updated notification")

        self.event_publisher.publish_event(event_type="ReloadUserInfoEvent",
                                           origin="UserNotificationService",
                                           description="A notification has been updated.")

    def read_notification(self, notification_id=''):
        """Returns the NotificationRequest object for the specified notification id.
        Throws exception if id does not match any persisted NotificationRequest
        objects.

        @param notification_id    str
        @retval notification    NotificationRequest
        @throws NotFound    object with specified id does not exist
        """
        notification = self.clients.resource_registry.read(notification_id)

        return notification

    def delete_notification(self, notification_id=''):
        """For now, permanently deletes NotificationRequest object with the specified
        id. Throws exception if id does not match any persisted NotificationRequest.

        @param notification_id    str
        @throws NotFound    object with specified id does not exist
        """

        #-------------------------------------------------------------------------------------------------------------------
        # Stop the event subscriber for the notification
        #-------------------------------------------------------------------------------------------------------------------
        notification_request = self.clients.resource_registry.read(notification_id)

        self.event_processor.stop_notification_subscriber(notification_request=notification_request)

        #-------------------------------------------------------------------------------------------------------------------
        # delete the notification from the user_info and reverse_user_info dictionaries
        #-------------------------------------------------------------------------------------------------------------------

        self.delete_notification_from_user_info(notification_id)

        #-------------------------------------------------------------------------------------------------------------------
        # delete from the resource registry
        #-------------------------------------------------------------------------------------------------------------------

        self.clients.resource_registry.delete(notification_id)

        #-------------------------------------------------------------------------------------------------------------------
        # Generate an event that can be picked by a notification worker so that it can update its user_info dictionary
        #-------------------------------------------------------------------------------------------------------------------
        log.info("(delete notification) Publishing ReloadUserInfoEvent for notification_id: %s" % notification_id)

        self.event_publisher.publish_event(event_type="ReloadUserInfoEvent",
                                           origin="UserNotificationService",
                                           description="A notification has been deleted.",
                                           notification_id=notification_id)

    def delete_notification_from_user_info(self, notification_id):
        '''
        Helper method to delete the notification from the user_info dictionary
        '''

        for user_name, value in self.event_processor.user_info.iteritems():
            # iterate over a copy of the list, since entries may be removed from it
            for notif in value['notifications'][:]:
                if notification_id == notif._id:
                    # remove the notification
                    value['notifications'].remove(notif)
                    # remove the notification_subscription
                    self.event_processor.user_info[user_name]['notification_subscriptions'].pop(notification_id)

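        # Recompute the reverse lookup structure (presumably mapping event attributes
        # back to interested users) from the updated user_info, so it no longer
        # references the deleted notification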
        self.event_processor.reverse_user_info = calculate_reverse_user_info(self.event_processor.user_info)

    def find_events(self, origin='', type='', min_datetime='', max_datetime='', limit=-1, descending=False):
        """Returns a list of events that match the specified search criteria. Will throw a not NotFound exception
        if no events exist for the given parameters.

        @param origin         str
        @param type           str
        @param min_datetime   str
        @param max_datetime   str
        @param limit          int         (limits the number of results; a negative value means unlimited)
        @param descending     boolean     (if True, reverse order (of production time) is applied, e.g. most recent first)
        @retval event_list    []
        @throws NotFound    object with specified parameters does not exist
        """

        if min_datetime and max_datetime:
            search_time = "SEARCH 'ts_created' VALUES FROM %s TO %s FROM 'events_index'" % (min_datetime, max_datetime)
        else:
            search_time = 'search "ts_created" is "*" from "events_index"'

        if origin:
            search_origin = 'search "origin" is "%s" from "events_index"' % origin
        else:
            search_origin = 'search "origin" is "*" from "events_index"'

        if type:
            search_type = 'search "type_" is "%s" from "events_index"' % type
        else:
            search_type = 'search "type_" is "*" from "events_index"'

        search_string = search_time + ' and ' + search_origin + ' and ' + search_type
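        # Illustrative combined query (hypothetical values): with origin="instrument_1",
        # type="ResourceEvent" and no time bounds, search_string becomes:
        #   search "ts_created" is "*" from "events_index" and
        #   search "origin" is "instrument_1" from "events_index" and
        #   search "type_" is "ResourceEvent" from "events_index"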

        # get the list of ids corresponding to the events
        ret_vals = self.discovery.parse(search_string)
        log.debug("(find_events) Discovery search returned the following event ids: %s" % ret_vals)

        events = []
        datastore = self.datastore_manager.get_datastore('events')
        for event_id in ret_vals:
            event_obj = datastore.read(event_id)
            events.append(event_obj)

        log.debug("(find_events) UNS found the following relevant events: %s" % events)

        if limit > -1:
            # use a slice so that a limit larger than the number of events does not
            # raise an IndexError, and avoid shadowing the builtin 'list'
            return events[:limit]

        #todo implement time ordering: ascending or descending

        return events

    def publish_event(self, event=None, scheduler_entry=None):
        '''
        Publish a general event at a certain time using the UNS

        @param event Event
        @param scheduler_entry SchedulerEntry This object is created through Scheduler Service
        '''

        log.debug("UNS to publish on schedule the event: %s" % event)

        #--------------------------------------------------------------------------------
        # Set up a subscriber to get the nod from the scheduler to publish the event
        #--------------------------------------------------------------------------------
        def publish(message, headers):
            self.event_publisher._publish_event(event_msg=event,
                                                origin=event.origin,
                                                event_type=event.type_)
            log.info("UNS published an event in response to a nod from the Scheduler Service.")

        event_subscriber = EventSubscriber( event_type = "ResourceEvent", callback=publish)
        event_subscriber.start()

        # Use the scheduler to set up a timer
        self.scheduler_service.create_timer(scheduler_entry)

    def create_worker(self, number_of_workers=1):
        '''
        Creates notification workers

        @param number_of_workers int
        @retval pids list

        '''

        pids = []

        for n in xrange(number_of_workers):

            process_definition = ProcessDefinition(name='notification_worker_%s' % n)

            process_definition.executable = {
                'module': 'ion.processes.data.transforms.notification_worker',
                'class':'NotificationWorker'
            }
            process_definition_id = self.process_dispatcher.create_process_definition(process_definition=process_definition)

            # ------------------------------------------------------------------------------------
            # Process Spawning
            # ------------------------------------------------------------------------------------

            pid2 = self.process_dispatcher.create_process(process_definition_id)

            #@todo put in a configuration
            configuration = {}
            configuration['process'] = dict({
                'name': 'notification_worker_%s' % n,
                'type':'simple'
            })

            pid = self.process_dispatcher.schedule_process(
                process_definition_id,
                configuration = configuration,
                process_id=pid2
            )

            pids.append(pid)

        return pids
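
    # Illustrative call (hypothetical): spawn three notification workers and
    # collect their process ids:
    #
    #   pids = uns.create_worker(number_of_workers=3)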


    def process_batch(self, start_time = 0, end_time = 10):
        '''
        This method is launched when a process_batch event is received. The user info dictionary maintained
        by the User Notification Service is used to query the event repository for all events for a particular
        user that occurred in the provided time interval; an email containing a digest of those events is then
        sent to the user.
        '''
        for user_name, value in self.event_processor.user_info.iteritems():

            notifications = value['notifications']

            events_for_message = []

            search_time = "SEARCH 'ts_created' VALUES FROM %s TO %s FROM 'events_index'" % (start_time, end_time)

            for notification in notifications:

                if notification.origin:
                    search_origin = 'search "origin" is "%s" from "events_index"' % notification.origin
                else:
                    search_origin = 'search "origin" is "*" from "events_index"'

                if notification.origin_type:
                    search_origin_type = 'search "origin_type" is "%s" from "events_index"' % notification.origin_type
                else:
                    search_origin_type = 'search "origin_type" is "*" from "events_index"'

                if notification.event_type:
                    search_event_type = 'search "type_" is "%s" from "events_index"' % notification.event_type
                else:
                    search_event_type = 'search "type_" is "*" from "events_index"'

                search_string = search_time + ' and ' + search_origin + ' and ' + search_origin_type + ' and ' + search_event_type

                # get the list of ids corresponding to the events
                ret_vals = self.discovery.parse(search_string)

                datastore = self.datastore_manager.get_datastore('events')
                for event_id in ret_vals:
                    event_obj = datastore.read(event_id)
                    events_for_message.append(event_obj)

            log.debug("Found following events of interest to user, %s: %s" % (user_name, events_for_message))

            # send a notification email to each user using a _send_email() method
            if events_for_message:
                self.format_and_send_email(events_for_message, user_name)

    def format_and_send_email(self, events_for_message, user_name):
        '''
        Format the message for a particular user, containing information about the events the user is to be notified about
        '''

        message = str(events_for_message)
        log.info("The user, %s, will get the following events in his batch notification email: %s" % (user_name, message))

        msg_body = ''
        count = 1
        for event in events_for_message:
            # build the email from the event content
            msg_body += string.join(("\r\n",
                                     "Event %s: %s" %  (count, event),
                                    "",
                                    "Originator: %s" %  event.origin,
                                    "",
                                    "Description: %s" % event.description ,
                                    "",
                                    "Event time stamp: %s" %  event.ts_created,
                                    "\r\n",
                                    "------------------------"
                                    "\r\n"))
            count += 1

        msg_body += "You received this notification from ION because you asked to be " + \
                    "notified about this event from this source. " + \
                    "To modify or remove notifications about this event, " + \
                    "please access My Notifications Settings in the ION Web UI. " + \
                    "Do not reply to this email.  This email address is not monitored " + \
                    "and the emails will not be read. \r\n "


        log.debug("The email has the following message body: %s" % msg_body)

        msg_subject = "(SysName: " + get_sys_name() + ") ION event "

        self.send_batch_email(msg_body=msg_body,
                              msg_subject=msg_subject,
                              msg_recipient=self.event_processor.user_info[user_name]['user_contact'].email,
                              smtp_client=self.smtp_client)

    def send_batch_email(self, msg_body, msg_subject, msg_recipient, smtp_client):
        '''
        Send the email
        '''

        msg = MIMEText(msg_body)
        msg['Subject'] = msg_subject
        msg['From'] = self.ION_NOTIFICATION_EMAIL_ADDRESS
        msg['To'] = msg_recipient
        log.debug("EventProcessor.subscription_callback(): sending email to %s"\
        %msg_recipient)

        smtp_sender = CFG.get_safe('server.smtp.sender')

        smtp_client.sendmail(smtp_sender, msg_recipient, msg.as_string())
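
    # Illustrative call (hypothetical recipient; smtp_client is assumed to be an
    # smtplib.SMTP-compatible object, as set up in on_start):
    #
    #   self.send_batch_email(msg_body, msg_subject,
    #                         "recipient@example.com", self.smtp_client)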

    def update_user_info_object(self, user_id, new_notification, old_notification):
        '''
        Update the UserInfo object. If the passed-in parameter, old_notification, is None, there is no old notification to remove
        '''

        #------------------------------------------------------------------------------------
        # read the user
        #------------------------------------------------------------------------------------

        user = self.clients.resource_registry.read(user_id)

        if not user:
            raise BadRequest("No user with the provided user_id: %s" % user_id)

        notifications = []
        for item in user.variables:
            if item['name'] == 'notifications':
                if old_notification and old_notification in item['value']:

                    notifications = item['value']
                    # remove the old notification
                    notifications.remove(old_notification)

                # put in the new notification
                notifications.append(new_notification)

                item['value'] = notifications

                break

        #------------------------------------------------------------------------------------
        # update the resource registry
        #------------------------------------------------------------------------------------

        self.clients.resource_registry.update(user)

        return user

    def update_user_info_dictionary(self, user, new_notification, old_notification):
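        '''
        Helper method to update the user_info dictionary maintained by the event processor:
        remove the old notification, add the new one, refresh the user's contact info and
        recompute the reverse user info
        '''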

        #------------------------------------------------------------------------------------
        # Remove the old notifications
        #------------------------------------------------------------------------------------

        if old_notification in self.event_processor.user_info[user.name]['notifications']:

            # remove from notifications list
            self.event_processor.user_info[user.name]['notifications'].remove(old_notification)

            #------------------------------------------------------------------------------------
            # update the notification subscription object
            #------------------------------------------------------------------------------------

            # get the old notification_subscription
            notification_subscription = self.event_processor.user_info[user.name]['notification_subscriptions'].pop(old_notification._id)

            # update that old notification subscription
            notification_subscription._res_obj = new_notification

            # feed the updated notification subscription back into the user info dictionary
            self.event_processor.user_info[user.name]['notification_subscriptions'][old_notification._id] = notification_subscription

        #------------------------------------------------------------------------------------
        # find the already existing notifications for the user
        #------------------------------------------------------------------------------------

        notifications = self.event_processor.user_info[user.name]['notifications']
        notifications.append(new_notification)

        #------------------------------------------------------------------------------------
        # update the user info - contact information, notifications
        #------------------------------------------------------------------------------------

        self.event_processor.user_info[user.name]['user_contact'] = user.contact
        self.event_processor.user_info[user.name]['notifications'] = notifications

        self.event_processor.reverse_user_info = calculate_reverse_user_info(self.event_processor.user_info)
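
# A rough sketch of the batch-digest flow above (hypothetical driver code; t0 and
# t1 are illustrative timestamp bounds, and the service is assumed to be running
# inside a container):
#
#   uns = UserNotificationService()
#   uns.process_batch(start_time=t0, end_time=t1)
#
# process_batch queries the events index once per notification per user and sends
# at most one digest email per user via format_and_send_email.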
Exemple #54
0
class Container(BaseContainerAgent):
    """
    The Capability Container. Its purpose is to spawn/monitor processes and services
    that do the bulk of the work in the ION system.
    """

    # Singleton static variables
    node        = None
    id          = None
    name        = None
    pidfile     = None
    instance    = None

    def __init__(self, *args, **kwargs):
        BaseContainerAgent.__init__(self, *args, **kwargs)

        self._is_started = False

        # set id and name (as they are set in base class call)
        self.id = string.replace('%s_%d' % (os.uname()[1], os.getpid()), ".", "_")
        self.name = "cc_agent_%s" % self.id

        Container.instance = self

        # TODO: Bug: Replacing the CFG instance does not work because references to it are already public. Update it in place instead.
        dict_merge(CFG, kwargs, inplace=True)
        from pyon.core import bootstrap
        bootstrap.container_instance = self
        bootstrap.assert_configuration(CFG)
        log.debug("Container (sysname=%s) initializing ..." % bootstrap.get_sys_name())

        # Keep track of the overrides from the command-line, so they can trump app/rel file data
        self.spawn_args = kwargs

        # Load object and service registry etc.
        bootstrap_pyon()

        # Create this Container's specific ExchangeManager instance
        self.ex_manager = ExchangeManager(self)

        # Create this Container's specific ProcManager instance
        self.proc_manager = ProcManager(self)

        # Create this Container's specific AppManager instance
        self.app_manager = AppManager(self)

        # DatastoreManager - controls access to Datastores (both mock and couch backed)
        self.datastore_manager = DatastoreManager()

        # File System - Interface to the OS File System, using correct path names and setups
        self.file_system = FileSystem(CFG)

        # Governance Controller - manages the governance related interceptors
        self.governance_controller = GovernanceController(self)

        # sFlow manager - controls sFlow stat emission
        self.sflow_manager = SFlowManager(self)

        # Coordinates the container start
        self._is_started = False
        self._capabilities = []
        self._status = "INIT"

        # protection for when the container itself is used as a Process for clients
        self.container = self

        log.debug("Container initialized, OK.")


    def start(self):
        log.debug("Container starting...")
        if self._is_started:
            raise ContainerError("Container already started")

        # Check if this UNIX process already runs a Container.
        self.pidfile = "cc-pid-%d" % os.getpid()
        if os.path.exists(self.pidfile):
            raise ContainerError("Container.on_start(): Container is a singleton per UNIX process. Existing pid file found: %s" % self.pidfile)

        # write out a PID file containing our agent messaging name
        with open(self.pidfile, 'w') as f:
            pid_contents = {'messaging': dict(CFG.server.amqp),
                            'container-agent': self.name,
                            'container-xp': bootstrap.get_sys_name() }
            f.write(msgpack.dumps(pid_contents))
            atexit.register(self._cleanup_pid)
            self._capabilities.append("PID_FILE")

        # set up abnormal termination handler for this container
        def handl(signum, frame):
            try:
                self._cleanup_pid()     # cleanup the pidfile first
                self.quit()             # now try to quit - will not error on second cleanup pidfile call
            finally:
                signal.signal(signal.SIGTERM, self._normal_signal)
                os.kill(os.getpid(), signal.SIGTERM)
        self._normal_signal = signal.signal(signal.SIGTERM, handl)

        self._capabilities.append("EXCHANGE_CONNECTION")

        self.datastore_manager.start()
        self._capabilities.append("DATASTORE_MANAGER")

        # Instantiate Directory and self-register
        self.directory = Directory()
        self.directory.register("/Containers", self.id, cc_agent=self.name)
        self.directory.register("/Containers/%s" % self.id, "Processes")
        self._capabilities.append("DIRECTORY")

        # Event repository
        self.event_repository = EventRepository()
        self.event_pub = EventPublisher()

        self._capabilities.append("EVENT_REPOSITORY")

        # Local resource registry
        self.resource_registry = ResourceRegistry()
        self._capabilities.append("RESOURCE_REGISTRY")

        # Persistent objects
        self.datastore_manager.get_datastore("objects", DataStore.DS_PROFILE.OBJECTS)

        # State repository
        self.state_repository = StateRepository()
        self._capabilities.append("STATE_REPOSITORY")

        # Start ExchangeManager, which starts the node (broker connection)
        self.node, self.ioloop = self.ex_manager.start()
        self._capabilities.append("EXCHANGE_MANAGER")

        self.proc_manager.start()
        self._capabilities.append("PROC_MANAGER")

        self.app_manager.start()
        self._capabilities.append("APP_MANAGER")

        self.governance_controller.start()
        self._capabilities.append("GOVERNANCE_CONTROLLER")

        if CFG.container.get('sflow', {}).get('enabled', False):
            self.sflow_manager.start()
            self._capabilities.append("SFLOW_MANAGER")

        # Start the CC-Agent API
        rsvc = ProcessRPCServer(node=self.node, from_name=self.name, service=self, process=self)

        # Start an ION process with the right kind of endpoint factory
        proc = self.proc_manager.proc_sup.spawn((CFG.cc.proctype or 'green', None), listener=rsvc)
        self.proc_manager.proc_sup.ensure_ready(proc)
        self._capabilities.append("CONTAINER_AGENT")

        self.event_pub.publish_event(event_type="ContainerLifecycleEvent",
                                     origin=self.id, origin_type="CapabilityContainer",
                                     sub_type="START",
                                     state=ContainerStateEnum.START)

        self._is_started    = True
        self._status        = "RUNNING"

        log.info("Container started, OK.")

    @contextmanager
    def _push_status(self, new_status):
        """
        Temporarily sets the internal status flag.
        Use this as a decorator or in a with-statement before calling a temporary status changing
        method, like start_rel_from_url.
        """
        curstatus = self._status
        self._status = new_status
        try:
            yield
        finally:
            self._status = curstatus
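
    # Illustrative use (status label hypothetical; start_rel_from_url is the
    # example named in the docstring above):
    #
    #   with self._push_status("START_REL"):
    #       self.start_rel_from_url(rel_url)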

    def serve_forever(self):
        """ Run the container until killed. """
        log.debug("In Container.serve_forever")
        
        if not self.proc_manager.proc_sup.running:
            self.start()

        # serve_forever short-circuits if immediate mode is on and the number of children is as expected
        num_procs = len(self.proc_manager.proc_sup.children)
        immediate = CFG.system.get('immediate', False)
        if not (immediate and num_procs == 1):  # only spawned greenlet is the CC-Agent

            # print a warning just in case
            if immediate and num_procs != 1:
                log.warn("CFG.system.immediate=True but number of spawned processes is not 1 (%d)", num_procs)

            try:
                # This just waits in this Greenlet for all child processes to complete,
                # which is triggered somewhere else.
                self.proc_manager.proc_sup.join_children()
            except (KeyboardInterrupt, SystemExit) as ex:
                log.info('Received a kill signal, shutting down the container.')
                watch_parent = CFG.system.get('watch_parent', None)
                if watch_parent:
                    watch_parent.kill()
            except:
                log.exception('Unhandled error! Forcing container shutdown')
        else:
            log.debug("Container.serve_forever short-circuiting due to CFG.system.immediate")

        self.proc_manager.proc_sup.shutdown(CFG.cc.timeout.shutdown)

    def status(self):
        """
        Returns the internal status.
        """
        return self._status
            
    def _cleanup_pid(self):
        if self.pidfile:
            log.debug("Cleanup pidfile: %s", self.pidfile)
            try:
                os.remove(self.pidfile)
            except Exception, e:
                log.warn("Pidfile could not be deleted: %s" % str(e))
            self.pidfile = None
Exemple #55
0
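# Hypothetical read-throughput worker from an (elided) benchmark: num_read,
# num_thread and self.res_ids are assumed to be defined in the enclosing scope;
# each worker performs its share of random reads against the resources datastore.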
    def _proc():
        rr_store = DatastoreManager.get_datastore_instance("resources")
        for i in xrange(int(num_read / num_thread)):
            res_obj = rr_store.read(self.res_ids[random.randint(0, len(self.res_ids) - 1)])
Exemple #56
0
    def test_directory(self):
        dsm = DatastoreManager()
        ds = dsm.get_datastore("resources", "DIRECTORY")
        ds.delete_datastore()
        ds.create_datastore()

        directory = Directory(datastore_manager=dsm)
        directory.start()

        #self.addCleanup(directory.dir_store.delete_datastore)

        objs = directory.dir_store.list_objects()
        if CFG.get_safe("container.datastore.default_server", "couchdb").startswith("couch"):
            self.assert_("_design/directory" in objs)

        root = directory.lookup("/DIR")
        self.assert_(root is not None)

        entry = directory.lookup("/temp")
        self.assert_(entry is None)

        entry_old = directory.register("/","temp")
        self.assertEquals(entry_old, None)

        # Create a node
        entry = directory.lookup("/temp")
        self.assertEquals(entry, {} )

        # The create case
        entry_old = directory.register("/temp", "entry1", foo="awesome")
        self.assertEquals(entry_old, None)
        entry_new = directory.lookup("/temp/entry1")
        self.assertEquals(entry_new, {"foo":"awesome"})

        # The update case
        entry_old = directory.register("/temp", "entry1", foo="ingenious")
        self.assertEquals(entry_old, {"foo":"awesome"})

        # The delete case
        entry_old = directory.unregister("/temp", "entry1")
        self.assertEquals(entry_old, {"foo":"ingenious"})
        entry_new = directory.lookup("/temp/entry1")
        self.assertEquals(entry_new, None)

        directory.register("/BranchA", "X", resource_id="rid1")
        directory.register("/BranchA", "Y", resource_id="rid2")
        directory.register("/BranchA", "Z", resource_id="rid3")
        directory.register("/BranchA/X", "a", resource_id="rid4")
        directory.register("/BranchA/X", "b", resource_id="rid5")
        directory.register("/BranchB", "k", resource_id="rid6")
        directory.register("/BranchB", "l", resource_id="rid7")
        directory.register("/BranchB/k", "m", resource_id="rid7")
        directory.register("/BranchB/k", "X")

        res_list = directory.find_by_value("/", attribute="resource_id", value="rid3")
        self.assertEquals(len(res_list), 1)
        self.assertEquals(res_list[0].org, "ION")
        self.assertEquals(res_list[0].parent, "/BranchA")
        self.assertEquals(res_list[0].key, "Z")

        res_list = directory.find_by_value("/", attribute="resource_id", value="rid34")
        self.assertEquals(len(res_list), 0)

        res_list = directory.find_by_value("/", attribute="resource_id", value="rid7")
        self.assertEquals(len(res_list), 2)

        res_list = directory.find_by_value("/BranchB", attribute="resource_id", value="rid7")
        self.assertEquals(len(res_list), 2)

        res_list = directory.find_by_value("/Branch", attribute="resource_id", value="rid7")
        self.assertEquals(len(res_list), 2)

        res_list = directory.find_by_value("/BranchB/k", attribute="resource_id", value="rid7")
        self.assertEquals(len(res_list), 1)

        res_list = directory.find_child_entries("/BranchB/k/m")
        self.assertEquals(len(res_list), 0)

        res_list = directory.find_child_entries("/BranchB")
        self.assertEquals(len(res_list), 2)

        res_list = directory.find_child_entries("/BranchB/k/m", direct_only=False)
        self.assertEquals(len(res_list), 0)

        res_list = directory.find_child_entries("/BranchB", direct_only=False)
        self.assertEquals(len(res_list), 4)

        res_list = directory.find_by_key("X")
        self.assertEquals(len(res_list), 2)

        res_list = directory.find_by_key("X", parent="/BranchB")
        self.assertEquals(len(res_list), 1)

        # Test _cleanup_outdated_entries
        directory.register("/some", "dupentry", foo="ingenious")
        de = directory.lookup("/some/dupentry", return_entry=True)
        de1_attrs = de.__dict__.copy()
        del de1_attrs["_id"]
        del de1_attrs["_rev"]
        del de1_attrs["type_"]
        de1 = DirEntry(**de1_attrs)
        de_id1,_ = directory.dir_store.create(de1)

        res_list = directory.find_by_key("dupentry", parent="/some")
        self.assertEquals(2, len(res_list))

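        # A plain lookup is expected to trigger the cleanup of outdated duplicate
        # entries, leaving a single entry behind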
        de = directory.lookup("/some/dupentry", return_entry=True)
        res_list = directory.find_by_key("dupentry", parent="/some")
        self.assertEquals(1, len(res_list))

        de1_attrs = de.__dict__.copy()
        del de1_attrs["_id"]
        del de1_attrs["_rev"]
        del de1_attrs["type_"]
        de1_attrs["ts_updated"] = str(int(de1_attrs["ts_updated"]) + 10)
        de1_attrs["attributes"]["unique"] = "NEW"
        de1 = DirEntry(**de1_attrs)
        de_id1,_ = directory.dir_store.create(de1)

        res_list = directory.find_by_key("dupentry", parent="/some")
        self.assertEquals(2, len(res_list))

        de = directory.lookup("/some/dupentry", return_entry=True)
        res_list = directory.find_by_key("dupentry", parent="/some")
        self.assertEquals(1, len(res_list))
        self.assertEquals("NEW", res_list[0].attributes["unique"])


        directory.stop()