Example #1
        def set_ports(pnode):
            platform_id = pnode.platform_id
            port_infos = rsn_oms.port.get_platform_ports(platform_id)
            if not isinstance(port_infos, dict):
                raise PlatformDriverException(
                    "%r: get_platform_ports response is not a dict: %s" % (
                    platform_id, port_infos))

            if log.isEnabledFor(logging.TRACE):
                log.trace("%r: port_infos: %s", platform_id, port_infos)

            if platform_id not in port_infos:
                raise PlatformDriverException(
                    "%r: get_platform_ports response does not include "
                    "platform_id: %s" % (platform_id, port_infos))

            ports = port_infos[platform_id]

            if not isinstance(ports, dict):
                raise PlatformDriverException(
                    "%r: get_platform_ports: entry for platform_id is "
                    "not a dict: %s" % (platform_id, ports))

            for port_id, dic in ports.iteritems():
                port = PortNode(port_id)
                port.set_state(dic['state'])
                pnode.add_port(port)

                # add connected instruments:
                instrs_res = rsn_oms.instr.get_connected_instruments(platform_id, port_id)
                if not isinstance(instrs_res, dict):
                    log.warn("%r: port_id=%r: get_connected_instruments "
                             "response is not a dict: %s" % (platform_id, port_id, instrs_res))
                    continue

                if log.isEnabledFor(logging.TRACE):
                    log.trace("%r: port_id=%r: get_connected_instruments "
                              "returned: %s" % (platform_id, port_id, instrs_res))

                if platform_id not in instrs_res:
                    raise PlatformDriverException(
                        "%r: port_id=%r: get_connected_instruments response "
                        "does not have entry for platform_id: %s" % (
                        platform_id, port_id, instrs_res))

                if port_id not in instrs_res[platform_id]:
                    raise PlatformDriverException(
                        "%r: port_id=%r: get_connected_instruments response "
                        "for platform_id does not have entry for port_id: %s" % (
                        platform_id, port_id, instrs_res[platform_id]))

                instr = instrs_res[platform_id][port_id]
                for instrument_id, attrs in instr.iteritems():
                    port.add_instrument(InstrumentNode(instrument_id, attrs))
Example #2
    def test_logging_handler(self):
        """ initial log level for ion.processes.event is INFO -- test we can change it to TRACE """
        config.replace_configuration('pyon/container/test/logging.yml')
        log.debug('this should probably not be logged')

        self.assertFalse(log.isEnabledFor(TRACE))
        #
        handler = pyon.container.management.LogLevelHandler()
        action = IonObject(OT.ChangeLogLevel, logger='pyon.container', level='TRACE')
        handler.handle_request(action)
        #
        self.assertTrue(log.isEnabledFor(TRACE))
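
A note on the pattern shared by these examples: TRACE is not a level the standard logging module defines, so the following is only a sketch, assuming an arbitrary level number and logger name, of how such a level can be registered and how the isEnabledFor guard avoids building messages that would be dropped anyway:

    import logging

    TRACE = 5  # assumed numeric value; the real project defines its own constant
    logging.addLevelName(TRACE, "TRACE")

    logging.basicConfig()
    log = logging.getLogger("pyon.container")
    log.setLevel(TRACE)

    # Guard the expensive formatting exactly as the examples above do.
    if log.isEnabledFor(TRACE):
        log.log(TRACE, "port_infos: %s", {"port_1": {"state": "ON"}})
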
Example #3
    def test_logging_clear(self):
        """ initial log level for ion.processes.event is INFO -- test that we can clear it
            (root level WARN should apply)
        """
        config.replace_configuration('pyon/container/test/logging.yml')
        log.debug('this should probably not be logged')

        self.assertTrue(log.isEnabledFor(logging.INFO), msg=repr(log.__dict__))
        #
        handler = pyon.container.management.LogLevelHandler()
        action = IonObject(OT.ChangeLogLevel, logger='pyon.container', level='NOTSET')
        handler.handle_request(action)
        #
        self.assertFalse(log.isEnabledFor(logging.INFO))
Example #4
    def _start_diagnostics_subscriber(self):  # pragma: no cover
        """
        For debugging/diagnostics purposes.
        Registers a subscriber to DeviceStatusEvent events with origin="command_line"
        and sub_type="diagnoser" to log the current statuses via log.info.
        This method does nothing if the logging level is not enabled for INFO
        for this module.

        From the pycc command line, the event can be sent as indicated in
        publish_event_for_diagnostics().

        """
        # TODO perhaps a more visible/official command for diagnostic purposes,
        # and for resource agents in general should be considered, something
        # like RESOURCE_AGENT_EVENT_REPORT_DIAGNOSTICS.

        if not log.isEnabledFor(logging.INFO):
            return

        event_type  = "DeviceStatusEvent"
        origin      = "command_line"
        sub_type    = "diagnoser"

        def got_event(evt, *args, **kwargs):
            if not self._active:
                log.warn("%r: got_event called but manager has been destroyed",
                         self._platform_id)
                return

            if evt.type_ != event_type:
                log.trace("%r: ignoring event type %r. Only handle %r directly",
                          self._platform_id, evt.type_, event_type)
                return

            if evt.sub_type != sub_type:
                log.trace("%r: ignoring event sub_type %r. Only handle %r",
                          self._platform_id, evt.sub_type, sub_type)
                return

            state = self._agent.get_agent_state()

            statuses = formatted_statuses(self.aparam_aggstatus,
                                          self.aparam_child_agg_status,
                                          self.aparam_rollup_status)

            invalidated_children = self._agent._get_invalidated_children()

            log.info("%r/%s: (%s) status report triggered by diagnostic event:\n"
                     "%s\n"
                     "%40s : %s\n",
                     self._platform_id, state, self.resource_id, statuses,
                     "invalidated_children", invalidated_children)

        self._diag_sub = self._agent._create_event_subscriber(event_type=event_type,
                                                              origin=origin,
                                                              sub_type=sub_type,
                                                              callback=got_event)
        log.info("%r: registered diagnostics event subscriber", self._platform_id)
Example #5
    def setUp(self):

        DVR_CONFIG['oms_uri'] = self._dispatch_simulator(oms_uri)
        log.debug("DVR_CONFIG['oms_uri'] = %s", DVR_CONFIG['oms_uri'])

        # Use the network definition provided by RSN OMS directly.
        rsn_oms = CIOMSClientFactory.create_instance(DVR_CONFIG['oms_uri'])
        network_definition = RsnOmsUtil.build_network_definition(rsn_oms)
        CIOMSClientFactory.destroy_instance(rsn_oms)

        if log.isEnabledFor(logging.DEBUG):
            network_definition_ser = NetworkUtil.serialize_network_definition(network_definition)
            log.debug("NetworkDefinition serialization:\n%s", network_definition_ser)

        platform_id = self.PLATFORM_ID
        pnode = network_definition.pnodes[platform_id]
        self._plat_driver = RSNPlatformDriver(pnode, self.evt_recv)
Example #6
    def _extract_id_and_type(self, id_or_obj):
        """
        figure out whether a subject/object is an IonObject or just an ID
        """
        if hasattr(id_or_obj, "_id"):
            log.debug("find_object for IonObject")
            the_id = id_or_obj._id
            the_type = type(id_or_obj).__name__
        else:
            the_id = id_or_obj
            the_type = "(Unspecified IonObject)"
            if log.isEnabledFor(logging.DEBUG):
                try:
                    the_obj = self.RR.read(the_id)
                    the_type = type(the_obj).__name__
                except Exception:
                    pass  # best effort only; fall back to the generic type label


        return the_id, the_type
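
A self-contained sketch of the same object-or-id idiom, with a plain class and a dict standing in for IonObject and the resource registry read (all names here are illustrative):

    class Thing(object):
        def __init__(self, _id):
            self._id = _id

    registry = {"abc123": Thing("abc123")}   # stand-in for self.RR.read

    def extract_id_and_type(id_or_obj):
        if hasattr(id_or_obj, "_id"):
            return id_or_obj._id, type(id_or_obj).__name__
        the_type = "(Unspecified)"
        obj = registry.get(id_or_obj)        # best-effort type lookup
        if obj is not None:
            the_type = type(obj).__name__
        return id_or_obj, the_type

    assert extract_id_and_type(Thing("abc123")) == ("abc123", "Thing")
    assert extract_id_and_type("abc123") == ("abc123", "Thing")
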
Example #7
        def set_attributes(pnode):
            platform_id = pnode.platform_id
            attr_infos = rsn_oms.attr.get_platform_attributes(platform_id)
            if not isinstance(attr_infos, dict):
                raise PlatformDriverException(
                    "%r: get_platform_attributes returned: %s" % (
                    platform_id, attr_infos))

            if log.isEnabledFor(logging.TRACE):
                log.trace("%r: attr_infos: %s", platform_id, attr_infos)

            if platform_id not in attr_infos:
                raise PlatformDriverException(
                    "%r: get_platform_attributes response does not "
                    "include entry for platform_id: %s" % (
                    platform_id, attr_infos))

            ret_infos = attr_infos[platform_id]
            for attrName, attr_defn in ret_infos.iteritems():
                attr = AttrNode(attrName, attr_defn)
                pnode.add_attribute(attr)
Example #8
    def generate_config(self):
        """
        create the generic parts of the configuration including resource_id, egg_uri, and org
        """
        if self.generated_config:
            log.warn(
                "Generating config again for the same Instance object (%s)",
                self.agent_instance_obj.name)

        self._check_associations()

        agent_config = self._generate_skeleton_config_block()

        device_obj = self._get_device()
        agent_obj = self._get_agent()

        log.debug("complement agent_config with resource_id")
        if 'agent' not in agent_config:
            agent_config['agent'] = {'resource_id': device_obj._id}
        elif 'resource_id' not in agent_config.get('agent'):
            agent_config['agent']['resource_id'] = device_obj._id

        log.debug("add egg URI if available")
        if agent_obj.driver_uri:
            agent_config['driver_config']['process_type'] = (
                DriverProcessType.EGG, )
            agent_config['driver_config']['dvr_egg'] = agent_obj.driver_uri
        else:
            agent_config['driver_config']['process_type'] = (
                DriverProcessType.PYTHON_MODULE, )

        if log.isEnabledFor(logging.INFO):
            tree = self._summarize_children(agent_config)
            log.info("Children of %s are %s", self.agent_instance_obj.name,
                     tree)

        self.generated_config = True

        return agent_config
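
The resource_id handling above boils down to filling nested dict keys only when they are absent; a minimal sketch under that assumption (the helper name is made up, the key names come from the example):

    def complement_agent_config(agent_config, resource_id):
        agent = agent_config.setdefault('agent', {})
        agent.setdefault('resource_id', resource_id)
        return agent_config

    cfg = {'driver_config': {}}
    complement_agent_config(cfg, 'device_0001')
    assert cfg['agent']['resource_id'] == 'device_0001'

    cfg2 = {'agent': {'resource_id': 'already_set'}}
    complement_agent_config(cfg2, 'device_0001')
    assert cfg2['agent']['resource_id'] == 'already_set'
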
Example #9
    def generate_config(self):
        """
        create the generic parts of the configuration including resource_id, egg_uri, and org
        """
        if self.generated_config:
            log.warn("Generating config again for the same Instance object (%s)", self.agent_instance_obj.name)

        self._check_associations()

        agent_config = self._generate_skeleton_config_block()

        device_obj = self._get_device()
        agent_obj  = self._get_agent()

        log.debug("complement agent_config with resource_id")
        if 'agent' not in agent_config:
            agent_config['agent'] = {'resource_id': device_obj._id}
        elif 'resource_id' not in agent_config.get('agent'):
            agent_config['agent']['resource_id'] = device_obj._id


        log.debug("add egg URI if available")
        if agent_obj.driver_uri:
            agent_config['driver_config']['process_type'] = (DriverProcessType.EGG,)
            agent_config['driver_config']['dvr_egg'] = agent_obj.driver_uri
        else:
            agent_config['driver_config']['process_type'] = (DriverProcessType.PYTHON_MODULE,)


        if log.isEnabledFor(logging.INFO):
            tree = self._summarize_children(agent_config)
            log.info("Children of %s are %s", self.agent_instance_obj.name, tree)

        self.generated_config = True

        return agent_config
Example #10
    def build_network_definition(rsn_oms):
        """
        Creates and returns a NetworkDefinition object reflecting the platform
        network definition reported by the RSN OMS Client object.
        The returned object will have as root the PlatformNode corresponding to the
        actual root of the whole network. You can use the `pnodes` property to
        access any node.

        @param rsn_oms RSN OMS Client object.
        @return NetworkDefinition object
        """
        if log.isEnabledFor(logging.DEBUG):
            log.debug("build_network_definition. rsn_oms class: %s",
                      rsn_oms.__class__.__name__)

        # platform types:
        platform_types = rsn_oms.config.get_platform_types()
        if log.isEnabledFor(logging.DEBUG):
            log.debug("got platform_types %s", str(platform_types))

        # platform map:
        map = rsn_oms.config.get_platform_map()
        if log.isEnabledFor(logging.DEBUG):
            log.debug("got platform map %s", str(map))

        # build topology:
        pnodes = NetworkUtil.create_node_network(map)
        dummy_root = pnodes['']
        root_pnode = pnodes[dummy_root.subplatforms.keys()[0]]
        if log.isEnabledFor(logging.DEBUG):
            log.debug("topology's root platform_id=%r", root_pnode.platform_id)

        # now, populate the attributes and ports for the platforms

        def build_attributes_and_ports(pnode):
            """
            Recursive routine to call set_attributes and set_ports on each pnode.
            """
            set_attributes(pnode)
            set_ports(pnode)

            for sub_platform_id, sub_pnode in pnode.subplatforms.iteritems():
                build_attributes_and_ports(sub_pnode)

        def set_attributes(pnode):
            platform_id = pnode.platform_id
            attr_infos = rsn_oms.attr.get_platform_attributes(platform_id)
            if not isinstance(attr_infos, dict):
                raise PlatformDriverException(
                    "%r: get_platform_attributes returned: %s" % (
                    platform_id, attr_infos))

            if log.isEnabledFor(logging.TRACE):
                log.trace("%r: attr_infos: %s", platform_id, attr_infos)

            if platform_id not in attr_infos:
                raise PlatformDriverException(
                    "%r: get_platform_attributes response does not "
                    "include entry for platform_id: %s" % (
                    platform_id, attr_infos))

            ret_infos = attr_infos[platform_id]
            for attrName, attr_defn in ret_infos.iteritems():
                attr = AttrNode(attrName, attr_defn)
                pnode.add_attribute(attr)

        def set_ports(pnode):
            platform_id = pnode.platform_id
            port_infos = rsn_oms.port.get_platform_ports(platform_id)
            if not isinstance(port_infos, dict):
                raise PlatformDriverException(
                    "%r: get_platform_ports response is not a dict: %s" % (
                    platform_id, port_infos))

            if log.isEnabledFor(logging.TRACE):
                log.trace("%r: port_infos: %s", platform_id, port_infos)

            if platform_id not in port_infos:
                raise PlatformDriverException(
                    "%r: get_platform_ports response does not include "
                    "platform_id: %s" % (platform_id, port_infos))

            ports = port_infos[platform_id]

            if not isinstance(ports, dict):
                raise PlatformDriverException(
                    "%r: get_platform_ports: entry for platform_id is "
                    "not a dict: %s" % (platform_id, ports))

            for port_id, dic in ports.iteritems():
                port = PortNode(port_id)
                port.set_state(dic['state'])
                pnode.add_port(port)

                # add connected instruments:
                instrs_res = rsn_oms.instr.get_connected_instruments(platform_id, port_id)
                if not isinstance(instrs_res, dict):
                    log.warn("%r: port_id=%r: get_connected_instruments "
                             "response is not a dict: %s" % (platform_id, port_id, instrs_res))
                    continue

                if log.isEnabledFor(logging.TRACE):
                    log.trace("%r: port_id=%r: get_connected_instruments "
                              "returned: %s" % (platform_id, port_id, instrs_res))

                if platform_id not in instrs_res:
                    raise PlatformDriverException(
                        "%r: port_id=%r: get_connected_instruments response "
                        "does not have entry for platform_id: %s" % (
                        platform_id, port_id, instrs_res))

                if port_id not in instrs_res[platform_id]:
                    raise PlatformDriverException(
                        "%r: port_id=%r: get_connected_instruments response "
                        "for platform_id does not have entry for port_id: %s" % (
                        platform_id, port_id, instrs_res[platform_id]))

                instr = instrs_res[platform_id][port_id]
                for instrument_id, attrs in instr.iteritems():
                    port.add_instrument(InstrumentNode(instrument_id, attrs))

        # call the recursive routine
        build_attributes_and_ports(root_pnode)

        # we got our whole network including platform attributes and ports.

        # and finally create and return NetworkDefinition:
        ndef = NetworkDefinition()
        ndef._platform_types = platform_types
        ndef._pnodes = pnodes
        ndef._dummy_root = dummy_root
        return ndef
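
A reduced sketch of the recursive build pattern used by build_attributes_and_ports, with a stand-in node class in place of the real PlatformNode (the class, helper and platform ids below are illustrative):

    class Node(object):
        def __init__(self, platform_id):
            self.platform_id = platform_id
            self.subplatforms = {}
            self.attrs = []

    def build(node):
        node.attrs.append("attrs for %s" % node.platform_id)  # set_attributes stand-in
        for sub in node.subplatforms.values():                # then recurse into children
            build(sub)

    root = Node("root_platform")
    root.subplatforms["child_platform"] = Node("child_platform")
    build(root)
    assert root.subplatforms["child_platform"].attrs
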
Example #11
    def _start_diagnostics_subscriber(self):  # pragma: no cover
        """
        For debugging/diagnostics purposes.
        Registers a subscriber to DeviceStatusEvent events with origin="command_line"
        and sub_type="diagnoser" to log the current statuses via log.info.
        This method does nothing if the logging level is not enabled for INFO
        for this module.

        From the pycc command line, the event can be sent as indicated in
        publish_event_for_diagnostics().

        """
        # TODO perhaps a more visible/official command for diagnostic purposes,
        # and for resource agents in general should be considered, something
        # like RESOURCE_AGENT_EVENT_REPORT_DIAGNOSTICS.

        if not log.isEnabledFor(logging.INFO):
            return

        event_type = "DeviceStatusEvent"
        origin = "command_line"
        sub_type = "diagnoser"

        def got_event(evt, *args, **kwargs):
            if not self._active:
                log.warn("%r: got_event called but manager has been destroyed",
                         self._platform_id)
                return

            if evt.type_ != event_type:
                log.trace(
                    "%r: ignoring event type %r. Only handle %r directly",
                    self._platform_id, evt.type_, event_type)
                return

            if evt.sub_type != sub_type:
                log.trace("%r: ignoring event sub_type %r. Only handle %r",
                          self._platform_id, evt.sub_type, sub_type)
                return

            state = self._agent.get_agent_state()

            statuses = formatted_statuses(self.aparam_aggstatus,
                                          self.aparam_child_agg_status,
                                          self.aparam_rollup_status)

            invalidated_children = self._agent._get_invalidated_children()

            log.info(
                "%r/%s: (%s) status report triggered by diagnostic event:\n"
                "%s\n"
                "%40s : %s\n", self._platform_id, state, self.resource_id,
                statuses, "invalidated_children", invalidated_children)

        self._diag_sub = self._agent._create_event_subscriber(
            event_type=event_type,
            origin=origin,
            sub_type=sub_type,
            callback=got_event)
        log.info("%r: registered diagnostics event subscriber",
                 self._platform_id)
Example #12
    def _got_device_aggregate_status_event(self, evt, *args, **kwargs):
        """
        Reacts to a DeviceAggregateStatusEvent from a platform's child.

        - notifies platform that child is running in case of any needed revalidation
        - updates the local image of the child status for the corresponding status name
        - updates the rollup status for that status name
        - if this rollup status changes, then a subsequent DeviceAggregateStatusEvent
          is published.

        The consolidation operation is taken from observatory_util.py.

        @param evt    DeviceAggregateStatusEvent from child.
        """

        with self._lock:
            if not self._active:
                log.warn(
                    "%r: _got_device_aggregate_status_event called but "
                    "manager has been destroyed", self._platform_id)
                return

        log.debug("%r: _got_device_aggregate_status_event: %s",
                  self._platform_id, evt)

        if evt.type_ != "DeviceAggregateStatusEvent":
            # should not happen.
            msg = "%r: Got event for different event_type=%r but subscribed to %r" % (
                self._platform_id, evt.type_, "DeviceAggregateStatusEvent")
            log.error(msg)
            raise PlatformException(msg)

        if evt.origin not in self.aparam_child_agg_status:
            # should not happen.
            msg = "%r: got event from unrecognized origin=%s" % (
                self._platform_id, evt.origin)
            log.error(msg)
            raise PlatformException(msg)

        status_name = evt.status_name
        child_origin = evt.origin
        child_status = evt.status

        # tell platform this child is running in case of any needed revalidation:
        self._agent._child_running(child_origin)

        with self._lock:
            old_status = self.aparam_child_agg_status[child_origin][
                status_name]
            if child_status == old_status:
                #
                # My image of the child status is not changing, so nothing to do:
                #
                return

            # update the specific status
            self.aparam_child_agg_status[child_origin][
                status_name] = child_status

            # TODO any need to pass child's alerts_list in the next call? See OOIION-1275
            new_rollup_status = self._update_rollup_status_and_publish(
                status_name, child_origin)

        if new_rollup_status and log.isEnabledFor(
                logging.TRACE):  # pragma: no cover
            self._log_agg_status_update(log.trace, evt, new_rollup_status)
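
A toy version of the update-then-rollup step above; the real consolidation rule lives in observatory_util.py, so the "worst status wins" rule below is only a stand-in assumption:

    STATUS_OK, STATUS_WARNING, STATUS_CRITICAL = 1, 2, 3

    child_agg_status = {"child_a": STATUS_OK, "child_b": STATUS_OK}
    rollup_status = {"current": STATUS_OK}

    def on_child_status(child_origin, child_status):
        if child_agg_status[child_origin] == child_status:
            return None                       # local image unchanged, nothing to do
        child_agg_status[child_origin] = child_status
        new_rollup = max(child_agg_status.values())
        if new_rollup != rollup_status["current"]:
            rollup_status["current"] = new_rollup
            return new_rollup                 # caller would publish an event here
        return None

    assert on_child_status("child_b", STATUS_CRITICAL) == STATUS_CRITICAL
    assert on_child_status("child_b", STATUS_CRITICAL) is None   # no change, no publish
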
Example #13
        def freeze():

            # get the full association list
            master_assn_list = reduce(collect, predicate_list, [])

            if log.isEnabledFor(logging.TRACE):
                summary = {}
                for a in master_assn_list:
                    label = "%s %s %s" % (a.st, a.p, a.ot)
                    if label not in summary:
                        summary[label] = 0
                    summary[label] += 1

                log.trace("master assn list is %s", ["%s x%d" % (k, v) for k, v in summary.iteritems()])

            def get_related_resources_partial_fn(predicate_dictionary, resource_whitelist):
                """
                This function generates a resource crawler from 2 data structures representing desired crawl behavior

                The predicate dictionary is keyed on a predicate type, whose value is a 2-tuple of booleans
                  the first boolean is whether to crawl subject-object, the second boolean for object-subject
                  For example: dict([(PRED.hasModel, (False, True))]) would generate a crawler that could find
                               platforms or instruments with a given model

                The resource whitelist is a list of resource types that will be crawled.

                The return value of this function is a function that accepts a resource id and returns a list
                  of associations related (based on crawl behavior)
                """
                log.trace("get_related_resources_partial_fn predicate_dict=%s rsrc_whitelist=%s",
                          predicate_dictionary,
                          resource_whitelist)

                # assertions on data types
                assert isinstance(predicate_dictionary, dict)
                for v in predicate_dictionary.values():
                    assert isinstance(v, tuple)
                assert isinstance(resource_whitelist, list)


                def lookup_fn(resource_id):
                    """
                    return a dict of related resources as dictated by the pred dict and whitelist
                     - the key is the next resource id to crawl
                     - the value is the entire association
                    """
                    retval = {}

                    for a in master_assn_list:
                        search_sto, search_ots = predicate_dictionary[a.p]

                        if search_sto and a.s == resource_id and a.ot in resource_whitelist:
                            log.trace("lookup_fn matched %s object", a.ot)
                            retval[a.o] = a
                        elif search_ots and a.o == resource_id and a.st in resource_whitelist:
                            log.trace("lookup_fn matched %s subject", a.st)
                            retval[a.s] = a

                    return retval


                def get_related_resources_h(accum, input_resource_id, recursion_limit):
                    """
                    This is a recursive helper function that does the work of crawling for related resources

                    The accum is a tuple: (set of associations that are deemed "Related", set of "seen" resources)

                    The input resource id is the current resource being crawled

                    The recursion limit decrements with each recursive call, ending at 0.  So -1 for infinity.

                    The return value is a list of associations
                    """
                    if 0 == recursion_limit:
                        return accum

                    if recursion_limit < -1000:
                        log.warn("Terminating related resource recursion, hit -1000")
                        return accum

                    acc, seen = accum

                    matches = lookup_fn(input_resource_id)
                    log.trace("get_related_resources_h got matches %s",
                              [dict((k, "%s %s %s" % (a.st, a.p, a.ot)) for k, a in matches.iteritems())])

                    unseen = set(matches.keys()) - seen
                    seen.add(input_resource_id)
                    acc  = acc  | set(matches.values())

                    #if log.isEnabledFor(logging.TRACE):
                    #    summary = {}
                    #    for a in acc:
                    #        label = "%s %s %s" % (a.st, a.p, a.ot)
                    #        if not label in summary: summary[label] = 0
                    #        summary[label] += 1
                    #    log.trace("acc2 is now %s", ["%s x%d" % (k, v) for k, v in summary.iteritems()])

                    def looper(acc2, input_rsrc_id):
                        return get_related_resources_h(acc2, input_rsrc_id, recursion_limit - 1)

                    h_ret = reduce(looper, unseen, (acc, seen))
                    #h_ret = reduce(looper, unseen, (acc, seen))
                    #(h_ret_acc, h_ret_seen) = h_ret
                    #log.trace("h_ret is %s", ["%s %s %s" % (a.st, a.p, a.ot) for a in h_ret_acc])
                    return h_ret


                def get_related_resources_fn(input_resource_id, recursion_limit=1024):
                    """
                    This is the function that finds related resources.

                    input_resource_id and recursion_limit are self explanatory

                    The return value is a list of associations.
                    """
                    retval, _ = get_related_resources_h((set([]), set([])), input_resource_id, recursion_limit)
                    log.trace("final_ret is %s", ["%s %s %s" % (a.st, a.p, a.ot) for a in retval])
                    return list(retval)

                return get_related_resources_fn # retval of get_related_resources_partial_fn

            return get_related_resources_partial_fn # retval of freeze()
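
A self-contained miniature of the association lookup driven by the predicate dictionary and resource whitelist documented above; the fields mirror st/s/p/ot/o, while the predicate and resource-type names are invented for illustration:

    from collections import namedtuple

    Assoc = namedtuple("Assoc", "st s p ot o")

    assns = [
        Assoc("PlatformDevice", "plat1", "hasModel", "PlatformModel", "model1"),
        Assoc("InstrumentDevice", "inst1", "hasModel", "InstrumentModel", "model2"),
    ]
    predicate_dictionary = {"hasModel": (False, True)}   # crawl object -> subject only
    resource_whitelist = ["PlatformDevice", "InstrumentDevice"]

    def lookup(resource_id):
        found = {}
        for a in assns:
            search_sto, search_ots = predicate_dictionary[a.p]
            if search_sto and a.s == resource_id and a.ot in resource_whitelist:
                found[a.o] = a
            elif search_ots and a.o == resource_id and a.st in resource_whitelist:
                found[a.s] = a
        return found

    # starting from a model id, the crawl finds the device that carries it
    assert list(lookup("model1")) == ["plat1"]
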
Example #14
    def on_quit(self):
        if log.isEnabledFor(logging.DEBUG):
            import traceback
            log.debug('on_quit called from:\n' +
                      '\n'.join(['%s:%d %s' % (f, l, c) for f, l, m, c in traceback.extract_stack()]))
        if self.keep_running:
            self.stop_agent()
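
An equivalent, self-contained sketch of the DEBUG-guarded stack dump using only the standard library; traceback.format_stack already returns formatted entries, so the manual join over extract_stack is optional:

    import logging
    import traceback

    log = logging.getLogger(__name__)

    def log_quit_origin():
        # Build the (comparatively expensive) stack dump only if it will be emitted.
        if log.isEnabledFor(logging.DEBUG):
            log.debug('on_quit called from:\n%s', ''.join(traceback.format_stack()))
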
Example #15
    def _got_device_aggregate_status_event(self, evt, *args, **kwargs):
        """
        Reacts to a DeviceAggregateStatusEvent from a platform's child.

        - notifies platform that child is running in case of any needed revalidation
        - updates the local image of the child status for the corresponding status name
        - updates the rollup status for that status name
        - if this rollup status changes, then a subsequent DeviceAggregateStatusEvent
          is published.

        The consolidation operation is taken from observatory_util.py.

        @param evt    DeviceAggregateStatusEvent from child.
        """

        with self._lock:
            if not self._active:
                log.warn("%r: _got_device_aggregate_status_event called but "
                         "manager has been destroyed",
                         self._platform_id)
                return

        log.debug("%r: _got_device_aggregate_status_event: %s",
                  self._platform_id, evt)

        if evt.type_ != "DeviceAggregateStatusEvent":
            # should not happen.
            msg = "%r: Got event for different event_type=%r but subscribed to %r" % (
                self._platform_id, evt.type_, "DeviceAggregateStatusEvent")
            log.error(msg)
            raise PlatformException(msg)

        if evt.origin not in self.aparam_child_agg_status:
            # should not happen.
            msg = "%r: got event from unrecognized origin=%s" % (
                  self._platform_id, evt.origin)
            log.error(msg)
            raise PlatformException(msg)

        status_name = evt.status_name
        child_origin = evt.origin
        child_status = evt.status

        # tell platform this child is running in case of any needed revalidation:
        self._agent._child_running(child_origin)

        with self._lock:
            old_status = self.aparam_child_agg_status[child_origin][status_name]
            if child_status == old_status:
                #
                # My image of the child status is not changing, so nothing to do:
                #
                return

            # update the specific status
            self.aparam_child_agg_status[child_origin][status_name] = child_status

            # TODO any need to pass child's alerts_list in the next call? See OOIION-1275
            new_rollup_status = self._update_rollup_status_and_publish(status_name, child_origin)

        if new_rollup_status and log.isEnabledFor(logging.TRACE):  # pragma: no cover
            self._log_agg_status_update(log.trace, evt, new_rollup_status)
Example #16
def describe_deployments(deployments, context, instruments=None, instrument_status=None):
    """

    @param deployments  list of Deployment resource objects
    @param context  object to get the resource_registry from (e.g. container)
    @param instruments  list of InstrumentDevice resource objects
    @param instrument_status  coindexed list of status for InstrumentDevice to be added to respective Deployment
    @retval list with Deployment info dicts coindexed with argument deployments list
    """
    instruments = instruments or []
    instrument_status = instrument_status or []
    if not deployments:
        return []
    rr = context.resource_registry
    deployment_ids = [d._id for d in deployments]
    descriptions = {}
    for d in deployments:
        descriptions[d._id] = {'is_primary': False}
        # add start, end time
        time_constraint = None
        for constraint in d.constraint_list:
            if constraint.type_ == OT.TemporalBounds:
                if time_constraint:
                    log.warn('deployment %s has more than one time constraint (using first)', d.name)
                else:
                    time_constraint = constraint
        if time_constraint:
            descriptions[d._id]['start_time'] = time.strftime(TIME_FORMAT, time.gmtime(
                float(time_constraint.start_datetime))) if time_constraint.start_datetime else ""
            descriptions[d._id]['end_time'] = time.strftime(TIME_FORMAT, time.gmtime(
                float(time_constraint.end_datetime))) if time_constraint.end_datetime else ""
        else:
            descriptions[d._id]['start_time'] = descriptions[d._id]['end_time'] = ""

    # first get the all site and instrument objects
    site_ids = []
    objects, associations = rr.find_subjects_mult(objects=deployment_ids, id_only=False)
    if log.isEnabledFor(TRACE):
        log.trace('have %d deployment-associated objects, %d are hasDeployment', len(associations),
                  sum([1 if assoc.p==PRED.hasDeployment else 0 for assoc in associations]))
    for obj, assoc in zip(objects, associations):
        # if this is a hasDeployment association...
        if assoc.p == PRED.hasDeployment:
            description = descriptions[assoc.o]

            # always save the id in one known field (used by UI)
            description['resource_id'] = assoc.o

            # save site or device info in the description
            type = obj.type_
            if type in (RT.InstrumentSite, RT.PlatformSite):
                description['site_id'] = obj._id
                description['site_name'] = obj.name
                description['site_type'] = type
                if obj._id not in site_ids:
                    site_ids.append(obj._id)
            elif type in (RT.InstrumentDevice, RT.PlatformDevice):
                description['device_id'] = obj._id
                description['device_name'] = obj.name
                description['device_type'] = type
                for instrument, status in zip(instruments, instrument_status):
                    if obj._id == instrument._id:
                        description['device_status'] = status
            else:
                log.warn('unexpected association: %s %s %s %s %s', assoc.st, assoc.s, assoc.p, assoc.ot, assoc.o)

    # Make the code below more robust by ensuring that all description entries are present, even
    # if Deployment is missing some associations (OOIION-1183)
    for d in descriptions.values():
        if "site_id" not in d:
            d['site_id'] = d['site_name'] = d['site_type'] = None
        if "device_id" not in d:
            d['device_id'] = d['device_name'] = d['device_type'] = None

    # now look for hasDevice associations to determine which deployments are "primary" or "active"
    objects2, associations = rr.find_objects_mult(subjects=site_ids)
    if log.isEnabledFor(TRACE):
        log.trace('have %d site-associated objects, %d are hasDeployment', len(associations), sum([1 if assoc.p==PRED.hasDeployment else 0 for assoc in associations]))
    for obj, assoc in zip(objects2, associations):
        if assoc.p == PRED.hasDevice:
            found_match = False
            for description in descriptions.itervalues():
                if description.get('site_id', None) == assoc.s and description.get('device_id', None) == assoc.o:
                    if found_match:
                        log.warn('more than one primary deployment for site %s (%s) and device %s (%s)',
                                 assoc.s, description['site_name'], assoc.o, description['device_name'])
                    description['is_primary'] = found_match = True

    # finally get parents of sites using hasSite
    objects3, associations = rr.find_subjects_mult(objects=site_ids)
    if log.isEnabledFor(TRACE):
        log.trace('have %d site-associated objects, %d are hasDeployment', len(associations), sum([1 if assoc.p==PRED.hasDeployment else 0 for assoc in associations]))
    for obj, assoc in zip(objects3, associations):
        if assoc.p == PRED.hasSite:
            found_match = False
            for description in descriptions.itervalues():
                if description.get('site_id', None) == assoc.o:
                    if found_match:
                        log.warn('more than one parent for site %s (%s)', assoc.o, description['site_name'])
                    description['parent_site_id'] = obj._id
                    description['parent_site_name'] = obj.name
                    description['parent_site_description'] = obj.description

    # convert to array
    descriptions_list = [descriptions[d._id] for d in deployments]

    if log.isEnabledFor(DEBUG):
        log.debug('%d deployments, %d associated sites/devices, %d activations, %d missing status',
                  len(deployments), len(objects), len(objects2),
                  sum([0 if 'device_status' in d else 1 for d in descriptions_list]))

    return descriptions_list
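
A small sketch of the TemporalBounds time formatting used above; TIME_FORMAT is an assumption here, the real constant is imported elsewhere in the module:

    import time

    TIME_FORMAT = '%Y-%m-%dT%H:%M:%S'   # assumed format string

    def format_bounds(start_datetime, end_datetime):
        def _fmt(ts):
            return time.strftime(TIME_FORMAT, time.gmtime(float(ts))) if ts else ""
        return _fmt(start_datetime), _fmt(end_datetime)

    start, end = format_bounds("1388534400", "")
    assert start == "2014-01-01T00:00:00" and end == ""
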
Example #17
def describe_deployments(deployments, context, instruments=[], instrument_status=[]):
    if not deployments:
        return {}
    rr=context.resource_registry
    deployment_ids = [ d._id for d in deployments ]
    descriptions = {}
    for d in deployments:
        descriptions[d._id] = { 'is_primary': False }
        # add start, end time
        time_constraint = None
        for constraint in d.constraint_list:
            if constraint.type_ == OT.TemporalBounds:
                if time_constraint:
                    log.warn('deployment %s has more than one time constraint (using first)', d.name)
                else:
                    time_constraint = constraint
        if time_constraint:
            descriptions[d._id]['start_time'] = time.strftime(TIME_FORMAT, time.gmtime(float(time_constraint.start_datetime))) if time_constraint.start_datetime else ""
            descriptions[d._id]['end_time'] = time.strftime(TIME_FORMAT, time.gmtime(float(time_constraint.end_datetime))) if time_constraint.end_datetime else ""
        else:
            descriptions[d._id]['start_time'] = descriptions[d._id]['end_time'] = ""

    # first get the all site and instrument objects
    site_ids = []
    objects,associations = rr.find_subjects_mult(objects=deployment_ids, id_only=False)
    if log.isEnabledFor(TRACE):
        log.trace('have %d deployment-associated objects, %d are hasDeployment', len(associations), sum([1 if assoc.p==PRED.hasDeployment else 0 for assoc in associations]))
    for obj,assoc in zip(objects,associations):
        # if this is a hasDeployment association...
        if assoc.p == PRED.hasDeployment:
            description = descriptions[assoc.o]

            # save site or device info in the description
            type = obj.type_
            if type==RT.InstrumentSite or type==RT.PlatformSite:
                description['site_id'] = obj._id
                description['site_name'] = obj.name
                description['site_type'] = type
                if obj._id not in site_ids:
                    site_ids.append(obj._id)
            elif type==RT.InstrumentDevice or type==RT.PlatformDevice:
                description['device_id'] = obj._id
                description['device_name'] = obj.name
                description['device_type'] = type
                for instrument, status in zip(instruments, instrument_status):
                    if obj._id==instrument._id:
                        description['device_status'] = status
            else:
                log.warn('unexpected association: %s %s %s %s %s', assoc.st, assoc.s, assoc.p, assoc.ot, assoc.o)

    # now look for hasDevice associations to determine which deployments are "primary" or "active"
    objects2,associations = rr.find_objects_mult(subjects=site_ids)
    if log.isEnabledFor(TRACE):
        log.trace('have %d site-associated objects, %d are hasDeployment', len(associations), sum([1 if assoc.p==PRED.hasDeployment else 0 for assoc in associations]))
    for obj,assoc in zip(objects2,associations):
        if assoc.p==PRED.hasDevice:
            found_match = False
            for description in descriptions.itervalues():
                if description.get('site_id', None)==assoc.s and description.get('device_id', None)==assoc.o:
                    if found_match:
                        log.warn('more than one primary deployment for site %s (%s) and device %s (%s)',
                                 assoc.s, description['site_name'], assoc.o, description['device_name'])
                    description['is_primary']=found_match=True

    # finally get parents of sites using hasSite
    objects3,associations = rr.find_subjects_mult(objects=site_ids)
    if log.isEnabledFor(TRACE):
        log.trace('have %d site-associated objects, %d are hasDeployment', len(associations), sum([1 if assoc.p==PRED.hasDeployment else 0 for assoc in associations]))
    for obj,assoc in zip(objects3,associations):
        if assoc.p==PRED.hasSite:
            found_match = False
            for description in descriptions.itervalues():
                if description['site_id']==assoc.o:
                    if found_match:
                        log.warn('more than one parent for site %s (%s)', assoc.o, description['site_name'])
                    description['parent_site_id']=obj._id
                    description['parent_site_name']=obj.name
                    description['parent_site_description']=obj.description

    # convert to array
    descriptions = [ descriptions[d._id] for d in deployments ]

    if log.isEnabledFor(DEBUG):
        log.debug('%d deployments, %d associated sites/devices, %d activations, %d missing status', len(deployments), len(objects), len(objects2),
            sum([0 if 'device_status' in d else 1 for d in descriptions]))

    return descriptions