def _register_id(self, alias, resid, res_obj=None, is_update=False):
    """Keep preload resource in internal dict for later reference"""
    if not is_update and alias in self.resource_ids:
        raise BadRequest("ID alias %s used twice" % alias)
    self.resource_ids[alias] = resid
    self.resource_objs[alias] = res_obj
    log.trace("Added resource alias=%s to id=%s", alias, resid)

def _update_dsa_config(self, dsa_instance):
    """
    Update the dsa configuration prior to loading the agent. This is where
    we can alter production configurations for use in a controlled test
    environment.
    """
    dsa_obj = self.rr.read_object(object_type=RT.ExternalDatasetAgent,
                                  predicate=PRED.hasAgentDefinition,
                                  subject=dsa_instance._id, id_only=False)

    log.info("dsa agent definition found: %s", dsa_obj)

    # If we don't want to load from an egg then we need to
    # alter the driver config read from preload
    if self.test_config.mi_repo is not None:
        dsa_obj.driver_uri = None
        # Strip the custom namespace
        dsa_obj.driver_module = ".".join(dsa_obj.driver_module.split('.')[1:])

        log.info("saving new dsa agent config: %s", dsa_obj)
        self.rr.update(dsa_obj)

        if self.test_config.mi_repo not in sys.path:
            sys.path.insert(0, self.test_config.mi_repo)

        log.debug("Driver module: %s", dsa_obj.driver_module)
        log.debug("MI Repo: %s", self.test_config.mi_repo)
        log.trace("Sys Path: %s", sys.path)

def recv_packet(self, msg, stream_route, stream_id):
    '''
    The consumer callback to parse and manage the granule.
    The message is ACK'd once the function returns.
    '''
    log.trace('received granule for stream %s', stream_id)

    if msg == {}:
        log.error('Received empty message from stream: %s', stream_id)
        return
    # Message validation
    if not isinstance(msg, Granule):
        log.error('Ingestion received a message that is not a granule: %s', msg)
        return

    rdt = RecordDictionaryTool.load_from_granule(msg)
    if rdt is None:
        log.error('Invalid granule (no RDT) for stream %s', stream_id)
        return
    if not len(rdt):
        log.debug('Empty granule for stream %s', stream_id)
        return

    self.persist_or_timeout(stream_id, rdt)

def set_ports(pnode):
    platform_id = pnode.platform_id
    port_infos = rsn_oms.get_platform_ports(platform_id)
    if not isinstance(port_infos, dict):
        log.warn("%r: get_platform_ports returned: %s", platform_id, port_infos)
        return

    if log.isEnabledFor(logging.TRACE):
        log.trace("%r: port_infos: %s", platform_id, port_infos)

    assert platform_id in port_infos
    ports = port_infos[platform_id]
    for port_id, dic in ports.iteritems():
        port = PortNode(port_id, dic['network'])
        pnode.add_port(port)

        # add connected instruments:
        instrs_res = rsn_oms.get_connected_instruments(platform_id, port_id)
        if not isinstance(instrs_res, dict):
            log.warn("%r: port_id=%r: get_connected_instruments "
                     "returned: %s" % (platform_id, port_id, instrs_res))
            continue

        if log.isEnabledFor(logging.TRACE):
            log.trace("%r: port_id=%r: get_connected_instruments "
                      "returned: %s" % (platform_id, port_id, instrs_res))

        assert platform_id in instrs_res
        assert port_id in instrs_res[platform_id]
        instr = instrs_res[platform_id][port_id]

        for instrument_id, attrs in instr.iteritems():
            port.add_instrument(InstrumentNode(instrument_id, attrs))

def _get_dsa_instance(self):
    """
    Find the dsa instance in preload and return an instance of that object
    """
    name = self.test_config.instrument_device_name
    log.debug("Start dataset agent process for instrument device: %s", name)

    objects, _ = self.rr.find_resources(RT.InstrumentDevice, name=name)
    log.debug("Found Instrument Devices: %s", objects)
    if not objects:
        raise ConfigNotFound("No appropriate InstrumentDevice objects loaded")
    instrument_device = objects[0]
    log.trace("Found instrument device: %s", instrument_device)

    dsa_instance = self.rr.read_object(
        subject=instrument_device._id,
        predicate=PRED.hasAgentInstance,
        object_type=RT.ExternalDatasetAgentInstance)
    log.info("dsa_instance found: %s", dsa_instance)

    return instrument_device, dsa_instance

def __application(self, environ, start_response):
    input = environ['wsgi.input']
    body = "\n".join(input.readlines())
    # log.trace('notification received payload=%s', body)
    event_instance = yaml.load(body)
    log.trace('notification received event_instance=%s', event_instance)
    if 'url' not in event_instance:
        # NOTE: returning before start_response is called violates WSGI
        log.warn("expecting 'url' entry in notification call")
        return
    if 'ref_id' not in event_instance:
        log.warn("expecting 'ref_id' entry in notification call")
        return

    url = event_instance['url']
    event_type = event_instance['ref_id']

    if self._url == url:
        self._event_received(event_type, event_instance)
    else:
        log.warn("got notification call with an unexpected url=%s "
                 "(expected url=%s)", url, self._url)

    # generic OK response  TODO determine appropriate variations if any
    status = '200 OK'
    headers = [('Content-Type', 'text/plain')]
    start_response(status, headers)
    return event_type

def test_build_network_definition(self):
    ndef = RsnOmsUtil.build_network_definition(self._rsn_oms)

    if log.isEnabledFor(logging.TRACE):
        # serialize object to string
        serialization = NetworkUtil.serialize_network_definition(ndef)
        log.trace("NetworkDefinition serialization:\n%s", serialization)

    if not isinstance(self._rsn_oms, CIOMSSimulator):
        # OK, no more tests if we are not using the embedded simulator
        return

    # Else: do some verifications against network.yml (the spec used by
    # the simulator):

    self.assertTrue("UPS" in ndef.platform_types)

    pnode = ndef.root

    self.assertEqual(pnode.platform_id, "ShoreStation")
    self.assertTrue("ShoreStation_attr_1" in pnode.attrs)
    self.assertTrue("ShoreStation_port_1" in pnode.ports)

    sub_pnodes = pnode.subplatforms
    self.assertTrue("L3-UPS1" in sub_pnodes)
    self.assertTrue("Node1A" in sub_pnodes)
    self.assertTrue("input_voltage" in sub_pnodes["Node1A"].attrs)
    self.assertTrue("Node1A_port_1" in sub_pnodes["Node1A"].ports)

def _handler_connected_turn_off_port(self, *args, **kwargs):
    """
    """
    if log.isEnabledFor(logging.TRACE):  # pragma: no cover
        log.trace("%r/%s args=%s kwargs=%s" % (
                  self._platform_id, self.get_driver_state(),
                  str(args), str(kwargs)))

    port_id = kwargs.get('port_id', None)
    instrument_id = kwargs.get('instrument_id', None)
    if port_id is None and instrument_id is None:
        raise FSMError('turn_off_port: at least one of port_id and '
                       'instrument_id argument must be given')

    try:
        result = self.turn_off_port(port_id=port_id,
                                    instrument_id=instrument_id)
        return None, result
    except PlatformConnectionException as e:
        return self._connection_lost(RSNPlatformDriverEvent.TURN_OFF_PORT,
                                     args, kwargs, e)

def _construct_stream_and_publisher(self, stream_name, stream_config):
    if log.isEnabledFor(logging.TRACE):  # pragma: no cover
        log.trace("%r: _construct_stream_and_publisher: "
                  "stream_name:%r, stream_config:\n%s",
                  self._platform_id, stream_name,
                  self._pp.pformat(stream_config))

    decoder = IonObjectDeserializer(obj_registry=get_obj_registry())

    if 'stream_def_dict' not in stream_config:
        # should not happen: PlatformAgent._validate_configuration validates this.
        log.error("'stream_def_dict' key not in configuration for stream %r" % stream_name)
        return

    stream_def_dict = stream_config['stream_def_dict']
    stream_def_dict['type_'] = 'StreamDefinition'
    stream_def_obj = decoder.deserialize(stream_def_dict)
    self._stream_defs[stream_name] = stream_def_obj

    routing_key          = stream_config['routing_key']
    stream_id            = stream_config['stream_id']
    exchange_point       = stream_config['exchange_point']
    parameter_dictionary = stream_def_dict['parameter_dictionary']
    log.debug("%r: got parameter_dictionary from stream_def_dict", self._platform_id)

    self._data_streams[stream_name] = stream_id
    self._param_dicts[stream_name] = ParameterDictionary.load(parameter_dictionary)
    stream_route = StreamRoute(exchange_point=exchange_point, routing_key=routing_key)
    publisher = self._create_publisher(stream_id, stream_route)
    self._data_publishers[stream_name] = publisher

    log.debug("%r: created publisher for stream_name=%r", self._platform_id, stream_name)

def _parse_message(self, recv_message):
    log.trace("_parse_message: recv_message=%r", recv_message)

    dst, src, msg_type, msg = basic_message_verification(recv_message)

    #
    # destination must be us, CIPOP:
    #
    if dst != CIPOP:
        raise MalformedMessage(
            "unexpected destination in received message: "
            "%d (we are %d)" % (dst, CIPOP))

    #
    # verify message type:
    # TODO how does this exactly work?
    #
    if msg_type != CICGINT:
        raise MalformedMessage(
            "unexpected msg_type in received message: "
            "%d (should be %d)" % (msg_type, CICGINT))

    #
    # TODO verification of source.
    # ...

    return src, msg_type, msg

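# A minimal, self-contained aside (hypothetical names) on why the msg_type
# check above needs an explicit `raise`: merely constructing an exception
# object is a silent no-op in Python, so the malformed message would have
# been returned as if it were valid.
class _Malformed(Exception):
    pass

def _check_msg_type(msg_type, expected):
    if msg_type != expected:
        _Malformed("constructed but never raised")  # no-op: instance discarded
    if msg_type != expected:
        raise _Malformed("%d (should be %d)" % (msg_type, expected))
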
def got_event(evt, *args, **kwargs):
    if not self._active:
        log.warn("%r: got_event called but manager has been destroyed",
                 self._platform_id)
        return

    if evt.type_ != event_type:
        log.trace("%r: ignoring event type %r. Only handle %r directly",
                  self._platform_id, evt.type_, event_type)
        return

    if evt.sub_type != sub_type:
        log.trace("%r: ignoring event sub_type %r. Only handle %r",
                  self._platform_id, evt.sub_type, sub_type)
        return

    state = self._agent.get_agent_state()

    statuses = formatted_statuses(self.aparam_aggstatus,
                                  self.aparam_child_agg_status,
                                  self.aparam_rollup_status)

    invalidated_children = self._agent._get_invalidated_children()

    log.info("%r/%s: (%s) status report triggered by diagnostic event:\n"
             "%s\n"
             "%40s : %s\n",
             self._platform_id, state, self.resource_id, statuses,
             "invalidated_children", invalidated_children)

def _update_dsa_config(self, dsa_instance):
    """
    Update the dsa configuration prior to loading the agent. This is where
    we can alter production configurations for use in a controlled test
    environment.
    """
    rr = self.container.resource_registry
    dsa_obj = rr.read_object(object_type=RT.ExternalDatasetAgent,
                             predicate=PRED.hasAgentDefinition,
                             subject=dsa_instance._id, id_only=False)

    log.info("dsa agent found: %s", dsa_obj)

    # If we don't want to load from an egg then we need to
    # alter the driver config read from preload
    if self.test_config.mi_repo is not None:
        dsa_obj.driver_uri = None
        # Strip the custom namespace
        dsa_obj.driver_module = ".".join(dsa_obj.driver_module.split('.')[1:])

        log.info("saving new dsa agent config: %s", dsa_obj)
        rr.update(dsa_obj)

        if self.test_config.mi_repo not in sys.path:
            sys.path.insert(0, self.test_config.mi_repo)

        log.debug("Driver module: %s", dsa_obj.driver_module)
        log.debug("MI Repo: %s", self.test_config.mi_repo)
        log.trace("Sys Path: %s", sys.path)

def _handler_connected_connect_instrument(self, *args, **kwargs):
    """
    """
    if log.isEnabledFor(logging.TRACE):  # pragma: no cover
        log.trace("%r/%s args=%s kwargs=%s" % (
                  self._platform_id, self.get_driver_state(),
                  str(args), str(kwargs)))

    port_id = kwargs.get('port_id', None)
    if port_id is None:
        raise FSMError('connect_instrument: missing port_id argument')

    instrument_id = kwargs.get('instrument_id', None)
    if instrument_id is None:
        raise FSMError('connect_instrument: missing instrument_id argument')

    attributes = kwargs.get('attributes', None)
    if attributes is None:
        raise FSMError('connect_instrument: missing attributes argument')

    try:
        result = self.connect_instrument(port_id, instrument_id, attributes)
        return None, result
    except PlatformConnectionException as e:
        return self._connection_lost(RSNPlatformDriverEvent.CONNECT_INSTRUMENT,
                                     args, kwargs, e)

def _get_dsa_instance(self):
    """
    Find the dsa instance in preload and return an instance of that object
    :return:
    """
    name = self.test_config.instrument_device_name
    rr = self.container.resource_registry

    log.debug("Start dataset agent process for instrument device: %s", name)
    objects, _ = rr.find_resources(RT.InstrumentDevice)
    log.debug("Found Instrument Devices: %s", objects)

    filtered_objs = [obj for obj in objects if obj.name == name]
    if not filtered_objs:
        raise ConfigNotFound("No appropriate InstrumentDevice objects loaded")
    instrument_device = filtered_objs[0]
    log.trace("Found instrument device: %s", instrument_device)

    dsa_instance = rr.read_object(subject=instrument_device._id,
                                  predicate=PRED.hasAgentInstance,
                                  object_type=RT.ExternalDatasetAgentInstance)
    log.info("dsa_instance found: %s", dsa_instance)

    return instrument_device, dsa_instance

def handle_attribute_value_event(self, driver_event):
    if log.isEnabledFor(logging.TRACE):  # pragma: no cover
        # show driver_event as retrieved (driver_event.vals_dict might be large)
        log.trace("%r: driver_event = %s", self._platform_id, driver_event)
        log.trace("%r: vals_dict:\n%s",
                  self._platform_id, self._pp.pformat(driver_event.vals_dict))
    elif log.isEnabledFor(logging.DEBUG):  # pragma: no cover
        log.debug("%r: driver_event = %s", self._platform_id, driver_event.brief())

    stream_name = driver_event.stream_name

    publisher = self._data_publishers.get(stream_name, None)
    if not publisher:
        log.warn('%r: no publisher configured for stream_name=%r. '
                 'Configured streams are: %s',
                 self._platform_id, stream_name, self._data_publishers.keys())
        return

    param_dict = self._param_dicts[stream_name]
    stream_def = self._stream_defs[stream_name]

    if isinstance(stream_def, str):
        rdt = RecordDictionaryTool(param_dictionary=param_dict.dump(),
                                   stream_definition_id=stream_def)
    else:
        rdt = RecordDictionaryTool(stream_definition=stream_def)

    self._publish_granule_with_multiple_params(publisher, driver_event,
                                               param_dict, rdt)

def setUp(self):
    self._start_container()
    self.container.start_rel_from_url('res/deploy/r2deploy.yml')

    self.RR   = ResourceRegistryServiceClient(node=self.container.node)
    self.IMS  = InstrumentManagementServiceClient(node=self.container.node)
    self.DAMS = DataAcquisitionManagementServiceClient(node=self.container.node)
    self.DP   = DataProductManagementServiceClient(node=self.container.node)
    self.PSC  = PubsubManagementServiceClient(node=self.container.node)
    self.PDC  = ProcessDispatcherServiceClient(node=self.container.node)
    self.DSC  = DatasetManagementServiceClient()
    self.IDS  = IdentityManagementServiceClient(node=self.container.node)
    self.RR2  = EnhancedResourceRegistryClient(self.RR)

    # Use the network definition provided by RSN OMS directly.
    rsn_oms = CIOMSClientFactory.create_instance(DVR_CONFIG['oms_uri'])
    self._network_definition = RsnOmsUtil.build_network_definition(rsn_oms)
    # get serialized version for the configuration:
    self._network_definition_ser = NetworkUtil.serialize_network_definition(self._network_definition)
    if log.isEnabledFor(logging.TRACE):
        log.trace("NetworkDefinition serialization:\n%s", self._network_definition_ser)

    self._async_data_result = AsyncResult()
    self._data_subscribers = []
    self._samples_received = []
    self.addCleanup(self._stop_data_subscribers)

    self._async_event_result = AsyncResult()
    self._event_subscribers = []
    self._events_received = []
    self.addCleanup(self._stop_event_subscribers)
    self._start_event_subscriber()

def _handler_connected_set_over_current(self, *args, **kwargs):
    """
    """
    if log.isEnabledFor(logging.TRACE):  # pragma: no cover
        log.trace("%r/%s args=%s kwargs=%s" % (
                  self._platform_id, self.get_driver_state(),
                  str(args), str(kwargs)))

    port_id = kwargs.get('port_id', None)
    if port_id is None:
        raise FSMError('set_over_current: missing port_id argument')

    ma = kwargs.get('ma', None)
    if ma is None:
        raise FSMError('set_over_current: missing ma argument')

    us = kwargs.get('us', None)
    if us is None:
        raise FSMError('set_over_current: missing us argument')

    # TODO: provide source info if not explicitly given:
    src = kwargs.get('src', 'source TBD')

    try:
        result = self.set_over_current(port_id, ma, us, src)
        return None, result
    except PlatformConnectionException as e:
        return self._connection_lost(RSNPlatformDriverEvent.SET_OVER_CURRENT,
                                     args, kwargs, e)

def __application(self, environ, start_response):
    input = environ['wsgi.input']
    body = "\n".join(input.readlines())
    # log.trace('notification received payload=%s', body)
    event_instance = yaml.load(body)
    log.trace('notification received event_instance=%s', str(event_instance))
    if 'url' not in event_instance:
        # NOTE: returning before start_response is called violates WSGI
        log.warn("expecting 'url' entry in notification call")
        return
    if 'ref_id' not in event_instance:
        log.warn("expecting 'ref_id' entry in notification call")
        return

    url = event_instance['url']
    event_type = event_instance['ref_id']

    if self._url == url:
        self._event_received(event_type, event_instance)
    else:
        log.warn("got notification call with an unexpected url=%s "
                 "(expected url=%s)", url, self._url)

    # generic OK response  TODO determine appropriate variations
    status = '200 OK'
    headers = [('Content-Type', 'text/plain')]
    start_response(status, headers)
    return event_type

def _get_dsa_client(self, instrument_device, dsa_instance):
    """
    Launch the agent and return a client
    """
    fake_process = FakeProcess()
    fake_process.container = self.container

    clients = DataAcquisitionManagementServiceDependentClients(fake_process)
    config_builder = ExternalDatasetAgentConfigurationBuilder(clients)

    try:
        config_builder.set_agent_instance_object(dsa_instance)
        self.agent_config = config_builder.prepare()
        log.trace("Using dataset agent configuration: %s",
                  pprint.pformat(self.agent_config))
    except Exception as e:
        log.error('failed to launch: %s', e, exc_info=True)
        raise ServerError('failed to launch')

    dispatcher = ProcessDispatcherServiceClient()
    launcher = AgentLauncher(dispatcher)

    log.debug("Launching agent process!")
    process_id = launcher.launch(self.agent_config,
                                 config_builder._get_process_definition()._id)
    if not process_id:
        raise ServerError("Launched external dataset agent instance but no process_id")

    config_builder.record_launch_parameters(self.agent_config)
    launcher.await_launch(10.0)

    return ResourceAgentClient(instrument_device._id, process=FakeProcess())

def _sendto(self, data):
    if log.isEnabledFor(logging.DEBUG):
        log.debug("calling sendto(%r)" % data)
    nobytes = self._sock.sendto(data, self._address)
    if log.isEnabledFor(logging.TRACE):
        log.trace("sendto returned: %i" % nobytes)
    return nobytes

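# For reference, a self-contained sketch of the underlying stdlib call used
# by _sendto above (the endpoint is hypothetical): socket.sendto returns the
# number of bytes actually handed to the kernel for the datagram.
import socket

sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
nobytes = sock.sendto("ping", ("127.0.0.1", 9999))  # Python 2 str payload
assert nobytes == 4
sock.close()
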
def add_granule(self, stream_id, granule):
    '''
    Appends the granule's data to the coverage and persists it.
    '''
    #--------------------------------------------------------------------------------
    # Coverage determination and appending
    #--------------------------------------------------------------------------------
    dataset_id = self.get_dataset(stream_id)
    if not dataset_id:
        log.error('No dataset could be determined on this stream: %s', stream_id)
        return
    coverage = self.get_coverage(stream_id)
    if not coverage:
        log.error('Could not persist coverage from granule, coverage is None')
        return

    #--------------------------------------------------------------------------------
    # Actual persistence
    #--------------------------------------------------------------------------------
    rdt = RecordDictionaryTool.load_from_granule(granule)
    elements = len(rdt)
    if not elements:
        return
    coverage.insert_timesteps(elements)
    start_index = coverage.num_timesteps - elements

    for k, v in rdt.iteritems():
        if k == 'image_obj':
            log.trace('%s:', k)
        else:
            log.trace('%s: %s', k, v)

        slice_ = slice(start_index, None)
        coverage.set_parameter_values(param_name=k, tdoa=slice_, value=v)

    coverage.flush()

def _handler_connected_disconnect_instrument(self, *args, **kwargs):
    """
    """
    if log.isEnabledFor(logging.TRACE):  # pragma: no cover
        log.trace("%r/%s args=%s kwargs=%s" % (
                  self._platform_id, self.get_driver_state(),
                  str(args), str(kwargs)))

    port_id = kwargs.get('port_id', None)
    if port_id is None:
        raise FSMError('disconnect_instrument: missing port_id argument')

    instrument_id = kwargs.get('instrument_id', None)
    if instrument_id is None:
        raise FSMError('disconnect_instrument: missing instrument_id argument')

    try:
        result = self.disconnect_instrument(port_id, instrument_id)
        next_state = None
    except PlatformConnectionException as e:
        return self._connection_lost(RSNPlatformDriverEvent.DISCONNECT_INSTRUMENT,
                                     args, kwargs, e)

    return next_state, result

def set_ports(pnode):
    platform_id = pnode.platform_id
    port_infos = rsn_oms.get_platform_ports(platform_id)
    if not isinstance(port_infos, dict):
        log.warn("%r: get_platform_ports returned: %s", platform_id, port_infos)
        return

    if log.isEnabledFor(logging.TRACE):
        log.trace("%r: port_infos: %s", platform_id, port_infos)

    assert platform_id in port_infos
    ports = port_infos[platform_id]
    for port_id, dic in ports.iteritems():
        port = PortNode(port_id, dic['network'])
        port.set_on(dic['is_on'])
        pnode.add_port(port)

        # add connected instruments:
        instrs_res = rsn_oms.get_connected_instruments(platform_id, port_id)
        if not isinstance(instrs_res, dict):
            log.warn("%r: port_id=%r: get_connected_instruments "
                     "returned: %s" % (platform_id, port_id, instrs_res))
            continue

        if log.isEnabledFor(logging.TRACE):
            log.trace("%r: port_id=%r: get_connected_instruments "
                      "returned: %s" % (platform_id, port_id, instrs_res))

        assert platform_id in instrs_res
        assert port_id in instrs_res[platform_id]
        instr = instrs_res[platform_id][port_id]

        for instrument_id, attrs in instr.iteritems():
            port.add_instrument(InstrumentNode(instrument_id, attrs))

def test_build_network_definition(self):
    ndef = RsnOmsUtil.build_network_definition(self._rsn_oms)

    if log.isEnabledFor(logging.TRACE):
        # serialize object to string
        serialization = NetworkUtil.serialize_network_definition(ndef)
        log.trace("NetworkDefinition serialization:\n%s", serialization)

    if not isinstance(self._rsn_oms, CIOMSSimulator):
        # OK, no more tests if we are not using the embedded simulator
        return

    # Else: do some verifications against network.yml (the spec used by
    # the simulator):

    self.assertTrue("UPS" in ndef.platform_types)

    pnode = ndef.root

    self.assertEqual(pnode.platform_id, "ShoreStation")
    self.assertIn("ShoreStation_attr_1|0", pnode.attrs)
    self.assertIn("ShoreStation_port_1", pnode.ports)

    sub_pnodes = pnode.subplatforms
    self.assertIn("L3-UPS1", sub_pnodes)
    self.assertIn("Node1A", sub_pnodes)
    self.assertIn("input_voltage|0", sub_pnodes["Node1A"].attrs)
    self.assertIn("Node1A_port_1", sub_pnodes["Node1A"].ports)

def _start_dataset_agent_process(self):
    # Create agent config.
    name = self.test_config.instrument_device_name
    rr = self.container.resource_registry

    log.debug("Start dataset agent process for instrument device: %s", name)
    objects, _ = rr.find_resources(RT.InstrumentDevice)
    log.debug("Found Instrument Devices: %s", objects)

    filtered_objs = [obj for obj in objects if obj.name == name]
    if not filtered_objs:
        raise ConfigNotFound("No appropriate InstrumentDevice objects loaded")
    instrument_device = filtered_objs[0]
    log.trace("Found instrument device: %s", instrument_device)

    dsa_instance = rr.read_object(subject=instrument_device._id,
                                  predicate=PRED.hasAgentInstance,
                                  object_type=RT.ExternalDatasetAgentInstance)
    log.debug("dsa_instance found: %s", dsa_instance)

    self._driver_config = dsa_instance.driver_config
    self.clear_sample_data()

    self.damsclient = DataAcquisitionManagementServiceClient(node=self.container.node)
    proc_id = self.damsclient.start_external_dataset_agent_instance(dsa_instance._id)
    client = ResourceAgentClient(instrument_device._id, process=FakeProcess())
    return client

def _load_uri_aliases(cls):
    try:
        cls._uri_aliases = yaml.load(file(_URI_ALIASES_FILENAME))
        if log.isEnabledFor(logging.TRACE):
            log.trace("Loaded CGSN URI aliases = %s" % cls._uri_aliases)
    except Exception as e:
        log.warn("Cannot load %s: %s" % (_URI_ALIASES_FILENAME, e))
        cls._uri_aliases = {}

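# Hedged aside: on current PyYAML, yaml.load() without an explicit Loader is
# deprecated and unsafe for untrusted input; yaml.safe_load is the usual
# substitute for a plain mapping like an alias file (filename hypothetical).
import yaml

with open("uri_aliases.yml") as f:
    aliases = yaml.safe_load(f) or {}
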
def publish_alert(self):
    """
    Publishes the alert to ION.
    """
    event_data = self.make_event_data()
    log.trace("publishing alert: %s", event_data)

    pub = EventPublisher()
    pub.publish_event(**event_data)

def set_ports(pnode):
    platform_id = pnode.platform_id
    port_infos = rsn_oms.port.get_platform_ports(platform_id)
    if not isinstance(port_infos, dict):
        raise PlatformDriverException(
            "%r: get_platform_ports response is not a dict: %s" % (
            platform_id, port_infos))

    if log.isEnabledFor(logging.TRACE):
        log.trace("%r: port_infos: %s", platform_id, port_infos)

    if platform_id not in port_infos:
        raise PlatformDriverException(
            "%r: get_platform_ports response does not include "
            "platform_id: %s" % (platform_id, port_infos))

    ports = port_infos[platform_id]

    if not isinstance(ports, dict):
        raise PlatformDriverException(
            "%r: get_platform_ports: entry for platform_id is "
            "not a dict: %s" % (platform_id, ports))

    for port_id, dic in ports.iteritems():
        port = PortNode(port_id, dic['network'])
        port.set_state(dic['state'])
        pnode.add_port(port)

        # add connected instruments:
        instrs_res = rsn_oms.instr.get_connected_instruments(platform_id, port_id)
        if not isinstance(instrs_res, dict):
            log.warn("%r: port_id=%r: get_connected_instruments "
                     "response is not a dict: %s" % (platform_id, port_id,
                     instrs_res))
            continue

        if log.isEnabledFor(logging.TRACE):
            log.trace("%r: port_id=%r: get_connected_instruments "
                      "returned: %s" % (platform_id, port_id, instrs_res))

        if platform_id not in instrs_res:
            raise PlatformDriverException(
                "%r: port_id=%r: get_connected_instruments response "
                "does not have entry for platform_id: %s" % (
                platform_id, port_id, instrs_res))

        if port_id not in instrs_res[platform_id]:
            raise PlatformDriverException(
                "%r: port_id=%r: get_connected_instruments response "
                "for platform_id does not have entry for port_id: %s" % (
                platform_id, port_id, instrs_res[platform_id]))

        instr = instrs_res[platform_id][port_id]

        for instrument_id, attrs in instr.iteritems():
            port.add_instrument(InstrumentNode(instrument_id, attrs))

def create_granule(self, stream_name, param_dict_name, particle_list):
    pd_id = self.dataset_management.read_parameter_dictionary_by_name(param_dict_name, id_only=True)
    stream_def_id = self.pubsub_client.create_stream_definition(name=stream_name, parameter_dictionary_id=pd_id)
    stream_def = self.pubsub_client.read_stream_definition(stream_def_id)
    rdt = RecordDictionaryTool(stream_definition=stream_def)
    rdt = populate_rdt(rdt, particle_list)
    log.trace("RDT: %s", str(rdt))
    g = rdt.to_granule(data_producer_id='fake_agent_id')
    return g

def _publish_granule_with_multiple_params(self, publisher, driver_event,
                                          param_dict, rdt):
    stream_name = driver_event.stream_name

    pub_params = {}
    selected_timestamps = None

    for param_name, param_value in driver_event.vals_dict.iteritems():
        param_name = param_name.lower()

        if param_name not in rdt:
            if param_name not in self._unconfigured_params:
                # an unrecognized attribute for this platform:
                self._unconfigured_params.add(param_name)
                log.warn('%r: got attribute value event for unconfigured '
                         'parameter %r in stream %r rdt.keys=%s',
                         self._platform_id, param_name, stream_name,
                         list(rdt.iterkeys()))
            continue

        # separate values and timestamps:
        vals, timestamps = zip(*param_value)

        self._agent._dispatch_value_alerts(stream_name, param_name, vals)

        # Use fill_value in context to replace any None values:
        param_ctx = param_dict.get_context(param_name)
        if param_ctx:
            fill_value = param_ctx.fill_value
            log.debug("%r: param_name=%r fill_value=%s",
                      self._platform_id, param_name, fill_value)
            # do the replacement:
            vals = [fill_value if val is None else val for val in vals]

            if log.isEnabledFor(logging.TRACE):  # pragma: no cover
                log.trace("%r: vals array after replacing None with fill_value:\n%s",
                          self._platform_id, self._pp.pformat(vals))
        else:
            log.warn("%r: unexpected: parameter context not found for %r",
                     self._platform_id, param_name)

        # Set values in rdt:
        rdt[param_name] = numpy.array(vals)

        pub_params[param_name] = vals

        selected_timestamps = timestamps

    if selected_timestamps is None:
        # that is, all param_name's were unrecognized; just return:
        return

    self._publish_granule(stream_name, publisher, param_dict, rdt,
                          pub_params, selected_timestamps)

def _handle_recv_data(self, recv_data):
    if log.isEnabledFor(logging.TRACE):
        log.trace("_handle_recv_data: recv_data=%r" % recv_data)
    for c in recv_data:
        if c == '\n':
            self._handle_new_line(self._line)
            self._line = ''
        else:
            self._line += c

def _create_user(self):
    """
    Create user resources that serve as device owners. This test user does
    not have contact information, user credentials or notification
    preferences. Results in these objects:

    ActorIdentity({'_rev': '1', '_id': '07f92986b34e426bba0fca00b73cf4a5',
        'lcstate': 'DEPLOYED', 'alt_ids': [], 'description': '',
        'ts_updated': '1391542388312', 'actor_type': 1, 'addl': {},
        'ts_created': '1391542388312', 'availability': 'AVAILABLE',
        'name': 'Identity for Adam Activationtest'})

    UserInfo({'_rev': '1', '_id': 'ac8d368e6ea247d996fd60dd0f9c7f89',
        'lcstate': 'DEPLOYED', 'alt_ids': [], 'description': 'Activation Test User',
        'tokens': [], 'ts_updated': '1391542388345',
        'contact': ContactInformation({'individual_names_given': '', 'city': '',
            'roles': [], 'administrative_area': '', 'url': '', 'country': '',
            'variables': [{'name': '', 'value': ''}], 'organization_name': '',
            'postal_code': '', 'individual_name_family': '', 'phones': [],
            'position_name': '', 'email': '', 'street_address': ''}),
        'variables': [{'name': '', 'value': ''}], 'addl': {},
        'ts_created': '1391542388345', 'availability': 'AVAILABLE',
        'name': 'Adam Activationtest'})
    """
    # Basic user attributes for test user.
    user_attrs = {
        'name': 'Adam Activationtest',
        'description': 'Activation Test User'
    }

    # Create ActorIdentity.
    actor_name = "Identity for %s" % user_attrs['name']
    actor_identity_obj = IonObject("ActorIdentity", name=actor_name)
    log.trace("creating user %s with headers: %r",
              user_attrs['name'], self.webauth_actor_headers)
    self.actor_id = self.idms.create_actor_identity(
        actor_identity_obj, headers=self.webauth_actor_headers)

    # Create UserInfo.
    user_info_obj = IonObject("UserInfo", **user_attrs)
    self.user_info_id = self.idms.create_user_info(
        self.actor_id, user_info_obj, headers=self.webauth_actor_headers)

def _publish_granule(self, stream_name, publisher, param_dict, rdt,
                     pub_params, timestamps):
    log.trace("%r: ======== publish_granule", self._platform_id)

    # Set timestamp info in rdt:
    if param_dict.temporal_parameter_name is not None:
        temp_param_name = param_dict.temporal_parameter_name
        rdt[temp_param_name] = numpy.array(timestamps)
        #@TODO: Ensure that the preferred_timestamp field is correct
        rdt['preferred_timestamp'] = numpy.array(['internal_timestamp'] * len(timestamps))
        if log.isEnabledFor(logging.DEBUG):  # pragma: no cover
            log.debug('Preferred timestamp is unresolved, using "internal_timestamp"')
    else:
        log.warn("%r: Not including timestamp info in granule: "
                 "temporal_parameter_name not defined in parameter dictionary",
                 self._platform_id)

    g = rdt.to_granule(data_producer_id=self.resource_id,
                       connection_id=self._connection_ID.hex,
                       connection_index=str(self._connection_index[stream_name]))
    try:
        publisher.publish(g)

        if log.isEnabledFor(logging.TRACE):  # pragma: no cover
            log.trace("%r: Platform agent published data granule on stream %r: "
                      "%s timestamps: %s",
                      self._platform_id, stream_name,
                      self._pp.pformat(pub_params), self._pp.pformat(timestamps))
        elif log.isEnabledFor(logging.DEBUG):  # pragma: no cover
            summary_params = {attr_id: "(%d vals)" % len(vals)
                              for attr_id, vals in pub_params.iteritems()}
            summary_timestamps = "(%d vals)" % len(timestamps)
            log.debug("%r: Platform agent published data granule on stream %r: "
                      "%s timestamps: %s",
                      self._platform_id, stream_name,
                      summary_params, summary_timestamps)

        log.debug("%r: granule published with connection_id=%s, connection_index=%i",
                  self._platform_id,
                  self._connection_ID.hex,
                  self._connection_index[stream_name])

        self._connection_index[stream_name] += 1
    except Exception:
        log.exception("%r: Platform agent could not publish data on stream %s.",
                      self._platform_id, stream_name)

def subplatform_launched(self, pa_client, sub_resource_id):
    """
    PlatformAgent calls this to indicate that a child sub-platform has been
    launched.

    - Since the sub-platform may have been running already by the time the
      PlatformAgent is to add it, this method directly gets the
      "rollup_status" and the "child_agg_status" of the child and does the
      updates here.

      NOTE: *no* publications of DeviceAggregateStatusEvent events are done
      because ancestors may not already have entries for this platform.

    - also does the corresponding "device_added" event publication.

    @param pa_client        sub-platform's resource client
    @param sub_resource_id  sub-platform's resource ID
    """
    self._start_subscriber_resource_agent_lifecycle_event(sub_resource_id)

    # do any updates from sub-platform's rollup_status and child_agg_status:
    try:
        resp = pa_client.get_agent(['child_agg_status', 'rollup_status'])
        child_child_agg_status = resp['child_agg_status']
        child_rollup_status = resp['rollup_status']

        log.trace("%r: retrieved from sub-platform %r: "
                  "child_agg_status=%s rollup_status=%s",
                  self._platform_id, sub_resource_id,
                  child_child_agg_status, child_rollup_status)

        with self._lock:
            # take the child's child_agg_status'es:
            for sub_origin, sub_statuses in child_child_agg_status.iteritems():
                self._prepare_new_child(sub_origin, False, sub_statuses)

            # update my own child_agg_status from the child's rollup_status
            # and also my rollup_status:
            for status_name, status in child_rollup_status.iteritems():
                self.aparam_child_agg_status[sub_resource_id][status_name] = status
                self._update_rollup_status(status_name)

        log.trace("%r: my updated child status after processing sub-platform %r: %s",
                  self._platform_id, sub_resource_id,
                  self.aparam_child_agg_status)

    except Exception as e:
        log.warn("%r: could not get rollup_status or reported rollup_status is "
                 "invalid from sub-platform %r: %s",
                 self._platform_id, sub_resource_id, e)

    # publish device_added event:
    self.publish_device_added_event(sub_resource_id)

def _handler_connected_disconnect(self, *args, **kwargs):
    """
    """
    if log.isEnabledFor(logging.TRACE):  # pragma: no cover
        log.trace("%r/%s args=%s kwargs=%s" % (
                  self._platform_id, self.get_driver_state(),
                  str(args), str(kwargs)))

    result = self.disconnect(*args, **kwargs)
    next_state = PlatformDriverState.DISCONNECTED

    return next_state, result

def _handler_connected_get_checksum(self, *args, **kwargs):
    """
    """
    if log.isEnabledFor(logging.TRACE):  # pragma: no cover
        log.trace("%r/%s args=%s kwargs=%s" % (
                  self._platform_id, self.get_driver_state(),
                  str(args), str(kwargs)))

    result = self.get_checksum()
    next_state = None

    return next_state, result

def _event_received(self, event_instance):
    log.trace('%r: received event_instance=%s',
              self._platform_id, event_instance)

    if self._notifications:
        self._notifications.append(event_instance)
    else:
        self._notifications = [event_instance]

    log.debug('%r: notifying event_instance=%s',
              self._platform_id, event_instance)
    driver_event = ExternalEventDriverEvent(event_instance)
    self._notify_driver_event(driver_event)

def _handler_connected_disconnect(self, *args, **kwargs):
    """
    """
    if log.isEnabledFor(logging.TRACE):  # pragma: no cover
        log.trace("%r/%s args=%s kwargs=%s" % (
                  self._platform_id, self.get_driver_state(),
                  str(args), str(kwargs)))

    result = self.disconnect()
    next_state = PlatformDriverState.DISCONNECTED

    return next_state, result

def _event_received(self, event_type, event_instance):
    log.trace('received event_instance=%s', event_instance)

    # record the event under its type, creating the per-type list on first
    # sight (a plain truthiness check here would silently drop events while
    # the dict is still empty):
    if self._notifications is not None:
        if event_type in self._notifications:
            self._notifications[event_type].append(event_instance)
        else:
            self._notifications[event_type] = [event_instance]

    log.debug('notifying event_instance=%s', event_instance)
    driver_event = ExternalEventDriverEvent(event_type, event_instance)
    self._notify_driver_event(driver_event)

def _handler_disconnected_disconnect(self, *args, **kwargs):
    """
    We allow the DISCONNECT event in DISCONNECTED state for convenience; in
    particular, it facilitates the overall handling of the connection_lost
    event, which is processed by a subsequent call to disconnect from the
    platform agent. The handler here does nothing.
    """
    if log.isEnabledFor(logging.TRACE):  # pragma: no cover
        log.trace("%r/%s args=%s kwargs=%s" % (
                  self._platform_id, self.get_driver_state(),
                  str(args), str(kwargs)))

    return None, None

def get_samples(self, stream_name, sample_count=1, timeout=30):
    """
    Listen on a stream until 'sample_count' samples are read and return
    a list of all samples read. If the required number of samples aren't
    read then throw an exception.

    Note that this method does not clear the sample queue for the stream.
    This should be done explicitly by the caller. However, samples that
    are consumed by this method are removed.

    @raise SampleTimeout - if the required number of samples aren't read
    """
    to = gevent.Timeout(timeout)
    to.start()
    done = False
    result = []
    i = 0

    log.debug("Fetch %s sample(s) from stream '%s'" % (sample_count, stream_name))

    stream_id = self._stream_id_map.get(stream_name)
    log.debug("Stream ID Map: %s ", self._stream_id_map)
    self.assertIsNotNone(stream_id, msg="Unable to find stream name '%s'" % stream_name)

    try:
        while not done:
            if stream_id in self._samples_received and len(self._samples_received.get(stream_id)):
                log.trace("get_samples() received sample #%d!", i)
                result.append(self._samples_received[stream_id].pop(0))
                i += 1
                if i >= sample_count:
                    done = True
            else:
                log.debug("No samples in %s. Sleep a bit to wait for the data queue to fill up.",
                          stream_name)
                gevent.sleep(1)
    except Timeout:
        log.error("Failed to get %d records from %s. received: %d",
                  sample_count, stream_name, i)
        self.fail("Failed to read samples from stream %s" % stream_name)
    finally:
        to.cancel()

    return result

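# Hedged aside: gevent.Timeout also works as a context manager, which avoids
# the manual start()/cancel() pairing used in get_samples above. Passing
# False as the exception argument suppresses the timeout so the block simply
# exits (the helper below is a hypothetical sketch, not project code).
import gevent

def drain(queue, count, timeout=30):
    """Pop up to `count` items from a list-like queue within `timeout` s."""
    got = []
    with gevent.Timeout(timeout, False):  # False: exit quietly on timeout
        while len(got) < count:
            if queue:
                got.append(queue.pop(0))
            else:
                gevent.sleep(0.1)
    return got
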
def _handler_connected_get(self, *args, **kwargs):
    """
    """
    if log.isEnabledFor(logging.TRACE):  # pragma: no cover
        log.trace("%r/%s args=%s kwargs=%s" % (
                  self._platform_id, self.get_driver_state(),
                  str(args), str(kwargs)))

    try:
        result = self.get(*args, **kwargs)
        return None, result
    except PlatformConnectionException as e:
        return self._connection_lost(PlatformDriverEvent.GET, args, kwargs, e)

def _handler_disconnected_connect(self, *args, **kwargs):
    """
    """
    if log.isEnabledFor(logging.TRACE):  # pragma: no cover
        log.trace("%r/%s args=%s kwargs=%s" % (
                  self._platform_id, self.get_driver_state(),
                  str(args), str(kwargs)))

    recursion = kwargs.get('recursion', None)

    self.connect(recursion=recursion)
    result = next_state = PlatformDriverState.CONNECTED

    return next_state, result

def _handler_connected_set_attribute_values(self, *args, **kwargs):
    """
    """
    if log.isEnabledFor(logging.TRACE):  # pragma: no cover
        log.trace("%r/%s args=%s kwargs=%s" % (
                  self._platform_id, self.get_driver_state(),
                  str(args), str(kwargs)))

    attrs = kwargs.get('attrs', None)
    if attrs is None:
        raise FSMError('set_attribute_values: missing attrs argument')

    result = self.set_attribute_values(attrs)
    next_state = None

    return next_state, result

def _handler_connected_turn_off_port(self, *args, **kwargs):
    """
    """
    if log.isEnabledFor(logging.TRACE):  # pragma: no cover
        log.trace("%r/%s args=%s kwargs=%s" % (
                  self._platform_id, self.get_driver_state(),
                  str(args), str(kwargs)))

    port_id = kwargs.get('port_id', None)
    if port_id is None:
        raise FSMError('turn_off_port: missing port_id argument')

    result = self.turn_off_port(port_id)
    next_state = None

    return next_state, result