def publish_device_removed_event(self, sub_resource_id):
    """
    Publishes a DeviceStatusEvent indicating that the given child has been
    removed from the platform.

    @param sub_resource_id resource id of child
    """
    description = "Child device %r has been removed from platform %r (%r)" % \
                  (sub_resource_id, self.resource_id, self._platform_id)

    values = [sub_resource_id]

    evt = dict(event_type='DeviceStatusEvent',
               sub_type="device_removed",
               origin_type="PlatformDevice",
               origin=self.resource_id,
               description=description,
               values=values)
    try:
        log.debug('%r: publish_device_removed_event for %r: %s',
                  self._platform_id, sub_resource_id, evt)
        self._event_publisher.publish_event(**evt)

    except Exception:
        log.exception('%r: platform agent could not publish event: %s',
                      self._platform_id, evt)
def record_launch_parameters(self, agent_config):
    """
    Record the process id of the launch.
    """
    #self.RR2.update(self.agent_instance_obj)
    log.debug('completed agent start')
def _match_devices(self, device_id, device_tree, site_ref_designator_map):
    # there will not be a port assignment for the top device
    if device_id == self.top_device._id:
        self._validate_models(self.top_site._id, self.top_device._id)
        self.match_list.append((self.top_site._id, self.top_device._id))

    tuple_list = device_tree[device_id]

    for (pt, child_id, ct) in tuple_list:
        log.debug("  tuple - pt: %s  child_id: %s  ct: %s", pt, child_id, ct)

        # match this child device, then if it has children, call _match_devices with this id

        # check that this device is represented in device tree and in port assignments
        if child_id in self.device_resources and child_id in self.deployment_obj.port_assignments:
            platform_port = self.deployment_obj.port_assignments[child_id]
            log.debug("device platform_port: %s", platform_port)

            # validate PlatformPort info for this device
            self._validate_port_assignments(child_id, platform_port)

            if platform_port.reference_designator in site_ref_designator_map:
                matched_site = site_ref_designator_map[platform_port.reference_designator]
                self._validate_models(matched_site, child_id)
                log.info("match_list append site: %s  device: %s", matched_site, child_id)
                self.match_list.append((matched_site, child_id))

                # recurse on the children of this device
                self._match_devices(child_id, device_tree, site_ref_designator_map)

        # otherwise this device cannot be matched to a site
        else:
            self.unmatched_device_list.append(child_id)
def test_schema_generation(self):
    self.maxDiff = None
    result = self.param_dict.generate_dict()
    json_result = json.dumps(result, indent=4, sort_keys=True)
    log.debug("Expected: %s", self.target_schema)
    log.debug("Result: %s", json_result)
    self.assertEqual(result, self.target_schema)
def receiver(self):
    while True:
        with self._active_work_lock:
            if self._do_stop and len(self._active_work) == 0:
                break

        resp_type, worker_guid, work_key, work = unpack(self.resp_sock.recv())
        work = list(work) if work is not None else work

        if resp_type == SUCCESS:
            log.debug('Worker %s was successful', worker_guid)
            with self._active_work_lock:
                self._active_work.pop(work_key)
        elif resp_type == FAILURE:
            log.debug('===> FAILURE reported for work on %s by worker %s', work_key, worker_guid)
            if work_key is None:
                # Worker failed before it did anything; put all work back on the
                # prep queue to be reorganized by the organizer
                with self._active_work_lock:
                    # Because it failed so miserably, need to find the work_key based on guid
                    for k, v in self._active_work.iteritems():
                        if v[0] == worker_guid:
                            work_key = k
                            break

                    if work_key is not None:
                        wguid, wp = self._active_work.pop(work_key)
                        self.put_work(work_key, wp[0], wp[1])
            else:
                # Normal failure
                with self._active_work_lock:
                    # Pop the work from active work, and queue the work returned by the worker
                    wguid, wp = self._active_work.pop(work_key)
                    self.put_work(work_key, wp[0], work)
def stop_launched_simulator(cls):
    """
    Utility to stop the process launched with launch_simulator.
    The stop is attempted a couple of times in case of errors (with a few
    seconds of sleep in between).

    @return None if process seems to have been stopped properly.
            Otherwise the exception of the last attempt to stop it.
    """
    if cls._sim_process:
        sim_proc, cls._sim_process = cls._sim_process, None
        attempts = 3
        attempt = 0
        while attempt <= attempts:
            attempt += 1
            log.debug("[OMSim] stopping launched simulator (attempt=%d) ...", attempt)
            try:
                sim_proc.stop()
                log.debug("[OMSim] simulator process seems to have stopped properly")
                return None

            except Exception as ex:
                if attempt < attempts:
                    sleep(10)
                else:
                    log.warn("[OMSim] error while stopping simulator process: %s", ex)
                    return ex
def advance_lcs(self, resource_id, transition_event):
    """
    Attempt to advance the lifecycle state of a resource.

    @param resource_id       the resource id
    @param transition_event  the lifecycle transition event
    """
    assert(type("") == type(resource_id))
    assert(type(LCE.PLAN) == type(transition_event))

    # no checking here. the policy framework does the work.
    #self.check_lcs_precondition_satisfied(resource_id, transition_event)

    if LCE.RETIRE == transition_event:
        log.debug("Using RR.retire")
        ret = self.RR.retire(resource_id)
        return ret
    else:
        log.debug("Moving %s resource life cycle with transition event=%s",
                  self.iontype, transition_event)

        ret = self.RR.execute_lifecycle_transition(resource_id=resource_id,
                                                   transition_event=transition_event)

        log.info("%s lifecycle transition=%s resulted in lifecycle state=%s",
                 self.iontype, transition_event, str(ret))

        return ret
def openNode(self, platform_id, node_config_filename,
             stream_definition_filename=DEFAULT_STREAM_DEF_FILENAME):
    """
    Opens up and parses the node configuration files.

    @param platform_id - id to associate with this set of Node Configuration Files
    @param node_config_filename - yaml file with information about the platform
    @param stream_definition_filename - yaml file with the stream definitions
    @raise NodeConfigurationFileException
    """
    self._platform_id = platform_id

    log.debug("%r: Open: %s", self._platform_id, node_config_filename)

    try:
        with open(node_config_filename, 'r') as nc_file, \
             open(stream_definition_filename, 'r') as sc_file:
            try:
                node_config = yaml.load(nc_file)
                stream_definitions = yaml.load(sc_file)
                self._node_yaml = NodeYAML.factory(node_config, stream_definitions)
                self._node_yaml.validate()
            except Exception as e:
                import traceback
                traceback.print_exc()
                msg = "%s Cannot parse yaml node specific config file : %s" % (e, node_config_filename)
                raise NodeConfigurationFileException(msg=msg)
    except IOError as e:
        # handle IOError separately (and only IOError) so a parse failure raised
        # above is not re-reported as a "cannot open" error
        msg = "%s Cannot open node specific config file : %s" % (e, node_config_filename)
        raise NodeConfigurationFileException(msg=msg)
def recv_evt_messages(driver_client):
    """
    A looping function that monitors a ZMQ SUB socket for asynchronous
    driver events. Can be run as a thread or greenlet.

    @param driver_client The client object that launches the thread.
    """
    context = zmq.Context()
    sock = context.socket(zmq.SUB)
    sock.connect(driver_client.event_host_string)
    sock.setsockopt(zmq.SUBSCRIBE, '')
    log.info('Driver client event thread connected to %s.' % driver_client.event_host_string)

    driver_client.stop_event_thread = False
    #last_time = time.time()
    while not driver_client.stop_event_thread:
        try:
            evt = sock.recv_pyobj(flags=zmq.NOBLOCK)
            log.debug('got event: %s' % str(evt))
            if driver_client.evt_callback:
                driver_client.evt_callback(evt)
        except zmq.ZMQError:
            time.sleep(.5)
        except Exception as e:
            log.error('Driver client error reading from zmq event socket: ' + str(e))
            log.error('Driver client error type: ' + str(type(e)))
def _collect_deployment(self, device_id=None):
    deployment_objs = self.RR2.find_objects(device_id, PRED.hasDeployment, RT.Deployment)

    # find current deployment using time constraints
    current_time = int(calendar.timegm(time.gmtime()))
    for d in deployment_objs:
        # find deployment start and end time
        time_constraint = None
        for constraint in d.constraint_list:
            if constraint.type_ == OT.TemporalBounds:
                if time_constraint:
                    log.warn('deployment %s has more than one time constraint (using first)', d.name)
                else:
                    time_constraint = constraint
        if time_constraint:
            # a time constraint was provided; check if the current time is in this window
            if int(time_constraint.start_datetime) < current_time < int(time_constraint.end_datetime):
                log.debug('_collect_deployment found current deployment  start time: %s, end time: %s  current time: %s  deployment: %s',
                          time_constraint.start_datetime, time_constraint.end_datetime, current_time, d)
                return d

    return None
def _generate_driver_config(self):
    log.debug("_generate_driver_config for %s", self.agent_instance_obj.name)

    # get default config
    driver_config = super(InstrumentAgentConfigurationBuilder, self)._generate_driver_config()

    # add port assignments
    port_assignments = {}

    # find the associated Deployment resource for this device
    deployment_obj = self._collect_deployment(self._get_device()._id)
    if deployment_obj:
        self._validate_reference_designator(deployment_obj.port_assignments)
        port_assignments = self._serialize_port_assigments(deployment_obj.port_assignments)

    instrument_agent_instance_obj = self.agent_instance_obj

    # Create driver config.
    add_driver_config = {
        'comms_config': instrument_agent_instance_obj.driver_config.get('comms_config'),
        'pagent_pid': instrument_agent_instance_obj.driver_config.get('pagent_pid'),
        'ports': port_assignments,
    }

    self._augment_dict("Instrument Agent driver_config", driver_config, add_driver_config)

    return driver_config
def _make_dynamic_assign_single_subject_function(self, item):
    inputs = self._parse_function_name_for_subj_pred_obj(
        "assign single subject function w/pred",
        item,
        r"(assign_)(\w+)(_to_one_)(\w+)(_with_)(\w+)",
        [2, 3, 4, 5, 6],
        {"subject": 4, "predicate": 6, "object": 2})

    if None is inputs:
        inputs = self._parse_function_name_for_subj_pred_obj(
            "assign single subject function",
            item,
            r"(assign_)(\w+)(_to_one_)(\w+)",
            [2, 3, 4],
            {"subject": 4, "predicate": None, "object": 2})

    if None is inputs:
        return None

    isubj = inputs["RT.subject"]
    iobj = inputs["RT.object"]
    ipred = inputs["PRED.predicate"]

    log.debug("Making function to create associations (1)%s -> %s -> %s", isubj, ipred, iobj)

    def freeze():
        def ret_fn(obj_id, subj_id):
            log.info("Dynamically creating association (1)%s -> %s -> %s", isubj, ipred, iobj)

            # see if there are any other objects of this type and pred on this subject
            existing_subjs, _ = self.RR.find_subjects(isubj, ipred, obj_id, id_only=True)

            if len(existing_subjs) > 1:
                raise Inconsistent("Multiple %s-%s subjects found associated to the same %s object with id='%s'" %
                                   (isubj, ipred, iobj, obj_id))

            if len(existing_subjs) > 0:
                try:
                    self.RR.get_association(subj_id, ipred, obj_id)
                except NotFound:
                    raise BadRequest("Attempted to add a second %s-%s association to a %s with id='%s'" %
                                     (isubj, ipred, iobj, obj_id))
                else:
                    log.debug("Create %s Association (single subject): ALREADY EXISTS", ipred)

            return self.RR.create_association(subj_id, ipred, obj_id)

        return ret_fn

    ret = freeze()
    return ret
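# Illustrative sketch (not from the original module): how the first name pattern above is
# expected to decompose a dynamically generated method name. The example name
# "assign_device_to_one_site_with_hasDevice" is hypothetical.
def _example_assign_name_parse():
    import re
    m = re.match(r"(assign_)(\w+)(_to_one_)(\w+)(_with_)(\w+)",
                 "assign_device_to_one_site_with_hasDevice")
    assert m.group(2) == "device"      # object resource type
    assert m.group(4) == "site"        # subject resource type
    assert m.group(6) == "hasDevice"   # predicate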
def _device_removed_event(self, evt):
    """
    Handles the device_removed event to remove associated information and
    status updates, which may result in events being published.
    """
    # the actual child removed is in the values component of the event:
    if isinstance(evt.values, (list, tuple)):
        # normally it will be just one element but handle as array:
        for sub_resource_id in evt.values:
            self._remove_child(sub_resource_id)
    else:
        log.warn("%r: Got device_removed event with invalid values member: %r",
                 self._platform_id, evt)
        return

    # finally forward event so ancestors also get notified:
    # only adjustment is that now my platform's resource_id is the origin:
    evt = dict(event_type=evt.type_,
               sub_type=evt.sub_type,
               origin_type=evt.origin_type,
               origin=self.resource_id,
               description=evt.description,
               values=evt.values)
    try:
        log.debug('%r: _device_removed_event: forwarding to ancestors: %s',
                  self._platform_id, evt)
        self._event_publisher.publish_event(**evt)

    except Exception:
        log.exception('%r: platform agent could not publish event: %s',
                      self._platform_id, evt)
def _get_device_resources(self, device_tree):
    # create a map of device ids to their full resource object to assist with lookup and validation
    device_objs = self.clients.resource_registry.read_mult(device_tree.keys())
    log.debug("prepare_activation device_objects: %s", device_objs)
    for device_obj in device_objs:
        self.device_resources[device_obj._id] = device_obj
def __init__(self, datastore_name=None, config=None, scope=None, profile=None, **kwargs):
    super(CouchDataStore, self).__init__(datastore_name=datastore_name, config=config,
                                         scope=scope, profile=profile)

    if self.config.get("type", None) and self.config['type'] != "couchdb":
        raise BadRequest("Datastore server config is not couchdb: %s" % self.config)
    if self.datastore_name and self.datastore_name != self.datastore_name.lower():
        raise BadRequest("Invalid CouchDB datastore name: '%s'" % self.datastore_name)
    if self.scope and self.scope != self.scope.lower():
        raise BadRequest("Invalid CouchDB scope name: '%s'" % self.scope)

    # Connection
    if self.username and self.password:
        connection_str = "http://%s:%s@%s:%s" % (self.username, self.password, self.host, self.port)
        log_connection_str = "http://%s:%s@%s:%s" % ("username", "password", self.host, self.port)
        log.debug("Using username:password authentication to connect to datastore")
    else:
        connection_str = "http://%s:%s" % (self.host, self.port)
        log_connection_str = connection_str

    log.info("Connecting to CouchDB server: %s", log_connection_str)
    self.server = couchdb.Server(connection_str)

    self._id_factory = None   # TODO

    # Just to test existence of the datastore
    if self.datastore_name:
        try:
            ds, _ = self._get_datastore()
        except NotFound:
            self.create_datastore()
            ds, _ = self._get_datastore()
def _find_top_site_device(self, deployment_id):
    top_site = ''
    top_device = ''

    # retrieve the site tree information using the OUTIL functions; site info as well as site children
    deploy_items_objs, _ = self.clients.resource_registry.find_subjects(
        predicate=PRED.hasDeployment, object=deployment_id, id_only=False)
    log.debug("site_ids associated to this deployment: %s", deploy_items_objs)

    for obj in deploy_items_objs:
        rsrc_type = obj.type_
        log.debug("resource type associated to this deployment: %s", rsrc_type)
        if RT.PlatformDevice == rsrc_type or RT.InstrumentDevice == rsrc_type:
            top_device = obj
        elif RT.PlatformSite == rsrc_type or RT.InstrumentSite == rsrc_type:
            top_site = obj
        else:
            log.error('Deployment may only link to devices and sites. Deployment: %s',
                      str(self.deployment_obj))

    if not top_device or not top_site:
        log.error('Deployment must associate to both site and device. Deployment: %s',
                  str(self.deployment_obj))
        raise BadRequest('Deployment must associate to both site and device. Deployment: %s' %
                         str(self.deployment_obj))

    return top_site, top_device
def _generate_driver_config(self):
    log.debug("_generate_driver_config for %s", self.agent_instance_obj.name)

    # get default config
    driver_config = super(ExternalDatasetAgentConfigurationBuilder, self)._generate_driver_config()

    agent_instance_obj = self.agent_instance_obj
    agent_obj = self._get_agent()

    parser_cfg = copy.deepcopy(agent_obj.parser_default_config)
    poller_cfg = copy.deepcopy(agent_obj.poller_default_config)

    # Create driver config.
    base_driver_config = {
        'parser': {
            'uri': agent_obj.parser_uri,
            'module': agent_obj.parser_module,
            'class': agent_obj.parser_class,
            'config': parser_cfg,
        },
        'poller': {
            'uri': agent_obj.poller_uri,
            'module': agent_obj.poller_module,
            'class': agent_obj.poller_class,
            'config': poller_cfg,
        },
    }

    res_driver_config = dict_merge(base_driver_config, driver_config)

    return res_driver_config
def any_old(resource_type, extra_fields=None):
    """
    Create any old resource... a generic and unique object of a given type.

    @param resource_type the resource type
    @param extra_fields dict of any extra fields to set
    """
    if not extra_fields:
        extra_fields = {}

    if resource_type not in _sa_test_helpers_ionobj_count:
        _sa_test_helpers_ionobj_count[resource_type] = 0

    _sa_test_helpers_ionobj_count[resource_type] += 1

    name = "%s_%d" % (resource_type, _sa_test_helpers_ionobj_count[resource_type])
    desc = "My %s #%d" % (resource_type, _sa_test_helpers_ionobj_count[resource_type])
    log.debug("Creating any old %s IonObject (#%d)", resource_type,
              _sa_test_helpers_ionobj_count[resource_type])

    ret = IonObject(resource_type, name=name, description=desc)

    # add any extra fields
    for k, v in extra_fields.iteritems():
        setattr(ret, k, v)

    return ret
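# Usage sketch (hypothetical; not from the original tests): any_old() builds a uniquely named
# IonObject of the requested type; keys in extra_fields must be attributes that exist on that
# resource type. The resource type and field below are placeholders.
def _example_any_old_usage():
    dev = any_old(RT.InstrumentDevice, {"serial_number": "123"})
    log.debug("created %s named %s", RT.InstrumentDevice, dev.name)   # e.g. "InstrumentDevice_1"
    return dev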
def test_force_delete_fun(self):
    """
    self is an instance of the tester class
    """
    log.debug("test_force_delete_fun")

    # get objects
    svc = self._utg_getservice()
    testfun = self._utg_getcrudmethod(resource_label, "force_delete")
    myret = sample_resource()

    # configure Mock
    if all_in_one:
        svc.clients.resource_registry.delete.reset_mock()
    if all_in_one:
        svc.clients.resource_registry.retire.reset_mock()

    svc.clients.resource_registry.read.return_value = myret
    svc.clients.resource_registry.delete.return_value = None
    svc.clients.resource_registry.find_resources.return_value = None
    svc.clients.resource_registry.find_objects.return_value = ([], [])
    svc.clients.resource_registry.find_subjects.return_value = ([], [])

    try:
        testfun("111")
    except TypeError as te:
        # for logic tests that run into mock trouble
        if "'Mock' object is not iterable" != te.message:
            raise te
        elif all_in_one:
            return
        else:
            raise SkipTest("Must test this with INT test")

    svc.clients.resource_registry.delete.assert_called_once_with("111")
def publish_callback(self, particle):
    """
    Publish particles to the agent.

    TODO: currently we are generating JSON serialized objects; we should be
    able to send objects directly because we don't have the zmq boundary
    issue in this client.

    @return: number of records published
    """
    publish_count = 0
    try:
        for p in particle:
            # Can we use p.generate_dict() here?
            p_obj = p.generate()
            log.debug("Particle received: %s", p_obj)
            self._async_driver_event_sample(p_obj, None)
            publish_count += 1
    except Exception as e:
        log.error("Error logging particle: %s", e, exc_info=True)

        # Reset the connection id because we can not ensure contiguous data.
        self._asp.reset_connection()

        log.debug("Publish ResourceAgentErrorEvent from publisher_callback")
        self._event_publisher.publish_event(
            error_msg="Sample Parsing Exception: %s" % e,
            event_type='ResourceAgentErrorEvent',
            origin_type=self.ORIGIN_TYPE,
            origin=self.resource_id
        )

    return publish_count
def test_force_delete_bad_wrongtype_fun(self):
    """
    self is an instance of the tester class
    """
    log.debug("test_force_delete_bad_wrongtype_fun")

    # get objects
    svc = self._utg_getservice()
    testfun = self._utg_getcrudmethod(resource_label, "force_delete")
    myret = IonObject(RT.Resource, name="Generic Name")

    # configure Mock
    if all_in_one:
        svc.clients.resource_registry.delete.reset_mock()
    if all_in_one:
        svc.clients.resource_registry.retire.reset_mock()
    if all_in_one:
        svc.clients.resource_registry.read.reset_mock()

    svc.clients.resource_registry.find_objects.return_value = ([], [])
    svc.clients.resource_registry.find_subjects.return_value = ([], [])
    svc.clients.resource_registry.read.return_value = myret

    try:
        self.assertRaisesRegexp(BadRequest, "type", testfun, "111")
    except TypeError as te:
        # for logic tests that run into mock trouble
        if "'Mock' object is not iterable" != te.message:
            raise te
        elif all_in_one:
            return
        else:
            raise SkipTest("Must test this with INT test")

    self.assertEqual(0, svc.clients.resource_registry.retire.call_count)
    self.assertEqual(0, svc.clients.resource_registry.delete.call_count)
def convert_to_attachments(self):
    assert(self.csv_reader is not None)
    assert(self.qa_zip_obj is not None)

    # create attachment resources for each document in the zip
    log.debug("creating attachment objects")
    attachments = []
    for row in self.csv_reader:
        att_name = row["filename"]
        att_desc = row["description"]
        att_content_type = row["content_type"]
        att_keywords = string.split(row["keywords"], ",")

        if att_name not in self.qa_zip_obj.namelist():
            return None, ("Manifest refers to a file called '%s' which is not in the zip" % att_name)

        attachments.append(IonObject(RT.Attachment,
                                     name=att_name,
                                     description=att_desc,
                                     content=self.qa_zip_obj.read(att_name),
                                     content_type=att_content_type,
                                     keywords=att_keywords,
                                     attachment_type=AttachmentType.BLOB))

    log.debug("Sanity checking manifest vs zip file")
    if len(self.qa_zip_obj.namelist()) - 1 > len(attachments):
        log.warn("There were %d files in the zip but only %d in the manifest",
                 len(self.qa_zip_obj.namelist()) - 1, len(attachments))

    return attachments, ""
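# Sketch (assumption; not from the original module): the manifest CSV layout implied by the
# column names read above. The file name, description and keywords are hypothetical.
def _example_manifest_rows():
    import csv
    from StringIO import StringIO
    manifest = ("filename,description,content_type,keywords\n"
                "calibration_report.pdf,Vendor calibration report,application/pdf,\"calibration,qa\"\n")
    return [row for row in csv.DictReader(StringIO(manifest))]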
def advance_lcs(self, resource_id, transition_event):
    """
    Attempt to advance the lifecycle state of a resource.

    @param resource_id       the resource id
    @param transition_event  the lifecycle transition event
    """
    assert(type("") == type(resource_id))
    assert(type(LCE.PLAN) == type(transition_event))

    if LCE.RETIRE == transition_event:
        log.debug("Using RR.retire")
        ret = self.RR.retire(resource_id)
        return ret
    else:
        log.debug("Moving resource life cycle with transition event=%s", transition_event)

        ret = self.RR.execute_lifecycle_transition(resource_id=resource_id,
                                                   transition_event=transition_event)

        log.info("lifecycle transition=%s resulted in lifecycle state=%s",
                 transition_event, str(ret))

        return ret
def insert_spans(self, uuid, spans, cur):
    log.debug("Inserting spans")
    try:
        for span in spans:
            cols, values = self.span_values(uuid, span)
            dic = dict(zip(cols, values))
            if len(cols) > 0:
                span_addr = span.address.get_db_str()
                statement = ''
                if self._span_exists(span_addr):
                    # build an UPDATE statement for a span that already exists
                    statement = ''.join(['UPDATE ', self.span_table_name, ' SET '])
                    for k, v in dic.iteritems():
                        statement = ''.join([statement, k, '=', v, ', '])
                    statement = statement.rstrip(', ')
                    statement = ''.join([statement, " WHERE span_address = '", span_addr, "'"])
                else:
                    # build an INSERT statement for a new span
                    statement = """INSERT into """ + self.span_table_name + """ ("""
                    for col in cols:
                        statement = ''.join([statement, col, ', '])
                    statement = statement.rstrip(', ')
                    statement = ''.join([statement, """) VALUES ("""])
                    for val in values:
                        statement = ''.join([statement, val, ', '])
                    statement = statement.rstrip(', ')
                    statement = ''.join([statement, """)"""])

                log.trace("Inserting span into datastore: %s", statement)
                with self.span_store.pool.cursor(**self.datastore.cursor_args) as cur:
                    cur.execute(statement)
    except Exception as ex:
        log.warn('Unable to insert spans %s %s', str(spans), ex.message)
def publish_device_failed_command_event(self, sub_resource_id, cmd, err_msg):
    """
    PlatformAgent calls this method to publish a DeviceStatusEvent indicating
    that the given child failed to complete the given command.

    @param sub_resource_id resource id of child (included in values)
    @param cmd             command (included in description)
    @param err_msg         error message (included in description)
    """
    values = [sub_resource_id]
    description = "Child device %r failed to complete command from platform %r (%r)" % \
                  (sub_resource_id, self.resource_id, self._platform_id)
    description += ": cmd=%r; err_msg=%r" % (str(cmd), err_msg)

    evt = dict(event_type='DeviceStatusEvent',
               sub_type="device_failed_command",
               origin_type="PlatformDevice",
               origin=self.resource_id,
               values=values,
               description=description)
    try:
        log.debug('%r: publish_device_failed_command_event for %r: %s',
                  self._platform_id, sub_resource_id, evt)
        self._event_publisher.publish_event(**evt)

    except Exception:
        log.exception('%r: platform agent could not publish event: %s',
                      self._platform_id, evt)
def invoke_action(self, component_id, request):
    log.debug(component_id + ' performing: ' + str(request))
    component = self.components[component_id]
    try:
        component.perform_action(request)
    except Exception as ex:
        log.error('request failed: ' + str(request) + ' ' + str(ex), exc_info=True)
def device_failed_command_event(self, evt):
    """
    @todo Handles the device_failed_command event
    """
    # TODO what should be done?
    log.debug("%r: device_failed_command_event: evt=%s", self._platform_id, str(evt))
def launch(self, agent_config, process_definition_id):
    """
    Schedule the launch.
    """
    if isinstance(agent_config, dict) and "instance_id" in agent_config:
        agent_instance_id = agent_config.get("instance_id", None)
        log.debug("Save the agent spawn config to the object store")
        obj_id = "agent_spawncfg_%s" % agent_instance_id

        obj_store = bootstrap.container_instance.object_store
        try:
            obj_store.delete_doc(obj_id)
        except Exception:
            pass
        obj_store.create_doc(agent_config, obj_id)

        config_ref = "objects:%s/" % obj_id
        launch_config = {'process': {'config_ref': config_ref}}
    else:
        launch_config = agent_config

    log.debug("schedule agent process")
    process_schedule = ProcessSchedule(restart_mode=ProcessRestartMode.ABNORMAL,
                                       queueing_mode=ProcessQueueingMode.ALWAYS)

    process_id = self.process_dispatcher_client.schedule_process(
        process_definition_id=process_definition_id,
        schedule=process_schedule,
        configuration=launch_config)

    log.info("AgentLauncher got process id='%s' from process_dispatcher.schedule_process()", process_id)
    self.process_id = process_id
    return process_id
def recv_evt_messages(driver_client):
    """
    A looping function that monitors a ZMQ SUB socket for asynchronous
    driver events. Can be run as a thread or greenlet.

    @param driver_client The client object that launches the thread.
    """
    context = zmq.Context()
    sock = context.socket(zmq.SUB)
    sock.connect(driver_client.event_host_string)
    sock.setsockopt(zmq.SUBSCRIBE, '')
    log.info('Driver client event thread connected to %s.' % driver_client.event_host_string)

    driver_client.stop_event_thread = False
    #last_time = time.time()
    while not driver_client.stop_event_thread:
        try:
            evt = sock.recv_pyobj(flags=zmq.NOBLOCK)
            log.debug('got event: %s' % str(evt))
            if driver_client.evt_callback:
                driver_client.evt_callback(evt)
        except zmq.ZMQError:
            time.sleep(.5)
        #cur_time = time.time()
        #if cur_time - last_time > 5:
        #    log.info('event thread listening')
        #    last_time = cur_time

    sock.close()
    context.term()
    log.info('Client event socket closed.')
def _clean_data_list(self, index):
    """
    Clean up the data list in place so that, if a fragment is consumed by a
    get_next_raw call, the data chunk is removed and added back to the
    non-data list.

    @param index The index that things are being cleared up to
    """
    new_nondata_list = []

    log.debug("Cleaning data chunk, data_chunk_list: %s, nondata_chunk_list: %s",
              self.data_chunk_list, self.nondata_chunk_list)

    # iterate over a copy since chunks are removed from the list as we go
    for (s, e) in list(self.data_chunk_list):
        if (e <= index):
            self.data_chunk_list.remove((s, e))

        if (e > index):
            self.data_chunk_list.remove((s, e))
            # add remaining to non data
            for (nds, nde) in self.nondata_chunk_list:
                if (nde < s):
                    new_nondata_list.append((nds, nde))
                elif (nde == s):
                    new_nondata_list.append((nds, e))
                elif (nde > s):
                    new_nondata_list.append((nds, nde))

    self.nondata_chunk_list = new_nondata_list
def flush(self):
    if self.mode == 'r':
        log.warn('SimplePersistenceLayer not open for writing: mode=%s', self.mode)
        return

    log.debug('Flushing MasterManager...')
    self.master_manager.flush()
def launch(self):
    """
    Launches the simulator process as indicated by _COMMAND.

    @return (rsn_oms, uri) A pair with the CIOMSSimulator instance and the
            associated URI to establish connection with it.
    """
    log.debug("[OMSim] Launching: %s", _COMMAND)

    self._process = self._spawn(_COMMAND)

    if not self._process or not self.poll():
        msg = "[OMSim] Failed to launch simulator: %s" % _COMMAND
        log.error(msg)
        raise Exception(msg)

    log.debug("[OMSim] process started, pid: %s", self.getpid())

    # give it some time to start up
    sleep(5)

    # get URI:
    uri = None
    with open("logs/rsn_oms_simulator.yml", buffering=1) as f:
        # we expect one of the first few lines to be of the form:
        # rsn_oms_simulator_uri=xxxx
        # where xxxx is the uri -- see oms_simulator_server.
        while uri is None:
            line = f.readline()
            # use startswith so lines without the prefix are simply skipped
            # (str.index would raise ValueError on them)
            if line.startswith("rsn_oms_simulator_uri="):
                uri = line[len("rsn_oms_simulator_uri="):].strip()

    self._rsn_oms = CIOMSClientFactory.create_instance(uri)
    return self._rsn_oms, uri
def OpenNode(self, platform_id, node_config_filename):
    """
    Opens up and parses the node configuration files.

    @param platform_id - id to associate with this set of Node Configuration Files
    @param node_config_filename - yaml file with information about the platform
    @raise NodeConfigurationFileException
    """
    self._platform_id = platform_id

    log.debug("%r: Open: %s", self._platform_id, node_config_filename)

    try:
        node_config_file = open(node_config_filename, 'r')
    except Exception as e:
        raise NodeConfigurationFileException(msg="%s Cannot open node specific config file : %s" %
                                                 (str(e), node_config_filename))

    try:
        node_config = yaml.load(node_config_file)
    except Exception as e:
        raise NodeConfigurationFileException(msg="%s Cannot parse yaml node specific config file : %s" %
                                                 (str(e), node_config_filename))

    self.node_meta_data = copy.deepcopy(node_config["node_meta_data"])  # metadata about this specific node
    self.node_port_info = copy.deepcopy(node_config["port_info"])       # port info for this specific node
    self.node_streams = copy.deepcopy(node_config["node_streams"])      # stream info for this specific node
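# Sketch (assumption; not from a real config file): a minimal node configuration with the
# three top-level sections that OpenNode reads above. The section contents are placeholders.
def _example_node_config():
    import yaml
    example_yaml = (
        "node_meta_data:\n"
        "  name: EXAMPLE_NODE\n"        # hypothetical entry
        "port_info:\n"
        "  port_1: {}\n"                # hypothetical entry
        "node_streams:\n"
        "  example_stream: {}\n"        # hypothetical entry
    )
    return yaml.load(example_yaml)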
def test_force_delete_fun(self):
    """
    self is an instance of the tester class
    """
    log.debug("test_force_delete_fun")

    # get objects
    svc = self._utg_getservice()
    testfun = self._utg_getcrudmethod(resource_label, "force_delete")
    myret = sample_resource()

    # configure Mock
    if all_in_one:
        svc.clients.resource_registry.delete.reset_mock()
    if all_in_one:
        svc.clients.resource_registry.lcs_delete.reset_mock()

    svc.clients.resource_registry.read.return_value = myret
    svc.clients.resource_registry.delete.return_value = None
    svc.clients.resource_registry.find_resources.return_value = None
    svc.clients.resource_registry.find_objects.return_value = ([], [])
    svc.clients.resource_registry.find_subjects.return_value = ([], [])

    try:
        testfun("111")
    except TypeError as te:
        # for logic tests that run into mock trouble
        if "'Mock' object is not iterable" != te.message:
            raise te
        elif all_in_one:
            return
        else:
            raise SkipTest("Must test this with INT test")

    svc.clients.resource_registry.delete.assert_called_once_with("111")
def _link_resources_lowlevel(self, subject_id='', association_type='', object_id='', check_duplicates=True):
    """
    Create an association.

    @param subject_id the resource ID of the predefined type
    @param association_type the predicate
    @param object_id the resource ID of the type to be joined
    @param check_duplicates whether to check for an existing association of this exact subj-pred-obj
    @todo check for errors: does RR check for bogus ids?
    """
    assert(type("") == type(subject_id) == type(object_id))

    if check_duplicates:
        dups = self._resource_link_exists(subject_id, association_type, object_id)
        if dups:
            log.debug("Create %s Association from '%s': ALREADY EXISTS",
                      self._assn_name(association_type),
                      self._toplevel_call())
            return dups

    associate_success = self.RR.create_association(subject_id, association_type, object_id)

    log.debug("Create %s Association from '%s': %s",
              self._assn_name(association_type),
              self._toplevel_call(),
              str(associate_success))
    return associate_success
def test_force_delete_bad_wrongtype_fun(self):
    """
    self is an instance of the tester class
    """
    log.debug("test_force_delete_bad_wrongtype_fun")

    # get objects
    svc = self._utg_getservice()
    testfun = self._utg_getcrudmethod(resource_label, "force_delete")
    myret = IonObject(RT.Resource, name="Generic Name")

    # configure Mock
    if all_in_one:
        svc.clients.resource_registry.delete.reset_mock()
    if all_in_one:
        svc.clients.resource_registry.lcs_delete.reset_mock()
    if all_in_one:
        svc.clients.resource_registry.read.reset_mock()

    svc.clients.resource_registry.find_objects.return_value = ([], [])
    svc.clients.resource_registry.find_subjects.return_value = ([], [])
    svc.clients.resource_registry.read.return_value = myret

    try:
        self.assertRaisesRegexp(BadRequest, "type", testfun, "111")
    except TypeError as te:
        # for logic tests that run into mock trouble
        if "'Mock' object is not iterable" != te.message:
            raise te
        elif all_in_one:
            return
        else:
            raise SkipTest("Must test this with INT test")

    self.assertEqual(0, svc.clients.resource_registry.lcs_delete.call_count)
    self.assertEqual(0, svc.clients.resource_registry.delete.call_count)
def _get_array_shape_from_slice(self, slice_):
    """
    Calculates and returns the shape of the slice in each dimension of the total domain

    @param slice_  Requested slice
    @return A tuple object denoting the shape of the slice in each dimension of the total domain
    """
    log.debug('Getting array shape for slice_: %s', slice_)

    vals = self.brick_list.values()
    log.trace('vals: %s', vals)
    if len(vals) == 0:
        return 0

    # Calculate the min and max brick value indices for each dimension
    if len(vals[0][1]) > 1:
        min_len = min([min(*x[0][i]) + 1 for i, x in enumerate(vals)])
        max_len = max([min(*x[0][i]) + min(x[3]) for i, x in enumerate(vals)])
    else:
        min_len = min([min(*x[0]) + 1 for i, x in enumerate(vals)])
        max_len = max([min(*x[0]) + min(x[3]) for i, x in enumerate(vals)])

    maxes = [max_len, min_len]

    # Calculate the shape based on the type of slice_
    shp = []
    for i, s in enumerate(slice_):
        if isinstance(s, int):
            shp.append(1)
        elif isinstance(s, (list, tuple)):
            shp.append(len(s))
        elif isinstance(s, slice):
            shp.append(len(range(*s.indices(maxes[i]))))  # TODO: Does not support n-dimensional

    return tuple(shp)
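# Worked sketch (not from the original module): how the three slice-key types handled above
# map to a shape entry, using a hypothetical dimension extent of 10.
def _example_slice_shapes():
    extent = 10
    # an int key contributes 1; a list/tuple key contributes its length
    assert len([2, 4, 6]) == 3
    # a slice key contributes the number of indices it selects within the extent
    assert len(range(*slice(None).indices(extent))) == 10     # slice(None) covers the whole extent
    assert len(range(*slice(2, 8, 2).indices(extent))) == 3   # selects indices 2, 4, 6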
def openNode(self, platform_id, node_config_filename):
    """
    Opens up and parses the node configuration files.

    @param platform_id - id to associate with this set of Node Configuration Files
    @param node_config_filename - yaml file with information about the platform
    @raise NodeConfigurationFileException
    """
    self._platform_id = platform_id

    log.debug("%r: Open: %s", self._platform_id, node_config_filename)

    try:
        with open(node_config_filename, 'r') as node_config_file:
            try:
                node_config = yaml.load(node_config_file)
            except Exception as e:
                raise NodeConfigurationFileException(msg="%s Cannot parse yaml node specific config file : %s" %
                                                         (str(e), node_config_filename))
    except IOError as e:
        # catch only IOError here so the parse exception raised above is not
        # re-wrapped as a "cannot open" error
        raise NodeConfigurationFileException(msg="%s Cannot open node specific config file : %s" %
                                                 (str(e), node_config_filename))

    self._node_yaml = NodeYAML.factory(node_config)
    self._node_yaml.validate()
def add_device_rollup_statuses_to_computed_attributes(self, device_id, extension_computed, child_device_ids=None):

    rollup_statuses, reason, child_agg_status = \
        self.get_device_rollup_statuses_and_child_agg_status(device_id, child_device_ids)

    log.debug("Got rollup_statuses = %s, reason = %s", rollup_statuses, reason)

    if None is rollup_statuses:
        log.debug("setting status notavailable")
        self.set_status_computed_attributes_notavailable(extension_computed, reason)

        if hasattr(extension_computed, "child_device_status"):
            extension_computed.child_device_status = ComputedDictValue(
                status=ComputedValueAvailability.NOTAVAILABLE,
                reason=reason)
        return None

    # no rolling up necessary for instruments (no child_device_ids list)
    if None is child_device_ids:
        self.set_status_computed_attributes(extension_computed, rollup_statuses,
                                            ComputedValueAvailability.PROVIDED)
        return None

    # get child agg status if we can set child_device_status
    # todo: split into instrument and platform
    if child_agg_status and hasattr(extension_computed, "child_device_status"):
        crushed = dict([(k, self._crush_status_dict(v)) for k, v in child_agg_status.iteritems()])
        extension_computed.child_device_status = ComputedDictValue(
            status=ComputedValueAvailability.PROVIDED,
            value=crushed)

    self.set_status_computed_attributes(extension_computed, rollup_statuses,
                                        ComputedValueAvailability.PROVIDED)

    return child_agg_status
def _validate_driver_config(self):
    """
    Verify the agent configuration contains a driver config.
    Called by the uninitialize_initialize handler in the IA class.
    """
    log.debug("Driver Config: %s", self._dvr_config)
    out = True

    for key in ('startup_config', 'dvr_mod', 'dvr_cls'):
        if key not in self._dvr_config:
            log.error('missing key: %s', key)
            out = False

    for key in ('stream_config', ):
        if key not in self.CFG:
            log.error('missing key: %s', key)
            out = False

    max_records = get_safe(self._dvr_config, 'max_records', 100)
    if max_records < 1:
        log.error('max_records=%d, must be at least 1 or unset (default 100)', max_records)
        out = False

    return out
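# Sketch (assumption; not a real driver): a minimal _dvr_config that would pass the checks in
# _validate_driver_config above, provided self.CFG also carries a 'stream_config' entry.
def _example_valid_dvr_config():
    return {
        'startup_config': {},
        'dvr_mod': 'mi.dataset.example_driver',   # hypothetical module path
        'dvr_cls': 'ExampleDriver',               # hypothetical class name
        'max_records': 50,                        # optional; must be >= 1 (default 100)
    }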
def _create_driver_plugin(self):
    try:
        # Ensure the egg cache directory exists. ooi.reflections will fail
        # somewhat silently when this directory doesn't exist.
        if not os.path.isdir(EGG_CACHE_DIR):
            os.makedirs(EGG_CACHE_DIR)

        log.debug("getting plugin config")
        uri = get_safe(self._dvr_config, 'dvr_egg')
        module_name = self._dvr_config['dvr_mod']
        class_name = self._dvr_config['dvr_cls']
        config = self._dvr_config['startup_config']
    except:
        log.error('error in configuration', exc_info=True)
        raise

    egg_name = None
    egg_repo = None
    memento = self._get_state(DSA_STATE_KEY)

    log.warn("Get driver object: %s, %s, %s, %s", class_name, module_name, egg_name, egg_repo)
    if uri:
        egg_name = uri.split('/')[-1] if uri.startswith('http') else uri
        egg_repo = uri[0:len(uri) - len(egg_name) - 1] if uri.startswith('http') else None

    log.info("instantiate driver plugin %s.%s", module_name, class_name)
    params = [config, memento, self.publish_callback, self.persist_state_callback, self.exception_callback]
    return EGG_CACHE.get_object(class_name, module_name, egg_name, egg_repo, params)
def _create_driver_plugin(self):
    try:
        # Ensure the egg cache directory exists. ooi.reflections will fail
        # somewhat silently when this directory doesn't exist.
        if not os.path.isdir(EGG_CACHE_DIR):
            os.makedirs(EGG_CACHE_DIR)

        log.debug("getting plugin config")
        uri = get_safe(self._dvr_config, 'dvr_egg')
        module_name = self._dvr_config['dvr_mod']
        class_name = self._dvr_config['dvr_cls']
        config = self._dvr_config['startup_config']
    except:
        log.error('error in configuration', exc_info=True)
        raise

    egg_name = None
    egg_repo = None
    memento = self._get_state(DSA_STATE_KEY)

    log.warn("Get driver object: %s, %s, %s, %s, %s", class_name, module_name, egg_name, egg_repo, memento)
    if uri:
        egg_name = uri.split('/')[-1] if uri.startswith('http') else uri
        egg_repo = uri[0:len(uri) - len(egg_name) - 1] if uri.startswith('http') else None

    log.info("instantiate driver plugin %s.%s", module_name, class_name)
    params = [config, memento, self.publish_callback, self.persist_state_callback, self.exception_callback]
    return EGG_CACHE.get_object(class_name, module_name, egg_name, egg_repo, params)
def _get_site_ref_designator_map(self):
    # create a map of site ids to their reference designator codes to facilitate matching
    site_ref_designator_map = {}
    for id, site_obj in self.site_resources.iteritems():
        site_ref_designator_map[site_obj.reference_designator] = id
    log.debug("prepare_activation site_ref_designator_map: %s", site_ref_designator_map)
    return site_ref_designator_map
def test_plot_1():
    from coverage_model.test.examples import SimplexCoverage
    import matplotlib.pyplot as plt

    cov = SimplexCoverage.load('test_data/usgs.cov')

    log.debug('Plot the \'water_temperature\' and \'streamflow\' for all times')

    wtemp = cov.get_parameter_values('water_temperature')
    wtemp_pc = cov.get_parameter_context('water_temperature')
    sflow = cov.get_parameter_values('streamflow')
    sflow_pc = cov.get_parameter_context('streamflow')
    times = cov.get_parameter_values('time')
    time_pc = cov.get_parameter_context('time')

    fig = plt.figure()

    ax1 = fig.add_subplot(2, 1, 1)
    ax1.plot(times, wtemp)
    ax1.set_xlabel('{0} ({1})'.format(time_pc.name, time_pc.uom))
    ax1.set_ylabel('{0} ({1})'.format(wtemp_pc.name, wtemp_pc.uom))

    ax2 = fig.add_subplot(2, 1, 2)
    ax2.plot(times, sflow)
    ax2.set_xlabel('{0} ({1})'.format(time_pc.name, time_pc.uom))
    ax2.set_ylabel('{0} ({1})'.format(sflow_pc.name, sflow_pc.uom))

    plt.show(0)
def _match_devices(device_id):
    # there will not be a port assignment for the top device
    if device_id == self.top_device._id:
        self._validate_models(self.top_site._id, self.top_device._id)
        self.match_list.append((self.top_site._id, self.top_device._id))

    tuple_list = device_tree[device_id]

    for (pt, child_id, ct) in tuple_list:
        log.debug("  tuple - pt: %s  child_id: %s  ct: %s", pt, child_id, ct)

        # match this child device, then if it has children, call _match_devices with this id

        # check that this device is represented in device tree and in port assignments
        if child_id in self.device_resources and child_id in self.deployment_obj.port_assignments:
            platform_port = self.deployment_obj.port_assignments[child_id]
            log.debug("device platform_port: %s", platform_port)

            # validate PlatformPort info for this device
            self._validate_port_assignments(child_id, platform_port)

            if platform_port.reference_designator in site_ref_designator_map:
                matched_site = site_ref_designator_map[platform_port.reference_designator]
                self._validate_models(matched_site, child_id)
                log.info("match_list append site: %s  device: %s", matched_site, child_id)
                self.match_list.append((matched_site, child_id))

                # recurse on the children of this device
                _match_devices(child_id)

        # otherwise this device cannot be matched to a site
        else:
            self.unmatched_device_list.append(child_id)
def _find_existing_relationship(self, site_id, device_id, site_type=None, device_type=None):
    # look for an existing relationship between the site_id and another device.
    # if this site/device pair already exists, we leave it alone
    assert(type("") == type(site_id) == type(device_id))

    log.debug("checking %s/%s pair for deployment", site_type, device_type)

    # return a pair that should be REMOVED, or None
    if site_type is None and site_id in self.site_resources:
        site_type = self.site_resources[site_id].type_

    if device_type is None and device_id in self.device_resources:
        device_type = self.device_resources[device_id].type_

    log.debug("checking existing %s hasDevice %s links", site_type, device_type)

    ret_remove = None
    ret_ignore = None

    try:
        found_device_id = self.enhanced_rr.find_object(site_id, PRED.hasDevice, device_type, True)

        if found_device_id == device_id:
            ret_ignore = (site_id, device_id)
        else:
            ret_remove = (site_id, found_device_id)
            log.warning("%s '%s' already hasDevice %s", site_type, site_id, device_type)

    except NotFound:
        pass

    return ret_remove, ret_ignore
def _generate_driver_config(self):
    # get default config
    driver_config = super(PlatformAgentConfigurationBuilder, self)._generate_driver_config()

    # add port assignments
    port_assignments_raw = {}

    # find the associated Deployment resource for this device
    deployment_obj = self._collect_deployment(self._get_device()._id)
    if deployment_obj:
        self._validate_reference_designator(deployment_obj.port_assignments)
        port_assignments_raw.update(deployment_obj.port_assignments)

    child_device_ids = self._build_child_list()

    # Deployment info for all children must be added to the driver_config of the platform
    for dev_id in child_device_ids:
        deployment_obj = self._collect_deployment(dev_id)
        if deployment_obj:
            self._validate_reference_designator(deployment_obj.port_assignments)
            port_assignments_raw.update(deployment_obj.port_assignments)

    port_assignments = self._serialize_port_assigments(port_assignments_raw)
    log.debug(' port assignments for platform  %s', port_assignments)

    # Create driver config.
    add_driver_config = {
        'ports': port_assignments,
    }

    self._augment_dict("Platform Agent driver_config", driver_config, add_driver_config)

    return driver_config
def _find_existing_relationship(self, site_id, device_id, site_type=None, device_type=None):
    # look for an existing relationship between the site_id and another device.
    # if this site/device pair already exists, we leave it alone
    assert type("") == type(site_id) == type(device_id)

    log.debug("checking %s/%s pair for deployment", site_type, device_type)

    # return a pair that should be REMOVED, or None
    if site_type is None:
        site_type = self.resource_collector.get_resource_type(site_id)

    if device_type is None:
        device_type = self.resource_collector.get_resource_type(device_id)

    log.debug("checking existing %s hasDevice %s links", site_type, device_type)

    ret_remove = None
    ret_ignore = None

    try:
        found_device_id = self.RR2.find_object(site_id, PRED.hasDevice, device_type, True)

        if found_device_id == device_id:
            ret_ignore = (site_id, device_id)
        else:
            ret_remove = (site_id, found_device_id)
            log.info("%s '%s' already hasDevice %s", site_type, site_id, device_type)

    except NotFound:
        pass

    return ret_remove, ret_ignore
def generate_and_notify_event(self):
    if self._index >= len(EventInfo.EVENT_TYPES):
        self._index = 0

    event_type = EventInfo.EVENT_TYPES.values()[self._index]
    self._index += 1

    platform_id = "TODO_some_platform_id"
    message = "%s (synthetic event generated from simulator)" % event_type['name']
    group = event_type['group']
    timestamp = ntplib.system_to_ntp_time(time.time())
    first_time_timestamp = timestamp
    severity = event_type['severity']
    event_instance = {
        'message': message,
        'platform_id': platform_id,
        'timestamp': timestamp,
        'first_time_timestamp': first_time_timestamp,
        'severity': severity,
        'group': group,
    }

    log.debug("notifying event_instance=%s", str(event_instance))
    self._notifier.notify(event_instance)