def publish_device_failed_command_event(self, sub_resource_id, cmd, err_msg):
    """
    Publishes a DeviceStatusEvent indicating that the given child failed to
    complete the given command.

    @param sub_resource_id  resource id of child (included in values)
    @param cmd              command (included in description)
    @param err_msg          error message (included in description)
    """
    description = "Child device %r failed to complete command from platform %r (%r)" % \
                  (sub_resource_id, self.resource_id, self._platform_id)
    description += ": cmd=%r; err_msg=%r" % (str(cmd), err_msg)

    evt = dict(event_type='DeviceStatusEvent',
               sub_type="device_failed_command",
               origin_type="PlatformDevice",
               origin=self.resource_id,
               values=[sub_resource_id],
               description=description)
    try:
        log.debug('%r: publish_device_failed_command_event for %r: %s',
                  self._platform_id, sub_resource_id, evt)
        self._event_publisher.publish_event(**evt)
    except Exception:
        # best-effort: a publish failure must not propagate to the caller
        log.exception('%r: platform agent could not publish event: %s',
                      self._platform_id, evt)
def publish_device_removed_event(self, sub_resource_id):
    """
    Publishes a DeviceStatusEvent indicating that the given child has been
    removed from the platform.

    @param sub_resource_id  resource id of child
    """
    evt = dict(event_type='DeviceStatusEvent',
               sub_type="device_removed",
               origin_type="PlatformDevice",
               origin=self.resource_id,
               description="Child device %r has been removed from platform %r (%r)" %
                           (sub_resource_id, self.resource_id, self._platform_id),
               values=[sub_resource_id])
    try:
        log.debug('%r: publish_device_removed_event for %r: %s',
                  self._platform_id, sub_resource_id, evt)
        self._event_publisher.publish_event(**evt)
    except Exception:
        # best-effort: a publish failure must not propagate to the caller
        log.exception('%r: platform agent could not publish event: %s',
                      self._platform_id, evt)
def got_data(self, packet):
    """
    Forward an incoming packet to the protocol layer.

    KeyboardInterrupt is re-raised so the process can still be stopped;
    any other exception is logged and swallowed so a single bad packet
    does not kill the receive loop.
    """
    try:
        self.protocol.got_data(packet)
    except KeyboardInterrupt:
        raise
    except Exception as exc:
        log.exception(exc)
def _device_removed_event(self, evt):
    """
    Handles the device_removed event: removes associated information and
    status updates (which may result in events being published), then
    forwards the event to ancestors with this platform as the origin.
    """
    # the actual child(ren) removed are in the values member of the event;
    # anything else is malformed and is only logged:
    if not isinstance(evt.values, (list, tuple)):
        log.warn("%r: Got device_removed event with invalid values member: %r",
                 self._platform_id, evt)
        return

    # normally a single element, but handle as a collection:
    for sub_resource_id in evt.values:
        self._remove_child(sub_resource_id)

    # forward the event so ancestors also get notified; the only adjustment
    # is that this platform's resource_id becomes the origin:
    forwarded = dict(event_type=evt.type_,
                     sub_type=evt.sub_type,
                     origin_type=evt.origin_type,
                     origin=self.resource_id,
                     description=evt.description,
                     values=evt.values)
    try:
        log.debug('%r: _device_removed_event: forwarding to ancestors: %s',
                  self._platform_id, forwarded)
        self._event_publisher.publish_event(**forwarded)
    except Exception:
        log.exception('%r: platform agent could not publish event: %s',
                      self._platform_id, forwarded)
def _device_removed_event(self, evt):
    """
    Handles the device_removed event to remove associated information and
    status updates, which may result in events being published.
    """
    # the actual child removed is in the values component of the event:
    if isinstance(evt.values, (list, tuple)):
        # normally it will be just one element but handle as a collection:
        for sub_resource_id in evt.values:
            self._remove_child(sub_resource_id)
    else:
        log.warn("%r: Got device_removed event with invalid values member: %r",
                 self._platform_id, evt)
        return

    # finally forward the event so ancestors also get notified; the only
    # adjustment is that my platform's resource_id is now the origin:
    outgoing = dict(event_type=evt.type_,
                    sub_type=evt.sub_type,
                    origin_type=evt.origin_type,
                    origin=self.resource_id,
                    description=evt.description,
                    values=evt.values)
    try:
        log.debug('%r: _device_removed_event: forwarding to ancestors: %s',
                  self._platform_id, outgoing)
        self._event_publisher.publish_event(**outgoing)
    except Exception:
        log.exception('%r: platform agent could not publish event: %s',
                      self._platform_id, outgoing)
def on_message(self, body, message):
    """
    Consumer callback: forward the message body to QPID and ack it.

    On any failure the message is requeued for redelivery, and the QPID
    sender is cleared so it is re-established on the next send attempt.
    """
    try:
        self.qpid.send(str(body), message.headers)
        message.ack()
    except Exception:
        # exception object was bound but unused; log.exception already
        # records the full traceback
        log.exception("Exception while publishing message to QPID, requeueing")
        message.requeue()
        self.qpid.sender = None
def on_message(self, body, message):
    """
    Consumer callback: forward the message body to QPID and ack it.

    On any failure the message is requeued for redelivery, and the QPID
    sender is cleared so it is re-established on the next send attempt.
    """
    try:
        self.qpid.send(str(body), message.headers)
        message.ack()
    except Exception:
        # exception object was bound but unused; log.exception already
        # records the full traceback
        log.exception('Exception while publishing message to QPID, requeueing')
        message.requeue()
        self.qpid.sender = None
def _publish(self, events, headers):
    """
    Verify each event is JSON-serializable and track publish totals.

    Events that fail serialization are logged; the running total is still
    advanced by the full batch size.

    NOTE(review): despite the name, no actual transport send happens in
    this block — presumably the real publish is implemented elsewhere
    (e.g. a subclass); confirm against callers.
    """
    for event in events:
        try:
            json.dumps(event)
        except (ValueError, UnicodeDecodeError) as err:
            log.exception('Unable to publish event: %r %r', event, err)

    batch_size = len(events)
    self.total += batch_size
    log.info('Publish %d events (%d total)', batch_size, self.total)
def delete_datastore(self, datastore_name=None):
    """
    Delete the named datastore (Couchbase bucket).

    @param datastore_name  name of the datastore to delete
    @raises NotFound       if the bucket is unavailable / does not exist
    @raises ServerError    on any other Couchbase error
    """
    try:
        super(CouchbaseDataStore, self).delete_datastore(datastore_name)
    except BucketUnavailableException as e:
        msg = 'Couchbase unable to delete bucket named %s on %s. Exception: %s ' % (
            datastore_name, e.parameters.get('host', None), e._message)
        raise NotFound(msg)
    except Exception:
        # Couchbase generates a generic JSON exception when trying to
        # delete a non-existent bucket; map anything unexpected to a
        # ServerError after logging the traceback.
        log.exception("Couchbase error")
        raise ServerError('Couchbase returned unknown error')
def _clear_postgres(config, prefix, verbose=False, sysname=None): cfg_copy = dict(config) if "password" in cfg_copy: cfg_copy["password"] = "******" if "admin_password" in cfg_copy: cfg_copy["admin_password"] = "******" print 'clear_couch: Clearing PostgreSQL databases using config=', cfg_copy import getpass db_name = prefix if not sysname else sysname + "_" + config.get('database', 'ion') username = config.get("admin_username", None) or getpass.getuser() password = config.get("admin_password", None) or "" host = config.get('host', None) or 'localhost' port = str(config.get('port', None) or '5432') default_database = config.get('default_database', None) or 'postgres' import psycopg2 from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT dsn = "host=%s port=%s dbname=%s user=%s password=%s" % (host, port, default_database, username, password) with psycopg2.connect(dsn) as conn: print "clear_couch: Connected to PostgreSQL as:", dsn.rsplit("=", 1)[0] + "=***" conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT) with conn.cursor() as cur: cur.execute("SELECT pid, datname FROM pg_stat_activity") rows = cur.fetchall() conn_by_db = {} for row in rows: conn_id, dbn = row[0], row[1] conn_by_db.setdefault(dbn, []).append(conn_id) print "clear_couch: Found %s open connections" % len(rows) cur.execute("SELECT datname FROM pg_database") rows = cur.fetchall() ignored_num = 0 for row in rows: try: db_name = row[0] if (prefix == '*' and not db_name.startswith('_')) or db_name.lower().startswith(prefix.lower()): print "clear_couch: (PostgreSQL) DROP DATABASE", db_name if conn_by_db.get(db_name, None): for conn_id in conn_by_db[db_name]: cur.execute("SELECT pg_terminate_backend(%s)", (conn_id, )) print "clear_couch: Dropped %s open connections to database '%s'" % (len(conn_by_db[db_name]), db_name) cur.execute("DROP DATABASE %s" % db_name) else: ignored_num += 1 except Exception as ex: log.exception("") print "Could not drop database '%s'" % db_name print 'clear_couch: Ignored 
%s existing databases' % ignored_num
def refresh_viewset(self, design, datastore_name=None):
    """
    Triggers the rebuild of a design document (set of views).

    @param design          design document (set of views) to refresh
    @param datastore_name  datastore to use; defaults to the current one
    """
    ds, datastore_name = self._get_datastore(datastore_name)
    doc_id = self._get_design_name(design)
    try:
        design_doc = ds[doc_id]
        # Querying any one view forces the whole design doc to reindex.
        # next(iter(...)) instead of .keys()[0]: avoids materializing the
        # key list and also works on Python 3 dict views.
        view_name = next(iter(design_doc["views"]))
        ds.view(self._get_view_name(design, view_name))
    except Exception:
        # best-effort refresh: a missing/empty design doc is only logged
        log.exception("Problem with design %s/%s", datastore_name, doc_id)
def refresh_views(self, design, datastore_name=None):
    """
    Triggers the rebuild of a design document (set of views).

    @param design          design document (set of views) to refresh
    @param datastore_name  datastore to use; defaults to the current one
    """
    ds, datastore_name = self._get_datastore(datastore_name)
    doc_id = self._get_design_name(design)
    try:
        design_doc = ds[doc_id]
        # Querying any one view forces the whole design doc to reindex.
        # next(iter(...)) instead of .keys()[0]: avoids materializing the
        # key list and also works on Python 3 dict views.
        view_name = next(iter(design_doc["views"]))
        ds.view(self._get_view_name(design, view_name))
    except Exception:
        # best-effort refresh: a missing/empty design doc is only logged
        log.exception("Problem with design %s/%s", datastore_name, doc_id)
def main():
    """
    This main routine will get the configuration file from the command line
    parameter and set the values for required URIs for the OMS, the OMS
    Alert Alarm Server and the qpid Server. It will then get the qpid
    publisher for publishing the OMS events. Finally, it will start the
    web service.
    """
    global aa_publisher

    options = docopt(__doc__)
    server_config_file = options['<server_config>']
    # close the config file deterministically (was yaml.load(open(...)))
    with open(server_config_file) as f:
        # NOTE(review): yaml.load without a Loader executes arbitrary YAML
        # tags; prefer yaml.safe_load unless custom tags are required --
        # confirm before changing.
        config = yaml.load(f)

    oms_uri = config.get('oms_uri')
    alert_alarm_server_uri = config.get('alert_alarm_server_uri')
    qpid_uri = config.get('qpid_uri')

    if not all((oms_uri, alert_alarm_server_uri, qpid_uri)):
        # fixed doubled wording ("Missing ... values missing from")
        log.error('Mandatory configuration values missing from %s', server_config_file)
    else:
        headers = {'aaServerUri': alert_alarm_server_uri}
        try:
            aa_publisher = Publisher.from_url(qpid_uri, headers)
            start_web_service(oms_uri, alert_alarm_server_uri)
        except Exception as ex:
            log.exception('Error starting OMS Alert and Alarm web service: %r', ex)

    log.info('Stopping OMS Alert & Alarm Web Service.\n')
def _device_added_event(self, evt):
    """
    Handles the device_added event to do all related preparations and
    updates statuses, then re-publishes the event so ancestors are
    notified as well.
    """
    # the event's own origin must always be included:
    self._prepare_new_child(evt.origin)

    # the actual child(ren) added are in the values member of the event;
    # anything else is malformed and is only logged:
    if not isinstance(evt.values, (list, tuple)):
        log.warn("%r: Got device_added event with invalid values member: %r",
                 self._platform_id, evt)
        return

    # normally a single element, but handle as a collection:
    for sub_resource_id in evt.values:
        self._prepare_new_child(sub_resource_id)

    # re-publish; the only adjustment is that I'm now the origin:
    forwarded = dict(event_type=evt.type_,
                     sub_type=evt.sub_type,
                     origin_type=evt.origin_type,
                     origin=self.resource_id,
                     description=evt.description,
                     values=evt.values)
    try:
        log.debug('%r: _device_added_event: re-publishing: %s',
                  self._platform_id, forwarded)
        self._event_publisher.publish_event(**forwarded)
    except Exception:
        log.exception('%r: platform agent could not publish event: %s',
                      self._platform_id, forwarded)
def _device_added_event(self, evt):
    """
    Handles the device_added event to do all related preparations and
    updates statuses.
    """
    # look at the event's origin itself to make sure it is included:
    self._prepare_new_child(evt.origin)

    # the actual child added is in the values component of the event:
    if isinstance(evt.values, (list, tuple)):
        # normally it will be just one element:
        for sub_resource_id in evt.values:
            self._prepare_new_child(sub_resource_id)
    else:
        log.warn("%r: Got device_added event with invalid values member: %r",
                 self._platform_id, evt)
        return

    # finally re-publish the event so ancestors also get notified; the
    # only adjustment is that now I'm the origin:
    outgoing = dict(event_type=evt.type_,
                    sub_type=evt.sub_type,
                    origin_type=evt.origin_type,
                    origin=self.resource_id,
                    description=evt.description,
                    values=evt.values)
    try:
        log.debug('%r: _device_added_event: re-publishing: %s',
                  self._platform_id, outgoing)
        self._event_publisher.publish_event(**outgoing)
    except Exception:
        log.exception('%r: platform agent could not publish event: %s',
                      self._platform_id, outgoing)
def _restore_resource(self, state, prev_state):
    """
    Restore agent/resource configuration and state.

    Walks the FSM through the event sequence needed to reach the target
    state, verifying the resulting state after each leg; any mismatch
    raises and is logged as a failed restore.

    @param state       the state to restore
    @param prev_state  the state prior to `state`; used as the target when
                       `state` is LOST_CONNECTION
    """
    log.debug("starting agent restore process, State: %s, Prev State: %s",
              state, prev_state)

    # Get state to restore. If the last state was lost connection,
    # use the prior connected state.
    if not state:
        log.debug("State not defined, not restoring")
        return
    if state == ResourceAgentState.LOST_CONNECTION:
        state = prev_state

    try:
        cur_state = self._fsm.get_current_state()

        # If uninitialized, confirm and do nothing.
        if state == ResourceAgentState.UNINITIALIZED:
            if cur_state != state:
                raise Exception()

        # If inactive, initialize and confirm.
        elif state == ResourceAgentState.INACTIVE:
            self._fsm.on_event(ResourceAgentEvent.INITIALIZE)
            cur_state = self._fsm.get_current_state()
            if cur_state != state:
                raise Exception()

        # If idle, initialize, activate and confirm.
        elif state == ResourceAgentState.IDLE:
            self._fsm.on_event(ResourceAgentEvent.INITIALIZE)
            self._fsm.on_event(ResourceAgentEvent.GO_ACTIVE)
            cur_state = self._fsm.get_current_state()
            if cur_state != state:
                raise Exception()

        # If streaming, initialize, activate and confirm.
        # Driver discover should put us in streaming mode.
        elif state == ResourceAgentState.STREAMING:
            self._fsm.on_event(ResourceAgentEvent.INITIALIZE)
            self._fsm.on_event(ResourceAgentEvent.GO_ACTIVE)
            self._fsm.on_event(ResourceAgentEvent.RUN)
            self._fsm.on_event(ResourceAgentEvent.EXECUTE_RESOURCE,
                               DriverEvent.START_AUTOSAMPLE)
            cur_state = self._fsm.get_current_state()
            if cur_state != state:
                raise Exception()

        # If command, initialize, activate, confirm idle,
        # run and confirm command.
        elif state == ResourceAgentState.COMMAND:
            self._fsm.on_event(ResourceAgentEvent.INITIALIZE)
            self._fsm.on_event(ResourceAgentEvent.GO_ACTIVE)
            cur_state = self._fsm.get_current_state()
            if cur_state != ResourceAgentState.IDLE:
                raise Exception()
            self._fsm.on_event(ResourceAgentEvent.RUN)
            cur_state = self._fsm.get_current_state()
            if cur_state != state:
                raise Exception()

        # If paused, initialize, activate, confirm idle,
        # run, confirm command, pause and confirm stopped.
        elif state == ResourceAgentState.STOPPED:
            self._fsm.on_event(ResourceAgentEvent.INITIALIZE)
            self._fsm.on_event(ResourceAgentEvent.GO_ACTIVE)
            cur_state = self._fsm.get_current_state()
            if cur_state != ResourceAgentState.IDLE:
                raise Exception()
            self._fsm.on_event(ResourceAgentEvent.RUN)
            cur_state = self._fsm.get_current_state()
            if cur_state != ResourceAgentState.COMMAND:
                raise Exception()
            self._fsm.on_event(ResourceAgentEvent.PAUSE)
            cur_state = self._fsm.get_current_state()
            if cur_state != state:
                raise Exception()

        # If in a command reachable substate, attempt to return to command.
        # Initialize, activate, confirm idle, run, confirm command.
        elif state in [ResourceAgentState.TEST,
                       ResourceAgentState.CALIBRATE,
                       ResourceAgentState.DIRECT_ACCESS,
                       ResourceAgentState.BUSY]:
            self._fsm.on_event(ResourceAgentEvent.INITIALIZE)
            self._fsm.on_event(ResourceAgentEvent.GO_ACTIVE)
            cur_state = self._fsm.get_current_state()
            if cur_state != ResourceAgentState.IDLE:
                raise Exception()
            self._fsm.on_event(ResourceAgentEvent.RUN)
            cur_state = self._fsm.get_current_state()
            if cur_state != ResourceAgentState.COMMAND:
                raise Exception()

        # If active unknown, return to active unknown or command if
        # possible. Initialize, activate, confirm active unknown, else
        # confirm idle, run, confirm command.
        elif state == ResourceAgentState.ACTIVE_UNKNOWN:
            self._fsm.on_event(ResourceAgentEvent.INITIALIZE)
            self._fsm.on_event(ResourceAgentEvent.GO_ACTIVE)
            cur_state = self._fsm.get_current_state()
            if cur_state == ResourceAgentState.ACTIVE_UNKNOWN:
                return
            elif cur_state != ResourceAgentState.IDLE:
                raise Exception()
            self._fsm.on_event(ResourceAgentEvent.RUN)
            cur_state = self._fsm.get_current_state()
            if cur_state != ResourceAgentState.COMMAND:
                raise Exception()

        else:
            log.error('Instrument agent %s error restoring unhandled state %s, current state %s.',
                      self.id, state, cur_state)

    except Exception as ex:
        # bare Exception() raises above are sentinels meaning "FSM did not
        # land in the expected state"; they all funnel here
        log.error('Instrument agent %s error restoring state %s, current state %s, exception %s.',
                  self.id, state, cur_state, str(ex))
        log.exception('###### Agent restore stack trace:')

    else:
        log.debug('Instrument agent %s restored state %s = %s.',
                  self.id, state, cur_state)
def describe_deployments(self, deployments, status_map=None):
    """
    For a list of deployment IDs, generate a list of dicts with information
    about the deployments suitable for the UI table:
    [ { 'ui_column': 'string_value'... } , ...]

    @param deployments  list of Deployment resource objects
    @param status_map   map of device id to device status dict; may also
                        carry a "_system"/"devices" entry describing the
                        site structure
    @retval list with Deployment info dicts coindexed with argument
            deployments list
    """
    dep_info_list = []
    dep_site_map, dep_dev_map = self.get_deployments_relations(
        deployments, return_objects=True)
    # site structure (site id -> associations) piggybacks on status_map:
    site_structure = status_map.get("_system", {}).get(
        "devices", None) if status_map else None
    dep_by_id = {}
    for dep in deployments:
        dep_info = {}
        dep_info_list.append(dep_info)
        dep_by_id[dep._id] = dep_info

        # Set temporal bounds (epoch-seconds strings -> formatted UTC)
        temp_const = self.get_temporal_constraint(dep)
        if temp_const:
            dep_info['start_time'] = time.strftime(
                TIME_FORMAT, time.gmtime(float(temp_const.start_datetime))
            ) if temp_const.start_datetime else ""
            dep_info['end_time'] = time.strftime(
                TIME_FORMAT, time.gmtime(float(temp_const.end_datetime))
            ) if temp_const.end_datetime else ""
        else:
            dep_info['start_time'] = dep_info['end_time'] = ""

        # Set device information
        device_obj = dep_dev_map.get(dep._id, None)
        if device_obj:
            dep_info['device_id'] = device_obj._id
            dep_info['device_name'] = device_obj.name
            dep_info['device_type'] = device_obj.type_
            dep_info['device_status'] = status_map.get(
                device_obj._id, {}).get("agg", DeviceStatusType.STATUS_UNKNOWN)
        else:
            log.warn("Deployment %s has no Device", dep._id)
            dep_info['device_id'] = dep_info['device_name'] = dep_info[
                'device_type'] = dep_info['device_status'] = None

        # Set site information
        site_obj = dep_site_map.get(dep._id, None)
        if site_obj:
            dep_info['site_id'] = site_obj._id
            dep_info['site_name'] = site_obj.name
            dep_info['site_type'] = site_obj.type_
        else:
            log.warn("Deployment %s has no Site", dep._id)
            dep_info['site_id'] = dep_info['site_name'] = dep_info[
                'site_type'] = None

        # Set status information
        if status_map and dep.lcstate == LCS.DEPLOYED:
            dep_info["is_primary"] = DeviceStatusType.STATUS_OK
            if site_structure and site_obj and device_obj and site_obj._id in site_structure:
                try:
                    # Additionally check deployment date
                    now = time.time()
                    if temp_const and (
                            now < float(temp_const.start_datetime) or
                            now > float(temp_const.end_datetime)):
                        dep_info[
                            "is_primary"] = DeviceStatusType.STATUS_WARNING

                    # Additionally check assoc between site and device
                    site_deps = site_structure[site_obj._id]
                    if not any(True for st, did, dt in site_deps
                               if did == device_obj._id and dt in
                               (RT.PlatformDevice, RT.InstrumentDevice)):
                        dep_info[
                            "is_primary"] = DeviceStatusType.STATUS_WARNING
                except Exception:
                    # malformed site structure must not break the listing
                    log.exception("Error determining site structure")
        else:
            dep_info["is_primary"] = DeviceStatusType.STATUS_UNKNOWN

        # Set site parent - seems unused currently, not gonna bother
        # NOTE(review): parent_site_obj is always None, so the if-branch
        # below is unreachable (kept deliberately per the comment above)
        parent_site_obj = None
        if parent_site_obj:
            dep_info['parent_site_id'] = parent_site_obj._id
            dep_info['parent_site_name'] = parent_site_obj.name
            dep_info[
                'parent_site_description'] = parent_site_obj.description
        else:
            #log.warn("Deployment %s has no parent Site", dep._id)
            dep_info['parent_site_id'] = dep_info[
                'parent_site_name'] = dep_info[
                'parent_site_description'] = None

    return dep_info_list
def describe_deployments(self, deployments, status_map=None):
    """
    For a list of deployment IDs, generate a list of dicts with information
    about the deployments suitable for the UI table:
    [ { 'ui_column': 'string_value'... } , ...]

    @param deployments  list of Deployment resource objects
    @param status_map   map of device id to device status dict; may also
                        carry a "_system"/"devices" entry describing the
                        site structure
    @retval list with Deployment info dicts coindexed with argument
            deployments list
    """
    dep_info_list = []
    dep_site_map, dep_dev_map = self.get_deployments_relations(deployments, return_objects=True)
    # site structure (site id -> associations) piggybacks on status_map:
    site_structure = status_map.get("_system", {}).get("devices", None) if status_map else None
    dep_by_id = {}
    for dep in deployments:
        dep_info = {}
        dep_info_list.append(dep_info)
        dep_by_id[dep._id] = dep_info

        # Set temporal bounds (epoch-seconds strings -> formatted UTC)
        temp_const = self.get_temporal_constraint(dep)
        if temp_const:
            dep_info['start_time'] = time.strftime(TIME_FORMAT, time.gmtime(
                float(temp_const.start_datetime))) if temp_const.start_datetime else ""
            dep_info['end_time'] = time.strftime(TIME_FORMAT, time.gmtime(
                float(temp_const.end_datetime))) if temp_const.end_datetime else ""
        else:
            dep_info['start_time'] = dep_info['end_time'] = ""

        # Set device information
        device_obj = dep_dev_map.get(dep._id, None)
        if device_obj:
            dep_info['device_id'] = device_obj._id
            dep_info['device_name'] = device_obj.name
            dep_info['device_type'] = device_obj.type_
            dep_info['device_status'] = status_map.get(device_obj._id,{}).get("agg", DeviceStatusType.STATUS_UNKNOWN)
        else:
            log.warn("Deployment %s has no Device", dep._id)
            dep_info['device_id'] = dep_info['device_name'] = dep_info['device_type'] = dep_info['device_status'] = None

        # Set site information
        site_obj = dep_site_map.get(dep._id, None)
        if site_obj:
            dep_info['site_id'] = site_obj._id
            dep_info['site_name'] = site_obj.name
            dep_info['site_type'] = site_obj.type_
        else:
            log.warn("Deployment %s has no Site", dep._id)
            dep_info['site_id'] = dep_info['site_name'] = dep_info['site_type'] = None

        # Set status information
        if status_map and dep.lcstate == LCS.DEPLOYED:
            dep_info["is_primary"] = DeviceStatusType.STATUS_OK
            if site_structure and site_obj and device_obj and site_obj._id in site_structure:
                try:
                    # Additionally check deployment date
                    now = time.time()
                    if temp_const and (now < float(temp_const.start_datetime) or now > float(temp_const.end_datetime)):
                        dep_info["is_primary"] = DeviceStatusType.STATUS_WARNING

                    # Additionally check assoc between site and device
                    site_deps = site_structure[site_obj._id]
                    if not any(True for st, did, dt in site_deps if did == device_obj._id and dt in (RT.PlatformDevice, RT.InstrumentDevice)):
                        dep_info["is_primary"] = DeviceStatusType.STATUS_WARNING
                except Exception:
                    # malformed site structure must not break the listing
                    log.exception("Error determining site structure")
        else:
            dep_info["is_primary"] = DeviceStatusType.STATUS_UNKNOWN

        # Set site parent - seems unused currently, not gonna bother
        # NOTE(review): parent_site_obj is always None, so the if-branch
        # below is unreachable (kept deliberately per the comment above)
        parent_site_obj = None
        if parent_site_obj:
            dep_info['parent_site_id'] = parent_site_obj._id
            dep_info['parent_site_name'] = parent_site_obj.name
            dep_info['parent_site_description'] = parent_site_obj.description
        else:
            #log.warn("Deployment %s has no parent Site", dep._id)
            dep_info['parent_site_id'] = dep_info['parent_site_name'] = dep_info['parent_site_description'] = None

    return dep_info_list