# NOTE(review): this top-level __init__ appears to be a stray, verbatim
# duplicate of Protocol.__init__ defined below (likely a copy/extraction
# artifact). It references Protocol and self._handler_* methods, so it cannot
# work outside the class -- confirm and remove if redundant.
def __init__(self, driver_event):
    """Build the protocol FSM, driver schema dictionaries and runtime state."""
    super(Protocol, self).__init__(driver_event)
    self._protocol_fsm = ThreadSafeFSM(ProtocolState, ProtocolEvent, ProtocolEvent.ENTER, ProtocolEvent.EXIT)
    # Map each protocol state to its (event, handler) pairs.
    handlers = {
        ProtocolState.UNKNOWN: (
            (ProtocolEvent.ENTER, self._handler_unknown_enter),
            (ProtocolEvent.EXIT, self._handler_unknown_exit),
            (ProtocolEvent.DISCOVER, self._handler_unknown_discover),
        ),
        ProtocolState.COMMAND: (
            (ProtocolEvent.ENTER, self._handler_command_enter),
            (ProtocolEvent.EXIT, self._handler_command_exit),
            (ProtocolEvent.GET, self._handler_get),
            (ProtocolEvent.SET, self._handler_set),
            (ProtocolEvent.START_AUTOSAMPLE, self._handler_command_start_autosample),
        ),
        ProtocolState.AUTOSAMPLE: (
            (ProtocolEvent.ENTER, self._handler_autosample_enter),
            (ProtocolEvent.EXIT, self._handler_autosample_exit),
            (ProtocolEvent.GET, self._handler_get),
            (ProtocolEvent.FLUSH, self._flush),
            (ProtocolEvent.STOP_AUTOSAMPLE, self._handler_autosample_stop_autosample),
        ),
        ProtocolState.WRITE_ERROR: (
            (ProtocolEvent.ENTER, self._handler_write_error_enter),
            (ProtocolEvent.EXIT, self._handler_write_error_exit),
            (ProtocolEvent.CLEAR_WRITE_ERROR, self._handler_clear_write_error),
        )}
    for state in handlers:
        for event, handler in handlers[state]:
            self._protocol_fsm.add_handler(state, event, handler)

    # Build dictionaries for driver schema
    self._build_param_dict()
    self._build_command_dict()
    self._build_driver_dict()

    # State state machine in UNKNOWN state.
    self._protocol_fsm.start(ProtocolState.UNKNOWN)

    self._logs = {}          # open logs keyed by channel identifier
    self._filled_logs = []   # completed logs held until the next flush
    self._pickle_cache = []  # accumulates pickled payload fragments

    # persistent store, cannot initialize until startup config has been applied
    # since we need the address for postgres
    self._persistent_store = None

    # lock for flush actions to prevent writing or altering the data files
    # during flush
    self._lock = Lock()
    self._pktid = 0
class Protocol(InstrumentProtocol):
    """
    Instrument protocol for harvesting packet data from an Antelope ORB.

    Pickled packets delivered by the port agent are reassembled, binned into
    PacketLog objects per channel, flushed to disk on a scheduled FLUSH event
    and published as metadata particles. A persistent store records the last
    processed ORB packet id so streaming resumes where it left off.
    """
    # __metaclass__ = meta

    def __init__(self, driver_event):
        """
        Build the protocol state machine, the driver schema dictionaries and
        the mutable runtime state.
        @param driver_event: driver process event callback
        """
        super(Protocol, self).__init__(driver_event)
        self._protocol_fsm = ThreadSafeFSM(ProtocolState, ProtocolEvent,
                                           ProtocolEvent.ENTER, ProtocolEvent.EXIT)

        # Map each protocol state to its (event, handler) pairs.
        handlers = {
            ProtocolState.UNKNOWN: (
                (ProtocolEvent.ENTER, self._handler_unknown_enter),
                (ProtocolEvent.EXIT, self._handler_unknown_exit),
                (ProtocolEvent.DISCOVER, self._handler_unknown_discover),
            ),
            ProtocolState.COMMAND: (
                (ProtocolEvent.ENTER, self._handler_command_enter),
                (ProtocolEvent.EXIT, self._handler_command_exit),
                (ProtocolEvent.GET, self._handler_get),
                (ProtocolEvent.SET, self._handler_set),
                (ProtocolEvent.START_AUTOSAMPLE, self._handler_command_start_autosample),
            ),
            ProtocolState.AUTOSAMPLE: (
                (ProtocolEvent.ENTER, self._handler_autosample_enter),
                (ProtocolEvent.EXIT, self._handler_autosample_exit),
                (ProtocolEvent.GET, self._handler_get),
                (ProtocolEvent.FLUSH, self._flush),
                (ProtocolEvent.STOP_AUTOSAMPLE, self._handler_autosample_stop_autosample),
            ),
            ProtocolState.WRITE_ERROR: (
                (ProtocolEvent.ENTER, self._handler_write_error_enter),
                (ProtocolEvent.EXIT, self._handler_write_error_exit),
                (ProtocolEvent.CLEAR_WRITE_ERROR, self._handler_clear_write_error),
            ),
        }

        for state in handlers:
            for event, handler in handlers[state]:
                self._protocol_fsm.add_handler(state, event, handler)

        # Build dictionaries for driver schema
        self._build_param_dict()
        self._build_command_dict()
        self._build_driver_dict()

        # Start state machine in UNKNOWN state.
        self._protocol_fsm.start(ProtocolState.UNKNOWN)

        self._logs = {}          # open PacketLog objects keyed by net.location.sta.chan
        self._filled_logs = []   # completed logs held until the next flush
        self._pickle_cache = []  # accumulates pickled payload fragments from the port agent

        # persistent store, cannot initialize until startup config has been applied
        # since we need the address for postgres
        self._persistent_store = None

        # lock for flush actions to prevent writing or altering the data files
        # during flush
        self._lock = Lock()
        self._pktid = 0

    def _filter_capabilities(self, events):
        """
        Filter a list of events to only include valid capabilities
        @param events: list of events to be filtered
        @return: list of filtered events
        """
        return [x for x in events if Capability.has(x)]

    def _build_command_dict(self):
        """
        Populate the command dictionary with commands.
        """
        self._cmd_dict.add(Capability.START_AUTOSAMPLE, display_name="Start Autosample")
        self._cmd_dict.add(Capability.STOP_AUTOSAMPLE, display_name="Stop Autosample")
        self._cmd_dict.add(Capability.GET, display_name="Get")
        self._cmd_dict.add(Capability.SET, display_name="Set")
        self._cmd_dict.add(Capability.DISCOVER, display_name="Discover")
        self._cmd_dict.add(Capability.CLEAR_WRITE_ERROR, display_name="Clear Write Error")

    def _build_param_dict(self):
        """
        Populate the parameter dictionary. All parameters are immutable
        startup parameters supplied via the startup configuration.
        """
        self._param_dict.add(Parameter.REFDES,
                             'NA',
                             str,
                             str,
                             visibility=ParameterDictVisibility.IMMUTABLE,
                             startup_param=True,
                             display_name='Reference Designator',
                             description='Reference Designator for this driver',
                             type=ParameterDictType.STRING)
        self._param_dict.add(Parameter.SOURCE_REGEX,
                             'NA',
                             str,
                             str,
                             visibility=ParameterDictVisibility.IMMUTABLE,
                             startup_param=True,
                             display_name='Source Filter Regex',
                             description='Filter sources to be processed from the ORB',
                             type=ParameterDictType.STRING,
                             value_description='Regular expression')
        self._param_dict.add(Parameter.FLUSH_INTERVAL,
                             'NA',
                             str,
                             str,
                             visibility=ParameterDictVisibility.IMMUTABLE,
                             startup_param=True,
                             display_name='Flush Interval',
                             description='Interval after which all records are flushed to disk',
                             type=ParameterDictType.INT,
                             value_description='Interval, in seconds',
                             units=Units.SECOND)
        self._param_dict.add(Parameter.DB_ADDR,
                             'NA',
                             str,
                             str,
                             visibility=ParameterDictVisibility.IMMUTABLE,
                             startup_param=True,
                             default_value='localhost',
                             display_name='Database Address',
                             description='Postgres database IP address or hostname',
                             type=ParameterDictType.STRING,
                             value_description='IP address or hostname')
        self._param_dict.add(Parameter.DB_PORT,
                             'NA',
                             str,
                             str,
                             visibility=ParameterDictVisibility.IMMUTABLE,
                             startup_param=True,
                             default_value=5432,
                             display_name='Database Port',
                             description='Postgres database port number',
                             type=ParameterDictType.INT,
                             value_description='Integer port number (default 5432)')
        self._param_dict.add(Parameter.FILE_LOCATION,
                             'NA',
                             str,
                             str,
                             visibility=ParameterDictVisibility.IMMUTABLE,
                             startup_param=True,
                             default_value="./antelope_data",
                             display_name='File Location',
                             description='Root file path of the packet data files',
                             type=ParameterDictType.STRING,
                             value_description='String representing the packet data root file path')

    def _build_driver_dict(self):
        """
        Populate the driver dictionary with options
        """
        self._driver_dict.add(DriverDictKey.VENDOR_SW_COMPATIBLE, False)

    def _build_persistent_dict(self):
        """
        Create the postgres-backed persistent store and seed the last
        processed packet id (pktid) with ORBOLDEST when absent.
        Requires REFDES / DB_ADDR / DB_PORT to have been applied first.
        """
        name = 'antelope'
        refdes = self._param_dict.get(Parameter.REFDES)
        host = self._param_dict.get(Parameter.DB_ADDR)
        port = self._param_dict.get(Parameter.DB_PORT)

        self._persistent_store = PersistentStoreDict(name, refdes, host=host, port=port)
        # idiom fix: 'not x in y' -> 'x not in y'
        if 'pktid' not in self._persistent_store:
            self._persistent_store['pktid'] = ORBOLDEST

    def _handler_set(self, *args, **kwargs):
        """No-op: all parameters are immutable startup parameters."""
        pass

    def _update_params(self, *args, **kwargs):
        """No-op: there is no instrument to query for parameter values."""
        pass

    def _set_params(self, *args, **kwargs):
        """
        Set various parameters
        @param args: arglist, should contain a dictionary of parameters/values to be set
        @raises InstrumentParameterException: if no parameter dict is supplied
        """
        try:
            params = args[0]
        except IndexError:
            raise InstrumentParameterException('Set command requires a parameter dict.')

        self._verify_not_readonly(*args, **kwargs)
        old_config = self._param_dict.get_config()

        # all constraints met or no constraints exist, set the values
        for key, value in params.iteritems():
            self._param_dict.set_value(key, value)

        new_config = self._param_dict.get_config()
        if old_config != new_config:
            self._driver_event(DriverAsyncEvent.CONFIG_CHANGE)

        # Set the base directory for the packet data file location.
        PacketLog.base_dir = os.path.join(self._param_dict.get(Parameter.FILE_LOCATION),
                                          self._param_dict.get(Parameter.REFDES))

    def _flush_log(self, _log):
        """
        Flush a single packet log to disk.
        On write failure, publish the error, drop all buffered logs so no
        residual data is flushed later, and return the WRITE_ERROR state
        transition tuple; return None on success.
        """
        try:
            _log.flush()
            return None
        except InstrumentProtocolException as ex:
            self._driver_event(DriverAsyncEvent.ERROR, ex)
            # Ensure the current logs are clear to prevent residual data from being flushed.
            self._logs = {}
            self._filled_logs = []
            return ProtocolState.WRITE_ERROR, (ResourceAgentState.STOPPED, None)

    def _flush(self, close_all=False):
        """
        Flush all open and filled packet logs to disk, update the persistent
        pktid marker and publish one metadata particle per flushed log.
        @param close_all: when True, treat every open log as filled (used on stop)
        @return: (ProtocolState.WRITE_ERROR, (ResourceAgentState.STOPPED, None))
                 on write failure, otherwise (None, None)
        """
        log.info('flush')
        particles = []
        with self._lock:
            log.info('got lock')
            if close_all:
                # Move all open logs to the filled list so their data is released below.
                self._filled_logs.extend(self._logs.values())
                self._logs = {}

            # Open logs: flush but keep them accepting data.
            for _log in self._logs.itervalues():
                error = self._flush_log(_log)
                if error is not None:
                    return error
                particles.append(AntelopeMetadataParticle(_log))

            # Filled logs: flush, then release their buffered data.
            for _log in self._filled_logs:
                error = self._flush_log(_log)
                if error is not None:
                    return error
                particles.append(AntelopeMetadataParticle(_log))
                _log.data = []

            self._filled_logs = []
            log.info('updating persistent store')
            self._persistent_store['pktid'] = self._pktid

        # Publish after releasing the lock; particle generation does not
        # touch the log buffers. (NOTE(review): placement relative to the
        # lock inferred from original layout -- confirm.)
        for particle in particles:
            self._driver_event(DriverAsyncEvent.SAMPLE, particle.generate())
        return None, None

    def _orbstart(self):
        """
        Start the ORB stream: select sources, seek to the last processed
        packet id, then begin streaming.
        """
        self._connection._command_port_agent('orbselect %s' % self._param_dict.get(Parameter.SOURCE_REGEX))
        self._connection._command_port_agent('orbseek %s' % self._persistent_store['pktid'])
        self._connection._command_port_agent('orbstart')

    def _orbstop(self):
        """Stop the ORB stream."""
        self._connection._command_port_agent('orbstop')

    def stop_scheduled_job(self, schedule_job):
        """
        Remove the scheduled job
        @param schedule_job scheduling job.
        """
        if self._scheduler is not None:
            try:
                self._remove_scheduler(schedule_job)
            except KeyError:
                log.warn("_remove_scheduler could not find %s", schedule_job)

    def start_scheduled_job(self, param, schedule_job, protocol_event):
        """
        Add a scheduled job
        @param param: parameter holding the interval, in seconds
        @param schedule_job: scheduler job identifier
        @param protocol_event: protocol event to fire on each interval
        @raises InstrumentParameterException: if the interval is not an integer
        """
        # Remove any existing instance of this job before re-adding it.
        self.stop_scheduled_job(schedule_job)

        val = self._param_dict.get(param)
        try:
            seconds = int(val)
        except ValueError:
            raise InstrumentParameterException('Bad interval. Cannot parse %r as integer' % val)

        if seconds > 0:
            config = {
                DriverConfigKey.SCHEDULER: {
                    schedule_job: {
                        DriverSchedulerConfigKey.TRIGGER: {
                            DriverSchedulerConfigKey.TRIGGER_TYPE: TriggerType.INTERVAL,
                            DriverSchedulerConfigKey.SECONDS: seconds
                        }
                    }
                }
            }
            self.set_init_params(config)
            self._add_scheduler_event(schedule_job, protocol_event)

    def got_data(self, port_agent_packet):
        """
        Reassemble pickled data from the port agent and bin the decoded packet.
        Large pickled objects arrive fragmented; a payload shorter than 65519
        bytes marks the final fragment. (NOTE(review): 65519 presumably is the
        maximum port agent payload size -- confirm against port agent framing.)
        @raises InstrumentProtocolException: if non-pickled data is received
        """
        data_length = port_agent_packet.get_data_length()
        data_type = port_agent_packet.get_header_type()

        if data_type == PortAgentPacket.PICKLED_FROM_INSTRUMENT:
            self._pickle_cache.append(port_agent_packet.get_data())
            # A max-length fragment means more fragments follow.
            if data_length != 65519:
                data = pickle.loads(''.join(self._pickle_cache))
                self._pickle_cache = []
                self._bin_data(data)
        else:
            raise InstrumentProtocolException('Received unpickled data from port agent')

    def got_raw(self, port_agent_packet):
        """Raw data is ignored by this protocol."""
        pass

    def _get_bin(self, packet):
        """
        Compute the time bin boundaries for a packet based on its sample rate:
        lower rates get larger bins. Unknown rates default to 60-second bins.
        @return: (bin_start, bin_end) epoch seconds
        """
        rate_map = {
            1: 86400 * 7,    # 1 week
            8: 86400,        # 1 day
            40: 86400,       # 1 day
            200: 86400,      # 1 day
            64000: 60 * 5,   # 5 minutes
            256000: 60,      # 1 minute
        }
        start_time = packet['time']
        rate = packet['samprate']
        bin_size = rate_map.get(rate, 60)

        bin_value = int(start_time / bin_size)
        bin_start = bin_value * bin_size
        bin_end = (bin_value + 1) * bin_size
        return bin_start, bin_end

    def _bin_data(self, packet):
        """
        Add a packet to its channel's PacketLog, rolling over to a new log
        when the current bin fills or a time gap is detected. Also records
        the packet id for the persistent store.
        """
        key = '%s.%s.%s.%s' % (packet['net'], packet.get('location', ''),
                               packet.get('sta', ''), packet['chan'])
        start, end = self._get_bin(packet)

        with self._lock:
            self._pktid = packet['pktid']

            if key not in self._logs:
                self._logs[key] = PacketLog.from_packet(packet, end)

            try:
                while True:
                    # add_packet returns the residual when the packet spills
                    # past the end of the current bin, otherwise None.
                    packet = self._logs[key].add_packet(packet)
                    if packet is None:
                        break
                    # residual, we need a new bin
                    # log is complete, move to holding list until next flush
                    self._filled_logs.append(self._logs[key])
                    del self._logs[key]
                    # create the new log...
                    start, end = self._get_bin(packet)
                    self._logs[key] = PacketLog.from_packet(packet, end)
            except GapException:
                # non-contiguous data detected, close this log and open a new one
                self._filled_logs.append(self._logs[key])
                del self._logs[key]
                # create the new log
                self._logs[key] = PacketLog.from_packet(packet, end)
                self._logs[key].add_packet(packet)

    ########################################################################
    # UNKNOWN handlers.
    ########################################################################

    def _handler_unknown_enter(self, *args, **kwargs):
        """
        Enter unknown state.
        """
        # Tell driver superclass to send a state change event.
        # Superclass will query the state.
        self._driver_event(DriverAsyncEvent.STATE_CHANGE)

    def _handler_unknown_exit(self, *args, **kwargs):
        """
        Exit unknown state.
        """

    def _handler_unknown_discover(self, *args, **kwargs):
        """
        Discover current state; always COMMAND.
        @return protocol_state, agent_state
        """
        return ProtocolState.COMMAND, ResourceAgentState.IDLE

    ########################################################################
    # COMMAND handlers.
    ########################################################################

    def _handler_command_enter(self, *args, **kwargs):
        """
        Enter command state.
        @throws InstrumentTimeoutException if the device cannot be woken.
        @throws InstrumentProtocolException if the update commands and not recognized.
        """
        self._init_params()
        # We can't build the persistent dict until parameters are applied, so build it here
        if self._persistent_store is None:
            self._build_persistent_dict()
        self._driver_event(DriverAsyncEvent.STATE_CHANGE)

    def _handler_command_exit(self, *args, **kwargs):
        """
        Exit command state.
        """

    def _handler_command_start_autosample(self, *args, **kwargs):
        """
        Switch into autosample mode.
        @return next_state, (next_agent_state, result) if successful.
        """
        result = None

        # Ensure the current logs are clear to prevent residual data from being flushed.
        self._logs = {}
        self._filled_logs = []

        self._orbstart()
        next_state = ProtocolState.AUTOSAMPLE
        next_agent_state = ResourceAgentState.STREAMING
        return next_state, (next_agent_state, result)

    ######################################################
    # AUTOSAMPLE handlers
    ######################################################

    def _handler_autosample_enter(self, *args, **kwargs):
        """
        Enter autosample state. Schedules the periodic FLUSH event.
        """
        self.start_scheduled_job(Parameter.FLUSH_INTERVAL, ScheduledJob.FLUSH, ProtocolEvent.FLUSH)
        self._driver_event(DriverAsyncEvent.STATE_CHANGE)

    def _handler_autosample_exit(self, *args, **kwargs):
        """
        Exit autosample state. Stops the ORB stream and the FLUSH job.
        """
        self._orbstop()
        self.stop_scheduled_job(ScheduledJob.FLUSH)

    def _handler_autosample_stop_autosample(self, *args, **kwargs):
        """
        Stop autosample and switch back to command mode.
        @return next_state, (next_agent_state, result) if successful.
        incorrect prompt received.
        """
        result = None
        # Final flush with close_all; a write failure redirects to WRITE_ERROR.
        states = self._flush(True)
        if states != (None, None):
            next_state, next_agent_state = states
        else:
            next_state = ProtocolState.COMMAND
            next_agent_state = ResourceAgentState.COMMAND
        return next_state, (next_agent_state, result)

    ######################################################
    # WRITE_ERROR handlers
    ######################################################

    def _handler_write_error_enter(self, *args, **kwargs):
        """
        Enter write error state.
        """
        # Tell driver superclass to send a state change event.
        # Superclass will query the state.
        self._driver_event(DriverAsyncEvent.STATE_CHANGE)

    def _handler_write_error_exit(self, *args, **kwargs):
        """
        Exit write error state.
        """
        pass

    def _handler_clear_write_error(self, *args, **kwargs):
        """
        Clear the WRITE_ERROR state by transitioning to the COMMAND state.
        @return next_state, (next_agent_state, result)
        """
        return ProtocolState.COMMAND, (ResourceAgentState.COMMAND, None)