class TestUnitProtocolCommandDict(TestUnitStringsDict):
    """
    Test cases for instrument driver class. Functions in this class provide
    instrument driver unit tests and provide a tutorial on use of the driver
    interface.
    """
    __test__ = True

    def setUp(self):
        # Build the command dictionary two different ways so both creation
        # paths are exercised:
        #   cmd1 - keyword arguments straight into ProtocolCommandDict.add()
        #   cmd2 - explicitly constructed Command/CommandArgument objects
        #          (kept on self so the sub-schema tests can use them directly)
        self.cmd_dict = ProtocolCommandDict()

        self.cmd_dict.add("cmd1",
                          timeout=60,
                          display_name="Command 1",
                          description="Execute a foo on the instrument",
                          return_type="bool",
                          return_units="Success",
                          return_description="Success (true) or failure (false)",
                          arguments=[
                              CommandArgument(
                                  name="coeff",
                                  required=True,
                                  display_name="coefficient",
                                  description="The coefficient to use for calculation",
                                  type=CommandDictType.FLOAT,
                                  value_description="Should be between 1.97 and 2.34"
                              ),
                              CommandArgument(
                                  name="delay",
                                  required=False,
                                  display_name="delay time",
                                  description="The delay time to wait before executing",
                                  type=CommandDictType.FLOAT,
                                  units="seconds",
                                  value_description="Should be between 1.0 and 3.3 in increments of 0.1"
                              )
                          ]
                          )
        # different way of creating things, possibly more clear in some cases
        # and allows for testing arg and command later
        self.cmd2_arg1 = CommandArgument(name="trigger",
                                         required=True,
                                         display_name="sensor trigger",
                                         description="The trigger value to use for calculation",
                                         type=CommandDictType.INT,
                                         value_description="Should be between 1 and 20"
                                         )
        self.cmd2 = Command("cmd2",
                            display_name="Command 2",
                            description="The second test command",
                            return_type=CommandDictType.INT,
                            return_units="counts",
                            return_description="The number of items encountered during the run.",
                            arguments=[self.cmd2_arg1])
        self.cmd_dict.add_command(self.cmd2)

        self.param_dict = self.cmd_dict  # link for ease of parent class operation

        # Expected json.dumps(..., indent=4, sort_keys=True) output for a
        # single argument (cmd2's "trigger").
        self.target_arg_schema = """{
    "description": "The trigger value to use for calculation",
    "display_name": "sensor trigger",
    "required": true,
    "value": {
        "description": "Should be between 1 and 20",
        "type": "int"
    }
}"""

        # Expected serialized form of cmd2 alone. Note the timeout of 10 is
        # the Command default (no timeout was passed to the constructor).
        self.target_cmd_schema = """{
    "arguments": {
        "trigger": {
            "description": "The trigger value to use for calculation",
            "display_name": "sensor trigger",
            "required": true,
            "value": {
                "description": "Should be between 1 and 20",
                "type": "int"
            }
        }
    },
    "description": "The second test command",
    "display_name": "Command 2",
    "return": {
        "description": "The number of items encountered during the run.",
        "type": "int",
        "units": "counts"
    },
    "timeout": 10
}"""

        # Expected serialized form of the entire command dictionary.
        self.target_schema = """{
    "cmd1": {
        "arguments": {
            "coeff": {
                "description": "The coefficient to use for calculation",
                "display_name": "coefficient",
                "required": true,
                "value": {
                    "description": "Should be between 1.97 and 2.34",
                    "type": "float"
                }
            },
            "delay": {
                "description": "The delay time to wait before executing",
                "display_name": "delay time",
                "required": false,
                "value": {
                    "description": "Should be between 1.0 and 3.3 in increments of 0.1",
                    "type": "float",
                    "units": "seconds"
                }
            }
        },
        "description": "Execute a foo on the instrument",
        "display_name": "Command 1",
        "return": {
            "description": "Success (true) or failure (false)",
            "type": "bool",
            "units": "Success"
        },
        "timeout": 60
    },
    "cmd2": {
        "arguments": {
            "trigger": {
                "description": "The trigger value to use for calculation",
                "display_name": "sensor trigger",
                "required": true,
                "value": {
                    "description": "Should be between 1 and 20",
                    "type": "int"
            }
        }
    },
        "description": "The second test command",
        "display_name": "Command 2",
        "return": {
            "description": "The number of items encountered during the run.",
            "type": "int",
            "units": "counts"
        },
        "timeout": 10
    }
}"""

        # YAML metadata overlay used by the parent class's metadata-change
        # test (_assert_metadata_change). Deliberately contains a bogus
        # command ('bad_command') and a bogus argument (cmd2's 'test') to
        # verify that unknown entries are ignored.
        self.test_yaml = '''
        parameters: {
          dummy: stuff
          }
        commands: {
          bad_command: {
            description: "bad command"
            },
          cmd1: {
            arguments: {
              coeff: {
                description: "Cmd1Coeff",
                display_name: "C1co",
                value: {
                  description: "C1coDesc",
                  units: "counts",
                  type: "float"
                  }
                },
              },
            description: "C1Desc",
            display_name: "C1",
            return: {
              description: "C1Ret",
              type: "C1RetType",
              units: "C1RetUnit"
              },
            },
          cmd2: {
            arguments: {
              trigger: {
                description: "C2TriggerDesc",
                display_name: "C2TriggerDisp",
                value: {
                  description: "C2TriggerValueDesc",
                  type: "C2TriggerType",
                  units: "C2Units"
                  }
                },
              test: {
                description: "C2TestDesc",
                display_name: "C2TestDisp",
                value: {
                  description: "C2TestValueDesc",
                  type: "C2TestType",
                  units: "C2TestUnits"
                  }
                },
              },
            description: "C2Desc",
            display_name: "C2Disp",
            return: {
              description: "C2RetDesc",
              type: "C2RetType",
              units: "C2RetUnits"
              },
            }
        }
        '''

    def test_sub_schema_generation(self):
        """
        Test that a single argument and a single command serialize to the
        expected JSON on their own (before whole-dict generation).
        """
        result_dict = self.cmd2_arg1.generate_dict()
        self.assertEqual(json.dumps(result_dict, indent=4, sort_keys=True),
                         self.target_arg_schema)
        result_dict = self.cmd2.generate_dict()
        self.assertEqual(json.dumps(result_dict, indent=4, sort_keys=True),
                         self.target_cmd_schema)

    def test_schema_dict_generation(self):
        """
        Tests that a dict is created that can then be JSONified
        """
        result = self.cmd_dict.generate_dict()
        json_result = json.dumps(result, indent=4, sort_keys=True)
        self.assertEqual(json_result, self.target_schema)

    def test_empty_schema(self):
        # An empty command dictionary must serialize to an empty dict.
        self.cmd_dict = ProtocolCommandDict()
        result = self.cmd_dict.generate_dict()
        self.assertEqual(result, {})

    def test_argument_exceptions(self):
        """
        Command must reject an arguments value that is not a list of
        CommandArgument objects.
        """
        self.assertRaises(InstrumentParameterException,
                          Command,
                          "foo",
                          arguments="bad_arg")
        self.assertRaises(InstrumentParameterException,
                          Command,
                          "foo",
                          arguments=["bad arg"])

    def test_add_get(self):
        """
        Round-trip a Command through add_command/get_command and check the
        error paths (bad command name, non-Command object, missing keys).
        """
        good_cmd = Command(name="some_name")
        # Not added yet, so the lookup should not return a Command.
        result = self.cmd_dict.get_command("some_name")
        self.assert_(not isinstance(result, Command))
        self.cmd_dict.add_command(good_cmd)
        result = self.cmd_dict.get_command("some_name")
        self.assert_(isinstance(result, Command))

        # exception cases
        bad_cmd = Command(name=1)  # non-string name is rejected at add time
        self.assertRaises(InstrumentParameterException,
                          self.cmd_dict.add_command,
                          bad_cmd)
        self.assertRaises(InstrumentParameterException,
                          self.cmd_dict.add_command,
                          "bad_command")

        # Missing/unknown keys return None rather than raising.
        self.assertEqual(self.cmd_dict.get_command(None), None)
        self.assertEqual(self.cmd_dict.get_command("bad"), None)

    def _assert_metadata_change(self):
        """
        Hook for the parent class: after the YAML in self.test_yaml has been
        applied, verify that every described field was overridden, that
        fields the YAML omits keep their hard-coded values, and that the
        bogus entries (bad_command, cmd2's 'test' arg) were NOT added.
        """
        new_dict = self.param_dict.generate_dict()
        log.debug("Generated dictionary: %s", new_dict)
        self.assertEqual(new_dict["cmd1"][CommandDictKey.DESCRIPTION], "C1Desc")
        self.assertEqual(new_dict["cmd1"][CommandDictKey.DISPLAY_NAME], "C1")
        self.assertEqual(new_dict["cmd1"][CommandDictKey.ARGUMENTS]['coeff'][CommandDictKey.DESCRIPTION],
                         "Cmd1Coeff")
        self.assertEqual(new_dict["cmd1"][CommandDictKey.ARGUMENTS]['coeff'][CommandDictKey.DISPLAY_NAME],
                         "C1co")
        self.assertEqual(new_dict["cmd1"][CommandDictKey.ARGUMENTS]['coeff'][CommandDictKey.VALUE][CommandDictKey.UNITS],
                         "counts")
        self.assertEqual(new_dict["cmd1"][CommandDictKey.ARGUMENTS]['coeff'][CommandDictKey.VALUE][CommandDictKey.DESCRIPTION],
                         "C1coDesc")
        self.assertEqual(new_dict["cmd1"][CommandDictKey.ARGUMENTS]['coeff'][CommandDictKey.VALUE][CommandDictKey.TYPE],
                         "float")

        # Should come from hard code
        self.assertEqual(new_dict["cmd1"][CommandDictKey.ARGUMENTS]['delay'][CommandDictKey.DESCRIPTION],
                         "The delay time to wait before executing")
        self.assertEqual(new_dict["cmd1"][CommandDictKey.ARGUMENTS]['delay'][CommandDictKey.DISPLAY_NAME],
                         "delay time")
        self.assertEqual(new_dict["cmd1"][CommandDictKey.ARGUMENTS]['delay'][CommandDictKey.VALUE][CommandDictKey.UNITS],
                         "seconds")
        self.assertEqual(new_dict["cmd1"][CommandDictKey.ARGUMENTS]['delay'][CommandDictKey.VALUE][CommandDictKey.DESCRIPTION],
                         "Should be between 1.0 and 3.3 in increments of 0.1")
        self.assertEqual(new_dict["cmd1"][CommandDictKey.ARGUMENTS]['delay'][CommandDictKey.VALUE][CommandDictKey.TYPE],
                         "float")

        # Command 1 return values
        self.assertEqual(new_dict["cmd1"][CommandDictKey.RETURN][CommandDictKey.DESCRIPTION],
                         "C1Ret")
        self.assertEqual(new_dict["cmd1"][CommandDictKey.RETURN][CommandDictKey.UNITS],
                         "C1RetUnit")
        self.assertEqual(new_dict["cmd1"][CommandDictKey.RETURN][CommandDictKey.TYPE],
                         "C1RetType")

        self.assertEqual(new_dict["cmd2"][CommandDictKey.DESCRIPTION], "C2Desc")
        self.assertEqual(new_dict["cmd2"][CommandDictKey.DISPLAY_NAME], "C2Disp")
        self.assertEqual(new_dict["cmd2"][CommandDictKey.ARGUMENTS]['trigger'][CommandDictKey.DESCRIPTION],
                         "C2TriggerDesc")
        self.assertEqual(new_dict["cmd2"][CommandDictKey.ARGUMENTS]['trigger'][CommandDictKey.DISPLAY_NAME],
                         "C2TriggerDisp")
        self.assertEqual(new_dict["cmd2"][CommandDictKey.ARGUMENTS]['trigger'][CommandDictKey.VALUE][CommandDictKey.DESCRIPTION],
                         "C2TriggerValueDesc")
        self.assertEqual(new_dict["cmd2"][CommandDictKey.ARGUMENTS]['trigger'][CommandDictKey.VALUE][CommandDictKey.TYPE],
                         "C2TriggerType")
        self.assertEqual(new_dict["cmd2"][CommandDictKey.ARGUMENTS]['trigger'][CommandDictKey.VALUE][CommandDictKey.UNITS],
                         "C2Units")

        # Should come from hard code
        # Command 2 return values
        self.assertEqual(new_dict["cmd2"][CommandDictKey.RETURN][CommandDictKey.DESCRIPTION],
                         "C2RetDesc")
        self.assertEqual(new_dict["cmd2"][CommandDictKey.RETURN][CommandDictKey.UNITS],
                         "C2RetUnits")
        self.assertEqual(new_dict["cmd2"][CommandDictKey.RETURN][CommandDictKey.TYPE],
                         "C2RetType")

        # shouldnt be any extra arguments, either
        self.assertFalse('test' in new_dict["cmd2"][CommandDictKey.ARGUMENTS])
        self.assertFalse('bad_command' in new_dict)
class DataSetDriver(object):
    """
    Base class for data set drivers.  Provides:
    - an interface via callback to publish data
    - an interface via callback to persist driver state
    - an interface via callback to handle exceptions
    - start and stop sampling
    - a client interface for execute resource

    Subclasses need to include harvesters and parsers and
    be specialized to handle the interaction between the two.

    Configurations should contain keys from the DataSetDriverConfigKey class
    and should look something like this example (more complete documentation
    in the "Dataset Agent Architecture" page on the OOI wiki):

    {
        'harvester':
        {
            'directory': '/tmp/dsatest',
            'pattern': '*.txt',
            'frequency': 1,
        },
        'parser': {}
        'driver':
        {
            'records_per_second'
            'harvester_polling_interval'
            'batched_particle_count'
        }
    }
    """
    def __init__(self, config, memento, data_callback, state_callback, exception_callback):
        """
        @param config driver configuration dict (see class docstring)
        @param memento previously persisted driver state, if any
        @param data_callback callback used to publish particles
        @param state_callback callback used to persist driver state
        @param exception_callback callback used to report async exceptions
        """
        self._config = config
        self._data_callback = data_callback
        self._state_callback = state_callback
        self._exception_callback = exception_callback
        self._memento = memento
        self._publisher_thread = None

        self._verify_config()

        # Updated by set_resource; defaults defined in _build_param_dict
        self._polling_interval = None
        self._generate_particle_count = None
        self._particle_count_per_second = None

        self._param_dict = ProtocolParameterDict()
        self._cmd_dict = ProtocolCommandDict()
        self._driver_dict = DriverDict()

        self._build_command_dict()
        self._build_driver_dict()
        self._build_param_dict()

    def shutdown(self):
        """Shut the driver down; currently just stops sampling/publishing."""
        self.stop_sampling()

    def start_sampling(self):
        """
        Start sampling, then start a new thread to publish collected data.
        """
        self._start_sampling()
        self._start_publisher_thread()

    def stop_sampling(self):
        """
        Stop the sampling and publisher threads.
        """
        log.debug("Stopping driver now")

        self._stop_sampling()
        self._stop_publisher_thread()

    def _start_sampling(self):
        raise NotImplementedException('virtual method needs to be specialized')

    def _stop_sampling(self):
        raise NotImplementedException('virtual method needs to be specialized')

    def _is_sampling(self):
        """
        Currently the drivers only have two states, command and streaming, and
        all resource commands are common, either start or stop autosample.
        Therefore we didn't implement an entire state machine to manage states
        and commands.  If it does get more complex than this we should take
        the time to implement a state machine to add some flexibility.
        """
        raise NotImplementedException('virtual method needs to be specialized')

    def cmd_dvr(self, cmd, *args, **kwargs):
        """
        Dispatch an agent command to the matching driver method.
        @param cmd command name (e.g. 'execute_resource', 'get_resource')
        @raises InstrumentStateException for unhandled commands or resource
                commands
        """
        log.warn("DRIVER: cmd_dvr %s", cmd)

        if cmd == 'execute_resource':
            resource_cmd = args[0]

            if resource_cmd == DriverEvent.START_AUTOSAMPLE:
                return (ResourceAgentState.STREAMING, None)

            elif resource_cmd == DriverEvent.STOP_AUTOSAMPLE:
                self.stop_sampling()
                return (ResourceAgentState.COMMAND, None)

            else:
                log.error("Unhandled resource command: %s", resource_cmd)
                # BUG FIX: this was a bare `raise` with no active exception,
                # which itself blows up instead of reporting the problem.
                # Raise explicitly, matching the unhandled-command branch.
                raise InstrumentStateException("Unhandled resource command: %s" % resource_cmd)

        elif cmd == 'get_resource_capabilities':
            return self.get_resource_capabilities()

        elif cmd == 'set_resource':
            return self.set_resource(*args, **kwargs)

        elif cmd == 'get_resource':
            return self.get_resource(*args, **kwargs)

        elif cmd == 'get_config_metadata':
            return self.get_config_metadata(*args, **kwargs)

        elif cmd == 'disconnect':
            pass

        elif cmd == 'initialize':
            pass

        else:
            log.error("Unhandled command: %s", cmd)
            raise InstrumentStateException("Unhandled command: %s" % cmd)

    def get_resource_capabilities(self, current_state=True, *args, **kwargs):
        """
        Return driver commands and parameters.
        @param current_state True to retrieve commands available in current
               state, otherwise return all commands.
        @retval list of AgentCapability objects representing the drivers
                capabilities.
        @raises NotImplementedException if not implemented by subclass.
        """
        res_params = self._param_dict.get_keys()
        res_cmds = [DriverEvent.STOP_AUTOSAMPLE, DriverEvent.START_AUTOSAMPLE]

        if current_state and self._is_sampling():
            res_cmds = [DriverEvent.STOP_AUTOSAMPLE]
        elif current_state and not self._is_sampling():
            res_cmds = [DriverEvent.START_AUTOSAMPLE]

        return [res_cmds, res_params]

    def set_resource(self, *args, **kwargs):
        """
        Set driver parameters, validating types/ranges of the known driver
        parameters, then refresh the cached parameter values.
        @raises InstrumentParameterException on missing/invalid parameters
        """
        log.trace("start set_resource")
        try:
            params = args[0]
        except IndexError:
            raise InstrumentParameterException('Set command requires a parameter dict.')

        log.trace("set_resource: iterate through params: %s", params)
        for (key, val) in params.iteritems():
            if key in [DriverParameter.BATCHED_PARTICLE_COUNT, DriverParameter.RECORDS_PER_SECOND]:
                if not isinstance(val, int):
                    raise InstrumentParameterException("%s must be an integer" % key)
            if key in [DriverParameter.PUBLISHER_POLLING_INTERVAL]:
                if not isinstance(val, (int, float)):
                    raise InstrumentParameterException("%s must be an float" % key)

            if val <= 0:
                raise InstrumentParameterException("%s must be > 0" % key)

            self._param_dict.set_value(key, val)

        # Set the driver parameters
        self._generate_particle_count = self._param_dict.get(DriverParameter.BATCHED_PARTICLE_COUNT)
        self._particle_count_per_second = self._param_dict.get(DriverParameter.RECORDS_PER_SECOND)
        self._polling_interval = self._param_dict.get(DriverParameter.PUBLISHER_POLLING_INTERVAL)
        log.trace("Driver Parameters: %s, %s, %s", self._polling_interval,
                  self._particle_count_per_second, self._generate_particle_count)

    def get_resource(self, *args, **kwargs):
        """
        Get driver parameter(s).
        @param args[0] either DriverParameter.ALL or a list/tuple of keys
        @retval dict of parameter name -> value
        @raises InstrumentParameterException on missing/invalid parameters
        """
        result = {}

        try:
            params = args[0]
        except IndexError:
            # BUG FIX: message said "Set command" (copy-paste from
            # set_resource); this is the get path.
            raise InstrumentParameterException('Get command requires a parameter list.')

        # If all params requested, retrieve config.
        if params == DriverParameter.ALL:
            result = self._param_dict.get_config()

        # If not all params, confirm a list or tuple of params to retrieve.
        # Raise if not a list or tuple.
        # Retrieve each key in the list, raise if any are invalid.
        else:
            if not isinstance(params, (list, tuple)):
                raise InstrumentParameterException('Get argument not a list or tuple.')
            result = {}
            for key in params:
                try:
                    val = self._param_dict.get(key)
                    result[key] = val
                except KeyError:
                    raise InstrumentParameterException(('%s is not a valid parameter.' % key))

        return result

    def get_config_metadata(self):
        """
        Return the configuration metadata object in JSON format
        @retval The description of the parameters, commands, and driver info
        in a JSON string
        @see https://confluence.oceanobservatories.org/display/syseng/CIAD+MI+SV+Instrument+Driver-Agent+parameter+and+command+metadata+exchange
        """
        log.debug("Getting metadata from driver...")
        log.debug("Getting metadata dict from protocol...")
        return_dict = {}
        return_dict[ConfigMetadataKey.DRIVER] = self._driver_dict.generate_dict()
        return_dict[ConfigMetadataKey.COMMANDS] = self._cmd_dict.generate_dict()
        return_dict[ConfigMetadataKey.PARAMETERS] = self._param_dict.generate_dict()
        return return_dict

    def _verify_config(self):
        """
        virtual method to verify the supplied driver configuration is valid.
        Must be overloaded in sub classes.

        raises a ConfigurationException when a configuration error is detected.
        """
        raise NotImplementedException('virtual method needs to be specialized')

    def _build_driver_dict(self):
        """
        Populate the driver dictionary with options
        """
        pass

    def _build_command_dict(self):
        """
        Populate the command dictionary with commands.
        """
        self._cmd_dict.add(DriverEvent.START_AUTOSAMPLE, display_name="start autosample")
        self._cmd_dict.add(DriverEvent.STOP_AUTOSAMPLE, display_name="stop autosample")

    def _build_param_dict(self):
        """
        Setup three common driver parameters, then apply any overrides from
        the 'driver' section of the supplied config via set_resource.
        """
        self._param_dict.add_parameter(
            Parameter(
                DriverParameter.RECORDS_PER_SECOND,
                int,
                value=60,
                type=ParameterDictType.INT,
                visibility=ParameterDictVisibility.IMMUTABLE,
                display_name="Records Per Second",
                description="Number of records to process per second")
        )

        self._param_dict.add_parameter(
            Parameter(
                DriverParameter.PUBLISHER_POLLING_INTERVAL,
                float,
                value=1,
                type=ParameterDictType.FLOAT,
                visibility=ParameterDictVisibility.IMMUTABLE,
                display_name="Harvester Polling Interval",
                # NOTE(review): description says minutes, but the value is
                # passed straight to gevent.sleep() (seconds) - confirm.
                description="Duration in minutes to wait before checking for new files.")
        )

        self._param_dict.add_parameter(
            Parameter(
                DriverParameter.BATCHED_PARTICLE_COUNT,
                int,
                value=1,
                type=ParameterDictType.INT,
                visibility=ParameterDictVisibility.IMMUTABLE,
                display_name="Batched Particle Count",
                description="Number of particles to batch before sending to the agent")
        )

        config = self._config.get(DataSourceConfigKey.DRIVER, {})
        log.debug("set_resource on startup with: %s", config)
        self.set_resource(config)

    def _start_publisher_thread(self):
        # BUG FIX: clear the shutdown flag *before* spawning, so the loop
        # can never observe a missing/stale _publisher_shutdown attribute.
        self._publisher_shutdown = False
        self._publisher_thread = gevent.spawn(self._publisher_loop)

    def _stop_publisher_thread(self):
        log.debug("Signal shutdown")
        self._publisher_shutdown = True
        if self._publisher_thread:
            self._publisher_thread.kill(block=False)
        log.debug("shutdown complete")

    def _publisher_loop(self):
        """
        Main loop to listen for new files to parse.  Parse them and move on.
        Any exception is reported via the exception callback.
        """
        log.info("Starting main publishing loop")

        try:
            while not self._publisher_shutdown:
                self._poll()
                gevent.sleep(self._polling_interval)
        except Exception as e:
            log.error("Exception in publisher thread: %s", e)
            self._exception_callback(e)

        log.debug("publisher thread detected shutdown request")

    def _poll(self):
        raise NotImplementedException('virtual method needs to be specialized')

    def _new_file_exception(self):
        raise NotImplementedException('virtual method needs to be specialized')
# NOTE(review): this is a second definition of DataSetDriver in the same
# file (it would shadow the earlier one at import time) - these look like
# two versions of the same module concatenated; confirm and drop one.
class DataSetDriver(object):
    """
    Base class for data set drivers.  Provides:
    - an interface via callback to publish data
    - an interface via callback to persist driver state
    - an interface via callback to handle exceptions
    - start and stop sampling
    - a client interface for execute resource

    Subclasses need to include harvesters and parsers and
    be specialized to handle the interaction between the two.

    Configurations should contain keys from the DataSetDriverConfigKey class
    and should look something like this example (more complete documentation
    in the "Dataset Agent Architecture" page on the OOI wiki):

    {
        'harvester':
        {
            'directory': '/tmp/dsatest',
            'storage_directory': '/tmp/stored_dsatest',
            'pattern': '*.txt',
            'frequency': 1,
            'file_mod_wait_time': 30,
        },
        'parser': {}
        'driver':
        {
            'records_per_second'
            'harvester_polling_interval'
            'batched_particle_count'
        }
    }
    """
    def __init__(self, config, memento, data_callback, state_callback, event_callback, exception_callback):
        """
        @param config driver configuration dict (deep-copied; see class docstring)
        @param memento previously persisted driver state, if any
        @param data_callback callback used to publish particles
        @param state_callback callback used to persist driver state
        @param event_callback callback used to raise resource agent events
        @param exception_callback callback used to report async exceptions
        """
        self._config = copy.deepcopy(config)
        self._data_callback = data_callback
        self._state_callback = state_callback
        self._event_callback = event_callback
        self._exception_callback = exception_callback
        self._memento = memento
        self._publisher_thread = None

        self._verify_config()

        # Updated by set_resource; defaults defined in _build_param_dict
        self._polling_interval = None
        self._generate_particle_count = None
        self._particle_count_per_second = None
        self._resource_id = None

        self._param_dict = ProtocolParameterDict()
        self._cmd_dict = ProtocolCommandDict()
        self._driver_dict = DriverDict()

        self._build_command_dict()
        self._build_driver_dict()
        self._build_param_dict()

    def shutdown(self):
        """Shut the driver down; currently just stops sampling/publishing."""
        self.stop_sampling()

    def start_sampling(self):
        """
        Start sampling, then start a new thread to publish collected data.
        """
        self._start_sampling()
        self._start_publisher_thread()

    def stop_sampling(self):
        """
        Stop the sampling and publisher threads.
        """
        log.debug("Stopping sampling and publisher now")

        self._stop_sampling()
        self._stop_publisher_thread()

    def _start_sampling(self):
        raise NotImplementedException('virtual method needs to be specialized')

    def _stop_sampling(self):
        raise NotImplementedException('virtual method needs to be specialized')

    def _is_sampling(self):
        """
        Currently the drivers only have two states, command and streaming, and
        all resource commands are common, either start or stop autosample.
        Therefore we didn't implement an entire state machine to manage states
        and commands.  If it does get more complex than this we should take
        the time to implement a state machine to add some flexibility.
        """
        raise NotImplementedException('virtual method needs to be specialized')

    def cmd_dvr(self, cmd, *args, **kwargs):
        """
        Dispatch an agent command to the matching driver method.
        @param cmd command name (e.g. 'execute_resource', 'get_resource')
        @raises InstrumentStateException for unhandled commands or resource
                commands
        """
        log.warn("DRIVER: cmd_dvr %s", cmd)

        if cmd == 'execute_resource':
            resource_cmd = args[0]

            if resource_cmd == DriverEvent.START_AUTOSAMPLE:
                return (ResourceAgentState.STREAMING, None)

            elif resource_cmd == DriverEvent.STOP_AUTOSAMPLE:
                self.stop_sampling()
                return (ResourceAgentState.COMMAND, None)

            else:
                log.error("Unhandled resource command: %s", resource_cmd)
                # BUG FIX: this was a bare `raise` with no active exception,
                # which itself blows up instead of reporting the problem.
                # Raise explicitly, matching the unhandled-command branch.
                raise InstrumentStateException("Unhandled resource command: %s" % resource_cmd)

        elif cmd == 'get_resource_capabilities':
            return self.get_resource_capabilities()

        elif cmd == 'set_resource':
            return self.set_resource(*args, **kwargs)

        elif cmd == 'get_resource':
            return self.get_resource(*args, **kwargs)

        elif cmd == 'get_config_metadata':
            return self.get_config_metadata(*args, **kwargs)

        elif cmd == 'disconnect':
            pass

        elif cmd == 'initialize':
            pass

        else:
            log.error("Unhandled command: %s", cmd)
            raise InstrumentStateException("Unhandled command: %s" % cmd)

    def get_resource_capabilities(self, current_state=True, *args, **kwargs):
        """
        Return driver commands and parameters.
        @param current_state True to retrieve commands available in current
               state, otherwise return all commands.
        @retval list of AgentCapability objects representing the drivers
                capabilities.
        @raises NotImplementedException if not implemented by subclass.
        """
        res_params = self._param_dict.get_keys()
        res_cmds = [DriverEvent.STOP_AUTOSAMPLE, DriverEvent.START_AUTOSAMPLE]

        if current_state and self._is_sampling():
            res_cmds = [DriverEvent.STOP_AUTOSAMPLE]
        elif current_state and not self._is_sampling():
            res_cmds = [DriverEvent.START_AUTOSAMPLE]

        return [res_cmds, res_params]

    def set_resource(self, *args, **kwargs):
        """
        Set driver parameters, validating types/ranges of the known driver
        parameters, then refresh the cached parameter values.
        @raises InstrumentParameterException on missing/invalid parameters
        """
        log.trace("start set_resource")
        try:
            params = args[0]
        except IndexError:
            raise InstrumentParameterException('Set command requires a parameter dict.')

        log.trace("set_resource: iterate through params: %s", params)
        for (key, val) in params.iteritems():
            if key in [DriverParameter.BATCHED_PARTICLE_COUNT, DriverParameter.RECORDS_PER_SECOND]:
                if not isinstance(val, int):
                    raise InstrumentParameterException("%s must be an integer" % key)
            if key in [DriverParameter.PUBLISHER_POLLING_INTERVAL]:
                if not isinstance(val, (int, float)):
                    raise InstrumentParameterException("%s must be an float" % key)

            if val <= 0:
                raise InstrumentParameterException("%s must be > 0" % key)

            self._param_dict.set_value(key, val)

        # Set the driver parameters
        self._generate_particle_count = self._param_dict.get(DriverParameter.BATCHED_PARTICLE_COUNT)
        self._particle_count_per_second = self._param_dict.get(DriverParameter.RECORDS_PER_SECOND)
        self._polling_interval = self._param_dict.get(DriverParameter.PUBLISHER_POLLING_INTERVAL)
        log.trace("Driver Parameters: %s, %s, %s", self._polling_interval,
                  self._particle_count_per_second, self._generate_particle_count)

    def get_resource(self, *args, **kwargs):
        """
        Get driver parameter(s).
        @param args[0] either DriverParameter.ALL or a list/tuple of keys
        @retval dict of parameter name -> value
        @raises InstrumentParameterException on missing/invalid parameters
        """
        result = {}

        try:
            params = args[0]
        except IndexError:
            # BUG FIX: message said "Set command" (copy-paste from
            # set_resource); this is the get path.
            raise InstrumentParameterException('Get command requires a parameter list.')

        # If all params requested, retrieve config.
        if params == DriverParameter.ALL:
            result = self._param_dict.get_config()

        # If not all params, confirm a list or tuple of params to retrieve.
        # Raise if not a list or tuple.
        # Retrieve each key in the list, raise if any are invalid.
        else:
            if not isinstance(params, (list, tuple)):
                raise InstrumentParameterException('Get argument not a list or tuple.')
            result = {}
            for key in params:
                try:
                    val = self._param_dict.get(key)
                    result[key] = val
                except KeyError:
                    raise InstrumentParameterException(('%s is not a valid parameter.' % key))

        return result

    def get_config_metadata(self):
        """
        Return the configuration metadata object in JSON format
        @retval The description of the parameters, commands, and driver info
        in a JSON string
        @see https://confluence.oceanobservatories.org/display/syseng/CIAD+MI+SV+Instrument+Driver-Agent+parameter+and+command+metadata+exchange
        """
        log.debug("Getting metadata from driver...")
        log.debug("Getting metadata dict from protocol...")
        return_dict = {}
        return_dict[ConfigMetadataKey.DRIVER] = self._driver_dict.generate_dict()
        return_dict[ConfigMetadataKey.COMMANDS] = self._cmd_dict.generate_dict()
        return_dict[ConfigMetadataKey.PARAMETERS] = self._param_dict.generate_dict()
        return return_dict

    def _verify_config(self):
        """
        virtual method to verify the supplied driver configuration is valid.
        Must be overloaded in sub classes.

        raises a ConfigurationException when a configuration error is detected.
        """
        raise NotImplementedException('virtual method needs to be specialized')

    def _build_driver_dict(self):
        """
        Populate the driver dictionary with options
        """
        pass

    def _build_command_dict(self):
        """
        Populate the command dictionary with commands.
        """
        self._cmd_dict.add(DriverEvent.START_AUTOSAMPLE, display_name="start autosample")
        self._cmd_dict.add(DriverEvent.STOP_AUTOSAMPLE, display_name="stop autosample")

    def _build_param_dict(self):
        """
        Setup three common driver parameters, then apply any overrides from
        the 'driver' section of the supplied config via set_resource.
        """
        self._param_dict.add_parameter(
            Parameter(
                DriverParameter.RECORDS_PER_SECOND,
                int,
                value=60,
                type=ParameterDictType.INT,
                visibility=ParameterDictVisibility.IMMUTABLE,
                display_name="Records Per Second",
                description="Number of records to process per second")
        )

        self._param_dict.add_parameter(
            Parameter(
                DriverParameter.PUBLISHER_POLLING_INTERVAL,
                float,
                value=1,
                type=ParameterDictType.FLOAT,
                visibility=ParameterDictVisibility.IMMUTABLE,
                display_name="Harvester Polling Interval",
                # NOTE(review): description says minutes, but the value is
                # passed straight to gevent.sleep() (seconds) - confirm.
                description="Duration in minutes to wait before checking for new files.")
        )

        self._param_dict.add_parameter(
            Parameter(
                DriverParameter.BATCHED_PARTICLE_COUNT,
                int,
                value=1,
                type=ParameterDictType.INT,
                visibility=ParameterDictVisibility.IMMUTABLE,
                display_name="Batched Particle Count",
                description="Number of particles to batch before sending to the agent")
        )

        config = self._config.get(DataSourceConfigKey.DRIVER, {})
        log.debug("set_resource on startup with: %s", config)
        self.set_resource(config)

    def _start_publisher_thread(self):
        # BUG FIX: clear the shutdown flag *before* spawning, so the loop
        # can never observe a missing/stale _publisher_shutdown attribute.
        self._publisher_shutdown = False
        self._publisher_thread = gevent.spawn(self._publisher_loop)

    def _stop_publisher_thread(self):
        log.debug("Signal shutdown")
        self._publisher_shutdown = True
        if self._publisher_thread:
            self._publisher_thread.kill(block=False)
        log.debug("shutdown complete")

    def _publisher_loop(self):
        """
        Main loop to listen for new files to parse.  Parse them and move on.
        Any exception is reported via the exception callback.
        """
        log.info("Starting main publishing loop")

        try:
            while not self._publisher_shutdown:
                self._poll()
                gevent.sleep(self._polling_interval)
        except Exception as e:
            # BUG FIX: traceback.format_exc() takes an optional line-count
            # limit, not an exception object; passing `e` was a misuse.
            log.error("Exception in publisher thread (resource id: %s): %s",
                      self._resource_id, traceback.format_exc())
            self._exception_callback(e)

        log.debug("publisher thread detected shutdown request")

    def _poll(self):
        raise NotImplementedException('virtual method needs to be specialized')

    def _new_file_exception(self):
        raise NotImplementedException('virtual method needs to be specialized')

    def _sample_exception_callback(self, exception):
        """
        Publish an event when a sample exception is detected
        """
        self._event_callback(event_type="ResourceAgentErrorEvent",
                             error_msg="%s" % exception)

    def _raise_new_file_event(self, name):
        """
        Raise a ResourceAgentIOEvent when a new file is detected.  Add file
        stats (size, mtime, md5) to the payload of the event.
        """
        s = os.stat(name)

        # Hash in chunks so large data files aren't read fully into memory;
        # the resulting digest is identical to a whole-file read.
        digest = hashlib.md5()
        with open(name, 'rb') as filehandle:
            for chunk in iter(lambda: filehandle.read(65536), b''):
                digest.update(chunk)

        stats = {
            'name': name,
            'size': s.st_size,
            'mod': s.st_mtime,
            'md5_checksum': digest.hexdigest()
        }

        self._event_callback(event_type="ResourceAgentIOEvent",
                             source_type="new file",
                             stats=stats)