def test_recorder(self):

    # Skip this test on Python versions older than 2.7
    if python_version[0] == 2 and python_version[1] < 7:
        print("Skipping test since we're on {0}".format('.'.join(map(str, python_version))))
        return

    # Grab an input/output recording, the results of a prior countmatches run

    recording = os.path.join(self._package_path, 'recordings', 'scpv2', 'Splunk-6.3', 'countmatches.')

    with gzip.open(recording + 'input.gz', 'rb') as file_1:
        with io.open(recording + 'output', 'rb') as file_2:
            ifile = BytesIO(file_1.read())
            result = BytesIO(file_2.read())

    # Set up the input/output recorders that are under test

    ifile = Recorder(mktemp(), ifile)

    try:
        ofile = Recorder(mktemp(), BytesIO())

        try:
            # Read and then write a line
            ifile.readline()
            ofile.write(result.readline())

            # Read and then write a block
            ifile.read()
            ofile.write(result.read())

            # Verify that what we wrote is equivalent to the original recording, the result from a prior
            # countmatches run
            self.assertEqual(ofile.getvalue(), result.getvalue())

            # Verify that we faithfully recorded the input and output files
            ifile._recording.close()
            ofile._recording.close()

            with gzip.open(ifile._recording.name, 'rb') as file_1:
                with gzip.open(ofile._recording.name, 'rb') as file_2:
                    self.assertEqual(file_1.read(), ifile._file.getvalue())
                    self.assertEqual(file_2.read(), ofile._file.getvalue())

        finally:
            ofile._recording.close()
            os.remove(ofile._recording.name)

    finally:
        ifile._recording.close()
        os.remove(ifile._recording.name)

    return
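
# Recorder (under test above) wraps a file-like object and tees everything read from or written to it
# into a gzipped recording file. A minimal sketch of the pattern this test exercises, assuming the same
# Recorder constructor used above (the sample bytes are illustrative only):
#
#   recorder = Recorder(mktemp(), BytesIO(b'line 1\nline 2\n'))  # tee reads into a temporary recording
#   recorder.readline()                                          # use the wrapped stream as usual
#   recorder._recording.close()                                  # the gzipped copy is now on disk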
def playback(self, path):

    with open(path, 'rb') as f:
        test_data = pickle.load(f)

    self._output = BytesIO()
    self._recording = test_data['inputs']
    self._recording_part = self._recording.popleft()

    def get(self, method, *args, **kwargs):
        return self._recording_part[method.__name__].popleft()

    self.get = MethodType(get, self, self.__class__)

    def next_part(self):
        self._recording_part = self._recording.popleft()

    self.next_part = MethodType(next_part, self, self.__class__)

    def stop(self):
        self._test_case.assertEqual(test_data['results'], self._output.getvalue())

    self.stop = MethodType(stop, self, self.__class__)

    return
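
# playback() is the replay half of the record/playback pair defined in this module: it unpickles the
# queued per-part results captured by record() (below) and monkey-patches get, next_part, and stop so
# that recorded return values are served instead of live calls, with stop() asserting that the replayed
# run produced the recorded output. A rough usage sketch (recorder and the recording path are
# hypothetical stand-ins for the object these methods are bound to):
#
#   recorder.record('recordings/countmatches.recording')    # capture results during a live run
#   recorder.playback('recordings/countmatches.recording')  # later, replay them and verify the output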
def test_messages_header(self):

    @Configuration()
    class TestMessagesHeaderCommand(SearchCommand):
        class ConfigurationSettings(SearchCommand.ConfigurationSettings):

            @classmethod
            def fix_up(cls, command_class):
                pass

    command = TestMessagesHeaderCommand()
    command._protocol_version = 1
    output_buffer = BytesIO()
    command._record_writer = RecordWriterV1(output_buffer)

    messages = [
        (command.write_debug, 'debug_message'),
        (command.write_error, 'error_message'),
        (command.write_fatal, 'fatal_message'),
        (command.write_info, 'info_message'),
        (command.write_warning, 'warning_message')]

    for write, message in messages:
        write(message)

    command.finish()

    expected = (
        'debug_message=debug_message\r\n'
        'error_message=error_message\r\n'
        'error_message=fatal_message\r\n'
        'info_message=info_message\r\n'
        'warn_message=warning_message\r\n'
        '\r\n')

    self.assertEqual(output_buffer.getvalue().decode('utf-8'), expected)
    return
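
# Under search command protocol v1, RecordWriterV1 emits messages as a leading header of
# '<level>_message=<text>' lines terminated by a blank line, which is exactly what the expected string
# above spells out; note that fatal messages are downgraded to the error_message level on the wire.
# For example (illustrative message text):
#
#   command.write_fatal('disk full')   # appears in the header as 'error_message=disk full\r\n'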
def build_command_input(getinfo_metadata, execute_metadata, execute_body):
    input = (
        'chunked 1.0,{},0\n{}'.format(len(six.ensure_binary(getinfo_metadata)), getinfo_metadata) +
        'chunked 1.0,{},{}\n{}{}'.format(
            len(six.ensure_binary(execute_metadata)), len(six.ensure_binary(execute_body)),
            execute_metadata, execute_body))

    ifile = BytesIO(six.ensure_binary(input))

    if not six.PY2:
        ifile = TextIOWrapper(ifile)

    return ifile
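
# Each SCP v2 chunk is framed as 'chunked 1.0,<metadata-length>,<body-length>\n' followed by the JSON
# metadata and then the body; build_command_input concatenates a getinfo chunk (no body) with an execute
# chunk. A minimal sketch with illustrative payloads:
#
#   build_command_input(
#       getinfo_metadata='{"action":"getinfo"}',                  # 20 bytes, no body
#       execute_metadata='{"action":"execute","finished":true}',  # 36 bytes
#       execute_body='a\r\n1\r\n')                                # 6-byte CSV body
#   # -> 'chunked 1.0,20,0\n{"action":"getinfo"}chunked 1.0,36,6\n{"action":"execute","finished":true}a\r\n1\r\n'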
def record(self, path):

    self._output = BytesIO()
    self._recording = deque()
    self._recording_part = OrderedDict()
    self._recording.append(self._recording_part)

    def get(self, method, *args, **kwargs):
        result = method(*args, **kwargs)
        part = self._recording_part
        key = method.__name__
        try:
            results = part[key]
        except KeyError:
            part[key] = results = deque()
        results.append(result)
        return result

    self.get = MethodType(get, self, self.__class__)

    def next_part(self):
        part = OrderedDict()
        self._recording_part = part
        self._recording.append(part)

    self.next_part = MethodType(next_part, self, self.__class__)

    def stop(self):
        with io.open(path, 'wb') as f:
            test = OrderedDict((('inputs', self._recording), ('results', self._output.getvalue())))
            pickle.dump(test, f)

    self.stop = MethodType(stop, self, self.__class__)

    return
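
# record() is the capture half of the pair: each part of a run is an OrderedDict mapping a method name to
# a deque of its recorded return values, and stop() pickles those parts together with the accumulated
# output so that playback() can replay and verify them. The on-disk shape is therefore roughly:
#
#   {'inputs': deque([OrderedDict([('read', deque([...])), ...]), ...]), 'results': b'...'}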
def test_process_scpv2(self):

    # SearchCommand.process should

    # 1. Recognize all standard options:

    metadata = (
        '{{'
        '"action": "getinfo", "preview": false, "searchinfo": {{'
        '"latest_time": "0",'
        '"splunk_version": "20150522",'
        '"username": "******",'
        '"app": "searchcommands_app",'
        '"args": ['
        '"logging_configuration={logging_configuration}",'
        '"logging_level={logging_level}",'
        '"record={record}",'
        '"show_configuration={show_configuration}",'
        '"required_option_1=value_1",'
        '"required_option_2=value_2"'
        '],'
        '"search": "A%7C%20inputlookup%20tweets%20%7C%20countmatches%20fieldname%3Dword_count%20pattern%3D%22%5Cw%2B%22%20text%20record%3Dt%20%7C%20export%20add_timestamp%3Df%20add_offset%3Dt%20format%3Dcsv%20segmentation%3Draw",'
        '"earliest_time": "0",'
        '"session_key": "0JbG1fJEvXrL6iYZw9y7tmvd6nHjTKj7ggaE7a4Jv5R0UIbeYJ65kThn^3hiNeoqzMT_LOtLpVR3Y8TIJyr5bkHUElMijYZ8l14wU0L4n^Oa5QxepsZNUIIQCBm^",'
        '"owner": "admin",'
        '"sid": "1433261372.158",'
        '"splunkd_uri": "https://127.0.0.1:8089",'
        '"dispatch_dir": {dispatch_dir},'
        '"raw_args": ['
        '"logging_configuration={logging_configuration}",'
        '"logging_level={logging_level}",'
        '"record={record}",'
        '"show_configuration={show_configuration}",'
        '"required_option_1=value_1",'
        '"required_option_2=value_2"'
        '],'
        '"maxresultrows": 10,'
        '"command": "countmatches"'
        '}}'
        '}}')

    basedir = self._package_directory

    default_logging_configuration = os.path.join(
        basedir, 'apps', 'app_with_logging_configuration', 'default', 'logging.conf')

    dispatch_dir = os.path.join(basedir, 'recordings', 'scpv2', 'Splunk-6.3', 'countmatches.dispatch_dir')
    logging_configuration = os.path.join(basedir, 'apps', 'app_with_logging_configuration', 'logging.conf')
    logging_level = 'ERROR'
    record = False
    show_configuration = True

    getinfo_metadata = metadata.format(
        dispatch_dir=encode_string(dispatch_dir),
        logging_configuration=encode_string(logging_configuration)[1:-1],
        logging_level=logging_level,
        record=('true' if record is True else 'false'),
        show_configuration=('true' if show_configuration is True else 'false'))

    execute_metadata = '{"action":"execute","finished":true}'
    execute_body = 'test\r\ndata\r\n'

    ifile = StringIO(
        'chunked 1.0,{},0\n{}'.format(len(getinfo_metadata), getinfo_metadata) +
        'chunked 1.0,{},{}\n{}{}'.format(
            len(execute_metadata), len(execute_body), execute_metadata, execute_body))

    command = TestCommand()
    result = BytesIO()
    argv = ['some-external-search-command.py']

    self.assertEqual(command.logging_level, 'WARNING')
    self.assertIs(command.record, None)
    self.assertIs(command.show_configuration, None)

    try:
        # noinspection PyTypeChecker
        command.process(argv, ifile, ofile=result)
    except SystemExit as error:
        self.fail('Unexpected exception: {}: {}'.format(type(error).__name__, error))

    self.assertEqual(command.logging_configuration, logging_configuration)
    self.assertEqual(command.logging_level, 'ERROR')
    self.assertEqual(command.record, record)
    self.assertEqual(command.show_configuration, show_configuration)
    self.assertEqual(command.required_option_1, 'value_1')
    self.assertEqual(command.required_option_2, 'value_2')

    self.assertEqual(
        'chunked 1.0,68,0\n'
        '{"inspector":{"messages":[["INFO","test command configuration: "]]}}\n'
        'chunked 1.0,17,23\n'
        '{"finished":true}test,__mv_test\r\n'
        'data,\r\n',
        result.getvalue().decode('utf-8'))

    self.assertEqual(command.protocol_version, 2)

    # 2. Provide access to these properties:
    #    fieldnames
    #    input_header
    #    metadata
    #    search_results_info
    #    service

    self.assertEqual([], command.fieldnames)

    command_metadata = command.metadata
    input_header = command.input_header

    self.assertIsNone(input_header['allowStream'])
    self.assertEqual(input_header['infoPath'], os.path.join(command_metadata.searchinfo.dispatch_dir, 'info.csv'))
    self.assertIsNone(input_header['keywords'])
    self.assertEqual(input_header['preview'], command_metadata.preview)
    self.assertIs(input_header['realtime'], False)
    self.assertEqual(input_header['search'], command_metadata.searchinfo.search)
    self.assertEqual(input_header['sid'], command_metadata.searchinfo.sid)
    self.assertEqual(input_header['splunkVersion'], command_metadata.searchinfo.splunk_version)
    self.assertIsNone(input_header['truncated'])

    self.assertEqual(command_metadata.preview, input_header['preview'])
    self.assertEqual(command_metadata.searchinfo.app, 'searchcommands_app')
    self.assertEqual(command_metadata.searchinfo.args, [
        'logging_configuration=' + logging_configuration, 'logging_level=ERROR', 'record=false',
        'show_configuration=true', 'required_option_1=value_1', 'required_option_2=value_2'])
    self.assertEqual(command_metadata.searchinfo.dispatch_dir, os.path.dirname(input_header['infoPath']))
    self.assertEqual(command_metadata.searchinfo.earliest_time, 0.0)
    self.assertEqual(command_metadata.searchinfo.latest_time, 0.0)
    self.assertEqual(command_metadata.searchinfo.owner, 'admin')
    self.assertEqual(command_metadata.searchinfo.raw_args, command_metadata.searchinfo.args)
    self.assertEqual(
        command_metadata.searchinfo.search,
        'A| inputlookup tweets | countmatches fieldname=word_count pattern="\\w+" text record=t | export add_timestamp=f add_offset=t format=csv segmentation=raw')
    self.assertEqual(
        command_metadata.searchinfo.session_key,
        '0JbG1fJEvXrL6iYZw9y7tmvd6nHjTKj7ggaE7a4Jv5R0UIbeYJ65kThn^3hiNeoqzMT_LOtLpVR3Y8TIJyr5bkHUElMijYZ8l14wU0L4n^Oa5QxepsZNUIIQCBm^')
    self.assertEqual(command_metadata.searchinfo.sid, '1433261372.158')
    self.assertEqual(command_metadata.searchinfo.splunk_version, '20150522')
    self.assertEqual(command_metadata.searchinfo.splunkd_uri, 'https://127.0.0.1:8089')
    self.assertEqual(command_metadata.searchinfo.username, 'admin')
    self.assertEqual(command_metadata.searchinfo.maxresultrows, 10)
    self.assertEqual(command_metadata.searchinfo.command, 'countmatches')

    command.search_results_info.search_metrics = command.search_results_info.search_metrics.__dict__
    command.search_results_info.optional_fields_json = command.search_results_info.optional_fields_json.__dict__

    self.maxDiff = None

    self.assertDictEqual(command.search_results_info.__dict__, {
        u'is_summary_index': 0, u'bs_thread_count': 1, u'rt_backfill': 0, u'rtspan': '',
        u'search_StartTime': 1433261392.934936, u'read_raw': 1, u'root_sid': '',
        u'field_rendering': '', u'query_finished': 1, u'optional_fields_json': {},
        u'group_list': '', u'remoteServers': '', u'rt_latest': '',
        u'remote_log_download_mode': 'disabled', u'reduce_search': '', u'request_finalization': 0,
        u'auth_token': 'UQZSgWwE2f9oIKrj1QG^kVhW^T_cR4H5Z65bPtMhwlHytS5jFrFYyH^dGzjTusDjVTgoBNeR7bvIzctHF7DrLJ1ANevgDOWEWRvABNj6d_k0koqxw9Io',
        u'indexed_realtime': 0, u'ppc_bs': '$SPLUNK_HOME/etc', u'drop_count': 0,
        u'datamodel_map': '', u'search_can_be_event_type': 0, u'search_StartUp_Spent': 0,
        u'realtime': 0, u'splunkd_uri': 'https://127.0.0.1:8089', u'columnOrder': '',
        u'kv_store_settings': 'hosts;127.0.0.1:8191\\;;local;127.0.0.1:8191;read_preference;958513E3-8716-4ABF-9559-DA0C9678437F;replica_set_name;958513E3-8716-4ABF-9559-DA0C9678437F;status;ready;',
        u'label': '', u'summary_maxtimespan': '', u'indexed_realtime_offset': 0,
        u'sid': 1433261392.159, u'msg': [], u'internal_only': 0, u'summary_id': '',
        u'orig_search_head': '', u'ppc_app': 'chunked_searchcommands',
        u'countMap': {
            u'invocations.dispatch.writeStatus': u'1',
            u'duration.dispatch.writeStatus': u'2',
            u'duration.startup.handoff': u'79',
            u'duration.startup.configuration': u'34',
            u'invocations.startup.handoff': u'1',
            u'invocations.startup.configuration': u'1'},
        u'is_shc_mode': 0, u'shp_id': '958513E3-8716-4ABF-9559-DA0C9678437F',
        u'timestamp': 1433261392.936374, u'is_remote_sorted': 0, u'remote_search': '',
        u'splunkd_protocol': 'https', u'site': '', u'maxevents': 0, u'keySet': '',
        u'summary_stopped': 0,
        u'search_metrics': {
            u'ConsideredEvents': 0,
            u'ConsideredBuckets': 0,
            u'TotalSlicesInBuckets': 0,
            u'EliminatedBuckets': 0,
            u'DecompressedSlices': 0},
        u'summary_mode': 'all', u'now': 1433261392.0, u'splunkd_port': 8089,
        u'is_saved_search': 0, u'rtoptions': '',
        u'search': '| inputlookup random_data max=50000 | sum total=total value1 record=t | export add_timestamp=f add_offset=t format=csv segmentation=raw',
        u'bundle_version': 0, u'generation_id': 0, u'bs_thread_id': 0, u'is_batch_mode': 0,
        u'scan_count': 0, u'rt_earliest': '', u'default_group': '*', u'tstats_reduce': '',
        u'kv_store_additional_settings': 'hosts_guids;958513E3-8716-4ABF-9559-DA0C9678437F\\;;',
        u'enable_event_stream': 0, u'is_remote': 0, u'is_scheduled': 0, u'sample_ratio': 1,
        u'ppc_user': '******', u'sample_seed': 0})

    self.assertIsInstance(command.service, Service)

    self.assertEqual(command.service.authority, command_metadata.searchinfo.splunkd_uri)
    self.assertEqual(command.service.scheme, command.search_results_info.splunkd_protocol)
    self.assertEqual(command.service.port, command.search_results_info.splunkd_port)
    self.assertEqual(command.service.token, command_metadata.searchinfo.session_key)
    self.assertEqual(command.service.namespace.app, command.metadata.searchinfo.app)
    self.assertIsNone(command.service.namespace.owner)
    self.assertIsNone(command.service.namespace.sharing)

    self.assertEqual(command.protocol_version, 2)

    # 3. Produce an error message, log a debug message, and exit when invalid standard option values are
    # encountered.

    # Note on loggers
    # Loggers are global and can't be removed once they're created. We create loggers that are keyed by class
    # name. Each instance of a class thus created gets access to the same logger. We created one in the prior
    # test and set its level to ERROR. That level is retained in this test.

    logging_configuration = 'non-existent-logging.conf'
    logging_level = 'NON-EXISTENT-LOGGING-LEVEL'
    record = 'Non-boolean value'
    show_configuration = 'Non-boolean value'

    getinfo_metadata = metadata.format(
        dispatch_dir=encode_string(dispatch_dir),
        logging_configuration=encode_string(logging_configuration)[1:-1],
        logging_level=logging_level,
        record=record,
        show_configuration=show_configuration)

    execute_metadata = '{"action":"execute","finished":true}'
    execute_body = 'test\r\ndata\r\n'

    ifile = StringIO(
        'chunked 1.0,{},0\n{}'.format(len(getinfo_metadata), getinfo_metadata) +
        'chunked 1.0,{},{}\n{}{}'.format(
            len(execute_metadata), len(execute_body), execute_metadata, execute_body))

    command = TestCommand()
    result = BytesIO()
    argv = ['test.py']

    # noinspection PyTypeChecker
    self.assertRaises(SystemExit, command.process, argv, ifile, ofile=result)

    self.assertEqual(command.logging_level, 'ERROR')
    self.assertEqual(command.record, False)
    self.assertEqual(command.show_configuration, False)
    self.assertEqual(command.required_option_1, 'value_1')
    self.assertEqual(command.required_option_2, 'value_2')

    self.assertEqual(
        'chunked 1.0,287,0\n'
        '{"inspector":{"messages":[["ERROR","Illegal value: logging_configuration=non-existent-logging.conf"],'
        '["ERROR","Illegal value: logging_level=NON-EXISTENT-LOGGING-LEVEL"],'
        '["ERROR","Illegal value: record=Non-boolean value"],'
        '["ERROR","Illegal value: show_configuration=Non-boolean value"]]}}\n'
        'chunked 1.0,17,0\n'
        '{"finished":true}',
        result.getvalue().decode('utf-8'))

    self.assertEqual(command.protocol_version, 2)

    # 4. Produce an error message, log an error message that includes a traceback, and exit when an exception is
    # raised during command execution.

    logging_configuration = os.path.join(basedir, 'apps', 'app_with_logging_configuration', 'logging.conf')
    logging_level = 'WARNING'
    record = False
    show_configuration = False

    getinfo_metadata = metadata.format(
        dispatch_dir=encode_string(dispatch_dir),
        logging_configuration=encode_string(logging_configuration)[1:-1],
        logging_level=logging_level,
        record=('true' if record is True else 'false'),
        show_configuration=('true' if show_configuration is True else 'false'))

    execute_metadata = '{"action":"execute","finished":true}'
    execute_body = 'action\r\nraise_exception\r\n'

    ifile = StringIO(
        'chunked 1.0,{},0\n{}'.format(len(getinfo_metadata), getinfo_metadata) +
        'chunked 1.0,{},{}\n{}{}'.format(
            len(execute_metadata), len(execute_body), execute_metadata, execute_body))

    command = TestCommand()
    result = BytesIO()
    argv = ['test.py']

    try:
        command.process(argv, ifile, ofile=result)
    except SystemExit as error:
        self.assertNotEqual(0, error.code)
    except BaseException as error:
        self.fail('{0}: {1}: {2}\n'.format(type(error).__name__, error, result.getvalue().decode('utf-8')))
    else:
        self.fail('Expected SystemExit, not a return from TestCommand.process: {}\n'.format(
            result.getvalue().decode('utf-8')))

    self.assertEqual(command.logging_configuration, logging_configuration)
    self.assertEqual(command.logging_level, logging_level)
    self.assertEqual(command.record, record)
    self.assertEqual(command.show_configuration, show_configuration)
    self.assertEqual(command.required_option_1, 'value_1')
    self.assertEqual(command.required_option_2, 'value_2')

    finished = r'\"finished\":true'

    if six.PY2:
        inspector = \
            r'\"inspector\":\{\"messages\":\[\[\"ERROR\",\"StandardError at \\\".+\\\", line \d+ : test ' \
            r'logging_configuration=\\\".+\\\" logging_level=\\\"WARNING\\\" record=\\\"f\\\" ' \
            r'required_option_1=\\\"value_1\\\" required_option_2=\\\"value_2\\\" show_configuration=\\\"f\\\"\"\]\]\}'
    else:
        inspector = \
            r'\"inspector\":\{\"messages\":\[\[\"ERROR\",\"Exception at \\\".+\\\", line \d+ : test ' \
            r'logging_configuration=\\\".+\\\" logging_level=\\\"WARNING\\\" record=\\\"f\\\" ' \
            r'required_option_1=\\\"value_1\\\" required_option_2=\\\"value_2\\\" show_configuration=\\\"f\\\"\"\]\]\}'

    self.assertRegexpMatches(
        result.getvalue().decode('utf-8'),
        r'^chunked 1.0,2,0\n'
        r'\{\}\n'
        r'chunked 1.0,\d+,0\n'
        r'\{(' + inspector + r',' + finished + r'|' + finished + r',' + inspector + r')\}')

    self.assertEqual(command.protocol_version, 2)
    return
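
# The assertions above pin down the SCP v2 exchange that SearchCommand.process drives: a getinfo chunk
# whose metadata carries searchinfo and the argument list, answered with the command's configuration (or
# an inspector block of error messages), followed by an execute chunk carrying the CSV payload, answered
# with '{"finished":true}' plus the command's own CSV. A minimal well-formed input can be produced with
# the build_command_input helper above (payload values are illustrative only):
#
#   ifile = build_command_input(
#       getinfo_metadata='{"action":"getinfo","preview":false,"searchinfo":{"args":[]}}',
#       execute_metadata='{"action":"execute","finished":true}',
#       execute_body='test\r\ndata\r\n')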
def test_process_scpv1(self):

    # TestCommand.process should complain if supports_getinfo == False
    # We support dynamic configuration, not static
    # The exception line number may change, so we're using a regex match instead of a string match

    expected = re.compile(
        r'error_message=RuntimeError at ".+search_command\.py", line \d\d\d : Command test appears to be '
        r'statically configured for search command protocol version 1 and static configuration is unsupported by '
        r'splunklib.searchcommands. Please ensure that default/commands.conf contains this stanza:\n'
        r'\[test\]\n'
        r'filename = test.py\n'
        r'enableheader = true\n'
        r'outputheader = true\n'
        r'requires_srinfo = true\n'
        r'supports_getinfo = true\n'
        r'supports_multivalues = true\n'
        r'supports_rawargs = true')

    argv = ['test.py', 'not__GETINFO__or__EXECUTE__', 'option=value', 'fieldname']
    command = TestCommand()
    result = BytesIO()

    self.assertRaises(SystemExit, command.process, argv, ofile=result)
    self.assertRegexpMatches(result.getvalue().decode('UTF-8'), expected)

    # TestCommand.process should return configuration settings on Getinfo probe

    argv = ['test.py', '__GETINFO__', 'required_option_1=value', 'required_option_2=value']
    command = TestCommand()
    ifile = StringIO('\n')
    result = BytesIO()

    self.assertEqual(str(command.configuration), '')

    if six.PY2:
        expected = (
            "[(u'clear_required_fields', None, [1]), (u'distributed', None, [2]), (u'generates_timeorder', None, [1]), "
            "(u'generating', None, [1, 2]), (u'maxinputs', None, [2]), (u'overrides_timeorder', None, [1]), "
            "(u'required_fields', None, [1, 2]), (u'requires_preop', None, [1]), (u'retainsevents', None, [1]), "
            "(u'run_in_preview', None, [2]), (u'streaming', None, [1]), (u'streaming_preop', None, [1, 2]), "
            "(u'type', None, [2])]")
    else:
        expected = (
            "[('clear_required_fields', None, [1]), ('distributed', None, [2]), ('generates_timeorder', None, [1]), "
            "('generating', None, [1, 2]), ('maxinputs', None, [2]), ('overrides_timeorder', None, [1]), "
            "('required_fields', None, [1, 2]), ('requires_preop', None, [1]), ('retainsevents', None, [1]), "
            "('run_in_preview', None, [2]), ('streaming', None, [1]), ('streaming_preop', None, [1, 2]), "
            "('type', None, [2])]")

    self.assertEqual(repr(command.configuration), expected)

    try:
        # noinspection PyTypeChecker
        command.process(argv, ifile, ofile=result)
    except BaseException as error:
        self.fail('{0}: {1}: {2}\n'.format(type(error).__name__, error, result.getvalue().decode('UTF-8')))

    # No message header and no configuration settings
    self.assertEqual('\r\n\r\n\r\n', result.getvalue().decode('UTF-8'))

    ifile = StringIO('\n')
    result = BytesIO()

    # We might also put this sort of code into our SearchCommand.prepare override ...

    configuration = command.configuration

    # SCP v1/v2 configuration settings
    configuration.generating = True
    configuration.required_fields = ['foo', 'bar']
    configuration.streaming_preop = 'some streaming command'

    # SCP v1 configuration settings
    configuration.clear_required_fields = True
    configuration.generates_timeorder = True
    configuration.overrides_timeorder = True
    configuration.requires_preop = True
    configuration.retainsevents = True
    configuration.streaming = True

    # SCP v2 configuration settings (SCP v1 requires that maxinputs and run_in_preview are set in commands.conf)
    configuration.distributed = True
    configuration.maxinputs = 50000
    configuration.run_in_preview = True
    configuration.type = 'streaming'

    if six.PY2:
        expected = (
            'clear_required_fields="True", generates_timeorder="True", generating="True", overrides_timeorder="True", '
            'required_fields="[u\'foo\', u\'bar\']", requires_preop="True", retainsevents="True", streaming="True", '
            'streaming_preop="some streaming command"')
    else:
        expected = (
            'clear_required_fields="True", generates_timeorder="True", generating="True", overrides_timeorder="True", '
            'required_fields="[\'foo\', \'bar\']", requires_preop="True", retainsevents="True", streaming="True", '
            'streaming_preop="some streaming command"')

    self.assertEqual(str(command.configuration), expected)

    if six.PY2:
        expected = (
            "[(u'clear_required_fields', True, [1]), (u'distributed', True, [2]), (u'generates_timeorder', True, [1]), "
            "(u'generating', True, [1, 2]), (u'maxinputs', 50000, [2]), (u'overrides_timeorder', True, [1]), "
            "(u'required_fields', [u'foo', u'bar'], [1, 2]), (u'requires_preop', True, [1]), "
            "(u'retainsevents', True, [1]), (u'run_in_preview', True, [2]), (u'streaming', True, [1]), "
            "(u'streaming_preop', u'some streaming command', [1, 2]), (u'type', u'streaming', [2])]")
    else:
        expected = (
            "[('clear_required_fields', True, [1]), ('distributed', True, [2]), ('generates_timeorder', True, [1]), "
            "('generating', True, [1, 2]), ('maxinputs', 50000, [2]), ('overrides_timeorder', True, [1]), "
            "('required_fields', ['foo', 'bar'], [1, 2]), ('requires_preop', True, [1]), "
            "('retainsevents', True, [1]), ('run_in_preview', True, [2]), ('streaming', True, [1]), "
            "('streaming_preop', 'some streaming command', [1, 2]), ('type', 'streaming', [2])]")

    self.assertEqual(repr(command.configuration), expected)

    try:
        # noinspection PyTypeChecker
        command.process(argv, ifile, ofile=result)
    except BaseException as error:
        self.fail('{0}: {1}: {2}\n'.format(type(error).__name__, error, result.getvalue().decode('UTF-8')))

    result.seek(0)
    reader = csv.reader(codecs.iterdecode(result, 'UTF-8'))
    self.assertEqual([], next(reader))
    observed = dict(izip(next(reader), next(reader)))
    self.assertRaises(StopIteration, lambda: next(reader))

    expected = {
        'clear_required_fields': '1', '__mv_clear_required_fields': '',
        'generating': '1', '__mv_generating': '',
        'generates_timeorder': '1', '__mv_generates_timeorder': '',
        'overrides_timeorder': '1', '__mv_overrides_timeorder': '',
        'requires_preop': '1', '__mv_requires_preop': '',
        'required_fields': 'foo,bar', '__mv_required_fields': '',
        'retainsevents': '1', '__mv_retainsevents': '',
        'streaming': '1', '__mv_streaming': '',
        'streaming_preop': 'some streaming command', '__mv_streaming_preop': '',
    }

    # No message header and no configuration settings
    self.assertDictEqual(expected, observed)

    for action in '__GETINFO__', '__EXECUTE__':

        # TestCommand.process should produce an error record on parser errors

        argv = [
            'test.py', action, 'required_option_1=value', 'required_option_2=value', 'undefined_option=value',
            'fieldname_1', 'fieldname_2']
        command = TestCommand()
        ifile = StringIO('\n')
        result = BytesIO()

        self.assertRaises(SystemExit, command.process, argv, ifile, ofile=result)
        self.assertTrue(
            'error_message=Unrecognized test command option: undefined_option="value"\r\n\r\n',
            result.getvalue().decode('UTF-8'))

        # TestCommand.process should produce an error record when required options are missing

        argv = ['test.py', action, 'required_option_2=value', 'fieldname_1']
        command = TestCommand()
        ifile = StringIO('\n')
        result = BytesIO()

        self.assertRaises(SystemExit, command.process, argv, ifile, ofile=result)
        self.assertTrue(
            'error_message=A value for test command option required_option_1 is required\r\n\r\n',
            result.getvalue().decode('UTF-8'))

        argv = ['test.py', action, 'fieldname_1']
        command = TestCommand()
        ifile = StringIO('\n')
        result = BytesIO()

        self.assertRaises(SystemExit, command.process, argv, ifile, ofile=result)
        self.assertTrue(
            'error_message=Values for these test command options are required: required_option_1, required_option_2'
            '\r\n\r\n',
            result.getvalue().decode('UTF-8'))

    # TestStreamingCommand.process should exit on processing exceptions

    ifile = StringIO('\naction\r\nraise_error\r\n')
    argv = ['test.py', '__EXECUTE__']
    command = TestStreamingCommand()
    result = BytesIO()

    try:
        # noinspection PyTypeChecker
        command.process(argv, ifile, ofile=result)
    except SystemExit as error:
        self.assertNotEqual(error.code, 0)
        self.assertRegexpMatches(
            result.getvalue().decode('UTF-8'),
            r'^error_message=RuntimeError at ".+", line \d+ : Testing\r\n\r\n$')
    except BaseException as error:
        self.fail('Expected SystemExit, but caught {}: {}'.format(type(error).__name__, error))
    else:
        self.fail('Expected SystemExit, but no exception was raised')

    # Command.process should provide access to search results info

    info_path = os.path.join(
        self._package_directory, 'recordings', 'scpv1', 'Splunk-6.3', 'countmatches.execute.dispatch_dir',
        'externSearchResultsInfo.csv')

    ifile = StringIO('infoPath:' + info_path + '\n\naction\r\nget_search_results_info\r\n')
    argv = ['test.py', '__EXECUTE__']
    command = TestStreamingCommand()
    result = BytesIO()

    try:
        # noinspection PyTypeChecker
        command.process(argv, ifile, ofile=result)
    except BaseException as error:
        self.fail('Expected no exception, but caught {}: {}'.format(type(error).__name__, error))
    else:
        self.assertRegexpMatches(
            result.getvalue().decode('UTF-8'),
            r'^\r\n'
            r'('
            r'data,__mv_data,_serial,__mv__serial\r\n'
            r'\"\{.*u\'is_summary_index\': 0, .+\}\",,0,'
            r'|'
            r'_serial,__mv__serial,data,__mv_data\r\n'
            r'0,,\"\{.*\'is_summary_index\': 0, .+\}\",'
            r')'
            r'\r\n$')

    # TestStreamingCommand.process should provide access to a service object when search results info is available

    self.assertIsInstance(command.service, Service)

    self.assertEqual(command.service.authority, command.search_results_info.splunkd_uri)
    self.assertEqual(command.service.scheme, command.search_results_info.splunkd_protocol)
    self.assertEqual(command.service.port, command.search_results_info.splunkd_port)
    self.assertEqual(command.service.token, command.search_results_info.auth_token)
    self.assertEqual(command.service.namespace.app, command.search_results_info.ppc_app)
    self.assertEqual(command.service.namespace.owner, None)
    self.assertEqual(command.service.namespace.sharing, None)

    # Command.process should not provide access to search results info or a service object when the 'infoPath'
    # input header is unavailable

    ifile = StringIO('\naction\r\nget_search_results_info')
    argv = ['teststreaming.py', '__EXECUTE__']
    command = TestStreamingCommand()

    # noinspection PyTypeChecker
    command.process(argv, ifile, ofile=result)

    self.assertIsNone(command.search_results_info)
    self.assertIsNone(command.service)

    return
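
# Under SCP v1 the command is driven in two phases: a __GETINFO__ invocation that reports configuration
# settings and an __EXECUTE__ invocation that streams records, with input headers such as infoPath
# arriving as 'key:value' lines before a blank line and the CSV body following. A minimal sketch of the
# __EXECUTE__ input consumed above (the path is illustrative):
#
#   StringIO('infoPath:/path/to/externSearchResultsInfo.csv\n\naction\r\nget_search_results_info\r\n')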
def test_record_writer_with_random_data(self, save_recording=False):

    # Confirmed: [minint, maxint) covers the full range of values that xrange allows

    # RecordWriter writes records in units of maxresultrows records. Default: 50,000.
    # Partial results are written when the record count reaches maxresultrows.

    writer = RecordWriterV2(BytesIO(), maxresultrows=10)  # small for the purposes of this unit test
    test_data = OrderedDict()

    fieldnames = ['_serial', '_time', 'random_bytes', 'random_dict', 'random_integers', 'random_unicode']
    test_data['fieldnames'] = fieldnames
    test_data['values'] = []

    write_record = writer.write_record

    for serial_number in range(0, 31):
        values = [serial_number, time(), random_bytes(), random_dict(), random_integers(), random_unicode()]
        record = OrderedDict(izip(fieldnames, values))
        write_record(record)
        test_data['values'].append(values)

    # RecordWriter accumulates inspector messages and metrics until maxresultrows are written, a partial result
    # is produced or we're finished

    messages = [
        ('debug', random_unicode()),
        ('error', random_unicode()),
        ('fatal', random_unicode()),
        ('info', random_unicode()),
        ('warn', random_unicode())]

    test_data['messages'] = messages

    for message_type, message_text in messages:
        writer.write_message(message_type, '{}', message_text)

    metrics = {
        'metric-1': SearchMetric(1, 2, 3, 4),
        'metric-2': SearchMetric(5, 6, 7, 8)}

    test_data['metrics'] = metrics

    for name, metric in six.iteritems(metrics):
        writer.write_metric(name, metric)

    self.assertEqual(writer._chunk_count, 0)
    self.assertEqual(writer._record_count, 31)
    self.assertEqual(writer.pending_record_count, 31)
    self.assertGreater(writer._buffer.tell(), 0)
    self.assertEqual(writer._total_record_count, 0)
    self.assertEqual(writer.committed_record_count, 0)
    self.assertListEqual(writer._fieldnames, fieldnames)
    self.assertListEqual(writer._inspector['messages'], messages)

    self.assertDictEqual(
        dict(ifilter(lambda k_v: k_v[0].startswith('metric.'), six.iteritems(writer._inspector))),
        dict(imap(lambda k_v1: ('metric.' + k_v1[0], k_v1[1]), six.iteritems(metrics))))

    writer.flush(finished=True)

    self.assertEqual(writer._chunk_count, 1)
    self.assertEqual(writer._record_count, 0)
    self.assertEqual(writer.pending_record_count, 0)
    self.assertEqual(writer._buffer.tell(), 0)
    self.assertEqual(writer._buffer.getvalue(), '')
    self.assertEqual(writer._total_record_count, 31)
    self.assertEqual(writer.committed_record_count, 31)

    self.assertRaises(AssertionError, writer.flush, finished=True, partial=True)
    self.assertRaises(AssertionError, writer.flush, finished='non-boolean')
    self.assertRaises(AssertionError, writer.flush, partial='non-boolean')
    self.assertRaises(AssertionError, writer.flush)

    # P2 [ ] TODO: For SCPv2 we should follow the finish negotiation protocol.
    # self.assertRaises(RuntimeError, writer.write_record, {})

    self.assertFalse(writer._ofile.closed)
    self.assertIsNone(writer._fieldnames)
    self.assertDictEqual(writer._inspector, OrderedDict())

    # P2 [ ] TODO: Verify that RecordWriter gives consumers the ability to write partial results by calling
    # RecordWriter.flush(partial=True).

    # P2 [ ] TODO: Verify that RecordWriter gives consumers the ability to finish early by calling
    # RecordWriter.flush(finish=True).

    return
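
# RecordWriterV2 buffers records, messages, and metrics until they are flushed: pending_record_count
# tracks what is buffered, committed_record_count what has been written through to the output file, and
# flush(finished=True) emits the final chunk and resets the writer. A minimal sketch of that lifecycle
# (the record contents and maxresultrows value are illustrative):
#
#   writer = RecordWriterV2(BytesIO(), maxresultrows=10)
#   writer.write_record(OrderedDict((('_serial', 0), ('_time', time()))))  # buffered: pending_record_count == 1
#   writer.flush(finished=True)                                            # committed_record_count == 1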