Code Example #1
    def _compare_chunks(self, expected, output, time_sensitive=True):

        expected = expected.strip()
        output = output.strip()

        if time_sensitive:
            self.assertEqual(len(expected), len(output))
            compare_csv_files = self._compare_csv_files_time_sensitive
        else:
            compare_csv_files = self._compare_csv_files_time_insensitive

        chunks_1 = self._load_chunks(StringIO(expected))
        chunks_2 = self._load_chunks(StringIO(output))

        self.assertEqual(len(chunks_1), len(chunks_2))
        n = 0

        for chunk_1, chunk_2 in izip(chunks_1, chunks_2):
            self.assertDictEqual(
                chunk_1.metadata, chunk_2.metadata,
                'Chunk {0}: metadata error: "{1}" != "{2}"'.format(
                    n, chunk_1.metadata, chunk_2.metadata))
            compare_csv_files(chunk_1.body, chunk_2.body)
            n += 1

        return
Code Example #2
    def test_successful_validation(self):
        """Check that successful validation yield no text and a 0 exit value."""

        # Override abstract methods
        class NewScript(Script):
            def get_scheme(self):
                return None

            def validate_input(self, definition):
                # always succeed...
                return

            def stream_events(self, inputs, ew):
                # unused
                return

        script = NewScript()

        out = StringIO()
        err = StringIO()
        ew = EventWriter(out, err)

        args = [TEST_SCRIPT_PATH, "--validate-arguments"]

        return_value = script.run_script(args, ew,
                                         data_open("data/validation.xml"))

        self.assertEqual("", err.getvalue())
        self.assertEqual("", out.getvalue())
        self.assertEqual(0, return_value)
Code Example #3
    def _compare_csv_files_time_sensitive(self, expected, output):

        self.assertEqual(len(expected), len(output))

        skip_first_row = expected[0:2] == '\r\n'
        expected = StringIO(expected)
        output = StringIO(output)
        line_number = 1

        if skip_first_row:
            self.assertEqual(expected.readline(), output.readline())
            line_number += 1

        expected = csv.DictReader(expected)
        output = csv.DictReader(output)

        for expected_row in expected:
            output_row = next(output)
            self.assertDictEqual(
                expected_row, output_row,
                'Error on line {0}: expected {1}, not {2}'.format(
                    line_number, expected_row, output_row))
            line_number += 1

        self.assertRaises(StopIteration, lambda: next(output))
        return
Code Example #4
    def playback(self, path):

        with open(path, 'rb') as f:
            test_data = pickle.load(f)

        self._output = StringIO()
        self._recording = test_data['inputs']
        self._recording_part = self._recording.popleft()

        def get(self, method, *args, **kwargs):
            return self._recording_part[method.__name__].popleft()

        self.get = MethodType(get, self, self.__class__)

        def next_part(self):
            self._recording_part = self._recording.popleft()

        self.next_part = MethodType(next_part, self, self.__class__)

        def stop(self):
            self._test_case.assertEqual(test_data['results'],
                                        self._output.getvalue())

        self.stop = MethodType(stop, self, self.__class__)
        return
Code Example #5
    def _compare_csv_files_time_insensitive(self, expected, output):

        skip_first_row = expected[0:2] == '\r\n'
        expected = StringIO(expected)
        output = StringIO(output)
        line_number = 1

        if skip_first_row:
            self.assertEqual(expected.readline(), output.readline())
            line_number += 1

        expected = csv.DictReader(expected)
        output = csv.DictReader(output)

        for expected_row in expected:
            output_row = next(output)

            try:
                timestamp = float(output_row['_time'])
                datetime.fromtimestamp(timestamp)
            except BaseException as error:
                self.fail(error)
            else:
                output_row['_time'] = expected_row['_time']

            self.assertDictEqual(
                expected_row, output_row,
                'Error on line {0}: expected {1}, not {2}'.format(
                    line_number, expected_row, output_row))

            line_number += 1

        self.assertRaises(StopIteration, lambda: next(output))
        return
Code Example #6
    def test_recorder(self):

        if python_version[0] == 2 and python_version[1] < 7:
            print("Skipping test since we're on {0}".format(".".join(map(str, python_version))))
            return

        # Grab an input/output recording, the results of a prior countmatches run

        recording = os.path.join(self._package_path, 'recordings', 'scpv2', 'Splunk-6.3', 'countmatches.')

        with gzip.open(recording + 'input.gz', 'rb') as file_1:
            with io.open(recording + 'output', 'rb') as file_2:
                ifile = StringIO(file_1.read())
                result = StringIO(file_2.read())

        # Set up the input/output recorders that are under test

        ifile = Recorder(mktemp(), ifile)

        try:
            ofile = Recorder(mktemp(), StringIO())

            try:
                # Read and then write a line
                ifile.readline()
                ofile.write(result.readline())

                # Read and then write a block
                ifile.read()
                ofile.write(result.read())

                # Verify that what we wrote is equivalent to the original recording, the result from a prior
                # countmatches run
                self.assertEqual(ofile.getvalue(), result.getvalue())

                # Verify that we faithfully recorded the input and output files
                ifile._recording.close()
                ofile._recording.close()

                with gzip.open(ifile._recording.name, 'rb') as file_1:
                    with gzip.open(ofile._recording.name, 'rb') as file_2:
                        self.assertEqual(file_1.read(), ifile._file.getvalue())
                        self.assertEqual(file_2.read(), ofile._file.getvalue())

            finally:
                ofile._recording.close()
                os.remove(ofile._recording.name)

        finally:
            ifile._recording.close()
            os.remove(ifile._recording.name)

        return
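
The Recorder class driven by this test is not included in the listing. Judging from the calls above (readline/read/write, plus the _file and _recording attributes), it tees every read and write into a gzip recording. A minimal sketch of that pattern, under those assumptions; the class name and details here are hypothetical, not the SDK's implementation:

    import gzip

    class TeeRecorder(object):
        """Hypothetical sketch: wrap a file object and mirror all traffic
        into a gzip recording at path (Python 2 str/bytes semantics assumed)."""

        def __init__(self, path, fileobj):
            self._file = fileobj
            self._recording = gzip.open(path, 'wb')

        def read(self, size=None):
            data = self._file.read() if size is None else self._file.read(size)
            self._recording.write(data)
            return data

        def readline(self):
            line = self._file.readline()
            self._recording.write(line)
            return line

        def write(self, data):
            self._file.write(data)
            self._recording.write(data)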
Code Example #7
    def test_messages_header(self):
        @Configuration()
        class TestMessagesHeaderCommand(SearchCommand):
            class ConfigurationSettings(SearchCommand.ConfigurationSettings):
                @classmethod
                def fix_up(cls, command_class):
                    pass

        command = TestMessagesHeaderCommand()
        command._protocol_version = 1
        output_buffer = StringIO()
        command._record_writer = RecordWriterV1(output_buffer)

        messages = [(command.write_debug, 'debug_message'),
                    (command.write_error, 'error_message'),
                    (command.write_fatal, 'fatal_message'),
                    (command.write_info, 'info_message'),
                    (command.write_warning, 'warning_message')]

        for write, message in messages:
            write(message)

        command.finish()

        expected = ('debug_message=debug_message\r\n'
                    'error_message=error_message\r\n'
                    'error_message=fatal_message\r\n'
                    'info_message=info_message\r\n'
                    'warn_message=warning_message\r\n'
                    '\r\n')

        self.assertEqual(output_buffer.getvalue(), expected)
        return
Code Example #8
File: server.py  Project: zzz221129/splunk-sdk-python
    def make_request(self, url, method, data, headers):
        self.log_message("%s: %s", method, url)

        try:
            # Make the request
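            # NOTE: 'urllib' here is assumed to be an alias for a module that
            # provides Request, urlopen, and HTTPError (e.g. urllib2 on
            # Python 2); the standard urllib module has no Request class.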
            request = urllib.Request(url, data, headers)
            request.get_method = lambda: method
            response = urllib.urlopen(request)

            # We were successful, so send the response code
            self.send_response(response.code, message=response.msg)
            for key, value in iteritems(dict(response.headers)):
                # Optionally log the headers
                #self.log_message("%s: %s" % (key, value))

                self.send_header(key, value)

            # Send the cross-domain headers
            self.send_header("Access-Control-Allow-Origin", "*")
            self.send_header("Access-Control-Allow-Methods",
                             "PUT, POST, GET, DELETE, OPTIONS")
            self.send_header("Access-Control-Allow-Headers",
                             "X-Redirect-URL, Authorization")

            # We are done with the headers
            self.end_headers()

            # Copy the response to the output
            self.copyfile(response, self.wfile)
        except urllib.HTTPError as e:
            # On errors, log the response code and message
            self.log_message("Code: %s (%s)", e.code, e.msg)

            for key, value in iteritems(dict(e.hdrs)):
                # On errors, we always log the headers
                self.log_message("%s: %s", key, value)

            response_text = e.fp.read()
            response_file = StringIO(response_text)

            # On errors, we also log the response text
            self.log_message("Response: %s", response_text)

            # Send the error response code
            self.send_response(e.code, message=e.msg)

            # Send the cross-domain headers
            self.send_header("Access-Control-Allow-Origin", "*")
            self.send_header("Access-Control-Allow-Methods",
                             "PUT, POST, GET, DELETE, OPTIONS")
            self.send_header("Access-Control-Allow-Headers",
                             "X-Redirect-URL, Authorization")

            # Send the other headers
            self.send_header("Content-Type", self.error_content_type)
            self.send_header('Connection', 'close')
            self.end_headers()

            # Finally, send the error itself
            self.copyfile(response_file, self.wfile)
Code Example #9
    def record(self, path):

        self._output = StringIO()
        self._recording = deque()
        self._recording_part = OrderedDict()
        self._recording.append(self._recording_part)

        def get(self, method, *args, **kwargs):
            result = method(*args, **kwargs)
            part = self._recording_part
            key = method.__name__
            try:
                results = part[key]
            except KeyError:
                part[key] = results = deque()
            results.append(result)
            return result

        self.get = MethodType(get, self, self.__class__)

        def next_part(self):
            part = OrderedDict()
            self._recording_part = part
            self._recording.append(part)

        self.next_part = MethodType(next_part, self, self.__class__)

        def stop(self):
            with io.open(path, 'wb') as f:
                test = OrderedDict((('inputs', self._recording), ('results', self._output.getvalue())))
                pickle.dump(test, f)

        self.stop = MethodType(stop, self, self.__class__)
        return
Code Example #10
    def __init__(self, ofile, maxresultrows=None):
        self._maxresultrows = 50000 if maxresultrows is None else maxresultrows

        self._ofile = set_binary_mode(ofile)
        self._fieldnames = None
        self._buffer = StringIO()

        self._writer = csv.writer(self._buffer, dialect=CsvDialect)
        self._writerow = self._writer.writerow
        self._finished = False
        self._flushed = False

        self._inspector = OrderedDict()
        self._chunk_count = 0
        self._pending_record_count = 0
        self._committed_record_count = 0
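
write_record and flush are not part of this excerpt, but the attribute names above (_buffer, _pending_record_count, _maxresultrows) and the record-writer tests later in this listing suggest an accumulate-and-flush design: rows collect in the in-memory CSV buffer and are written to _ofile once maxresultrows records are pending. A minimal sketch of that pattern, under those assumptions; the class is hypothetical, not the SDK's code:

    import csv
    from io import StringIO

    class BufferedRecordWriter(object):
        """Hypothetical sketch of the accumulate-and-flush pattern."""

        def __init__(self, ofile, maxresultrows=50000):
            self._ofile = ofile
            self._maxresultrows = maxresultrows
            self._buffer = StringIO()
            self._writer = csv.writer(self._buffer)
            self._pending_record_count = 0

        def write_record(self, record):
            self._writer.writerow(list(record.values()))
            self._pending_record_count += 1
            if self._pending_record_count >= self._maxresultrows:
                self.flush()

        def flush(self):
            # Empty the in-memory buffer into the output file
            self._ofile.write(self._buffer.getvalue())
            self._buffer.seek(0)
            self._buffer.truncate()
            self._pending_record_count = 0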
Code Example #11
    def test_error_on_script_with_null_scheme(self):
        """A script that returns a null scheme should generate no output on
        stdout and an error on stderr saying that it the scheme was null."""

        # Override abstract methods
        class NewScript(Script):
            def get_scheme(self):
                return None

            def stream_events(self, inputs, ew):
                # not used
                return

        script = NewScript()

        out = BytesIO()
        err = BytesIO()
        ew = EventWriter(out, err)

        in_stream = StringIO()

        args = [TEST_SCRIPT_PATH, "--scheme"]
        return_value = script.run_script(args, ew, in_stream)

        self.assertEqual(b"", out.getvalue())
        self.assertEqual(
            b"FATAL Modular input script returned a null scheme.\n",
            err.getvalue())
        self.assertNotEqual(0, return_value)
Code Example #12
    def test_event_without_enough_fields_fails(self):
        """Check that events without data throw an error"""
        with self.assertRaises(ValueError):
            event = Event()
            stream = StringIO()
            event.write_to(stream)
        self.assertTrue(True)
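
For contrast, here is a minimal sketch of the passing case, assuming splunklib.modularinput's Event accepts data and stanza keyword arguments (as it does in recent SDK versions):

    def test_event_with_data_succeeds(self):
        # Hedged counterpart: an Event with its data field set should
        # serialize without raising.
        event = Event(data="hello world", stanza="my_scheme://my_input")
        stream = StringIO()
        event.write_to(stream)  # emits an <event> XML element to the stream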
Code Example #13
    def assertInfoEqual(self, output, expected):
        reader = csv.reader(StringIO(output))
        self.assertEqual([], next(reader))
        fields = next(reader)
        values = next(reader)
        self.assertRaises(StopIteration, lambda: next(reader))
        output = dict(izip(fields, values))

        reader = csv.reader(StringIO(expected))
        self.assertEqual([], next(reader))
        fields = next(reader)
        values = next(reader)
        self.assertRaises(StopIteration, lambda: next(reader))
        expected = dict(izip(fields, values))

        self.assertDictEqual(expected, output)
Code Example #14
    def _execute_chunk_v2(self, process, chunk):
        metadata, body = chunk

        if len(body) <= 0:
            return

        records = self._read_csv_records(StringIO(body))
        self._record_writer.write_records(process(records))
Code Example #15
    def _execute_chunk_v2(self, process, chunk):
        metadata, body = chunk

        if len(body) <= 0 and not self._allow_empty_input:
            raise ValueError(
                "No records found to process. Set allow_empty_input=True in dispatch function to move forward "
                "with empty records.")

        records = self._read_csv_records(StringIO(body))
        self._record_writer.write_records(process(records))
Code Example #16
    def _records_protocol_v2(self, ifile):
        istream = self._as_binary_stream(ifile)

        while True:
            result = self._read_chunk(istream)

            if not result:
                return

            metadata, body = result
            action = getattr(metadata, 'action', None)

            if action != 'execute':
                raise RuntimeError(
                    'Expected execute action, not {}'.format(action))

            finished = getattr(metadata, 'finished', False)
            self._record_writer.is_flushed = False

            if len(body) > 0:
                reader = csv.reader(StringIO(body), dialect=CsvDialect)

                try:
                    fieldnames = next(reader)
                except StopIteration:
                    return

                mv_fieldnames = dict([(name, name[len('__mv_'):])
                                      for name in fieldnames
                                      if name.startswith('__mv_')])

                if len(mv_fieldnames) == 0:
                    for values in reader:
                        yield OrderedDict(izip(fieldnames, values))
                else:
                    for values in reader:
                        record = OrderedDict()
                        for fieldname, value in izip(fieldnames, values):
                            if fieldname.startswith('__mv_'):
                                if len(value) > 0:
                                    record[mv_fieldnames[
                                        fieldname]] = self._decode_list(value)
                            elif fieldname not in record:
                                record[fieldname] = value
                        yield record

            if finished:
                return

            self.flush()
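
_records_protocol_v2 consumes frames through self._read_chunk, which is not shown here. The SCP v2 tests later in this listing build those frames by hand as 'chunked 1.0,<metadata-length>,<body-length>\n' followed by the JSON metadata and a CSV body; here is a small helper sketch for producing one (the function name is hypothetical):

    import json

    def build_chunk(metadata, body=''):
        # Frame layout: 'chunked 1.0,<metadata-length>,<body-length>\n<metadata><body>'
        header = json.dumps(metadata)
        return 'chunked 1.0,{},{}\n{}{}'.format(len(header), len(body), header, body)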
Code Example #17
File: internals.py  Project: bawood/TA-DUOSecurity2FA
    def __init__(self, ofile, maxresultrows=None):
        self._maxresultrows = 50000 if maxresultrows is None else maxresultrows

        self._ofile = ofile
        self._fieldnames = None
        self._buffer = StringIO()

        self._writer = csv.writer(self._buffer, dialect=CsvDialect)
        self._writerow = self._writer.writerow
        self._finished = False
        self._flushed = False

        self._inspector = OrderedDict()
        self._chunk_count = 0
        self._record_count = 0
        self._total_record_count = 0
Code Example #18
    def format(self, value):
        output = StringIO()
        writer = csv.writer(output, List.Dialect)
        writer.writerow(value)
        value = output.getvalue()
        return value[:-1]
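
A brief usage note, assuming List.Dialect terminates rows with a single '\n' (so value[:-1] strips exactly that terminator):

    # e.g. format(['a', 'b,c']) -> 'a,"b,c"'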
Code Example #19
class TestRecorder(object):
    def __init__(self, test_case):

        self._test_case = test_case
        self._output = None
        self._recording = None
        self._recording_part = None

        def _not_implemented(self):
            raise NotImplementedError(
                'class {} is not in playback or record mode'.format(
                    self.__class__.__name__))

        self.get = self.next_part = self.stop = MethodType(
            _not_implemented, self, self.__class__)
        return

    @property
    def output(self):
        return self._output

    def playback(self, path):

        with open(path, 'rb') as f:
            test_data = pickle.load(f)

        self._output = StringIO()
        self._recording = test_data['inputs']
        self._recording_part = self._recording.popleft()

        def get(self, method, *args, **kwargs):
            return self._recording_part[method.__name__].popleft()

        self.get = MethodType(get, self, self.__class__)

        def next_part(self):
            self._recording_part = self._recording.popleft()

        self.next_part = MethodType(next_part, self, self.__class__)

        def stop(self):
            self._test_case.assertEqual(test_data['results'],
                                        self._output.getvalue())

        self.stop = MethodType(stop, self, self.__class__)
        return

    def record(self, path):

        self._output = StringIO()
        self._recording = deque()
        self._recording_part = OrderedDict()
        self._recording.append(self._recording_part)

        def get(self, method, *args, **kwargs):
            result = method(*args, **kwargs)
            part = self._recording_part
            key = method.__name__
            try:
                results = part[key]
            except KeyError:
                part[key] = results = deque()
            results.append(result)
            return result

        self.get = MethodType(get, self, self.__class__)

        def next_part(self):
            part = OrderedDict()
            self._recording_part = part
            self._recording.append(part)

        self.next_part = MethodType(next_part, self, self.__class__)

        def stop(self):
            with io.open(path, 'wb') as f:
                test = OrderedDict(
                    (('inputs', self._recording), ('results',
                                                   self._output.getvalue())))
                pickle.dump(test, f)

        self.stop = MethodType(stop, self, self.__class__)
        return
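
A hedged usage sketch of the record/playback switch above; the test case and the recorded connection.read method are hypothetical:

    recorder = TestRecorder(test_case)

    # Record mode: get() calls through to the real method and captures each result.
    recorder.record('countmatches.recording')
    value = recorder.get(connection.read)   # invokes connection.read() and records it
    recorder.next_part()                    # begin a new recording part
    recorder.stop()                         # pickle the inputs and accumulated output

    # Playback mode: get() replays recorded results without touching the real method.
    recorder.playback('countmatches.recording')
    value = recorder.get(connection.read)   # returns the recorded result instead
    recorder.stop()                         # assert output matches the recording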
Code Example #20
    def test_record_writer_with_recordings(self):

        cls = self.__class__
        method = cls.test_record_writer_with_recordings
        base_path = os.path.join(self._recordings_path, '.'.join(
            (cls.__name__, method.__name__)))

        for input_file in iglob(base_path + '*.input.gz'):

            with gzip.open(input_file, 'rb') as ifile:
                test_data = pickle.load(ifile)

            writer = RecordWriterV2(
                StringIO(),
                maxresultrows=10)  # small for the purposes of this unit test
            write_record = writer.write_record
            fieldnames = test_data['fieldnames']

            for values in test_data['values']:
                record = OrderedDict(izip(fieldnames, values))
                try:
                    write_record(record)
                except Exception as error:
                    self.fail(error)

            for message_type, message_text in test_data['messages']:
                writer.write_message(message_type, '{}', message_text)

            for name, metric in six.iteritems(test_data['metrics']):
                writer.write_metric(name, metric)

            writer.flush(finished=True)

            # Read expected data

            expected_path = os.path.splitext(
                os.path.splitext(input_file)[0])[0] + '.output'

            with io.open(expected_path, 'rb') as ifile:
                expected = ifile.read()

            expected = self._load_chunks(StringIO(expected))

            # Read observed data

            ifile = writer._ofile
            ifile.seek(0)

            observed = self._load_chunks(ifile)

            # Write observed data (as an aid to diagnostics)

            observed_path = expected_path + '.observed'
            observed_value = ifile.getvalue()

            with io.open(observed_path, 'wb') as ifile:
                ifile.write(observed_value)

            self._compare_chunks(observed, expected)

        return
Code Example #21
    def test_record_writer_with_random_data(self, save_recording=False):

        # Confirmed: [minint, maxint) covers the full range of values that xrange allows

        # RecordWriter writes output in chunks of maxresultrows records. Default: 50,000.
        # Partial results are written when the record count reaches maxresultrows.

        writer = RecordWriterV2(
            StringIO(),
            maxresultrows=10)  # small for the purposes of this unit test
        test_data = OrderedDict()

        fieldnames = [
            '_serial', '_time', 'random_bytes', 'random_dict',
            'random_integers', 'random_unicode'
        ]
        test_data['fieldnames'] = fieldnames
        test_data['values'] = []

        write_record = writer.write_record

        for serial_number in range(0, 31):
            values = [
                serial_number,
                time(),
                random_bytes(),
                random_dict(),
                random_integers(),
                random_unicode()
            ]
            record = OrderedDict(izip(fieldnames, values))
            write_record(record)
            test_data['values'].append(values)

        # RecordWriter accumulates inspector messages and metrics until maxresultrows are written, a partial result
        # is produced or we're finished

        messages = [('debug', random_unicode()), ('error', random_unicode()),
                    ('fatal', random_unicode()), ('info', random_unicode()),
                    ('warn', random_unicode())]

        test_data['messages'] = messages

        for message_type, message_text in messages:
            writer.write_message(message_type, '{}', message_text)

        metrics = {
            'metric-1': SearchMetric(1, 2, 3, 4),
            'metric-2': SearchMetric(5, 6, 7, 8)
        }

        test_data['metrics'] = metrics

        for name, metric in six.iteritems(metrics):
            writer.write_metric(name, metric)

        self.assertEqual(writer._chunk_count, 3)
        self.assertEqual(writer._record_count, 1)
        self.assertGreater(writer._buffer.tell(), 0)
        self.assertEqual(writer._total_record_count, 30)
        self.assertListEqual(writer._fieldnames, fieldnames)
        self.assertListEqual(writer._inspector['messages'], messages)

        self.assertDictEqual(
            dict(
                ifilter(lambda k_v: k_v[0].startswith('metric.'),
                        six.iteritems(writer._inspector))),
            dict(
                imap(lambda k_v1: ('metric.' + k_v1[0], k_v1[1]),
                     six.iteritems(metrics))))

        writer.flush(finished=True)

        self.assertEqual(writer._chunk_count, 4)
        self.assertEqual(writer._record_count, 0)
        self.assertEqual(writer._buffer.tell(), 0)
        self.assertEqual(writer._buffer.getvalue(), '')
        self.assertEqual(writer._total_record_count, 31)

        self.assertRaises(AssertionError,
                          writer.flush,
                          finished=True,
                          partial=True)
        self.assertRaises(AssertionError, writer.flush, finished='non-boolean')
        self.assertRaises(AssertionError, writer.flush, partial='non-boolean')
        self.assertRaises(AssertionError, writer.flush)

        self.assertRaises(RuntimeError, writer.write_record, {})

        self.assertFalse(writer._ofile.closed)
        self.assertIsNone(writer._fieldnames)
        self.assertDictEqual(writer._inspector, OrderedDict())

        # P2 [ ] TODO: Verify that RecordWriter gives consumers the ability to write partial results by calling
        # RecordWriter.flush(partial=True).

        # P2 [ ] TODO: Verify that RecordWriter gives consumers the ability to finish early by calling
        # RecordWriter.flush(finish=True).

        if save_recording:

            cls = self.__class__
            method = cls.test_record_writer_with_recordings
            base_path = os.path.join(
                self._recordings_path, '.'.join(
                    (cls.__name__, method.__name__, six.text_type(time()))))

            with gzip.open(base_path + '.input.gz', 'wb') as f:
                pickle.dump(test_data, f)

            with open(base_path + '.output', 'wb') as f:
                f.write(writer._ofile.getvalue())

        return
Code Example #22
    def test_process_scpv1(self):

        # TestCommand.process should complain if supports_getinfo == False
        # We support dynamic configuration, not static

        # The exception line number may change, so we're using a regex match instead of a string match

        expected = re.compile(
            r'error_message=RuntimeError at ".+search_command\.py", line \d\d\d : Command test appears to be '
            r'statically configured for search command protocol version 1 and static configuration is unsupported by '
            r'splunklib.searchcommands. Please ensure that default/commands.conf contains this stanza:\n'
            r'\[test\]\n'
            r'filename = test.py\n'
            r'enableheader = true\n'
            r'outputheader = true\n'
            r'requires_srinfo = true\n'
            r'supports_getinfo = true\n'
            r'supports_multivalues = true\n'
            r'supports_rawargs = true')

        argv = [
            'test.py', 'not__GETINFO__or__EXECUTE__', 'option=value',
            'fieldname'
        ]
        command = TestCommand()
        result = StringIO()

        self.assertRaises(SystemExit, command.process, argv, ofile=result)
        self.assertRegexpMatches(result.getvalue(), expected)

        # TestCommand.process should return configuration settings on Getinfo probe

        argv = [
            'test.py', '__GETINFO__', 'required_option_1=value',
            'required_option_2=value'
        ]
        command = TestCommand()
        ifile = StringIO('\n')
        result = StringIO()

        self.assertEqual(str(command.configuration), '')

        if six.PY2:
            expected = (
                "[(u'clear_required_fields', None, [1]), (u'distributed', None, [2]), (u'generates_timeorder', None, [1]), "
                "(u'generating', None, [1, 2]), (u'maxinputs', None, [2]), (u'overrides_timeorder', None, [1]), "
                "(u'required_fields', None, [1, 2]), (u'requires_preop', None, [1]), (u'retainsevents', None, [1]), "
                "(u'run_in_preview', None, [2]), (u'streaming', None, [1]), (u'streaming_preop', None, [1, 2]), "
                "(u'type', None, [2])]")
        else:
            expected = (
                "[('clear_required_fields', None, [1]), ('distributed', None, [2]), ('generates_timeorder', None, [1]), "
                "('generating', None, [1, 2]), ('maxinputs', None, [2]), ('overrides_timeorder', None, [1]), "
                "('required_fields', None, [1, 2]), ('requires_preop', None, [1]), ('retainsevents', None, [1]), "
                "('run_in_preview', None, [2]), ('streaming', None, [1]), ('streaming_preop', None, [1, 2]), "
                "('type', None, [2])]")

        self.assertEqual(repr(command.configuration), expected)

        try:
            # noinspection PyTypeChecker
            command.process(argv, ifile, ofile=result)
        except BaseException as error:
            self.fail('{0}: {1}: {2}\n'.format(
                type(error).__name__, error, result.getvalue()))

        self.assertEqual('\r\n\r\n\r\n', result.getvalue())  # No message header and no configuration settings

        ifile = StringIO('\n')
        result = StringIO()

        # We might also put this sort of code into our SearchCommand.prepare override ...

        configuration = command.configuration

        # SCP v1/v2 configuration settings
        configuration.generating = True
        configuration.required_fields = ['foo', 'bar']
        configuration.streaming_preop = 'some streaming command'

        # SCP v1 configuration settings
        configuration.clear_required_fields = True
        configuration.generates_timeorder = True
        configuration.overrides_timeorder = True
        configuration.requires_preop = True
        configuration.retainsevents = True
        configuration.streaming = True

        # SCP v2 configuration settings (SCP v1 requires that maxinputs and run_in_preview are set in commands.conf)
        configuration.distributed = True
        configuration.maxinputs = 50000
        configuration.run_in_preview = True
        configuration.type = 'streaming'

        if six.PY2:
            expected = (
                'clear_required_fields="True", generates_timeorder="True", generating="True", overrides_timeorder="True", '
                'required_fields="[u\'foo\', u\'bar\']", requires_preop="True", retainsevents="True", streaming="True", '
                'streaming_preop="some streaming command"')
        else:
            expected = (
                'clear_required_fields="True", generates_timeorder="True", generating="True", overrides_timeorder="True", '
                'required_fields="[\'foo\', \'bar\']", requires_preop="True", retainsevents="True", streaming="True", '
                'streaming_preop="some streaming command"')
        self.assertEqual(str(command.configuration), expected)

        if six.PY2:
            expected = (
                "[(u'clear_required_fields', True, [1]), (u'distributed', True, [2]), (u'generates_timeorder', True, [1]), "
                "(u'generating', True, [1, 2]), (u'maxinputs', 50000, [2]), (u'overrides_timeorder', True, [1]), "
                "(u'required_fields', [u'foo', u'bar'], [1, 2]), (u'requires_preop', True, [1]), "
                "(u'retainsevents', True, [1]), (u'run_in_preview', True, [2]), (u'streaming', True, [1]), "
                "(u'streaming_preop', u'some streaming command', [1, 2]), (u'type', u'streaming', [2])]"
            )
        else:
            expected = (
                "[('clear_required_fields', True, [1]), ('distributed', True, [2]), ('generates_timeorder', True, [1]), "
                "('generating', True, [1, 2]), ('maxinputs', 50000, [2]), ('overrides_timeorder', True, [1]), "
                "('required_fields', ['foo', 'bar'], [1, 2]), ('requires_preop', True, [1]), "
                "('retainsevents', True, [1]), ('run_in_preview', True, [2]), ('streaming', True, [1]), "
                "('streaming_preop', 'some streaming command', [1, 2]), ('type', 'streaming', [2])]"
            )

        self.assertEqual(repr(command.configuration), expected)

        try:
            # noinspection PyTypeChecker
            command.process(argv, ifile, ofile=result)
        except BaseException as error:
            self.fail('{0}: {1}: {2}\n'.format(
                type(error).__name__, error, result.getvalue()))

        result.seek(0)
        reader = csv.reader(result)
        self.assertEqual([], next(reader))
        observed = dict(izip(next(reader), next(reader)))
        self.assertRaises(StopIteration, lambda: next(reader))

        expected = {
            'clear_required_fields': '1',
            '__mv_clear_required_fields': '',
            'generating': '1',
            '__mv_generating': '',
            'generates_timeorder': '1',
            '__mv_generates_timeorder': '',
            'overrides_timeorder': '1',
            '__mv_overrides_timeorder': '',
            'requires_preop': '1',
            '__mv_requires_preop': '',
            'required_fields': 'foo,bar',
            '__mv_required_fields': '',
            'retainsevents': '1',
            '__mv_retainsevents': '',
            'streaming': '1',
            '__mv_streaming': '',
            'streaming_preop': 'some streaming command',
            '__mv_streaming_preop': '',
        }

        self.assertDictEqual(
            expected,
            observed)  # No message header and no configuration settings

        for action in '__GETINFO__', '__EXECUTE__':

            # TestCommand.process should produce an error record on parser errors

            argv = [
                'test.py', action, 'required_option_1=value',
                'required_option_2=value', 'undefined_option=value',
                'fieldname_1', 'fieldname_2'
            ]

            command = TestCommand()
            ifile = StringIO('\n')
            result = StringIO()

            self.assertRaises(SystemExit,
                              command.process,
                              argv,
                              ifile,
                              ofile=result)
            self.assertIn(
                'error_message=Unrecognized test command option: undefined_option="value"\r\n\r\n',
                result.getvalue())

            # TestCommand.process should produce an error record when required options are missing

            argv = [
                'test.py', action, 'required_option_2=value', 'fieldname_1'
            ]
            command = TestCommand()
            ifile = StringIO('\n')
            result = StringIO()

            self.assertRaises(SystemExit,
                              command.process,
                              argv,
                              ifile,
                              ofile=result)

            self.assertIn(
                'error_message=A value for test command option required_option_1 is required\r\n\r\n',
                result.getvalue())

            argv = ['test.py', action, 'fieldname_1']
            command = TestCommand()
            ifile = StringIO('\n')
            result = StringIO()

            self.assertRaises(SystemExit,
                              command.process,
                              argv,
                              ifile,
                              ofile=result)

            self.assertIn(
                'error_message=Values for these test command options are required: required_option_1, required_option_2'
                '\r\n\r\n', result.getvalue())

        # TestStreamingCommand.process should exit on processing exceptions

        ifile = StringIO('\naction\r\nraise_error\r\n')
        argv = ['test.py', '__EXECUTE__']
        command = TestStreamingCommand()
        result = StringIO()

        try:
            # noinspection PyTypeChecker
            command.process(argv, ifile, ofile=result)
        except SystemExit as error:
            self.assertNotEqual(error.code, 0)
            self.assertRegexpMatches(
                result.getvalue(),
                r'^error_message=RuntimeError at ".+", line \d+ : Testing\r\n\r\n$'
            )
        except BaseException as error:
            self.fail('Expected SystemExit, but caught {}: {}'.format(
                type(error).__name__, error))
        else:
            self.fail('Expected SystemExit, but no exception was raised')

        # Command.process should provide access to search results info
        info_path = os.path.join(self._package_directory, 'recordings',
                                 'scpv1', 'Splunk-6.3',
                                 'countmatches.execute.dispatch_dir',
                                 'externSearchResultsInfo.csv')

        ifile = StringIO('infoPath:' + info_path +
                         '\n\naction\r\nget_search_results_info\r\n')
        argv = ['test.py', '__EXECUTE__']
        command = TestStreamingCommand()
        result = StringIO()

        try:
            # noinspection PyTypeChecker
            command.process(argv, ifile, ofile=result)
        except BaseException as error:
            self.fail('Expected no exception, but caught {}: {}'.format(
                type(error).__name__, error))
        else:
            self.assertRegexpMatches(
                result.getvalue(), r'^\r\n'
                r'('
                r'data,__mv_data,_serial,__mv__serial\r\n'
                r'"\{.*u\'is_summary_index\': 0, .+\}",,0,'
                r'|'
                r'_serial,__mv__serial,data,__mv_data\r\n'
                r'0,,"\{.*u\'is_summary_index\': 0, .+\}",'
                r')'
                r'\r\n$')

        # TestStreamingCommand.process should provide access to a service object when search results info is available

        self.assertIsInstance(command.service, Service)

        self.assertEqual(command.service.authority,
                         command.search_results_info.splunkd_uri)

        self.assertEqual(command.service.scheme,
                         command.search_results_info.splunkd_protocol)

        self.assertEqual(command.service.port,
                         command.search_results_info.splunkd_port)

        self.assertEqual(command.service.token,
                         command.search_results_info.auth_token)

        self.assertEqual(command.service.namespace.app,
                         command.search_results_info.ppc_app)

        self.assertEqual(command.service.namespace.owner, None)
        self.assertEqual(command.service.namespace.sharing, None)

        # Command.process should not provide access to search results info or a service object when the 'infoPath'
        # input header is unavailable

        ifile = StringIO('\naction\r\nget_search_results_info')
        argv = ['teststreaming.py', '__EXECUTE__']
        command = TestStreamingCommand()

        # noinspection PyTypeChecker
        command.process(argv, ifile, ofile=result)

        self.assertIsNone(command.search_results_info)
        self.assertIsNone(command.service)

        return
Code Example #23
    def test_process_scpv2(self):

        # SearchCommand.process should

        # 1. Recognize all standard options:

        metadata = (
            '{{'
                '"action": "getinfo", "preview": false, "searchinfo": {{'
                    '"latest_time": "0",'
                    '"splunk_version": "20150522",'
                    '"username": "******",'
                    '"app": "searchcommands_app",'
                    '"args": ['
                        '"logging_configuration={logging_configuration}",'
                        '"logging_level={logging_level}",'
                        '"record={record}",'
                        '"show_configuration={show_configuration}",'
                        '"required_option_1=value_1",'
                        '"required_option_2=value_2"'
                    '],'
                    '"search": "A%7C%20inputlookup%20tweets%20%7C%20countmatches%20fieldname%3Dword_count%20pattern%3D%22%5Cw%2B%22%20text%20record%3Dt%20%7C%20export%20add_timestamp%3Df%20add_offset%3Dt%20format%3Dcsv%20segmentation%3Draw",'
                    '"earliest_time": "0",'
                    '"session_key": "0JbG1fJEvXrL6iYZw9y7tmvd6nHjTKj7ggaE7a4Jv5R0UIbeYJ65kThn^3hiNeoqzMT_LOtLpVR3Y8TIJyr5bkHUElMijYZ8l14wU0L4n^Oa5QxepsZNUIIQCBm^",'
                    '"owner": "admin",'
                    '"sid": "1433261372.158",'
                    '"splunkd_uri": "https://127.0.0.1:8089",'
                    '"dispatch_dir": {dispatch_dir},'
                    '"raw_args": ['
                        '"logging_configuration={logging_configuration}",'
                        '"logging_level={logging_level}",'
                        '"record={record}",'
                        '"show_configuration={show_configuration}",'
                        '"required_option_1=value_1",'
                        '"required_option_2=value_2"'
                    '],'
                    '"maxresultrows": 10,'
                    '"command": "countmatches"'
                '}}'
            '}}')

        basedir = self._package_directory

        default_logging_configuration = os.path.join(basedir, 'apps', 'app_with_logging_configuration', 'default', 'logging.conf')
        dispatch_dir = os.path.join(basedir, 'recordings', 'scpv2', 'Splunk-6.3', 'countmatches.dispatch_dir')
        logging_configuration = os.path.join(basedir, 'apps', 'app_with_logging_configuration', 'logging.conf')
        logging_level = 'ERROR'
        record = False
        show_configuration = True

        getinfo_metadata = metadata.format(
            dispatch_dir=encode_string(dispatch_dir),
            logging_configuration=encode_string(logging_configuration)[1:-1],
            logging_level=logging_level,
            record=('true' if record is True else 'false'),
            show_configuration=('true' if show_configuration is True else 'false'))

        execute_metadata = '{"action":"execute","finished":true}'
        execute_body = 'test\r\ndata\r\n'

        ifile = StringIO(
            'chunked 1.0,{},0\n{}'.format(len(getinfo_metadata), getinfo_metadata) +
            'chunked 1.0,{},{}\n{}{}'.format(len(execute_metadata), len(execute_body), execute_metadata, execute_body))

        command = TestCommand()
        result = StringIO()
        argv = ['some-external-search-command.py']

        self.assertEqual(command.logging_level, 'WARNING')
        self.assertIs(command.record, None)
        self.assertIs(command.show_configuration, None)

        try:
            # noinspection PyTypeChecker
            command.process(argv, ifile, ofile=result)
        except SystemExit as error:
            self.fail('Unexpected exception: {}: {}'.format(type(error).__name__, error))

        self.assertEqual(command.logging_configuration, logging_configuration)
        self.assertEqual(command.logging_level, 'ERROR')
        self.assertEqual(command.record, record)
        self.assertEqual(command.show_configuration, show_configuration)
        self.assertEqual(command.required_option_1, 'value_1')
        self.assertEqual(command.required_option_2, 'value_2')

        self.assertEqual(
            'chunked 1.0,68,0\n'
            '{"inspector":{"messages":[["INFO","test command configuration: "]]}}\n'
            'chunked 1.0,17,23\n'
            '{"finished":true}test,__mv_test\r\n'
            'data,\r\n',
            result.getvalue())

        self.assertEqual(command.protocol_version, 2)

        # 2. Provide access to these properties:
        #   fieldnames
        #   input_header
        #   metadata
        #   search_results_info
        #   service

        self.assertEqual([], command.fieldnames)

        command_metadata = command.metadata
        input_header = command.input_header

        self.assertIsNone(input_header['allowStream'])
        self.assertEqual(input_header['infoPath'], os.path.join(command_metadata.searchinfo.dispatch_dir, 'info.csv'))
        self.assertIsNone(input_header['keywords'])
        self.assertEqual(input_header['preview'], command_metadata.preview)
        self.assertIs(input_header['realtime'], False)
        self.assertEqual(input_header['search'], command_metadata.searchinfo.search)
        self.assertEqual(input_header['sid'], command_metadata.searchinfo.sid)
        self.assertEqual(input_header['splunkVersion'], command_metadata.searchinfo.splunk_version)
        self.assertIsNone(input_header['truncated'])

        self.assertEqual(command_metadata.preview, input_header['preview'])
        self.assertEqual(command_metadata.searchinfo.app, 'searchcommands_app')
        self.assertEqual(command_metadata.searchinfo.args, ['logging_configuration=' + logging_configuration, 'logging_level=ERROR', 'record=false', 'show_configuration=true', 'required_option_1=value_1', 'required_option_2=value_2'])
        self.assertEqual(command_metadata.searchinfo.dispatch_dir, os.path.dirname(input_header['infoPath']))
        self.assertEqual(command_metadata.searchinfo.earliest_time, 0.0)
        self.assertEqual(command_metadata.searchinfo.latest_time, 0.0)
        self.assertEqual(command_metadata.searchinfo.owner, 'admin')
        self.assertEqual(command_metadata.searchinfo.raw_args, command_metadata.searchinfo.args)
        self.assertEqual(command_metadata.searchinfo.search, 'A| inputlookup tweets | countmatches fieldname=word_count pattern="\\w+" text record=t | export add_timestamp=f add_offset=t format=csv segmentation=raw')
        self.assertEqual(command_metadata.searchinfo.session_key, '0JbG1fJEvXrL6iYZw9y7tmvd6nHjTKj7ggaE7a4Jv5R0UIbeYJ65kThn^3hiNeoqzMT_LOtLpVR3Y8TIJyr5bkHUElMijYZ8l14wU0L4n^Oa5QxepsZNUIIQCBm^')
        self.assertEqual(command_metadata.searchinfo.sid, '1433261372.158')
        self.assertEqual(command_metadata.searchinfo.splunk_version, '20150522')
        self.assertEqual(command_metadata.searchinfo.splunkd_uri, 'https://127.0.0.1:8089')
        self.assertEqual(command_metadata.searchinfo.username, 'admin')
        self.assertEqual(command_metadata.searchinfo.maxresultrows, 10)
        self.assertEqual(command_metadata.searchinfo.command, 'countmatches')

        command.search_results_info.search_metrics = command.search_results_info.search_metrics.__dict__
        command.search_results_info.optional_fields_json = command.search_results_info.optional_fields_json.__dict__

        self.maxDiff = None

        self.assertDictEqual(command.search_results_info.__dict__, {
            u'is_summary_index': 0,
            u'bs_thread_count': 1,
            u'rt_backfill': 0,
            u'rtspan': '',
            u'search_StartTime': 1433261392.934936,
            u'read_raw': 1,
            u'root_sid': '',
            u'field_rendering': '',
            u'query_finished': 1,
            u'optional_fields_json': {},
            u'group_list': '',
            u'remoteServers': '',
            u'rt_latest': '',
            u'remote_log_download_mode': 'disabled',
            u'reduce_search': '',
            u'request_finalization': 0,
            u'auth_token': 'UQZSgWwE2f9oIKrj1QG^kVhW^T_cR4H5Z65bPtMhwlHytS5jFrFYyH^dGzjTusDjVTgoBNeR7bvIzctHF7DrLJ1ANevgDOWEWRvABNj6d_k0koqxw9Io',
            u'indexed_realtime': 0,
            u'ppc_bs': '$SPLUNK_HOME/etc',
            u'drop_count': 0,
            u'datamodel_map': '',
            u'search_can_be_event_type': 0,
            u'search_StartUp_Spent': 0,
            u'realtime': 0,
            u'splunkd_uri': 'https://127.0.0.1:8089',
            u'columnOrder': '',
            u'kv_store_settings': 'hosts;127.0.0.1:8191\\;;local;127.0.0.1:8191;read_preference;958513E3-8716-4ABF-9559-DA0C9678437F;replica_set_name;958513E3-8716-4ABF-9559-DA0C9678437F;status;ready;',
            u'label': '',
            u'summary_maxtimespan': '',
            u'indexed_realtime_offset': 0,
            u'sid': 1433261392.159,
            u'msg': [],
            u'internal_only': 0,
            u'summary_id': '',
            u'orig_search_head': '',
            u'ppc_app': 'chunked_searchcommands',
            u'countMap': {
                u'invocations.dispatch.writeStatus': u'1',
                u'duration.dispatch.writeStatus': u'2',
                u'duration.startup.handoff': u'79',
                u'duration.startup.configuration': u'34',
                u'invocations.startup.handoff': u'1',
                u'invocations.startup.configuration': u'1'},
            u'is_shc_mode': 0,
            u'shp_id': '958513E3-8716-4ABF-9559-DA0C9678437F',
            u'timestamp': 1433261392.936374, u'is_remote_sorted': 0,
            u'remote_search': '',
            u'splunkd_protocol': 'https',
            u'site': '',
            u'maxevents': 0,
            u'keySet': '',
            u'summary_stopped': 0,
            u'search_metrics': {
                u'ConsideredEvents': 0,
                u'ConsideredBuckets': 0,
                u'TotalSlicesInBuckets': 0,
                u'EliminatedBuckets': 0,
                u'DecompressedSlices': 0},
            u'summary_mode': 'all', u'now': 1433261392.0,
            u'splunkd_port': 8089, u'is_saved_search': 0,
            u'rtoptions': '',
            u'search': '| inputlookup random_data max=50000 | sum total=total value1 record=t | export add_timestamp=f add_offset=t format=csv segmentation=raw',
            u'bundle_version': 0,
            u'generation_id': 0,
            u'bs_thread_id': 0,
            u'is_batch_mode': 0,
            u'scan_count': 0,
            u'rt_earliest': '',
            u'default_group': '*',
            u'tstats_reduce': '',
            u'kv_store_additional_settings': 'hosts_guids;958513E3-8716-4ABF-9559-DA0C9678437F\\;;',
            u'enable_event_stream': 0,
            u'is_remote': 0,
            u'is_scheduled': 0,
            u'sample_ratio': 1,
            u'ppc_user': '******',
            u'sample_seed': 0})

        self.assertIsInstance(command.service, Service)

        self.assertEqual(command.service.authority, command_metadata.searchinfo.splunkd_uri)
        self.assertEqual(command.service.scheme, command.search_results_info.splunkd_protocol)
        self.assertEqual(command.service.port, command.search_results_info.splunkd_port)
        self.assertEqual(command.service.token, command_metadata.searchinfo.session_key)
        self.assertEqual(command.service.namespace.app, command.metadata.searchinfo.app)
        self.assertIsNone(command.service.namespace.owner)
        self.assertIsNone(command.service.namespace.sharing)

        self.assertEqual(command.protocol_version, 2)

        # 3. Produce an error message, log a debug message, and exit when invalid standard option values are encountered

        # Note on loggers
        # Loggers are global and can't be removed once they're created. We create loggers that are keyed by class name.
        # Every instance of such a class thus gets access to the same logger. We created one in the prior test and set
        # its level to ERROR. That level is retained in this test.

        logging_configuration = 'non-existent-logging.conf'
        logging_level = 'NON-EXISTENT-LOGGING-LEVEL'
        record = 'Non-boolean value'
        show_configuration = 'Non-boolean value'

        getinfo_metadata = metadata.format(
            dispatch_dir=encode_string(dispatch_dir),
            logging_configuration=encode_string(logging_configuration)[1:-1],
            logging_level=logging_level,
            record=record,
            show_configuration=show_configuration)

        execute_metadata = '{"action":"execute","finished":true}'
        execute_body = 'test\r\ndata\r\n'

        ifile = StringIO(
            'chunked 1.0,{},0\n{}'.format(len(getinfo_metadata), getinfo_metadata) +
            'chunked 1.0,{},{}\n{}{}'.format(len(execute_metadata), len(execute_body), execute_metadata, execute_body))

        command = TestCommand()
        result = StringIO()
        argv = ['test.py']

        # noinspection PyTypeChecker
        self.assertRaises(SystemExit, command.process, argv, ifile, ofile=result)
        self.assertEqual(command.logging_level, 'ERROR')
        self.assertEqual(command.record, False)
        self.assertEqual(command.show_configuration, False)
        self.assertEqual(command.required_option_1, 'value_1')
        self.assertEqual(command.required_option_2, 'value_2')

        self.assertEqual(
            'chunked 1.0,287,0\n'
            '{"inspector":{"messages":[["ERROR","Illegal value: logging_configuration=non-existent-logging.conf"],'
            '["ERROR","Illegal value: logging_level=NON-EXISTENT-LOGGING-LEVEL"],'
            '["ERROR","Illegal value: record=Non-boolean value"],'
            '["ERROR","Illegal value: show_configuration=Non-boolean value"]]}}\n'
            'chunked 1.0,17,0\n'
            '{"finished":true}',
            result.getvalue())

        self.assertEqual(command.protocol_version, 2)

        # 4. Produce an error message, log an error message that includes a traceback, and exit when an exception is
        #    raised during command execution.

        logging_configuration = os.path.join(basedir, 'apps', 'app_with_logging_configuration', 'logging.conf')
        logging_level = 'WARNING'
        record = False
        show_configuration = False

        getinfo_metadata = metadata.format(
            dispatch_dir=encode_string(dispatch_dir),
            logging_configuration=encode_string(logging_configuration)[1:-1],
            logging_level=logging_level,
            record=('true' if record is True else 'false'),
            show_configuration=('true' if show_configuration is True else 'false'))

        execute_metadata = '{"action":"execute","finished":true}'
        execute_body = 'action\r\nraise_exception\r\n'

        ifile = StringIO(
            'chunked 1.0,{},0\n{}'.format(len(getinfo_metadata), getinfo_metadata) +
            'chunked 1.0,{},{}\n{}{}'.format(len(execute_metadata), len(execute_body), execute_metadata, execute_body))

        command = TestCommand()
        result = StringIO()
        argv = ['test.py']

        try:
            command.process(argv, ifile, ofile=result)
        except SystemExit as error:
            self.assertNotEqual(0, error.code)
        except BaseException as error:
            self.fail('{0}: {1}: {2}\n'.format(type(error).__name__, error, result.getvalue()))
        else:
            self.fail('Expected SystemExit, not a return from TestCommand.process: {}\n'.format(result.getvalue()))

        self.assertEqual(command.logging_configuration, logging_configuration)
        self.assertEqual(command.logging_level, logging_level)
        self.assertEqual(command.record, record)
        self.assertEqual(command.show_configuration, show_configuration)
        self.assertEqual(command.required_option_1, 'value_1')
        self.assertEqual(command.required_option_2, 'value_2')

        finished = r'"finished":true'

        inspector = \
            r'"inspector":\{"messages":\[\["ERROR","StandardError at \\".+\\", line \d+ : test ' \
            r'logging_configuration=\\".+\\" logging_level=\\"WARNING\\" record=\\"f\\" ' \
            r'required_option_1=\\"value_1\\" required_option_2=\\"value_2\\" show_configuration=\\"f\\""\]\]\}'

        self.assertRegexpMatches(
            result.getvalue(),
            r'^chunked 1.0,2,0\n'
            r'\{\}\n'
            r'chunked 1.0,\d+,0\n'
            r'\{(' + inspector + r',' + finished + r'|' + finished + r',' + inspector + r')\}')

        self.assertEqual(command.protocol_version, 2)
        return
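
Every hand-built ifile in these tests uses the same SCP v2 framing: a header line 'chunked 1.0,<metadata-length>,<body-length>' followed by the metadata and body bytes. A minimal sketch of that framing as a helper (build_chunk is hypothetical, not part of the library):

def build_chunk(metadata, body=''):
    # The header carries the byte lengths of the two payloads that follow it.
    return 'chunked 1.0,{},{}\n{}{}'.format(
        len(metadata), len(body), metadata, body)

# The getinfo/execute exchange above could then be assembled as:
# ifile = StringIO(build_chunk(getinfo_metadata) +
#                  build_chunk('{"action":"execute","finished":true}', 'test\r\ndata\r\n'))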
Code example #26
class TestRecorder(object):

    def __init__(self, test_case):

        self._test_case = test_case
        self._output = None
        self._recording = None
        self._recording_part = None

        def _not_implemented(self):
            raise NotImplementedError('class {} is not in playback or record mode'.format(self.__class__.__name__))

        self.get = self.next_part = self.stop = MethodType(_not_implemented, self, self.__class__)
        return

    @property
    def output(self):
        return self._output

    def playback(self, path):

        with open(path, 'rb') as f:
            test_data = pickle.load(f)

        self._output = StringIO()
        self._recording = test_data['inputs']
        self._recording_part = self._recording.popleft()

        def get(self, method, *args, **kwargs):
            return self._recording_part[method.__name__].popleft()

        self.get = MethodType(get, self, self.__class__)

        def next_part(self):
            self._recording_part = self._recording.popleft()

        self.next_part = MethodType(next_part, self, self.__class__)

        def stop(self):
            self._test_case.assertEqual(test_data['results'], self._output.getvalue())

        self.stop = MethodType(stop, self, self.__class__)
        return

    def record(self, path):

        self._output = StringIO()
        self._recording = deque()
        self._recording_part = OrderedDict()
        self._recording.append(self._recording_part)

        def get(self, method, *args, **kwargs):
            result = method(*args, **kwargs)
            part = self._recording_part
            key = method.__name__
            try:
                results = part[key]
            except KeyError:
                part[key] = results = deque()
            results.append(result)
            return result

        self.get = MethodType(get, self, self.__class__)

        def next_part(self):
            part = OrderedDict()
            self._recording_part = part
            self._recording.append(part)

        self.next_part = MethodType(next_part, self, self.__class__)

        def stop(self):
            with io.open(path, 'wb') as f:
                test = OrderedDict((('inputs', self._recording), ('results', self._output.getvalue())))
                pickle.dump(test, f)

        self.stop = MethodType(stop, self, self.__class__)
        return
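
A hypothetical round trip through TestRecorder, assuming a unittest.TestCase instance and some callable to wrap (test_case and some_method are stand-ins, not names from the class):

recorder = TestRecorder(test_case)

recorder.record('countmatches.recording')    # record mode: get() invokes and captures
recorder.get(some_method, 'arg')             # runs some_method('arg'), appends the result
recorder.next_part()                         # starts a new recording part
recorder.stop()                              # pickles inputs and output to the path

recorder.playback('countmatches.recording')  # playback mode: get() replays
recorder.get(some_method, 'arg')             # returns the recorded result instead
recorder.stop()                              # asserts output matches the recording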
Code example #27
class RecordWriter(object):
    def __init__(self, ofile, maxresultrows=None):
        self._maxresultrows = 50000 if maxresultrows is None else maxresultrows

        self._ofile = set_binary_mode(ofile)
        self._fieldnames = None
        self._buffer = StringIO()

        self._writer = csv.writer(self._buffer, dialect=CsvDialect)
        self._writerow = self._writer.writerow
        self._finished = False
        self._flushed = False

        self._inspector = OrderedDict()
        self._chunk_count = 0
        self._pending_record_count = 0
        self._committed_record_count = 0

    @property
    def is_flushed(self):
        return self._flushed

    @is_flushed.setter
    def is_flushed(self, value):
        self._flushed = True if value else False

    @property
    def ofile(self):
        return self._ofile

    @ofile.setter
    def ofile(self, value):
        self._ofile = set_binary_mode(value)

    @property
    def pending_record_count(self):
        return self._pending_record_count

    @property
    def _record_count(self):
        warnings.warn(
            "_record_count will be deprecated soon. Use pending_record_count instead.",
            PendingDeprecationWarning)
        return self.pending_record_count

    @property
    def committed_record_count(self):
        return self._committed_record_count

    @property
    def _total_record_count(self):
        warnings.warn(
            "_total_record_count will be deprecated soon. Use committed_record_count instead.",
            PendingDeprecationWarning)
        return self.committed_record_count

    def write(self, data):
        bytes_type = bytes if sys.version_info >= (3, 0) else str
        if not isinstance(data, bytes_type):
            data = data.encode('utf-8')
        self.ofile.write(data)

    def flush(self, finished=None, partial=None):
        assert finished is None or isinstance(finished, bool)
        assert partial is None or isinstance(partial, bool)
        assert not (finished is None and partial is None)
        assert finished is None or partial is None
        self._ensure_validity()

    def write_message(self, message_type, message_text, *args, **kwargs):
        self._ensure_validity()
        self._inspector.setdefault('messages', []).append(
            (message_type, message_text.format(*args, **kwargs)))

    def write_record(self, record):
        self._ensure_validity()
        self._write_record(record)

    def write_records(self, records):
        self._ensure_validity()
        write_record = self._write_record
        for record in records:
            write_record(record)

    def _clear(self):
        self._buffer.seek(0)
        self._buffer.truncate()
        self._inspector.clear()
        self._pending_record_count = 0

    def _ensure_validity(self):
        if self._finished is True:
            assert self._record_count == 0 and len(self._inspector) == 0
            raise RuntimeError('I/O operation on closed record writer')

    def _write_record(self, record):

        fieldnames = self._fieldnames

        if fieldnames is None:
            self._fieldnames = fieldnames = list(record.keys())
            value_list = imap(lambda fn: (str(fn), str('__mv_') + str(fn)),
                              fieldnames)
            self._writerow(list(chain.from_iterable(value_list)))

        get_value = record.get
        values = []

        for fieldname in fieldnames:
            value = get_value(fieldname, None)

            if value is None:
                values += (None, None)
                continue

            value_t = type(value)

            if issubclass(value_t, (list, tuple)):

                if len(value) == 0:
                    values += (None, None)
                    continue

                if len(value) > 1:
                    value_list = value
                    sv = ''
                    mv = '$'

                    for value in value_list:

                        if value is None:
                            sv += '\n'
                            mv += '$;$'
                            continue

                        value_t = type(value)

                        if value_t is not bytes:

                            if value_t is bool:
                                value = str(value.real)
                            elif value_t is six.text_type:
                                value = value
                            elif isinstance(
                                    value, six.integer_types
                            ) or value_t is float or value_t is complex:
                                value = str(value)
                            elif issubclass(value_t, (dict, list, tuple)):
                                value = str(''.join(
                                    RecordWriter._iterencode_json(value, 0)))
                            else:
                                value = repr(value).encode(
                                    'utf-8', errors='backslashreplace')

                        sv += value + '\n'
                        mv += value.replace('$', '$$') + '$;$'

                    values += (sv[:-1], mv[:-2])
                    continue

                value = value[0]
                value_t = type(value)

            if value_t is bool:
                values += (str(value.real), None)
                continue

            if value_t is bytes:
                values += (value, None)
                continue

            if value_t is six.text_type:
                if six.PY2:
                    value = value.encode('utf-8')
                values += (value, None)
                continue

            if isinstance(value, six.integer_types
                          ) or value_t is float or value_t is complex:
                values += (str(value), None)
                continue

            if issubclass(value_t, dict):
                values += (str(''.join(RecordWriter._iterencode_json(value,
                                                                     0))),
                           None)
                continue

            values += (repr(value), None)

        self._writerow(values)
        self._pending_record_count += 1

        if self.pending_record_count >= self._maxresultrows:
            self.flush(partial=True)

    try:
        # noinspection PyUnresolvedReferences
        from _json import make_encoder
    except ImportError:
        # We may be running under PyPy 2.5 which does not include the _json module
        _iterencode_json = JSONEncoder(separators=(',', ':')).iterencode
    else:
        # Creating _iterencode_json this way yields a two-fold performance improvement on Python 2.7.9 and 2.7.10
        from json.encoder import encode_basestring_ascii

        @staticmethod
        def _default(o):
            raise TypeError(repr(o) + ' is not JSON serializable')

        _iterencode_json = make_encoder(
            {},  # markers (for detecting circular references)
            _default,  # object_encoder
            encode_basestring_ascii,  # string_encoder
            None,  # indent
            ':',
            ',',  # separators
            False,  # sort_keys
            False,  # skip_keys
            True  # allow_nan
        )

        del make_encoder
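
The list branch of _write_record packs a multivalued field into a plain column and a '__mv_' column. A standalone sketch of that encoding, paraphrased from the loop above (encode_multivalue is hypothetical):

def encode_multivalue(values):
    # Single-value column: newline-separated entries; multivalue column:
    # '$'-delimited entries separated by ';' with literal '$' doubled.
    sv = '\n'.join('' if v is None else str(v) for v in values)
    mv = '$' + '$;$'.join(
        '' if v is None else str(v).replace('$', '$$') for v in values) + '$'
    return sv, mv

print(encode_multivalue(['hello', 'wor$ld']))
# -> ('hello\nwor$ld', '$hello$;$wor$$ld$')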
Code example #28
    def test_input_header(self):

        # No items

        input_header = InputHeader()

        with closing(StringIO('\r\n'.encode())) as input_file:
            input_header.read(input_file)

        self.assertEqual(len(input_header), 0)

        # One unnamed single-line item (same as no items)

        input_header = InputHeader()

        with closing(
                StringIO('this%20is%20an%20unnamed%20single-line%20item\n\n'.
                         encode())) as input_file:
            input_header.read(input_file)

        self.assertEqual(len(input_header), 0)

        input_header = InputHeader()

        with closing(
                StringIO('this%20is%20an%20unnamed\nmulti-\nline%20item\n\n'.
                         encode())) as input_file:
            input_header.read(input_file)

        self.assertEqual(len(input_header), 0)

        # One named single-line item

        input_header = InputHeader()

        with closing(
                StringIO('Foo:this%20is%20a%20single-line%20item\n\n'.encode())
        ) as input_file:
            input_header.read(input_file)

        self.assertEqual(len(input_header), 1)
        self.assertEqual(input_header['Foo'], 'this is a single-line item')

        input_header = InputHeader()

        with closing(StringIO('Bar:this is a\nmulti-\nline item\n\n'.encode())
                     ) as input_file:
            input_header.read(input_file)

        self.assertEqual(len(input_header), 1)
        self.assertEqual(input_header['Bar'], 'this is a\nmulti-\nline item')

        # The infoPath item (which is the path to a file that we open for reads)

        input_header = InputHeader()

        with closing(StringIO(
                'infoPath:non-existent.csv\n\n'.encode())) as input_file:
            input_header.read(input_file)

        self.assertEqual(len(input_header), 1)
        self.assertEqual(input_header['infoPath'], 'non-existent.csv')

        # Set of named items

        collection = {
            'word_list': 'hello\nworld\n!',
            'word_1': 'hello',
            'word_2': 'world',
            'word_3': '!',
            'sentence': 'hello world!'
        }

        input_header = InputHeader()
        text = reduce(
            lambda value, item: value + '{}:{}\n'.format(item[0], item[1]),
            six.iteritems(collection), '') + '\n'

        with closing(StringIO(text.encode())) as input_file:
            input_header.read(input_file)

        self.assertDictEqual(input_header, collection)

        # Set of named items with an unnamed item at the beginning (the only place that an unnamed item can appear)

        with closing(StringIO(
            ('unnamed item\n' + text).encode())) as input_file:
            input_header.read(input_file)

        self.assertDictEqual(input_header, collection)

        # Test iterators, indirectly through items, keys, and values

        self.assertEqual(sorted(input_header.items()),
                         sorted(collection.items()))
        self.assertEqual(sorted(input_header.keys()),
                         sorted(collection.keys()))
        self.assertEqual(sorted(input_header.values()),
                         sorted(collection.values()))

        return
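
The header that InputHeader.read consumes is a block of URL-encoded 'name:value' lines terminated by a blank line. A minimal hand parser covering the single-line case only (the library also handles the multi-line values exercised above):

from six.moves.urllib.parse import unquote

def parse_input_header(text):
    items = {}
    for line in text.split('\n'):
        if not line:
            break  # a blank line terminates the header
        name, sep, value = line.partition(':')
        if sep:  # lines without a name are ignored here
            items[name] = unquote(value)
    return items

print(parse_input_header('Foo:this%20is%20a%20single-line%20item\n\n'))
# -> {'Foo': 'this is a single-line item'}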
Code example #29
File: validators.py (project: hRun/TA-cryptosuite)
    def format(self, value):
        output = StringIO()
        writer = csv.writer(output, List.Dialect)
        writer.writerow(value)
        value = output.getvalue()
        return value[:-1]
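
The [:-1] slice strips a one-character row terminator, which implies List.Dialect ends rows with '\n'. The same effect with the stdlib csv module (the lineterminator here is an assumption; List.Dialect may also differ in quoting):

import csv
from io import StringIO

output = StringIO()
csv.writer(output, lineterminator='\n').writerow(['a', 'b, with comma', 'c'])
print(output.getvalue()[:-1])  # -> a,"b, with comma",c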
Code example #30
    def test_logging_configuration(self):

        # Test that logging is properly initialized when there is no logging configuration file

        rebase_environment('app_without_logging_configuration')

        self.assertIsNone(environment.logging_configuration)
        self.assertTrue(
            any(
                isinstance(h, logging.StreamHandler)
                for h in logging.root.handlers))
        self.assertTrue('splunklib' in logging.Logger.manager.loggerDict)
        self.assertEqual(environment.splunklib_logger,
                         logging.Logger.manager.loggerDict['splunklib'])
        self.assertIsInstance(environment.splunklib_logger, logging.Logger)

        command = StubbedSearchCommand()

        self.assertIs(command.logger,
                      logging.getLogger('StubbedSearchCommand'))
        self.assertEqual(len(command.logger.handlers), 0)
        self.assertIsNone(command.logging_configuration)
        self.assertIs(command.logger.root, logging.root)

        root_handler = next(h for h in logging.root.handlers
                            if isinstance(h, logging.StreamHandler))

        self.assertIsInstance(root_handler, logging.StreamHandler)
        self.assertEqual(root_handler.stream, sys.stderr)

        self.assertEqual(command.logging_level,
                         logging.getLevelName(logging.root.level))
        root_handler.stream = StringIO()
        message = 'Test that output is directed to stderr without formatting'
        command.logger.warning(message)
        self.assertEqual(root_handler.stream.getvalue(), message + '\n')

        # A search command loads {local,default}/logging.conf when it is available

        rebase_environment('app_with_logging_configuration')

        command = StubbedSearchCommand()
        self.assertEqual(
            command.logging_configuration,
            os.path.join(environment.app_root, 'default', 'logging.conf'))
        self.assertIs(command.logger,
                      logging.getLogger('StubbedSearchCommand'))

        # Setting logging_configuration loads a new logging configuration file relative to the app root

        command.logging_configuration = 'alternative-logging.conf'
        self.assertEqual(
            command.logging_configuration,
            os.path.join(environment.app_root, 'default',
                         'alternative-logging.conf'))
        self.assertIs(command.logger,
                      logging.getLogger('StubbedSearchCommand'))

        # Setting logging_configuration loads a new logging configuration file on an absolute path

        app_root_logging_configuration = os.path.join(environment.app_root,
                                                      'logging.conf')
        command.logging_configuration = app_root_logging_configuration

        self.assertEqual(command.logging_configuration,
                         app_root_logging_configuration)
        self.assertIs(command.logger,
                      logging.getLogger('StubbedSearchCommand'))

        # logging_configuration raises a value error, if a non-existent logging configuration file is provided

        try:
            command.logging_configuration = 'foo'
        except ValueError:
            pass
        except BaseException as e:
            self.fail('Expected ValueError, but {} was raised'.format(type(e)))
        else:
            self.fail(
                'Expected ValueError, but logging_configuration={}'.format(
                    command.logging_configuration))

        try:
            command.logging_configuration = os.path.join(
                package_directory, 'non-existent.logging.conf')
        except ValueError:
            pass
        except BaseException as e:
            self.fail('Expected ValueError, but {} was raised'.format(type(e)))
        else:
            self.fail(
                'Expected ValueError, but logging_configuration={}'.format(
                    command.logging_configuration))
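
A hypothetical minimal logging.conf of the kind these tests load through logging_configuration (the section layout follows logging.config.fileConfig; the actual fixture contents are not shown in this test):

LOGGING_CONF = """\
[loggers]
keys = root

[handlers]
keys = stderr

[formatters]
keys = plain

[logger_root]
level = WARNING
handlers = stderr

[handler_stderr]
class = StreamHandler
formatter = plain
args = (sys.stderr,)

[formatter_plain]
format = %(message)s
"""
# Written to <app_root>/default/logging.conf, a file like this is picked up
# automatically; setting command.logging_configuration selects another file.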
Code example #31
    def test_process_scpv1(self):

        # TestCommand.process should complain if supports_getinfo == False
        # We support dynamic configuration, not static

        # The exception line number may change, so we're using a regex match instead of a string match

        expected = re.compile(
            r'error_message=RuntimeError at ".+search_command\.py", line \d\d\d : Command test appears to be '
            r'statically configured for search command protocol version 1 and static configuration is unsupported by '
            r'splunklib.searchcommands. Please ensure that default/commands.conf contains this stanza:\n'
            r'\[test\]\n'
            r'filename = test.py\n'
            r'enableheader = true\n'
            r'outputheader = true\n'
            r'requires_srinfo = true\n'
            r'supports_getinfo = true\n'
            r'supports_multivalues = true\n'
            r'supports_rawargs = true')

        argv = ['test.py', 'not__GETINFO__or__EXECUTE__', 'option=value', 'fieldname']
        command = TestCommand()
        result = StringIO()

        self.assertRaises(SystemExit, command.process, argv, ofile=result)
        self.assertRegexpMatches(result.getvalue(), expected)

        # TestCommand.process should return configuration settings on Getinfo probe

        argv = ['test.py', '__GETINFO__', 'required_option_1=value', 'required_option_2=value']
        command = TestCommand()
        ifile = StringIO('\n')
        result = StringIO()

        self.assertEqual(str(command.configuration), '')

        if six.PY2:
            expected = ("[(u'clear_required_fields', None, [1]), (u'distributed', None, [2]), (u'generates_timeorder', None, [1]), "
            "(u'generating', None, [1, 2]), (u'maxinputs', None, [2]), (u'overrides_timeorder', None, [1]), "
            "(u'required_fields', None, [1, 2]), (u'requires_preop', None, [1]), (u'retainsevents', None, [1]), "
            "(u'run_in_preview', None, [2]), (u'streaming', None, [1]), (u'streaming_preop', None, [1, 2]), "
            "(u'type', None, [2])]")
        else:
            expected = ("[('clear_required_fields', None, [1]), ('distributed', None, [2]), ('generates_timeorder', None, [1]), "
            "('generating', None, [1, 2]), ('maxinputs', None, [2]), ('overrides_timeorder', None, [1]), "
            "('required_fields', None, [1, 2]), ('requires_preop', None, [1]), ('retainsevents', None, [1]), "
            "('run_in_preview', None, [2]), ('streaming', None, [1]), ('streaming_preop', None, [1, 2]), "
            "('type', None, [2])]")

        self.assertEqual(
            repr(command.configuration), expected)

        try:
            # noinspection PyTypeChecker
            command.process(argv, ifile, ofile=result)
        except BaseException as error:
            self.fail('{0}: {1}: {2}\n'.format(type(error).__name__, error, result.getvalue()))

        self.assertEqual('\r\n\r\n\r\n', result.getvalue())  # No message header and no configuration settings

        ifile = StringIO('\n')
        result = StringIO()

        # We might also put this sort of code into our SearchCommand.prepare override ...

        configuration = command.configuration

        # SCP v1/v2 configuration settings
        configuration.generating = True
        configuration.required_fields = ['foo', 'bar']
        configuration.streaming_preop = 'some streaming command'

        # SCP v1 configuration settings
        configuration.clear_required_fields = True
        configuration.generates_timeorder = True
        configuration.overrides_timeorder = True
        configuration.requires_preop = True
        configuration.retainsevents = True
        configuration.streaming = True

        # SCP v2 configuration settings (SCP v1 requires that maxinputs and run_in_preview are set in commands.conf)
        configuration.distributed = True
        configuration.maxinputs = 50000
        configuration.run_in_preview = True
        configuration.type = 'streaming'

        if six.PY2:
            expected = ('clear_required_fields="True", generates_timeorder="True", generating="True", overrides_timeorder="True", '
                'required_fields="[u\'foo\', u\'bar\']", requires_preop="True", retainsevents="True", streaming="True", '
                'streaming_preop="some streaming command"')
        else:
            expected = ('clear_required_fields="True", generates_timeorder="True", generating="True", overrides_timeorder="True", '
                        'required_fields="[\'foo\', \'bar\']", requires_preop="True", retainsevents="True", streaming="True", '
                        'streaming_preop="some streaming command"')
        self.assertEqual(str(command.configuration), expected)

        if six.PY2:
            expected = ("[(u'clear_required_fields', True, [1]), (u'distributed', True, [2]), (u'generates_timeorder', True, [1]), "
            "(u'generating', True, [1, 2]), (u'maxinputs', 50000, [2]), (u'overrides_timeorder', True, [1]), "
            "(u'required_fields', [u'foo', u'bar'], [1, 2]), (u'requires_preop', True, [1]), "
            "(u'retainsevents', True, [1]), (u'run_in_preview', True, [2]), (u'streaming', True, [1]), "
            "(u'streaming_preop', u'some streaming command', [1, 2]), (u'type', u'streaming', [2])]")
        else:
            expected = ("[('clear_required_fields', True, [1]), ('distributed', True, [2]), ('generates_timeorder', True, [1]), "
            "('generating', True, [1, 2]), ('maxinputs', 50000, [2]), ('overrides_timeorder', True, [1]), "
            "('required_fields', ['foo', 'bar'], [1, 2]), ('requires_preop', True, [1]), "
            "('retainsevents', True, [1]), ('run_in_preview', True, [2]), ('streaming', True, [1]), "
            "('streaming_preop', 'some streaming command', [1, 2]), ('type', 'streaming', [2])]")

        self.assertEqual(
            repr(command.configuration), expected)

        try:
            # noinspection PyTypeChecker
            command.process(argv, ifile, ofile=result)
        except BaseException as error:
            self.fail('{0}: {1}: {2}\n'.format(type(error).__name__, error, result.getvalue()))

        result.seek(0)
        reader = csv.reader(result)
        self.assertEqual([], next(reader))
        observed = dict(izip(next(reader), next(reader)))
        self.assertRaises(StopIteration, lambda: next(reader))

        expected = {
            'clear_required_fields': '1',                '__mv_clear_required_fields': '',
            'generating': '1',                           '__mv_generating': '',
            'generates_timeorder': '1',                  '__mv_generates_timeorder': '',
            'overrides_timeorder': '1',                  '__mv_overrides_timeorder': '',
            'requires_preop': '1',                       '__mv_requires_preop': '',
            'required_fields': 'foo,bar',                '__mv_required_fields': '',
            'retainsevents': '1',                        '__mv_retainsevents': '',
            'streaming': '1',                            '__mv_streaming': '',
            'streaming_preop': 'some streaming command', '__mv_streaming_preop': '',
        }

        self.assertDictEqual(expected, observed)  # Configuration settings returned on the Getinfo probe

        for action in '__GETINFO__', '__EXECUTE__':

            # TestCommand.process should produce an error record on parser errors

            argv = [
                'test.py', action, 'required_option_1=value', 'required_option_2=value', 'undefined_option=value',
                'fieldname_1', 'fieldname_2']

            command = TestCommand()
            ifile = StringIO('\n')
            result = StringIO()

            self.assertRaises(SystemExit, command.process, argv, ifile, ofile=result)
            self.assertIn(
                'error_message=Unrecognized test command option: undefined_option="value"\r\n\r\n',
                result.getvalue())

            # TestCommand.process should produce an error record when required options are missing

            argv = ['test.py', action, 'required_option_2=value', 'fieldname_1']
            command = TestCommand()
            ifile = StringIO('\n')
            result = StringIO()

            self.assertRaises(SystemExit, command.process, argv, ifile, ofile=result)

            self.assertIn(
                'error_message=A value for test command option required_option_1 is required\r\n\r\n',
                result.getvalue())

            argv = ['test.py', action, 'fieldname_1']
            command = TestCommand()
            ifile = StringIO('\n')
            result = StringIO()

            self.assertRaises(SystemExit, command.process, argv, ifile, ofile=result)

            self.assertIn(
                'error_message=Values for these test command options are required: required_option_1, required_option_2'
                '\r\n\r\n',
                result.getvalue())

        # TestStreamingCommand.process should exit on processing exceptions

        ifile = StringIO('\naction\r\nraise_error\r\n')
        argv = ['test.py', '__EXECUTE__']
        command = TestStreamingCommand()
        result = StringIO()

        try:
            # noinspection PyTypeChecker
            command.process(argv, ifile, ofile=result)
        except SystemExit as error:
            self.assertNotEqual(error.code, 0)
            self.assertRegexpMatches(
                result.getvalue(),
                r'^error_message=RuntimeError at ".+", line \d+ : Testing\r\n\r\n$')
        except BaseException as error:
            self.fail('Expected SystemExit, but caught {}: {}'.format(type(error).__name__, error))
        else:
            self.fail('Expected SystemExit, but no exception was raised')

        # Command.process should provide access to search results info
        info_path = os.path.join(
            self._package_directory, 'recordings', 'scpv1', 'Splunk-6.3', 'countmatches.execute.dispatch_dir',
            'externSearchResultsInfo.csv')

        ifile = StringIO('infoPath:' + info_path + '\n\naction\r\nget_search_results_info\r\n')
        argv = ['test.py', '__EXECUTE__']
        command = TestStreamingCommand()
        result = StringIO()

        try:
            # noinspection PyTypeChecker
            command.process(argv, ifile, ofile=result)
        except BaseException as error:
            self.fail('Expected no exception, but caught {}: {}'.format(type(error).__name__, error))
        else:
            self.assertRegexpMatches(
                result.getvalue(),
                r'^\r\n'
                r'('
                r'data,__mv_data,_serial,__mv__serial\r\n'
                r'"\{.*u\'is_summary_index\': 0, .+\}",,0,'
                r'|'
                r'_serial,__mv__serial,data,__mv_data\r\n'
                r'0,,"\{.*u\'is_summary_index\': 0, .+\}",'
                r')'
                r'\r\n$'
            )

        # TestStreamingCommand.process should provide access to a service object when search results info is available

        self.assertIsInstance(command.service, Service)

        self.assertEqual(command.service.authority,
                         command.search_results_info.splunkd_uri)

        self.assertEqual(command.service.scheme,
                         command.search_results_info.splunkd_protocol)

        self.assertEqual(command.service.port,
                         command.search_results_info.splunkd_port)

        self.assertEqual(command.service.token,
                         command.search_results_info.auth_token)

        self.assertEqual(command.service.namespace.app,
                         command.search_results_info.ppc_app)

        self.assertEqual(command.service.namespace.owner,
                         None)
        self.assertEqual(command.service.namespace.sharing,
                         None)

        # Command.process should not provide access to search results info or a service object when the 'infoPath'
        # input header is unavailable

        ifile = StringIO('\naction\r\nget_search_results_info')
        argv = ['teststreaming.py', '__EXECUTE__']
        command = TestStreamingCommand()

        # noinspection PyTypeChecker
        command.process(argv, ifile, ofile=result)

        self.assertIsNone(command.search_results_info)
        self.assertIsNone(command.service)

        return
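
In protocol version 1 the action travels in argv rather than in chunk metadata; options and field names follow it positionally. The two shapes this test exercises, side by side (values are illustrative):

# Dispatch probe: return configuration settings, process no events
getinfo_argv = ['test.py', '__GETINFO__', 'required_option_1=value', 'fieldname_1']

# Execution pass: read events from ifile, write results to ofile
execute_argv = ['test.py', '__EXECUTE__', 'required_option_1=value', 'fieldname_1']

# command.process(getinfo_argv, StringIO('\n'), ofile=StringIO())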
Code example #32
    def test_recorder(self):

        if python_version[0] == 2 and python_version[1] < 7:
            print("Skipping test since we're on {0}".format(
                ".".join(str(v) for v in python_version)))
            return

        # Grab an input/output recording, the results of a prior countmatches run

        recording = os.path.join(self._package_path, 'recordings', 'scpv2',
                                 'Splunk-6.3', 'countmatches.')

        with gzip.open(recording + 'input.gz', 'rb') as file_1:
            with io.open(recording + 'output', 'rb') as file_2:
                ifile = StringIO(file_1.read())
                result = StringIO(file_2.read())

        # Set up the input/output recorders that are under test

        ifile = Recorder(mktemp(), ifile)

        try:
            ofile = Recorder(mktemp(), StringIO())

            try:
                # Read and then write a line
                ifile.readline()
                ofile.write(result.readline())

                # Read and then write a block
                ifile.read()
                ofile.write(result.read())

                # Verify that what we wrote is equivalent to the original recording, the result from a prior
                # countmatches run
                self.assertEqual(ofile.getvalue(), result.getvalue())

                # Verify that we faithfully recorded the input and output files
                ifile._recording.close()
                ofile._recording.close()

                with gzip.open(ifile._recording.name, 'rb') as file_1:
                    with gzip.open(ofile._recording.name, 'rb') as file_2:
                        self.assertEqual(file_1.read(), ifile._file.getvalue())
                        self.assertEqual(file_2.read(), ofile._file.getvalue())

            finally:
                ofile._recording.close()
                os.remove(ofile._recording.name)

        finally:
            ifile._recording.close()
            os.remove(ifile._recording.name)

        return
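
The assertions above imply that Recorder tees a wrapped stream into a gzip recording file while proxying reads. A reconstruction under that assumption (TeeReader is hypothetical, not the class under test):

import gzip

class TeeReader(object):
    """Copies everything read from a binary fileobj into a gzip recording."""

    def __init__(self, recording_path, fileobj):
        self._file = fileobj
        self._recording = gzip.open(recording_path, 'wb')

    def read(self, size=-1):
        data = self._file.read(size)
        self._recording.write(data)
        return data

    def readline(self):
        data = self._file.readline()
        self._recording.write(data)
        return data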
Code example #33
File: internals.py (project: bawood/TA-DUOSecurity2FA)
class RecordWriter(object):

    def __init__(self, ofile, maxresultrows=None):
        self._maxresultrows = 50000 if maxresultrows is None else maxresultrows

        self._ofile = ofile
        self._fieldnames = None
        self._buffer = StringIO()

        self._writer = csv.writer(self._buffer, dialect=CsvDialect)
        self._writerow = self._writer.writerow
        self._finished = False
        self._flushed = False

        self._inspector = OrderedDict()
        self._chunk_count = 0
        self._record_count = 0
        self._total_record_count = 0

    @property
    def is_flushed(self):
        return self._flushed

    @is_flushed.setter
    def is_flushed(self, value):
        self._flushed = True if value else False

    @property
    def ofile(self):
        return self._ofile

    @ofile.setter
    def ofile(self, value):
        self._ofile = value

    def flush(self, finished=None, partial=None):
        assert finished is None or isinstance(finished, bool)
        assert partial is None or isinstance(partial, bool)
        assert not (finished is None and partial is None)
        assert finished is None or partial is None
        self._ensure_validity()

    def write_message(self, message_type, message_text, *args, **kwargs):
        self._ensure_validity()
        self._inspector.setdefault('messages', []).append((message_type, message_text.format(*args, **kwargs)))

    def write_record(self, record):
        self._ensure_validity()
        self._write_record(record)

    def write_records(self, records):
        self._ensure_validity()
        write_record = self._write_record
        for record in records:
            write_record(record)

    def _clear(self):
        self._buffer.seek(0)
        self._buffer.truncate()
        self._inspector.clear()
        self._record_count = 0
        self._flushed = False

    def _ensure_validity(self):
        if self._finished is True:
            assert self._record_count == 0 and len(self._inspector) == 0
            raise RuntimeError('I/O operation on closed record writer')

    def _write_record(self, record):

        fieldnames = self._fieldnames

        if fieldnames is None:
            self._fieldnames = fieldnames = list(record.keys())
            value_list = imap(lambda fn: (str(fn), str('__mv_') + str(fn)), fieldnames)
            self._writerow(list(chain.from_iterable(value_list)))

        get_value = record.get
        values = []

        for fieldname in fieldnames:
            value = get_value(fieldname, None)

            if value is None:
                values += (None, None)
                continue

            value_t = type(value)

            if issubclass(value_t, (list, tuple)):

                if len(value) == 0:
                    values += (None, None)
                    continue

                if len(value) > 1:
                    value_list = value
                    sv = ''
                    mv = '$'

                    for value in value_list:

                        if value is None:
                            sv += '\n'
                            mv += '$;$'
                            continue

                        value_t = type(value)

                        if value_t is not bytes:

                            if value_t is bool:
                                value = str(value.real)
                            elif value_t is six.text_type:
                                value = value
                            elif value_t is int or value_t is float or value_t is complex:
                                value = str(value)
                            elif issubclass(value_t, (dict, list, tuple)):
                                value = str(''.join(RecordWriter._iterencode_json(value, 0)))
                            else:
                                value = repr(value).encode('utf-8', errors='backslashreplace')

                        sv += value + '\n'
                        mv += value.replace('$', '$$') + '$;$'

                    values += (sv[:-1], mv[:-2])
                    continue

                value = value[0]
                value_t = type(value)

            if value_t is bool:
                values += (str(value.real), None)
                continue

            if value_t is bytes:
                values += (value, None)
                continue

            if value_t is six.text_type:
                if six.PY2:
                    value = value.encode('utf-8')
                values += (value, None)
                continue

            if value_t is int or value_t is float or value_t is complex:
                values += (str(value), None)
                continue

            if issubclass(value_t, dict):
                values += (str(''.join(RecordWriter._iterencode_json(value, 0))), None)
                continue

            values += (repr(value), None)

        self._writerow(values)
        self._record_count += 1

        if self._record_count >= self._maxresultrows:
            self.flush(partial=True)

    try:
        # noinspection PyUnresolvedReferences
        from _json import make_encoder
    except ImportError:
        # We may be running under PyPy 2.5 which does not include the _json module
        _iterencode_json = JSONEncoder(separators=(',', ':')).iterencode
    else:
        # Creating _iterencode_json this way yields a two-fold performance improvement on Python 2.7.9 and 2.7.10
        from json.encoder import encode_basestring_ascii

        @staticmethod
        def _default(o):
            raise TypeError(repr(o) + ' is not JSON serializable')

        _iterencode_json = make_encoder(
            {},                       # markers (for detecting circular references)
            _default,                 # object_encoder
            encode_basestring_ascii,  # string_encoder
            None,                     # indent
            ':', ',',                 # separators
            False,                    # sort_keys
            False,                    # skip_keys
            True                      # allow_nan
        )

        del make_encoder
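
write_message is how the '"inspector":{"messages":[...]}' payloads asserted on elsewhere in these tests get populated. A sketch using the class above (the StringIO ofile is just a stand-in):

writer = RecordWriter(StringIO())
writer.write_message('ERROR', 'Illegal value: {}={}', 'record', 'Non-boolean value')
# writer._inspector['messages'] now holds:
# [('ERROR', 'Illegal value: record=Non-boolean value')]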
Code example #34
    def test_process_scpv2(self):

        # SearchCommand.process should

        # 1. Recognize all standard options:

        metadata = (
            '{{'
            '"action": "getinfo", "preview": false, "searchinfo": {{'
            '"latest_time": "0",'
            '"splunk_version": "20150522",'
            '"username": "******",'
            '"app": "searchcommands_app",'
            '"args": ['
            '"logging_configuration={logging_configuration}",'
            '"logging_level={logging_level}",'
            '"record={record}",'
            '"show_configuration={show_configuration}",'
            '"required_option_1=value_1",'
            '"required_option_2=value_2"'
            '],'
            '"search": "A%7C%20inputlookup%20tweets%20%7C%20countmatches%20fieldname%3Dword_count%20pattern%3D%22%5Cw%2B%22%20text%20record%3Dt%20%7C%20export%20add_timestamp%3Df%20add_offset%3Dt%20format%3Dcsv%20segmentation%3Draw",'
            '"earliest_time": "0",'
            '"session_key": "0JbG1fJEvXrL6iYZw9y7tmvd6nHjTKj7ggaE7a4Jv5R0UIbeYJ65kThn^3hiNeoqzMT_LOtLpVR3Y8TIJyr5bkHUElMijYZ8l14wU0L4n^Oa5QxepsZNUIIQCBm^",'
            '"owner": "admin",'
            '"sid": "1433261372.158",'
            '"splunkd_uri": "https://127.0.0.1:8089",'
            '"dispatch_dir": {dispatch_dir},'
            '"raw_args": ['
            '"logging_configuration={logging_configuration}",'
            '"logging_level={logging_level}",'
            '"record={record}",'
            '"show_configuration={show_configuration}",'
            '"required_option_1=value_1",'
            '"required_option_2=value_2"'
            '],'
            '"maxresultrows": 10,'
            '"command": "countmatches"'
            '}}'
            '}}')

        basedir = self._package_directory

        default_logging_configuration = os.path.join(
            basedir, 'apps', 'app_with_logging_configuration', 'default',
            'logging.conf')
        dispatch_dir = os.path.join(basedir, 'recordings', 'scpv2',
                                    'Splunk-6.3', 'countmatches.dispatch_dir')
        logging_configuration = os.path.join(basedir, 'apps',
                                             'app_with_logging_configuration',
                                             'logging.conf')
        logging_level = 'ERROR'
        record = False
        show_configuration = True

        getinfo_metadata = metadata.format(
            dispatch_dir=encode_string(dispatch_dir),
            logging_configuration=encode_string(logging_configuration)[1:-1],
            logging_level=logging_level,
            record=('true' if record is True else 'false'),
            show_configuration=('true'
                                if show_configuration is True else 'false'))

        execute_metadata = '{"action":"execute","finished":true}'
        execute_body = 'test\r\ndata\r\n'

        ifile = StringIO('chunked 1.0,{},0\n{}'.format(len(getinfo_metadata),
                                                       getinfo_metadata) +
                         'chunked 1.0,{},{}\n{}{}'.format(
                             len(execute_metadata), len(execute_body),
                             execute_metadata, execute_body))

        command = TestCommand()
        result = StringIO()
        argv = ['some-external-search-command.py']

        self.assertEqual(command.logging_level, 'WARNING')
        self.assertIs(command.record, None)
        self.assertIs(command.show_configuration, None)

        try:
            # noinspection PyTypeChecker
            command.process(argv, ifile, ofile=result)
        except SystemExit as error:
            self.fail('Unexpected exception: {}: {}'.format(
                type(error).__name__, error))

        self.assertEqual(command.logging_configuration, logging_configuration)
        self.assertEqual(command.logging_level, 'ERROR')
        self.assertEqual(command.record, record)
        self.assertEqual(command.show_configuration, show_configuration)
        self.assertEqual(command.required_option_1, 'value_1')
        self.assertEqual(command.required_option_2, 'value_2')

        self.assertEqual(
            'chunked 1.0,68,0\n'
            '{"inspector":{"messages":[["INFO","test command configuration: "]]}}\n'
            'chunked 1.0,17,23\n'
            '{"finished":true}test,__mv_test\r\n'
            'data,\r\n', result.getvalue())

        self.assertEqual(command.protocol_version, 2)

        # 2. Provide access to these properties:
        #   fieldnames
        #   input_header
        #   metadata
        #   search_results_info
        #   service

        self.assertEqual([], command.fieldnames)

        command_metadata = command.metadata
        input_header = command.input_header

        self.assertIsNone(input_header['allowStream'])
        self.assertEqual(
            input_header['infoPath'],
            os.path.join(command_metadata.searchinfo.dispatch_dir, 'info.csv'))
        self.assertIsNone(input_header['keywords'])
        self.assertEqual(input_header['preview'], command_metadata.preview)
        self.assertIs(input_header['realtime'], False)
        self.assertEqual(input_header['search'],
                         command_metadata.searchinfo.search)
        self.assertEqual(input_header['sid'], command_metadata.searchinfo.sid)
        self.assertEqual(input_header['splunkVersion'],
                         command_metadata.searchinfo.splunk_version)
        self.assertIsNone(input_header['truncated'])

        self.assertEqual(command_metadata.preview, input_header['preview'])
        self.assertEqual(command_metadata.searchinfo.app, 'searchcommands_app')
        self.assertEqual(command_metadata.searchinfo.args, [
            'logging_configuration=' + logging_configuration,
            'logging_level=ERROR', 'record=false', 'show_configuration=true',
            'required_option_1=value_1', 'required_option_2=value_2'
        ])
        self.assertEqual(command_metadata.searchinfo.dispatch_dir,
                         os.path.dirname(input_header['infoPath']))
        self.assertEqual(command_metadata.searchinfo.earliest_time, 0.0)
        self.assertEqual(command_metadata.searchinfo.latest_time, 0.0)
        self.assertEqual(command_metadata.searchinfo.owner, 'admin')
        self.assertEqual(command_metadata.searchinfo.raw_args,
                         command_metadata.searchinfo.args)
        self.assertEqual(
            command_metadata.searchinfo.search,
            'A| inputlookup tweets | countmatches fieldname=word_count pattern="\\w+" text record=t | export add_timestamp=f add_offset=t format=csv segmentation=raw'
        )
        self.assertEqual(
            command_metadata.searchinfo.session_key,
            '0JbG1fJEvXrL6iYZw9y7tmvd6nHjTKj7ggaE7a4Jv5R0UIbeYJ65kThn^3hiNeoqzMT_LOtLpVR3Y8TIJyr5bkHUElMijYZ8l14wU0L4n^Oa5QxepsZNUIIQCBm^'
        )
        self.assertEqual(command_metadata.searchinfo.sid, '1433261372.158')
        self.assertEqual(command_metadata.searchinfo.splunk_version,
                         '20150522')
        self.assertEqual(command_metadata.searchinfo.splunkd_uri,
                         'https://127.0.0.1:8089')
        self.assertEqual(command_metadata.searchinfo.username, 'admin')
        self.assertEqual(command_metadata.searchinfo.maxresultrows, 10)
        self.assertEqual(command_metadata.searchinfo.command, 'countmatches')

        command.search_results_info.search_metrics = command.search_results_info.search_metrics.__dict__
        command.search_results_info.optional_fields_json = command.search_results_info.optional_fields_json.__dict__

        self.maxDiff = None

        self.assertDictEqual(
            command.search_results_info.__dict__, {
                u'is_summary_index': 0,
                u'bs_thread_count': 1,
                u'rt_backfill': 0,
                u'rtspan': '',
                u'search_StartTime': 1433261392.934936,
                u'read_raw': 1,
                u'root_sid': '',
                u'field_rendering': '',
                u'query_finished': 1,
                u'optional_fields_json': {},
                u'group_list': '',
                u'remoteServers': '',
                u'rt_latest': '',
                u'remote_log_download_mode': 'disabled',
                u'reduce_search': '',
                u'request_finalization': 0,
                u'auth_token':
                'UQZSgWwE2f9oIKrj1QG^kVhW^T_cR4H5Z65bPtMhwlHytS5jFrFYyH^dGzjTusDjVTgoBNeR7bvIzctHF7DrLJ1ANevgDOWEWRvABNj6d_k0koqxw9Io',
                u'indexed_realtime': 0,
                u'ppc_bs': '$SPLUNK_HOME/etc',
                u'drop_count': 0,
                u'datamodel_map': '',
                u'search_can_be_event_type': 0,
                u'search_StartUp_Spent': 0,
                u'realtime': 0,
                u'splunkd_uri': 'https://127.0.0.1:8089',
                u'columnOrder': '',
                u'kv_store_settings':
                'hosts;127.0.0.1:8191\\;;local;127.0.0.1:8191;read_preference;958513E3-8716-4ABF-9559-DA0C9678437F;replica_set_name;958513E3-8716-4ABF-9559-DA0C9678437F;status;ready;',
                u'label': '',
                u'summary_maxtimespan': '',
                u'indexed_realtime_offset': 0,
                u'sid': 1433261392.159,
                u'msg': [],
                u'internal_only': 0,
                u'summary_id': '',
                u'orig_search_head': '',
                u'ppc_app': 'chunked_searchcommands',
                u'countMap': {
                    u'invocations.dispatch.writeStatus': u'1',
                    u'duration.dispatch.writeStatus': u'2',
                    u'duration.startup.handoff': u'79',
                    u'duration.startup.configuration': u'34',
                    u'invocations.startup.handoff': u'1',
                    u'invocations.startup.configuration': u'1'
                },
                u'is_shc_mode': 0,
                u'shp_id': '958513E3-8716-4ABF-9559-DA0C9678437F',
                u'timestamp': 1433261392.936374,
                u'is_remote_sorted': 0,
                u'remote_search': '',
                u'splunkd_protocol': 'https',
                u'site': '',
                u'maxevents': 0,
                u'keySet': '',
                u'summary_stopped': 0,
                u'search_metrics': {
                    u'ConsideredEvents': 0,
                    u'ConsideredBuckets': 0,
                    u'TotalSlicesInBuckets': 0,
                    u'EliminatedBuckets': 0,
                    u'DecompressedSlices': 0
                },
                u'summary_mode': 'all',
                u'now': 1433261392.0,
                u'splunkd_port': 8089,
                u'is_saved_search': 0,
                u'rtoptions': '',
                u'search':
                '| inputlookup random_data max=50000 | sum total=total value1 record=t | export add_timestamp=f add_offset=t format=csv segmentation=raw',
                u'bundle_version': 0,
                u'generation_id': 0,
                u'bs_thread_id': 0,
                u'is_batch_mode': 0,
                u'scan_count': 0,
                u'rt_earliest': '',
                u'default_group': '*',
                u'tstats_reduce': '',
                u'kv_store_additional_settings':
                'hosts_guids;958513E3-8716-4ABF-9559-DA0C9678437F\\;;',
                u'enable_event_stream': 0,
                u'is_remote': 0,
                u'is_scheduled': 0,
                u'sample_ratio': 1,
                u'ppc_user': '******',
                u'sample_seed': 0
            })

        self.assertIsInstance(command.service, Service)

        self.assertEqual(command.service.authority,
                         command_metadata.searchinfo.splunkd_uri)
        self.assertEqual(command.service.scheme,
                         command.search_results_info.splunkd_protocol)
        self.assertEqual(command.service.port,
                         command.search_results_info.splunkd_port)
        self.assertEqual(command.service.token,
                         command_metadata.searchinfo.session_key)
        self.assertEqual(command.service.namespace.app,
                         command.metadata.searchinfo.app)
        self.assertIsNone(command.service.namespace.owner)
        self.assertIsNone(command.service.namespace.sharing)

        self.assertEqual(command.protocol_version, 2)

        # 3. Produce an error message, log a debug message, and exit when invalid standard option values are encountered

        # Note on loggers
        # Loggers are global and can't be removed once they're created. We create loggers keyed by class name, so
        # each instance of a class gets access to the same logger. We created one in the prior test and set its
        # level to ERROR. That level is retained in this test.

        logging_configuration = 'non-existent-logging.conf'
        logging_level = 'NON-EXISTENT-LOGGING-LEVEL'
        record = 'Non-boolean value'
        show_configuration = 'Non-boolean value'

        getinfo_metadata = metadata.format(
            dispatch_dir=encode_string(dispatch_dir),
            logging_configuration=encode_string(logging_configuration)[1:-1],
            logging_level=logging_level,
            record=record,
            show_configuration=show_configuration)

        execute_metadata = '{"action":"execute","finished":true}'
        execute_body = 'test\r\ndata\r\n'

        ifile = StringIO('chunked 1.0,{},0\n{}'.format(len(getinfo_metadata),
                                                       getinfo_metadata) +
                         'chunked 1.0,{},{}\n{}{}'.format(
                             len(execute_metadata), len(execute_body),
                             execute_metadata, execute_body))

        command = TestCommand()
        result = StringIO()
        argv = ['test.py']

        # noinspection PyTypeChecker
        self.assertRaises(SystemExit,
                          command.process,
                          argv,
                          ifile,
                          ofile=result)
        self.assertEqual(command.logging_level, 'ERROR')
        self.assertEqual(command.record, False)
        self.assertEqual(command.show_configuration, False)
        self.assertEqual(command.required_option_1, 'value_1')
        self.assertEqual(command.required_option_2, 'value_2')

        self.assertEqual(
            'chunked 1.0,287,0\n'
            '{"inspector":{"messages":[["ERROR","Illegal value: logging_configuration=non-existent-logging.conf"],'
            '["ERROR","Illegal value: logging_level=NON-EXISTENT-LOGGING-LEVEL"],'
            '["ERROR","Illegal value: record=Non-boolean value"],'
            '["ERROR","Illegal value: show_configuration=Non-boolean value"]]}}\n'
            'chunked 1.0,17,0\n'
            '{"finished":true}', result.getvalue())

        self.assertEqual(command.protocol_version, 2)

        # 4. Produce an error message, log an error message that includes a traceback, and exit when an exception is
        #    raised during command execution.

        logging_configuration = os.path.join(basedir, 'apps',
                                             'app_with_logging_configuration',
                                             'logging.conf')
        logging_level = 'WARNING'
        record = False
        show_configuration = False

        getinfo_metadata = metadata.format(
            dispatch_dir=encode_string(dispatch_dir),
            logging_configuration=encode_string(logging_configuration)[1:-1],
            logging_level=logging_level,
            record=('true' if record is True else 'false'),
            show_configuration=('true'
                                if show_configuration is True else 'false'))

        execute_metadata = '{"action":"execute","finished":true}'
        execute_body = 'action\r\nraise_exception\r\n'

        ifile = StringIO('chunked 1.0,{},0\n{}'.format(len(getinfo_metadata),
                                                       getinfo_metadata) +
                         'chunked 1.0,{},{}\n{}{}'.format(
                             len(execute_metadata), len(execute_body),
                             execute_metadata, execute_body))

        command = TestCommand()
        result = StringIO()
        argv = ['test.py']

        try:
            command.process(argv, ifile, ofile=result)
        except SystemExit as error:
            self.assertNotEqual(0, error.code)
        except BaseException as error:
            self.fail('{0}: {1}: {2}\n'.format(
                type(error).__name__, error, result.getvalue()))
        else:
            self.fail(
                'Expected SystemExit, not a return from TestCommand.process: {}\n'
                .format(result.getvalue()))

        self.assertEqual(command.logging_configuration, logging_configuration)
        self.assertEqual(command.logging_level, logging_level)
        self.assertEqual(command.record, record)
        self.assertEqual(command.show_configuration, show_configuration)
        self.assertEqual(command.required_option_1, 'value_1')
        self.assertEqual(command.required_option_2, 'value_2')

        finished = r'"finished":true'

        inspector = \
            r'"inspector":\{"messages":\[\["ERROR","StandardError at \\".+\\", line \d+ : test ' \
            r'logging_configuration=\\".+\\" logging_level=\\"WARNING\\" record=\\"f\\" ' \
            r'required_option_1=\\"value_1\\" required_option_2=\\"value_2\\" show_configuration=\\"f\\""\]\]\}'

        self.assertRegexpMatches(
            result.getvalue(), r'^chunked 1.0,2,0\n'
            r'\{\}\n'
            r'chunked 1.0,\d+,0\n'
            r'\{(' + inspector + r',' + finished + r'|' + finished + r',' +
            inspector + r')\}')

        self.assertEqual(command.protocol_version, 2)
        return
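
The inverse of the framing these tests build by hand: a minimal reader for the 'chunked 1.0,<metadata-length>,<body-length>' transport asserted on throughout (read_chunk is hypothetical):

def read_chunk(ifile):
    header = ifile.readline()
    if not header:
        return None  # end of stream
    _, metadata_length, body_length = header.rstrip('\n').split(',')
    metadata = ifile.read(int(metadata_length))
    body = ifile.read(int(body_length))
    return metadata, body

# read_chunk(StringIO('chunked 1.0,17,0\n{"finished":true}'))
# -> ('{"finished":true}', '')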