def __call__(self, value):
    if value is None:
        return None
    try:
        value = re.compile(six.text_type(value))
    except re.error as error:
        raise ValueError('{}: {}'.format(six.text_type(error).capitalize(), value))
    return value
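# Usage sketch for the RegularExpression validator above (assumption for illustration only:
# the class is exposed as splunklib.searchcommands.validators.RegularExpression, as in the
# Splunk SDK for Python; the sample pattern is hypothetical).
from splunklib.searchcommands import validators

regex = validators.RegularExpression()
compiled = regex(r'\d{3}-\d{2}-\d{4}')   # returns a compiled regular expression object
assert compiled.match('123-45-6789') is not None
assert regex(None) is None               # None passes through unchanged
# regex('[unbalanced') raises ValueError carrying the capitalized re.error message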
def test(integer):
    for s in str(integer), six.text_type(integer):
        value = validator.__call__(s)
        self.assertEqual(value, integer)
        if six.PY2:
            self.assertIsInstance(value, long)
        else:
            self.assertIsInstance(value, int)
    self.assertEqual(validator.format(integer), six.text_type(integer))
def test(float_val):
    try:
        float_val = float(float_val)
    except ValueError:
        assert False
    for s in str(float_val), six.text_type(float_val):
        value = validator.__call__(s)
        self.assertAlmostEqual(value, float_val)
        self.assertIsInstance(value, float)
    self.assertEqual(validator.format(float_val), six.text_type(float_val))
def decode_mime(encoded_field, unescape_folding=True):
    if unescape_folding and (r'\t' in encoded_field or r'\r\n' in encoded_field):
        encoded_field = re.sub(r'(?<!\\)\\r\\n( |\\t)+', ' ', encoded_field)  # Unfold escaped CRLF
        encoded_field = re.sub(r'(?<!\\)\\r\\n$', '', encoded_field)  # Trim trailing CRLF
        encoded_field = re.sub(r'(?<!\\)\\t', '\t', encoded_field)  # Unescape tab characters

    # Decode the field and convert to Unicode
    if '=?' in encoded_field:
        if PY3:
            decoded = Parser(policy=policy.default).parsestr('MIMEDecode: {}'.format(encoded_field)).get('MIMEDecode')
            return text_type(decoded)
        else:
            return text_type(make_header(decode_header(encoded_field)))

    return encoded_field
def thefilter(self, record, pattern):
    values = ""
    for fieldname in self.fieldnames:
        # multivalue fields come through as a list, iterate through the list and run the regex against each entry
        # in the multivalued field
        if isinstance(record[fieldname], list):
            for aRecord in record[fieldname]:
                matches = pattern.findall(six.text_type(aRecord.decode("utf-8")))
                for match in matches:
                    values = values + " " + match
        else:
            matches = pattern.findall(six.text_type(record[fieldname].decode("utf-8")))
            for match in matches:
                values = values + " " + match
    return values
def test_unicode_connect(self):
    opts = self.opts.kwargs.copy()
    opts['host'] = six.text_type(opts['host'])
    context = binding.connect(**opts)
    # Just check to make sure the service is alive
    response = context.get("/services")
    self.assertEqual(response.status, 200)
def __call__(self, value):
    if not (value is None or isinstance(value, bool)):
        value = six.text_type(value).lower()
        if value not in Boolean.truth_values:
            raise ValueError('Unrecognized truth value: {0}'.format(value))
        value = Boolean.truth_values[value]
    return value
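# Usage sketch for the Boolean validator above (assumption: it is the
# splunklib.searchcommands.validators.Boolean class from the Splunk SDK for Python).
from splunklib.searchcommands import validators

boolean = validators.Boolean()
assert boolean('Yes') is True     # truth values are matched case-insensitively
assert boolean('0') is False
assert boolean(None) is None      # None and bool values pass through unchanged
# boolean('maybe') raises ValueError('Unrecognized truth value: maybe')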
def stream(self, records):
    self.logger.debug('CountMatchesCommand: %s', self)  # logs command line
    pattern = self.pattern
    outname = self.outname
    count = 0
    whitelist = ""
    for record in records:
        for fieldname in self.fieldnames:
            matches = pattern.findall(six.text_type(record[fieldname]))
            count += len(matches)
        record[self.fieldname] = count
        if whitelist != "":
            whitelist = str(whitelist) + "|" + str(record)
        else:
            whitelist = str(record)
    # whitelist is empty
    if count == 0:
        whitelist = "[('" + str(outname) + "', '*')]"
    yield {'_raw': str(whitelist)}
def __call__(self, o):

    if isfunction(o):
        # We must wait to finalize configuration as the class containing this function is under construction
        # at the time of this call to decorate a member function. This will be handled in the call to
        # o.ConfigurationSettings.fix_up(o) in the elif clause of this code block.
        o._settings = self.settings
    elif isclass(o):

        # Set command name

        name = o.__name__
        if name.endswith('Command'):
            name = name[:-len('Command')]
        o.name = six.text_type(name.lower())

        # Construct ConfigurationSettings instance for the command class

        o.ConfigurationSettings = ConfigurationSettingsType(
            module=o.__module__ + '.' + o.__name__,
            name='ConfigurationSettings',
            bases=(o.ConfigurationSettings,))

        ConfigurationSetting.fix_up(o.ConfigurationSettings, self.settings)
        o.ConfigurationSettings.fix_up(o)
        Option.fix_up(o)
    else:
        raise TypeError('Incorrect usage: Configuration decorator applied to {0}'.format(type(o), o.__name__))

    return o
def __call__(self, value):
    if value is None:
        return None
    value = six.text_type(value)
    if self.pattern.match(value) is None:
        raise ValueError('Expected {}, not {}'.format(self.name, json_encode_string(value)))
    return value
def __call__(self, value):
    if value is not None:
        value = six.text_type(value)
        if OptionName.pattern.match(value) is None:
            raise ValueError('Illegal characters in option name: {}'.format(value))
    return value
def __call__(self, value):
    if value is None:
        return None
    try:
        return Code.object(compile(value, 'string', self._mode), six.text_type(value))
    except (SyntaxError, TypeError) as error:
        raise ValueError(error.message)
def __call__(self, value):
    if value is None:
        return None
    value = six.text_type(value)
    if value not in self.membership:
        raise ValueError('Unrecognized value: {}'.format(value))
    return value
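# Usage sketch for the Set validator above (assumption: splunklib's validators.Set takes its
# membership as constructor arguments and stores it in self.membership; the sample values are
# hypothetical).
from splunklib.searchcommands import validators

output_format = validators.Set('csv', 'json', 'xml')
assert output_format('json') == 'json'
assert output_format(None) is None
# output_format('yaml') raises ValueError('Unrecognized value: yaml')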
def test_boolean(self):

    truth_values = {
        '1': True, '0': False,
        't': True, 'f': False,
        'true': True, 'false': False,
        'y': True, 'n': False,
        'yes': True, 'no': False
    }

    validator = validators.Boolean()

    for value in truth_values:
        for variant in value, value.capitalize(), value.upper():
            s = six.text_type(variant)
            self.assertEqual(validator.__call__(s), truth_values[value])

    self.assertIsNone(validator.__call__(None))
    self.assertRaises(ValueError, validator.__call__, 'anything-else')

    return
def generate(self):
    text = self.text
    for i in range(1, self.count + 1):
        yield {'_serial': i, '_time': time.time(), '_raw': six.text_type(i) + '. ' + text}
def _process_protocol_v1(self, argv, ifile, ofile):

    debug = environment.splunklib_logger.debug
    class_name = self.__class__.__name__

    debug('%s.process started under protocol_version=1', class_name)
    self._record_writer = RecordWriterV1(ofile)

    # noinspection PyBroadException
    try:
        if argv[1] == '__GETINFO__':

            debug('Writing configuration settings')

            ifile = self._prepare_protocol_v1(argv, ifile, ofile)
            self._record_writer.write_record(dict(
                (n, ','.join(v) if isinstance(v, (list, tuple)) else v) for n, v in six.iteritems(self._configuration)))
            self.finish()

        elif argv[1] == '__EXECUTE__':

            debug('Executing')

            ifile = self._prepare_protocol_v1(argv, ifile, ofile)
            self._records = self._records_protocol_v1
            self._metadata.action = 'execute'
            self._execute(ifile, None)

        else:
            message = (
                'Command {0} appears to be statically configured for search command protocol version 1 and static '
                'configuration is unsupported by splunklib.searchcommands. Please ensure that '
                'default/commands.conf contains this stanza:\n'
                '[{0}]\n'
                'filename = {1}\n'
                'enableheader = true\n'
                'outputheader = true\n'
                'requires_srinfo = true\n'
                'supports_getinfo = true\n'
                'supports_multivalues = true\n'
                'supports_rawargs = true'.format(self.name, os.path.basename(argv[0])))
            raise RuntimeError(message)

    except (SyntaxError, ValueError) as error:
        self.write_error(six.text_type(error))
        self.flush()
        exit(0)

    except SystemExit:
        self.flush()
        raise

    except:
        self._report_unexpected_error()
        self.flush()
        exit(1)

    debug('%s.process finished under protocol_version=1', class_name)
def finalize_options(self):
    self.scp_version = int(self.scp_version)
    if not (self.scp_version == 1 or self.scp_version == 2):
        raise SystemError('Expected an SCP version number of 1 or 2, not {}'.format(self.scp_version))
    self.package_name = self.package_name + '-' + six.text_type(self.build_number)
    return
def test_countmatches_as_unit(self):

    expected, output, errors, exit_status = self._run_command('countmatches', action='getinfo', protocol=1)
    self.assertEqual(0, exit_status, msg=six.text_type(errors))
    self.assertEqual('', errors)
    self._compare_csv_files_time_sensitive(expected, output)

    expected, output, errors, exit_status = self._run_command('countmatches', action='execute', protocol=1)
    self.assertEqual(0, exit_status, msg=six.text_type(errors))
    self.assertEqual('', errors)
    self._compare_csv_files_time_sensitive(expected, output)

    expected, output, errors, exit_status = self._run_command('countmatches')
    self.assertEqual(0, exit_status, msg=six.text_type(errors))
    self.assertEqual('', errors)
    self._compare_chunks(expected, output)

    return
def test_pypygeneratetext_as_unit(self):

    expected, output, errors, exit_status = self._run_command('pypygeneratetext', action='getinfo', protocol=1)
    self.assertEqual(0, exit_status, msg=six.text_type(errors))
    self.assertEqual('', errors)
    self._compare_csv_files_time_sensitive(expected, output)

    expected, output, errors, exit_status = self._run_command('pypygeneratetext', action='execute', protocol=1)
    self.assertEqual(0, exit_status, msg=six.text_type(errors))
    self.assertEqual('', errors)
    self._compare_csv_files_time_insensitive(expected, output)

    expected, output, errors, exit_status = self._run_command('pypygeneratetext')
    self.assertEqual(0, exit_status, msg=six.text_type(errors))
    self.assertEqual('', errors)
    self._compare_chunks(expected, output, time_sensitive=False)

    return
def __init__(self, name, action, phase, protocol_version):

    basedir = Recordings._prefix + six.text_type(protocol_version)

    if not os.path.isdir(basedir):
        raise ValueError('Directory "{}" containing recordings for protocol version {} does not exist'.format(
            basedir, protocol_version))

    self._basedir = basedir
    self._name = '.'.join(ifilter(lambda part: part is not None, (name, action, phase)))
def stream(self, records):
    self.logger.debug('CountMatchesCommand: %s', self)  # logs command line
    pattern = self.pattern
    for record in records:
        count = 0
        for fieldname in self.fieldnames:
            matches = pattern.findall(six.text_type(record[fieldname].decode("utf-8")))
            count += len(matches)
        record[self.fieldname] = count
        yield record
def execute(self):
    # noinspection PyBroadException
    try:
        if self._argv is None:
            self._argv = os.path.splitext(os.path.basename(self._path))[0]
        self._execute(self._path, self._argv, self._environ)
    except:
        error_type, error, tb = sys.exc_info()
        message = 'Command execution failed: ' + six.text_type(error)
        self._logger.error(message + '\nTraceback:\n' + ''.join(traceback.format_tb(tb)))
        sys.exit(1)
def __init__(self, path, argv=None, environ=None):

    if not isinstance(path, (bytes, six.text_type)):
        raise ValueError('Expected a string value for path, not {}'.format(repr(path)))

    self._logger = getLogger(self.__class__.__name__)
    self._path = six.text_type(path)

    self._argv = None
    self._environ = None

    self.argv = argv
    self.environ = environ
def __call__(self, value):
    if value is None:
        return None
    try:
        return Code.object(compile(value, 'string', self._mode), six.text_type(value))
    except (SyntaxError, TypeError) as error:
        if six.PY2:
            message = error.message
        else:
            message = str(error)
        six.raise_from(ValueError(message), error)
def __str__(self):
    """ Converts the value of this instance to its string representation.

    The value of this ConfigurationSettings instance is represented as a string of comma-separated
    :code:`name=value` pairs. Items with values of :const:`None` are filtered from the list.

    :return: String representation of this instance

    """
    #text = ', '.join(imap(lambda (name, value): name + '=' + json_encode_string(unicode(value)), self.iteritems()))
    text = ', '.join(['{}={}'.format(name, json_encode_string(six.text_type(value))) for (name, value) in six.iteritems(self)])
    return text
def __call__(self, value):

    if value is None:
        return value

    path = six.text_type(value)

    if not os.path.isabs(path):
        path = os.path.join(self.directory, path)

    try:
        value = open(path, self.mode) if self.buffering is None else open(path, self.mode, self.buffering)
    except IOError as error:
        raise ValueError('Cannot open {0} with mode={1} and buffering={2}: {3}'.format(
            value, self.mode, self.buffering, error))

    return value
def prepare(self):
    phase = self.phase
    if phase == 'map':
        # noinspection PyUnresolvedReferences
        self._configuration = self.map.ConfigurationSettings(self)
        return
    if phase == 'reduce':
        streaming_preop = chain((self.name, 'phase="map"', str(self._options)), self.fieldnames)
        self._configuration.streaming_preop = ' '.join(streaming_preop)
        return
    raise RuntimeError('Unrecognized reporting command phase: {}'.format(json_encode_string(six.text_type(phase))))
def test_boolean(self):

    truth_values = {
        '1': True, '0': False,
        't': True, 'f': False,
        'true': True, 'false': False,
        'y': True, 'n': False,
        'yes': True, 'no': False
    }

    validator = validators.Boolean()

    for value in truth_values:
        for variant in value, value.capitalize(), value.upper():
            for s in six.text_type(variant), bytes(variant):
                self.assertEqual(validator.__call__(s), truth_values[value])

    self.assertIsNone(validator.__call__(None))
    self.assertRaises(ValueError, validator.__call__, 'anything-else')

    return
def test_duration(self):

    # Duration validator should parse and format time intervals of the form
    # HH:MM:SS

    validator = validators.Duration()

    for seconds in range(0, 25 * 60 * 60, 59):

        value = six.text_type(seconds)
        self.assertEqual(validator(value), seconds)
        self.assertEqual(validator(validator.format(seconds)), seconds)

        value = '%d:%02d' % (seconds / 60, seconds % 60)
        self.assertEqual(validator(value), seconds)
        self.assertEqual(validator(validator.format(seconds)), seconds)

        value = '%d:%02d:%02d' % (seconds / 3600, (seconds / 60) % 60, seconds % 60)
        self.assertEqual(validator(value), seconds)
        self.assertEqual(validator(validator.format(seconds)), seconds)

    self.assertEqual(validator('230:00:00'), 230 * 60 * 60)
    self.assertEqual(validator('23:00:00'), 23 * 60 * 60)
    self.assertEqual(validator('00:59:00'), 59 * 60)
    self.assertEqual(validator('00:00:59'), 59)

    self.assertEqual(validator.format(230 * 60 * 60), '230:00:00')
    self.assertEqual(validator.format(23 * 60 * 60), '23:00:00')
    self.assertEqual(validator.format(59 * 60), '00:59:00')
    self.assertEqual(validator.format(59), '00:00:59')

    self.assertRaises(ValueError, validator, '-1')
    self.assertRaises(ValueError, validator, '00:-1')
    self.assertRaises(ValueError, validator, '-1:00')
    self.assertRaises(ValueError, validator, '00:00:-1')
    self.assertRaises(ValueError, validator, '00:-1:00')
    self.assertRaises(ValueError, validator, '-1:00:00')
    self.assertRaises(ValueError, validator, '00:00:60')
    self.assertRaises(ValueError, validator, '00:60:00')

    return
def test_duration(self):

    # Duration validator should parse and format time intervals of the form
    # HH:MM:SS

    validator = validators.Duration()

    for seconds in range(0, 25 * 60 * 60, 59):

        for value in six.text_type(seconds), bytes(seconds):
            self.assertEqual(validator(value), seconds)
            self.assertEqual(validator(validator.format(seconds)), seconds)

        value = '%d:%02d' % (seconds / 60, seconds % 60)
        self.assertEqual(validator(value), seconds)
        self.assertEqual(validator(validator.format(seconds)), seconds)

        value = '%d:%02d:%02d' % (seconds / 3600, (seconds / 60) % 60, seconds % 60)
        self.assertEqual(validator(value), seconds)
        self.assertEqual(validator(validator.format(seconds)), seconds)

    self.assertEqual(validator('230:00:00'), 230 * 60 * 60)
    self.assertEqual(validator('23:00:00'), 23 * 60 * 60)
    self.assertEqual(validator('00:59:00'), 59 * 60)
    self.assertEqual(validator('00:00:59'), 59)

    self.assertEqual(validator.format(230 * 60 * 60), '230:00:00')
    self.assertEqual(validator.format(23 * 60 * 60), '23:00:00')
    self.assertEqual(validator.format(59 * 60), '00:59:00')
    self.assertEqual(validator.format(59), '00:00:59')

    self.assertRaises(ValueError, validator, '-1')
    self.assertRaises(ValueError, validator, '00:-1')
    self.assertRaises(ValueError, validator, '-1:00')
    self.assertRaises(ValueError, validator, '00:00:-1')
    self.assertRaises(ValueError, validator, '00:-1:00')
    self.assertRaises(ValueError, validator, '-1:00:00')
    self.assertRaises(ValueError, validator, '00:00:60')
    self.assertRaises(ValueError, validator, '00:60:00')

    return
def _process_protocol_v2(self, argv, ifile, ofile):
    """ Processes records on the input stream, optionally writing records to the output stream.

    :param ifile: Input file object.
    :type ifile: file or InputType

    :param ofile: Output file object.
    :type ofile: file or OutputType

    :return: :const:`None`

    """
    debug = environment.splunklib_logger.debug
    class_name = self.__class__.__name__

    debug('%s.process started under protocol_version=2', class_name)
    self._protocol_version = 2

    # Read search command metadata from splunkd
    # noinspection PyBroadException
    try:
        debug('Reading metadata')
        metadata, body = self._read_chunk(self._as_binary_stream(ifile))

        action = getattr(metadata, 'action', None)

        if action != 'getinfo':
            raise RuntimeError('Expected getinfo action, not {}'.format(action))

        if len(body) > 0:
            raise RuntimeError('Did not expect data for getinfo action')

        self._metadata = deepcopy(metadata)

        searchinfo = self._metadata.searchinfo

        searchinfo.earliest_time = float(searchinfo.earliest_time)
        searchinfo.latest_time = float(searchinfo.latest_time)
        searchinfo.search = unquote(searchinfo.search)

        self._map_input_header()

        debug('  metadata=%r, input_header=%r', self._metadata, self._input_header)

        try:
            tempfile.tempdir = self._metadata.searchinfo.dispatch_dir
        except AttributeError:
            raise RuntimeError('{}.metadata.searchinfo.dispatch_dir is undefined'.format(class_name))

        debug('  tempfile.tempdir=%r', tempfile.tempdir)

    except:
        self._record_writer = RecordWriterV2(ofile)
        self._report_unexpected_error()
        self.finish()
        exit(1)

    # Write search command configuration for consumption by splunkd
    # noinspection PyBroadException
    try:
        self._record_writer = RecordWriterV2(ofile, getattr(self._metadata.searchinfo, 'maxresultrows', None))
        self.fieldnames = []
        self.options.reset()

        args = self.metadata.searchinfo.args
        error_count = 0

        debug('Parsing arguments')

        if args and type(args) == list:
            for arg in args:
                result = arg.split('=', 1)
                if len(result) == 1:
                    self.fieldnames.append(str(result[0]))
                else:
                    name, value = result
                    name = str(name)
                    try:
                        option = self.options[name]
                    except KeyError:
                        self.write_error('Unrecognized option: {}={}'.format(name, value))
                        error_count += 1
                        continue
                    try:
                        option.value = value
                    except ValueError:
                        self.write_error('Illegal value: {}={}'.format(name, value))
                        error_count += 1
                        continue

        missing = self.options.get_missing()

        if missing is not None:
            if len(missing) == 1:
                self.write_error('A value for "{}" is required'.format(missing[0]))
            else:
                self.write_error('Values for these required options are missing: {}'.format(', '.join(missing)))
            error_count += 1

        if error_count > 0:
            exit(1)

        debug('  command: %s', six.text_type(self))

        debug('Preparing for execution')
        self.prepare()

        if self.record:

            ifile, ofile = self._prepare_recording(argv, ifile, ofile)
            self._record_writer.ofile = ofile

            # Record the metadata that initiated this command after removing the record option from args/raw_args

            info = self._metadata.searchinfo

            for attr in 'args', 'raw_args':
                setattr(info, attr, [arg for arg in getattr(info, attr) if not arg.startswith('record=')])

            metadata = MetadataEncoder().encode(self._metadata)
            ifile.record('chunked 1.0,', six.text_type(len(metadata)), ',0\n', metadata)

        if self.show_configuration:
            self.write_info(self.name + ' command configuration: ' + str(self._configuration))

        debug('  command configuration: %s', self._configuration)

    except SystemExit:
        self._record_writer.write_metadata(self._configuration)
        self.finish()
        raise
    except:
        self._record_writer.write_metadata(self._configuration)
        self._report_unexpected_error()
        self.finish()
        exit(1)

    self._record_writer.write_metadata(self._configuration)

    # Execute search command on data passing through the pipeline
    # noinspection PyBroadException
    try:
        debug('Executing under protocol_version=2')
        self._metadata.action = 'execute'
        self._execute(ifile, None)

    except SystemExit:
        self.finish()
        raise

    except:
        self._report_unexpected_error()
        self.finish()
        exit(1)

    debug('%s.process completed', class_name)
def test_record_writer_with_random_data(self, save_recording=False):

    # Confirmed: [minint, maxint) covers the full range of values that xrange allows

    # RecordWriter writes records in units of maxresultrows records. Default: 50,000.
    # Partial results are written when the record count reaches maxresultrows.

    writer = RecordWriterV2(StringIO(), maxresultrows=10)  # small for the purposes of this unit test
    test_data = OrderedDict()

    fieldnames = ['_serial', '_time', 'random_bytes', 'random_dict', 'random_integers', 'random_unicode']
    test_data['fieldnames'] = fieldnames
    test_data['values'] = []

    write_record = writer.write_record

    for serial_number in range(0, 31):
        values = [serial_number, time(), random_bytes(), random_dict(), random_integers(), random_unicode()]
        record = OrderedDict(izip(fieldnames, values))
        #try:
        write_record(record)
        #except Exception as error:
        #    self.fail(error)
        test_data['values'].append(values)

    # RecordWriter accumulates inspector messages and metrics until maxresultrows are written, a partial result
    # is produced or we're finished

    messages = [
        ('debug', random_unicode()),
        ('error', random_unicode()),
        ('fatal', random_unicode()),
        ('info', random_unicode()),
        ('warn', random_unicode())]

    test_data['messages'] = messages

    for message_type, message_text in messages:
        writer.write_message(message_type, '{}', message_text)

    metrics = {
        'metric-1': SearchMetric(1, 2, 3, 4),
        'metric-2': SearchMetric(5, 6, 7, 8)
    }

    test_data['metrics'] = metrics

    for name, metric in six.iteritems(metrics):
        writer.write_metric(name, metric)

    self.assertEqual(writer._chunk_count, 3)
    self.assertEqual(writer._record_count, 1)
    self.assertGreater(writer._buffer.tell(), 0)
    self.assertEqual(writer._total_record_count, 30)
    self.assertListEqual(writer._fieldnames, fieldnames)
    self.assertListEqual(writer._inspector['messages'], messages)

    self.assertDictEqual(
        dict(ifilter(lambda k_v: k_v[0].startswith('metric.'), six.iteritems(writer._inspector))),
        dict(imap(lambda k_v1: ('metric.' + k_v1[0], k_v1[1]), six.iteritems(metrics))))

    writer.flush(finished=True)

    self.assertEqual(writer._chunk_count, 4)
    self.assertEqual(writer._record_count, 0)
    self.assertEqual(writer._buffer.tell(), 0)
    self.assertEqual(writer._buffer.getvalue(), '')
    self.assertEqual(writer._total_record_count, 31)

    self.assertRaises(AssertionError, writer.flush, finished=True, partial=True)
    self.assertRaises(AssertionError, writer.flush, finished='non-boolean')
    self.assertRaises(AssertionError, writer.flush, partial='non-boolean')
    self.assertRaises(AssertionError, writer.flush)

    self.assertRaises(RuntimeError, writer.write_record, {})

    self.assertFalse(writer._ofile.closed)
    self.assertIsNone(writer._fieldnames)
    self.assertDictEqual(writer._inspector, OrderedDict())

    # P2 [ ] TODO: Verify that RecordWriter gives consumers the ability to write partial results by calling
    # RecordWriter.flush(partial=True).

    # P2 [ ] TODO: Verify that RecordWriter gives consumers the ability to finish early by calling
    # RecordWriter.flush(finish=True).

    if save_recording:

        cls = self.__class__
        method = cls.test_record_writer_with_recordings
        base_path = os.path.join(self._recordings_path, '.'.join((cls.__name__, method.__name__, six.text_type(time()))))

        with gzip.open(base_path + '.input.gz', 'wb') as f:
            pickle.dump(test_data, f)

        with open(base_path + '.output', 'wb') as f:
            f.write(writer._ofile.getvalue())

    return
def fix_up(cls, values):

    is_configuration_setting = lambda attribute: isinstance(attribute, ConfigurationSetting)
    definitions = getmembers(cls, is_configuration_setting)
    i = 0

    for name, setting in definitions:

        if setting._name is None:
            setting._name = name = six.text_type(name)
        else:
            name = setting._name

        validate, specification = setting._get_specification()
        backing_field_name = '_' + name

        if setting.fget is None and setting.fset is None and setting.fdel is None:

            value = setting._value

            if setting._readonly or value is not None:
                validate(specification, name, value)

            def fget(bfn, value):
                return lambda this: getattr(this, bfn, value)

            setting = setting.getter(fget(backing_field_name, value))

            if not setting._readonly:

                def fset(bfn, validate, specification, name):
                    return lambda this, value: setattr(this, bfn, validate(specification, name, value))

                setting = setting.setter(fset(backing_field_name, validate, specification, name))

            setattr(cls, name, setting)

        def is_supported_by_protocol(supporting_protocols):

            def is_supported_by_protocol(version):
                return version in supporting_protocols

            return is_supported_by_protocol

        del setting._name, setting._value, setting._readonly

        setting.is_supported_by_protocol = is_supported_by_protocol(specification.supporting_protocols)
        setting.supporting_protocols = specification.supporting_protocols
        setting.backing_field_name = backing_field_name
        definitions[i] = setting
        setting.name = name

        i += 1

        try:
            value = values[name]
        except KeyError:
            continue

        if setting.fset is None:
            raise ValueError('The value of configuration setting {} is fixed'.format(name))

        setattr(cls, backing_field_name, validate(specification, name, value))
        del values[name]

    if len(values) > 0:
        settings = sorted(list(six.iteritems(values)))
        settings = imap(lambda n_v: '{}={}'.format(n_v[0], repr(n_v[1])), settings)
        raise AttributeError('Inapplicable configuration settings: ' + ', '.join(settings))

    cls.configuration_setting_definitions = definitions
def _process_protocol_v2(self, argv, ifile, ofile):
    """ Processes records on the input stream, optionally writing records to the output stream.

    :param ifile: Input file object.
    :type ifile: file or InputType

    :param ofile: Output file object.
    :type ofile: file or OutputType

    :return: :const:`None`

    """
    debug = environment.splunklib_logger.debug
    class_name = self.__class__.__name__

    debug('%s.process started under protocol_version=2', class_name)
    self._protocol_version = 2

    # Read search command metadata from splunkd
    # noinspection PyBroadException
    try:
        debug('Reading metadata')
        metadata, body = self._read_chunk(ifile)

        action = getattr(metadata, 'action', None)

        if action != 'getinfo':
            raise RuntimeError('Expected getinfo action, not {}'.format(action))

        if len(body) > 0:
            raise RuntimeError('Did not expect data for getinfo action')

        self._metadata = deepcopy(metadata)

        searchinfo = self._metadata.searchinfo

        searchinfo.earliest_time = float(searchinfo.earliest_time)
        searchinfo.latest_time = float(searchinfo.latest_time)
        searchinfo.search = unquote(searchinfo.search)

        self._map_input_header()

        debug('  metadata=%r, input_header=%r', self._metadata, self._input_header)

        try:
            tempfile.tempdir = self._metadata.searchinfo.dispatch_dir
        except AttributeError:
            raise RuntimeError('{}.metadata.searchinfo.dispatch_dir is undefined'.format(class_name))

        debug('  tempfile.tempdir=%r', tempfile.tempdir)

    except:
        self._record_writer = RecordWriterV2(ofile)
        self._report_unexpected_error()
        self.finish()
        exit(1)

    # Write search command configuration for consumption by splunkd
    # noinspection PyBroadException
    try:
        self._record_writer = RecordWriterV2(ofile, getattr(self._metadata.searchinfo, 'maxresultrows', None))
        self.fieldnames = []
        self.options.reset()

        args = self.metadata.searchinfo.args
        error_count = 0

        debug('Parsing arguments')

        if args and type(args) == list:
            for arg in args:
                result = arg.split('=', 1)
                if len(result) == 1:
                    self.fieldnames.append(str(result[0]))
                else:
                    name, value = result
                    name = str(name)
                    try:
                        option = self.options[name]
                    except KeyError:
                        self.write_error('Unrecognized option: {}={}'.format(name, value))
                        error_count += 1
                        continue
                    try:
                        option.value = value
                    except ValueError:
                        self.write_error('Illegal value: {}={}'.format(name, value))
                        error_count += 1
                        continue

        missing = self.options.get_missing()

        if missing is not None:
            if len(missing) == 1:
                self.write_error('A value for "{}" is required'.format(missing[0]))
            else:
                self.write_error('Values for these required options are missing: {}'.format(', '.join(missing)))
            error_count += 1

        if error_count > 0:
            exit(1)

        debug('  command: %s', six.text_type(self))

        debug('Preparing for execution')
        self.prepare()

        if self.record:

            ifile, ofile = self._prepare_recording(argv, ifile, ofile)
            self._record_writer.ofile = ofile

            # Record the metadata that initiated this command after removing the record option from args/raw_args

            info = self._metadata.searchinfo

            for attr in 'args', 'raw_args':
                setattr(info, attr, [arg for arg in getattr(info, attr) if not arg.startswith('record=')])

            metadata = MetadataEncoder().encode(self._metadata)
            ifile.record('chunked 1.0,', six.text_type(len(metadata)), ',0\n', metadata)

        if self.show_configuration:
            self.write_info(self.name + ' command configuration: ' + str(self._configuration))

        debug('  command configuration: %s', self._configuration)

    except SystemExit:
        self._record_writer.write_metadata(self._configuration)
        self.finish()
        raise
    except:
        self._record_writer.write_metadata(self._configuration)
        self._report_unexpected_error()
        self.finish()
        exit(1)

    self._record_writer.write_metadata(self._configuration)

    # Execute search command on data passing through the pipeline
    # noinspection PyBroadException
    try:
        debug('Executing under protocol_version=2')
        self._records = self._records_protocol_v2
        self._metadata.action = 'execute'
        self._execute(ifile, None)

    except SystemExit:
        self.finish()
        raise

    except:
        self._report_unexpected_error()
        self.finish()
        exit(1)

    debug('%s.process completed', class_name)
def format(self, value):
    return None if value is None else six.text_type(value)
def __init__(self, name, pattern, flags=0):
    self.name = six.text_type(name)
    self.pattern = re.compile(pattern, flags)
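# Usage sketch tying together the Match validator's __init__ above and its __call__ shown
# earlier (assumption: it is splunklib.searchcommands.validators.Match from the Splunk SDK
# for Python; the name and pattern are illustrative).
from splunklib.searchcommands import validators

ssn = validators.Match('social security number', r'\d{3}-\d{2}-\d{4}')
assert ssn('123-45-6789') == '123-45-6789'
assert ssn(None) is None
# ssn('not-an-ssn') raises ValueError('Expected social security number, not "not-an-ssn"')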