Example 1
	def handleList(self, confInfo):
		self.capabilityRead = 'read_kvst_config'

		try:
			cfg = cli.getConfStanza('kvstore_tools','settings')
		except BaseException as e:
			raise Exception("Could not read configuration: " + repr(e))
		
		# Facility info - prepended to log lines
		facility = os.path.basename(__file__)
		facility = os.path.splitext(facility)[0]
		try:
			logger = setup_logger(cfg["log_level"], 'kvstore_tools.log', facility)
		except BaseException as e:
			raise Exception("Could not create logger: " + repr(e))

		logger.debug('KV Store Tools Settings handler started (List)')
		
		# Check for permissions to read the configuration
		session_key = self.getSessionKey()
		content = rest.simpleRequest('/services/authentication/current-context?output_mode=json', sessionKey=session_key, method='GET')[1]
		content = json.loads(content)
		current_user = content['entry'][0]['content']['username']
		current_user_capabilities = content['entry'][0]['content']['capabilities']
		if self.capabilityRead in current_user_capabilities:
			logger.debug("User %s is authorized" % current_user)

			confDict = self.readConf("kvstore_tools")
			if confDict is not None:
				for stanza, settings in list(confDict.items()):
					for key, val in list(settings.items()):
						logger.debug("key: {0}, value: {1}".format(key, val))
						if key == 'compression':
							val = '1' if str2bool(val) else '0'
						'''
						if key in ['default_path'] and val in [None, '', 'unset']:
							val = os.path.join('$SPLUNK_HOME', 'etc', 'apps', 'kvstore_tools', 'backups')
							# Windows wildcard support (works with $ but this is more clear).
							if '\\' in val:
								val = val.replace('$SPLUNK_HOME', '%SPLUNK_HOME%')
						if key in ['backup_batch_size'] and val in [None, '']:
							val = '50000'
						if key in ['retention_days'] and val in [None, '']:
							val = '0'
						if key in ['retention_size'] and val in [None, '']:
							val = '0'
						'''
						confInfo[stanza].append(key, val)
		else:
			raise Exception("User %s is unauthorized. Has the read_kvst_config capability been granted?" % current_user)
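This handler relies on a str2bool helper that is not shown in the excerpt. A minimal sketch consistent with how it is called here (the exact set of accepted truthy strings is an assumption):

def str2bool(value):
    # Hypothetical sketch: treat common truthy strings as True, anything else as False
    return str(value).strip().lower() in ('1', 'true', 't', 'yes', 'y')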
Example 2
	def handleEdit(self, confInfo):
		self.capabilityWrite = 'write_kvst_config'

		try:
			cfg = cli.getConfStanza('kvstore_tools','settings')
		except BaseException as e:
			raise Exception("Could not read configuration: " + repr(e))
		
		# Facility info - prepended to log lines
		facility = os.path.basename(__file__)
		facility = os.path.splitext(facility)[0]
		try:
			logger = setup_logger(cfg["log_level"], 'kvstore_tools.log', facility)
		except BaseException as e:
			raise Exception("Could not create logger: " + repr(e))

		logger.debug('KV Store Tools Settings handler started (Edit)')

		# Check for permissions to read the configuration
		session_key = self.getSessionKey()
		content = rest.simpleRequest('/services/authentication/current-context?output_mode=json', sessionKey=session_key, method='GET')[1]
		content = json.loads(content)
		current_user = content['entry'][0]['content']['username']
		current_user_capabilities = content['entry'][0]['content']['capabilities']
		if self.capabilityWrite in current_user_capabilities:
			logger.debug("User %s is authorized" % current_user)

			# Read the splunk.secret file
			with open(os.path.join(os.getenv('SPLUNK_HOME'), 'etc', 'auth', 'splunk.secret'), 'r') as ssfh:
				splunk_secret = ssfh.readline()

			config = self.callerArgs.data
			new_config = {}
			for k, v in list(config.items()):
				if isinstance(v, list) and len(v) == 1:
					v = v[0]
				if v is None:
					logger.debug('Setting %s to blank' % k)
					new_config[k] = ''
				else:
					logger.debug('Setting %s to %s' % (k, v))
					if k[:10] == 'credential' and '$7$' not in v:
						logger.debug('Value has an unencrypted password. Encrypting.')
						# Split the value into alias/username/password
						# (maxsplit=2 keeps any colons inside the password intact)
						hostname, username, password = v.split(':', 2)
						try:
							v = hostname + ":" + username + ":" + encrypt_new(splunk_secret, password)
							logger.debug('Encrypted')
						except BaseException as e:
							logger.error("Error saving encrypted password for %s: %s" % (hostname, repr(e)))
							continue
					new_config[k] = v
					logger.debug('Applied to configuration dict')
			try:
				if 'compression' in new_config:
					# new_config values were unwrapped from their single-element lists
					# above, so assign the normalized string directly
					new_config['compression'] = '1' if str2bool(config['compression'][0]) else '0'

				if 'default_path' in new_config:
					if config['default_path'][0] in [None, '']:
						new_config['default_path'] = None

				if 'backup_batch_size' in new_config:
					if config['backup_batch_size'][0] in [None, '']:
						new_config['backup_batch_size'] = None

				logger.debug("Writing configuration")
			except BaseException as e:
				logger.critical("Error parsing configuration: %s" % repr(e))
			# Write the config stanza
			self.writeConf('kvstore_tools', 'settings', new_config)
		else:
			raise Exception("User %s is unauthorized. Has the write_kvst_config capability been granted?" % current_user)
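The encrypt_new helper is also external to this excerpt. The sketch below is modeled on the publicly documented "$7$" credential format (PBKDF2-HMAC-SHA256 key derivation from splunk.secret, then AES-256-GCM, as implemented by the splunksecrets package); treat it as an assumption rather than this app's actual implementation:

import base64
import os

from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.ciphers.aead import AESGCM
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC

def encrypt_new(splunk_secret, plaintext):
    # Hypothetical sketch following the documented "$7$" layout:
    # base64(16-byte IV + AES-256-GCM ciphertext), prefixed with "$7$"
    kdf = PBKDF2HMAC(algorithm=hashes.SHA256(), length=32,
                     salt=b'disk-encryption', iterations=1)
    key = kdf.derive(splunk_secret[:254].encode())
    iv = os.urandom(16)
    ciphertext = AESGCM(key).encrypt(iv, plaintext.encode(), None)
    return '$7$' + base64.b64encode(iv + ciphertext).decode()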
Example 3
def get_config_from_alias(session_key, config_data, stanza_guid_alias=None):

    credentials = {}
    # Get all credentials for this app
    try:
        entity = en.getEntity('/server',
                              'settings',
                              namespace='-',
                              sessionKey=session_key,
                              owner='-')
        splunkd_port = entity["mgmtHostPort"]
        service = client.connect(token=session_key, port=splunkd_port)
        # Get all credentials in the secret store for this app
        storage_passwords = service.storage_passwords
        for credential in storage_passwords:
            if credential.access.app == app:
                credentials[credential._state.title] = {
                    'username': credential.content.get('username'),
                    'password': credential.content.get('clear_password'),
                    'realm': credential.content.get('realm')
                }

    except BaseException as e:
        raise Exception("Could not access secret store: " + repr(e))

    # Parse and merge the configuration
    try:
        for guid in list(config_data.keys()):
            for setting, setting_value in list(config_data[guid].items()):
                # Delete blank configuration values (in case setup process wrote them)
                if setting_value is not None and len(setting_value) == 0:
                    del config_data[guid][setting]
                # Add the username/password/realm values to the credential
                if 'credential' in setting:
                    #logger.debug("Found credential setting in stanza: %s/%s" % (guid, setting))
                    if setting_value in credentials:
                        for s in ['username', 'password', 'realm']:
                            if credentials[setting_value].get(s) is not None:
                                config_data[guid][setting + '_' + s] = credentials[setting_value][s]
        # Set the default configuration
        if 'default' in config_data:
            default_target_config = config_data['default']
        else:
            default_target_config = {}

        # See if a GUID was provided explicitly (used by alert actions)
        # 8-4-4-4-12 format
        try:
            if stanza_guid_alias is not None:
                if re.match(
                        r'[({]?[a-f0-9]{8}[-]?([a-f0-9]{4}[-]?){3}[a-f0-9]{12}[})]?',
                        stanza_guid_alias,
                        flags=re.IGNORECASE):
                    logger.debug("Using guid " + stanza_guid_alias)
                    return merge_two_dicts(default_target_config,
                                           config_data[stanza_guid_alias])
        except BaseException as e:
            logger.exception("Exception caught: " + repr(e))

        # Loop through all GUID stanzas for the specified alias
        for guid in list(config_data.keys()):
            if guid != 'default':
                # Merge the configuration with the default config to fill in null values
                config_stanza_settings = merge_two_dicts(
                    default_target_config, config_data[guid])
                guid_is_default = str2bool(config_stanza_settings.get('default', 'false'))
                # Check to see if this is the configuration we want to use
                if 'alias' in config_stanza_settings:
                    if config_stanza_settings['alias'] == stanza_guid_alias or (
                            stanza_guid_alias is None and guid_is_default):
                        # Return the specified target configuration, or default if target not specified
                        logger.debug("Active configuration: %s", guid)
                        return config_stanza_settings
        return None
    except BaseException as e:
        raise Exception("Unable to find target configuration: " + repr(e))
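merge_two_dicts is assumed to be the usual copy-then-update helper; a sketch consistent with its use above, where the second argument's values win on key collisions:

def merge_two_dicts(x, y):
    # Return a new dict with x's entries, overridden by y's where keys collide
    z = x.copy()
    z.update(y)
    return z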
Example 4
    def stream(self, events):
        try:
            app_config = cli.getConfStanza('ep_general', 'settings')
            cmd_config = cli.getConfStanzas('ep_hec')
        except BaseException as e:
            raise Exception("Could not read configuration: " + repr(e))

        # Facility info - prepended to log lines
        facility = os.path.basename(__file__)
        facility = os.path.splitext(facility)[0]
        try:
            logger = setup_logger(app_config["log_level"],
                                  'export_everything.log', facility)
        except BaseException as e:
            raise Exception("Could not create logger: " + repr(e))

        logger.info('HEC Export search command initiated')
        logger.debug('search_ep_hec command: %s', self)  # logs command line

        # Set defaults
        if self.host is None:
            self.host = "$host$"
        # Get the default values used for data originating from this machine
        inputs_host = cli.getConfStanza('inputs', 'splunktcp')["host"]

        if self.source is None:
            self.source = "$source$"

        if self.sourcetype is None:
            self.sourcetype = "$sourcetype$"

        if self.index is None:
            self.index = "$index$"

        # Enumerate proxy settings
        http_proxy = os.environ.get('HTTP_PROXY')
        https_proxy = os.environ.get('HTTPS_PROXY')
        proxy_exceptions = os.environ.get('NO_PROXY')

        if http_proxy is not None:
            logger.debug("HTTP proxy: %s" % http_proxy)
        if https_proxy is not None:
            logger.debug("HTTPS proxy: %s" % https_proxy)
        if proxy_exceptions is not None:
            logger.debug("Proxy Exceptions: %s" % proxy_exceptions)

        # Enumerate settings
        searchinfo = self._metadata.searchinfo
        app = searchinfo.app
        user = searchinfo.username
        session_key = searchinfo.session_key

        if self.target is None and 'target=' in str(self):
            recover_parameters(self)
        # Replace all tokenized parameter strings
        replace_object_tokens(self)

        try:
            target_config = get_config_from_alias(session_key, cmd_config,
                                                  self.target)
            if target_config is None:
                exit_error(
                    logger,
                    "Unable to find target configuration (%s)." % self.target,
                    100937)

            logger.debug("Target configuration: " + str(target_config))
            hec_token = target_config['token']
            hec_host = target_config['host']
            hec_port = target_config['port']
            hec_ssl = str2bool(target_config['ssl'])
        except BaseException as e:
            exit_error(logger,
                       "Error reading target server configuration: " + repr(e),
                       124812)

        if len(hec_host) == 0:
            exit_error(logger, "No host specified", 119371)

        # Create HEC object
        hec = http_event_collector(hec_token,
                                   hec_host,
                                   http_event_port=hec_port,
                                   http_event_server_ssl=hec_ssl)
        if port_is_open(hec_host, hec_port):
            logger.debug("Port connectivity check passed")
            if hec.check_connectivity():

                # Special event key fields that can be specified/overridden in the alert action
                meta_keys = ['source', 'sourcetype', 'host', 'index']
                event_count = 0
                for event in events:
                    # Get the fields list for the event
                    event_keys = list(event.keys())

                    payload = {}
                    payload_event_src = {}
                    # Copy event to new event, so we can change it
                    for f in event_keys:
                        payload_event_src[f] = event[f]

                    if '_time' in event_keys:
                        payload.update({"time": payload_event_src['_time']})
                        del payload_event_src['_time']
                    else:
                        payload.update({"time": time.time()})

                    for k in meta_keys:
                        # Loop through the metadata keys: host/source/sourcetype/index
                        meta_value = getattr(self, k)
                        if meta_value.startswith("$") and meta_value.endswith("$"):
                            # Token like "$fieldname$": resolve it from the event
                            field_name = meta_value[1:-1]
                            if field_name in event_keys:
                                payload.update({k: payload_event_src[field_name]})
                                # Delete it from the payload event source so it's not
                                # included when we dump the rest of the fields later.
                                del payload_event_src[field_name]
                            elif k == "host" and self.host == "$host$":
                                # "host" field not found in the event, but still set to the
                                # default value. Use the one from inputs.conf.
                                payload.update({k: inputs_host})
                        else:
                            # Plaintext entry
                            payload.update({k: meta_value})

                    # Only send _raw (no other fields) if the _raw field was included in the search result.
                    # (Don't include other fields/values)
                    if '_raw' in payload_event_src:
                        #logger.debug("Using _raw from search result")
                        payload.update({"event": payload_event_src['_raw']})
                    else:
                        payload.update({"event": payload_event_src})

                    event_count += 1
                    logger.debug("Payload = " + str(payload))
                    hec.batchEvent(payload)
                    yield (event)

                hec.flushBatch()
                logger.info(
                    "Successfully exported events. count=%s target=%s app=%s user=%s"
                    % (event_count, hec_host, app, user))
            else:  # Connectivity check failed
                exit_error(
                    logger,
                    "HEC endpoint port open but connection test failed.",
                    104893)
        else:
            # hec_ssl was already converted to a bool above
            protocol = 'https' if hec_ssl else 'http'
            exit_error(
                logger,
                "Unable to connect to the HEC endpoint: %s://%s:%s" %
                (protocol, hec_host, hec_port), 100253)
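port_is_open is another helper outside this excerpt; a plausible standard-library sketch (the timeout value is an assumption):

import socket

def port_is_open(host, port, timeout=5):
    # Best-effort TCP connectivity check against host:port
    try:
        with socket.create_connection((host, int(port)), timeout=timeout):
            return True
    except OSError:
        return False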
Example 5
                                    j[column_names[idx][5:]] = values
                                else:
                                    # Make a JSON object/dict with all of the row data
                                    j[column_names[idx]] = column
                                # Add the result to the results array
                        json_results.append(j)
                    # Increment row count
                    rownum += 1
            except BaseException:
                logger.error("Could not read or parse the results file",
                             exc_info=True)
                sys.exit(1)
        logger.info("Read %d results from results file" % rownum)

        # Change the action if the overwrite flag is specified
        if str2bool(alert_config.get('overwrite')):
            logger.debug('Overwriting kvstore collection=%s with data=%s' %
                         (collection, json.dumps(json_results)))
            # Delete the collection contents
            try:
                response = kv.delete_collection(logger,
                                                payload.get('server_uri'),
                                                payload.get('session_key'),
                                                app, collection)
                logger.debug('Server response for collection deletion: %s' %
                             response)
            except BaseException as e:
                logger.error('Failed to delete collection: %s' % repr(e))
                sys.exit(3)
        else:
            logger.debug('Updating kvstore collection=%s with data=%s' %
                         (collection, json.dumps(json_results)))
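The kv.delete_collection helper is not shown. Splunk's KV store REST API clears a collection with an HTTP DELETE on its data endpoint, so a hypothetical sketch mirroring the call above might look like this (certificate verification is disabled because splunkd defaults to a self-signed certificate):

import ssl
import urllib.request

def delete_collection(logger, server_uri, session_key, app, collection):
    # Hypothetical sketch: DELETE on the data endpoint removes every record
    url = '%s/servicesNS/nobody/%s/storage/collections/data/%s' % (
        server_uri, app, collection)
    context = ssl.create_default_context()
    context.check_hostname = False
    context.verify_mode = ssl.CERT_NONE
    request = urllib.request.Request(
        url, method='DELETE',
        headers={'Authorization': 'Splunk %s' % session_key})
    with urllib.request.urlopen(request, context=context) as response:
        logger.debug('KV store DELETE returned HTTP %s' % response.status)
        return response.read().decode()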
Example 6
    def reduce(self, events):
        try:
            app_config = cli.getConfStanza('ep_general', 'settings')
            cmd_config = cli.getConfStanzas('ep_aws_s3')
        except BaseException as e:
            raise Exception("Could not read configuration: " + repr(e))

        # Facility info - prepended to log lines
        facility = os.path.basename(__file__)
        facility = os.path.splitext(facility)[0]
        try:
            logger = setup_logger(app_config["log_level"],
                                  'export_everything.log', facility)
        except BaseException as e:
            raise Exception("Could not create logger: " + repr(e))

        logger.info('AWS S3 Export search command initiated')
        logger.debug("Configuration: " + str(cmd_config))
        logger.debug('search_ep_awss3 command: %s', self)  # logs command line

        # Enumerate settings
        app = self._metadata.searchinfo.app
        user = self._metadata.searchinfo.username
        dispatch = self._metadata.searchinfo.dispatch_dir
        session_key = self._metadata.searchinfo.session_key

        if self.target is None and 'target=' in str(self):
            recover_parameters(self)
        # Replace all tokenized parameter strings
        replace_object_tokens(self)

        # Build the configuration
        try:
            aws_config = get_config_from_alias(session_key, cmd_config,
                                               self.target)
            if aws_config is None:
                exit_error(
                    logger,
                    "Unable to find target configuration (%s)." % self.target,
                    100937)
            logger.debug("Target configuration: " + str(aws_config))
        except BaseException as e:
            exit_error(logger,
                       "Error reading target server configuration: " + repr(e),
                       124812)

        if self.bucket is None:
            if 'default_s3_bucket' in aws_config:
                t = aws_config['default_s3_bucket']
                if t is not None and len(t) > 0:
                    self.bucket = t
                else:
                    exit_error(logger, "No bucket specified", 4)
            else:
                exit_error(logger, "No bucket specified", 5)

        file_extensions = {
            'raw': '.log',
            'kv': '.log',
            'pipe': '.log',
            'csv': '.csv',
            'tsv': '.tsv',
            'json': '.json'
        }

        if self.outputformat is None:
            self.outputformat = 'csv'

        if self.outputfile is None:
            # Boto is special. We need repr to give it the encoding it expects to match the hashing.
            self.outputfile = repr('export_' + user + '___now__' +
                                   file_extensions[self.outputformat]).strip(
                                       "'")

        # Replace keywords from output filename
        self.outputfile = replace_keywords(self.outputfile)

        if self.compress is not None:
            logger.debug('Compression: %s', self.compress)
        else:
            try:
                self.compress = str2bool(aws_config['compress'])
            except BaseException:
                self.compress = False

        # Use the random number to support running multiple outputs in a single search
        random_number = str(random.randint(10000, 100000))
        staging_filename = 'export_everything_staging_' + random_number + '.txt'
        local_output_file = os.path.join(dispatch, staging_filename)

        # Keep the compress flag and the .gz file extension in sync
        if not self.compress and len(self.outputfile) > 3:
            # We have a .gz extension when compression was not specified. Enable compression.
            if self.outputfile[-3:] == '.gz':
                self.compress = True
        elif self.compress and len(self.outputfile) > 3:
            if self.outputfile[-3:] != '.gz':
                self.outputfile = self.outputfile + '.gz'

        if self.compress:
            local_output_file = local_output_file + '.gz'

        logger.debug("Staging file: %s" % local_output_file)
        try:
            s3 = get_aws_connection(aws_config)
        except BaseException as e:
            exit_error(logger, "Could not connect to AWS: " + repr(e), 741423)

        event_counter = 0
        # Write the output file to disk in the dispatch folder
        logger.debug(
            "Writing events to file %s in %s format. Compress=%s\n\tfields=%s",
            local_output_file, self.outputformat, self.compress, self.fields)
        for event in event_file.write_events_to_file(events, self.fields,
                                                     local_output_file,
                                                     self.outputformat,
                                                     self.compress):
            yield event
            event_counter += 1

        # Upload file to s3
        try:
            with open(local_output_file, "rb") as f:
                s3.upload_fileobj(f, self.bucket, self.outputfile)
            s3 = None
            sts_client = None
            logger.info(
                "Successfully exported events to s3. app=%s count=%s bucket=%s file=%s user=%s"
                % (app, event_counter, self.bucket, self.outputfile, user))
            os.remove(local_output_file)
        except s3.exceptions.NoSuchBucket as e:
            exit_error(logger, "Error: No such bucket", 123833)
        except BaseException as e:
            exit_error(logger, "Could not upload file to S3: " + repr(e), 9)