Example #1
def get_shared_storage():
    """
    If search head pooling is enabled, return the location of shared storage.
    Otherwise, return an empty string.
    """
    try:
        state = comm.getConfKeyValue('server', 'pooling', 'state')
        storage = comm.getConfKeyValue('server', 'pooling', 'storage')
        if state == 'enabled' and len(storage) > 0:
            return storage
    except Exception:
        pass
    # Fall through: pooling disabled, no storage configured, or the lookup failed.
    return ''
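A minimal usage sketch (hypothetical caller; the directory names below are assumptions for illustration, not Splunk's actual layout): pooled search heads keep state on the shared mount, standalone instances fall back to $SPLUNK_HOME.

import os

shared = get_shared_storage()
if shared:
    # Pooling enabled: per-instance state lives on the shared mount.
    state_dir = os.path.join(shared, 'var', 'run', 'splunk')
else:
    # Standalone: use the local Splunk installation.
    state_dir = os.path.expandvars(
        os.path.join('$SPLUNK_HOME', 'var', 'run', 'splunk'))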
Example #2
def lit(name):
    """
    Get externalized string literals for use in this module.
    """
    try:
        return comm.getConfKeyValue("literals", "clilib.bundle_paths", name)
    except Exception as e:
        logger.exception(e)
        return ""
Example #3
    def splunkd_scheme(cls):
        if not cls.scheme:
            import splunk.clilib.cli_common as comm
            import splunk.util as splutil
            enableSsl = comm.getConfKeyValue('server', 'sslConfig',
                                             'enableSplunkdSSL')
            enableSsl = splutil.normalizeBoolean(enableSsl)
            cls.scheme = 'https' if enableSsl else 'http'
        return cls.scheme
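A hedged usage sketch: combining the cached scheme with the management port from web.conf to build a splunkd base URI. Only splunkd_scheme itself comes from the example; the owning class name is a placeholder.

import splunk.clilib.cli_common as comm

scheme = SplunkdConnection.splunkd_scheme()  # hypothetical owning class
# mgmtHostPort in web.conf's [settings] stanza is 'host:port' for splunkd.
host_port = comm.getConfKeyValue('web', 'settings', 'mgmtHostPort')
base_uri = '%s://%s' % (scheme, host_port)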
Example #4
def get_conf_value(conf, stanza, key):
    # This wrapper is here for future functionality
    return getConfKeyValue(conf, stanza, key)
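One plausible reading of the "future functionality" comment (an assumption, not from the source): the wrapper gives a single seam where caching could later be added without touching callers, e.g.:

import functools

@functools.lru_cache(maxsize=None)
def get_conf_value_cached(conf, stanza, key):
    # Same contract as get_conf_value, but each (conf, stanza, key)
    # triple only hits the .conf system once per process.
    return getConfKeyValue(conf, stanza, key)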
Example #5
class SummarizationController(BaseController):
    """
    Summarization
    """

    #
    # attach common template args
    #

    def render_template(self, template_path, template_args=None):
        # Avoid the shared-mutable-default pitfall.
        if template_args is None:
            template_args = {}
        template_args['appList'] = self.get_app_manifest()
        return super(SummarizationController,
                     self).render_template(template_path, template_args)

    def is_normalized(self, hash):
        '''
        Returns True if the hash id is normalized (i.e. prefixed with 'NS').
        '''
        return hash.startswith('NS')

    def get_app_manifest(self):
        '''
        Returns a dict of all available apps to current user
        '''

        output = cached.getEntities('apps/local',
                                    search=['disabled=false', 'visible=true'],
                                    count=-1)

        return output

    #
    # Summarization Dashboard
    #

    @route('/:selection')
    @expose_page(methods=['GET', 'POST'])
    def show_dashboard(self,
                       ctrl=None,
                       selection=None,
                       ctrl_link=None,
                       savedsearch=None,
                       controller_exception=None,
                       **kwargs):
        '''
        Summarization Dashboard 
        '''
        if cherrypy.config['is_free_license'] or cherrypy.config[
                'is_forwarder_license']:
            return self.render_template(
                'admin/402.html',
                {'feature': _('Report Acceleration Summaries')})

        logger.debug("\n\n\n tsum: In show_dashboard: \n\n\n")

        # User is performing some action on a summary: removing, re-indexing, or verifying it
        if cherrypy.request.method == 'POST':
            logger.debug("post request!")
            serverResponse = None
            try:
                if ctrl in ("remove", "redo"):
                    serverResponse, serverContent = rest.simpleRequest(
                        ctrl_link, method='DELETE', raiseAllErrors=True)
                    #logger.debug("serverResponse: %s" % serverResponse)
                    #logger.debug("serverContent: %s" % serverContent)
                elif ctrl == "reschedule":
                    serverResponse, serverContent = rest.simpleRequest(
                        ctrl_link, method='POST', raiseAllErrors=True)

                # Guard: serverResponse stays None if ctrl was unrecognized.
                if serverResponse is not None and serverResponse.status != 200:
                    controller_exception = Exception(
                        'unhandled HTTP status=%s' % serverResponse.status)

                logger.debug("uri: %s, result of action: %s " %
                             (ctrl_link, serverResponse))
            except splunk.InternalServerError as e:
                logger.debug("Error occurred: %s" % e)
                # TODO: This exception is not caught or handled in the summarization dashboard html page
                controller_exception = e

            # return a redirect so that when users reload the page they don't rerun their action
            raise cherrypy.HTTPRedirect(
                self.make_url(['manager', 'system', 'summarization']), 302)

        entities = Summarization.all().filter_by_app('-').filter_by_user('-')
        if selection is not None:
            savedsearch = selection

        detailed_dashboard = False
        try:
            detailed_dashboard_str = comm.getConfKeyValue(
                "limits", "auto_summarizer", "detailed_dashboard")
            if detailed_dashboard_str in ['true', '1', 't', 'y', 'yes']:
                detailed_dashboard = True
            logger.debug("detailed_dashboard = %s" % detailed_dashboard_str)
        except Exception:
            detailed_dashboard = False
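The manual truthy-string check above duplicates what splunk.util.normalizeBoolean (seen in Example #3) already does; a hedged alternative sketch, noting that normalizeBoolean may return the raw value for unrecognized strings, hence the "is True" coercion:

import splunk.util as splutil

detailed_dashboard = False
try:
    detailed_dashboard = splutil.normalizeBoolean(
        comm.getConfKeyValue("limits", "auto_summarizer",
                             "detailed_dashboard")) is True
except Exception:
    pass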
Example #6
    file_handler = logging.handlers.RotatingFileHandler(log_file,
                                                        maxBytes=25000000,
                                                        backupCount=5)
    formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
    file_handler.setFormatter(formatter)

    logger.addHandler(file_handler)
    return logger


### MAIN FUNCTION ###

if __name__ == "__main__":

    ## get splunk app version
    version = cli.getConfKeyValue("app", "launcher", "version")

    ## Check if honeydb.json file exists ##
    jsonfile = os.path.join(sys.path[0], "honeydb.json")

    try:
        with open(jsonfile, 'r') as argfile:
            data = argfile.read()
    except Exception:
        logger = setup_logger(logging.ERROR)
        logger.error("Sensor Data Error: HoneyDB args file missing: %s",
                     jsonfile)
        sys.exit()
        sys.exit()

    # parse file
    try:
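The fragment above begins mid-function; a plausible head for setup_logger, reconstructed from the tail shown (the logger name and log path are guesses, not from the source):

import logging
import logging.handlers
import os
import sys

def setup_logger(level):
    # Hypothetical reconstruction; only the handler/formatter tail is original.
    log_file = os.path.join(sys.path[0], "honeydb.log")
    logger = logging.getLogger("honeydb")
    logger.setLevel(level)
    file_handler = logging.handlers.RotatingFileHandler(log_file,
                                                        maxBytes=25000000,
                                                        backupCount=5)
    formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    return logger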
Example #7
    def run(self, params):
        stanza_name = params.get('name').split("://", 1)[1]
        username = params['username']
        session_key = self._input_config.session_key
        password = self.get_password(self.QUEUE_REALM, username, session_key)
        if password is None:
            self.logger.error("couldn't get password for %s", username)
            return

        worker = getConfKeyValue('server', 'general', 'serverName')
        client_cert = self.normalize_cert_path(params['client_cert'])
        target = params['uri']
        verify = self.normalize_verify(params['verify'])

        # When verify isn't a boolean it should be a CA bundle path, so
        # validate that file (the original checked client_cert here by mistake).
        if verify not in [True, False] and not os.path.isfile(verify):
            self.logger.error("invalid verify: %s", params['verify'])
            return

        if client_cert != '' and not os.path.isfile(client_cert):
            self.logger.error("invalid client cert: %s", params['client_cert'])
            return

        actions = self.get_actions(target, worker, password, verify,
                                   client_cert)
        alert_actions = self.get_local_alert_actions(session_key)
        self.logger.info("local alert actions: %s", alert_actions.keys())

        for action in actions:
            search_info = action.pop('info', None)
            action_name = action['action_name']
            self.logger.info("original action: %s", action_name)
            key = None
            try:
                key = ModularActionQutils.build_key({
                    'worker': worker,
                    'sid': action['sid'],
                    'action_name': action_name
                })
                payload = self.get_action_payload(action['settings'])
            except (KeyError, ValueError, ModularActionQueueBR):
                self.logger.exception("Invalid modaction received: %s",
                                      json.dumps(action, indent=2))
                if key:
                    self.dequeue(target, key, worker, password, verify,
                                 client_cert)
                continue

            sid = payload['sid']
            alert_action, normalized_maxtime = self.validate_action(
                action_name, alert_actions)
            if alert_action is None:
                self.logger.error("Modular action %s not found", action_name)
                self.dequeue(target, key, worker, password, verify,
                             client_cert)
                continue
            elif normalized_maxtime is None:
                self.logger.error("Invalid maxtime received: %s",
                                  alert_action['content']['maxtime'])
                self.dequeue(target, key, worker, password, verify,
                             client_cert)
                continue

            payload['server_uri'] = target
            file_name = os.path.basename(payload['results_file'])
            results_file = None
            with self.ensure_local_dispatch_dir(stanza_name,
                                                sid) as dispatch_dir:
                try:
                    results_file = self.fetch_results(target, key,
                                                      dispatch_dir, file_name,
                                                      worker, password, verify,
                                                      client_cert)
                except HTTPError as e:
                    self.logger.error("Failed to fetch results: %s", e)
                    if e.response.status_code == requests.codes.not_found:
                        self.dequeue(target, key, worker, password, verify,
                                     client_cert)

                if results_file is not None:
                    self.save_search_info(search_info, dispatch_dir)
                    python_cmd = self.get_python_cmd(action_name,
                                                     alert_actions)
                    payload['results_file'] = results_file
                    name = alert_action['name']
                    app = alert_action['acl']['app']
                    script = make_splunkhome_path(
                        ["etc", "apps", app, "bin",
                         "%s.py" % name])
                    cmd = [
                        make_splunkhome_path(["bin", "splunk"]), "cmd",
                        python_cmd, script, "--execute"
                    ]
                    try:
                        returncode = self.run_action(cmd, payload,
                                                     normalized_maxtime)
                        if returncode != 0:
                            self.logger.info("Modular alert exit with code %d",
                                             returncode)
                        self.dequeue(target, key, worker, password, verify,
                                     client_cert)
                    except Exception as e:
                        self.logger.error(
                            "Exception when running modular alert %s: %s",
                            stanza_name, e)
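normalize_verify isn't shown; from how verify is used above (a bool, or a CA-bundle path that run() validates with os.path.isfile), a hedged sketch of what it likely does. The name exists in the source but this body is an assumption:

from splunk.util import normalizeBoolean

def normalize_verify(value):
    # Hypothetical sketch: map conf strings to what requests' verify= expects.
    normalized = normalizeBoolean(value)
    if normalized in (True, False):
        return normalized
    # Not a recognizable boolean: treat it as a CA bundle path.
    return value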
Example #8
def firstTimeRun(args, fromCLI):
    """
    All of our first time run checks that used to happen in the former bin/splunk shell script.
    Does any number of things, such as config migration, directory validation, and so on.  For
    the most up to date info, read the code.  It tends to be fairly well documented.
    """

    paramReq = (
        ARG_DRYRUN,
        ARG_FRESHINST,
    )
    paramOpt = (ARG_LOGFILE, )
    comm.validateArgs(paramReq, paramOpt, args)

    isFirstInstall = comm.getBoolValue(ARG_FRESHINST, args[ARG_FRESHINST])
    isDryRun = comm.getBoolValue(ARG_DRYRUN, args[ARG_DRYRUN])
    retDict = {}

    # ...arg parsing done now.

    # NOTE:
    # none of the changes that are made in this function are subjected to isDryRun.
    # these things just have to be done - they're not considered to be migration.

    ##### if user doesn't have a ldap.conf, put our default in its place.
    if not os.path.exists(migration.PATH_LDAP_CONF):
        comm.copyItem(migration.PATH_LDAP_CONF_DEF, migration.PATH_LDAP_CONF)

    if not os.path.exists(PATH_AUDIT_PRIV_KEY) and not os.path.exists(
            PATH_AUDIT_PUB_KEY):
        kCmd = ["splunk", "createssl", "audit-keys"]
        kPriv, kPub, kDir = PATH_AUDIT_PRIV_KEY, PATH_AUDIT_PUB_KEY, PATH_AUDIT_KEY_DIR
        retCode = comm.runAndLog(kCmd + ["-p", kPriv, "-k", kPub, "-d", kDir])
        if retCode != 0:
            raise cex.FilePath("Could not create audit keys (returned %d)." %
                               retCode)

    try:
        keyScript = comm.getConfKeyValue("distsearch", "tokenExchKeys",
                                         "genKeyScript")
        keyCmdList = [
            os.path.expandvars(x.strip()) for x in keyScript.split(",")
            if len(x) > 0
        ]  # a,b,,d -> [a,b,d]
        pubFilename = comm.getConfKeyValue("distsearch", "tokenExchKeys",
                                           "publicKey")
        privateFilename = comm.getConfKeyValue("distsearch", "tokenExchKeys",
                                               "privateKey")
        certDir = comm.getConfKeyValue("distsearch", "tokenExchKeys",
                                       "certDir")
        certDir = os.path.expandvars(certDir)
        privateFilename = os.path.join(certDir, privateFilename)
        pubFilename = os.path.join(certDir, pubFilename)
        # privateFilename and pubFilename are already absolute (joined above),
        # so don't join certDir onto them a second time.
        if not (os.path.exists(privateFilename)
                or os.path.exists(pubFilename)):
            cmdList = keyCmdList + [
                "-p", privateFilename, "-k", pubFilename, "-d", certDir
            ]
            success = comm.runAndLog(cmdList) == 0
            if not success:
                logger.warning("Unable to generate distributed search keys.")
                #TK mgn 06/19/09
                raise cex.FilePath("Unable to generate distributed search keys.")  #TK mgn 06/19/09
    except Exception:
        logger.warning("Unable to generate distributed search keys.")
        #TK mgn 06/19/09
        raise

    if isFirstInstall:
        ##### if user doesn't have a ui modules dir, put our default in its place. only run this in this block - otherwise,
        #     in an upgrade, we run the same code during migration and show an incorrect warning ("oh noes dir is missing").
        if not os.path.exists(migration.PATH_UI_MOD_ACTIVE):
            comm.moveItem(migration.PATH_UI_MOD_NEW,
                          migration.PATH_UI_MOD_ACTIVE)
    ##### we're in an upgrade situation.
    else:
        ##### now do the actual migration (or fake it, if the user wants).
        #     upon faking, this function will throw an exception.
        if ARG_LOGFILE not in args:
            raise cex.ArgError("Cannot migrate without the '%s' parameter." %
                               ARG_LOGFILE)
        migration.autoMigrate(args[ARG_LOGFILE], isDryRun)

    ##### FTR succeeded.  johnvey's never gonna have eggs. T_T

    # --- done w/ FTR, now i can has bucket?? ---
    return retDict
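The list comprehension above implements the "a,b,,d -> [a,b,d]" comment: split the conf value on commas, drop empty elements, strip whitespace, and expand environment variables. A standalone illustration (the sample value is made up, modeled on the createssl command earlier in the example):

import os

keyScript = "$SPLUNK_HOME/bin/splunk, createssl,, audit-keys"
keyCmdList = [
    os.path.expandvars(x.strip()) for x in keyScript.split(",") if len(x) > 0
]
# With SPLUNK_HOME=/opt/splunk this yields:
# ['/opt/splunk/bin/splunk', 'createssl', 'audit-keys']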
Example #9
    def __init__(self, settings, logger, action_name='unknown'):
        """ Initialize ModularAction class.

        @param settings:    A modular action payload in JSON format.
        @param logger:      A logging instance.
                            Recommend using ModularAction.setup_logger.
        @param action_name: The action name.
                            action_name in payload will take precedence.
        """
        # used to compute duration
        self.start_timer = timer()

        # worker
        try:
            self.worker = getConfKeyValue('server', 'general', 'serverName')
        except Exception:
            self.worker = ''

        # settings
        self.settings = json.loads(settings)

        # configuration
        self.configuration = self.settings.get('configuration', {})
        if not isinstance(self.configuration, dict):
            self.configuration = {}

        # logger
        self.logger = logger
        # set loglevel to DEBUG if verbose
        verbose = normalizeBoolean(self.configuration.get('verbose', False))
        if verbose is True:
            self.logger.setLevel(logging.DEBUG)
            self.logger.debug('Log level set to DEBUG')

        # replay mode
        replay_mode = normalizeBoolean(
            self.settings.get('_cam_replay', False))
        if replay_mode is True:
            self.replay_mode = replay_mode
            self.logger.info('Replay mode detected')
        else:
            self.replay_mode = False

        # session key
        self.session_key = self.settings.get('session_key')

        # search id
        self.sid = self.settings.get('sid')
        self.sid_snapshot = ''
        # if sid contains rt_scheduler with snapshot-sid; drop snapshot-sid
        # sometimes self.sid may be an integer (1465593470.1228)
        try:
            rtsid = re.match(r'^(rt_scheduler.*)\.(\d+)(_?.*)$', self.sid)
            if rtsid:
                self.sid = rtsid.group(1)
                self.sid_snapshot = rtsid.group(2)
                # CIM-665: SHC realtime alerts have _guidval appended
                if rtsid.group(3):
                    self.sid += rtsid.group(3)
        except Exception:
            pass
        # rid_ntuple is a named tuple that represents
        # the three variables that change on a per-result basis
        self.rid_ntuple = collections.namedtuple('ID', ['orig_sid', 'rid', 'orig_rid'])
        # rids is a list of rid_ntuple values
        # automatically maintained by update() calls
        self.rids = []
        # current orig_sid based on update()
        # aka self.rids[-1].orig_sid
        self.orig_sid = ''
        # current rid based on update()
        # aka self.rids[-1].rid
        self.rid = ''
        # current orig_rid based on update()
        # aka self.rids[-1].orig_rid
        self.orig_rid = ''

        # results_file
        self.results_file = self.settings.get('results_file')
        # results_path
        results_path = ''
        if self.results_file:
            results_path = os.path.dirname(self.results_file)

        # digest_mode (per-result alerting)
        self.digest_mode = 1
        # per SPL-172319 - splunkd to provide result_id for per-result-alerting
        if truthy_strint_from_dict(self.settings, 'result_id'):
            self.digest_mode = 0
        # pre SPL-172319 behavior
        elif results_path.split(os.sep)[-1] == 'per_result_alert':
            self.digest_mode = 0

        # info/job
        self.info = {}
        self.info_file = None
        if self.results_file:
            # handle per-result alerting
            if self.digest_mode == 0:
                self.info_file = os.path.join(
                    os.path.dirname(results_path), 'info.csv')
            else:
                self.info_file = os.path.join(results_path, 'info.csv')
        self.job = {}

        self.search_name = self.settings.get('search_name')
        self.app = self.settings.get('app')
        self.user = self.settings.get('user') or self.settings.get('owner')

        # use | sendalert param.action_name=$action_name$
        self.action_name = self.configuration.get('action_name') or action_name

        # use sid to determine action_mode
        # basestring under Python 2; str is the Python 3 equivalent here.
        if isinstance(self.sid, str) and 'scheduler' in self.sid:
            self.action_mode = 'saved'
        else:
            self.action_mode = 'adhoc'

        self.action_status = ''

        # The 'result' object in settings is never used, so purge it to save memory.
        try:
            del self.settings['result']
        except Exception:
            pass

        # events
        self.events = []
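A worked illustration of the rt_scheduler sid handling above (sample sids invented for demonstration): the snapshot id is split off into sid_snapshot, and any SHC _guid suffix is re-appended to the base sid.

import re

for sid in ['rt_scheduler__admin__search_abc.123',
            'rt_scheduler__admin__search_abc.123_A1B2C3']:
    rtsid = re.match(r'^(rt_scheduler.*)\.(\d+)(_?.*)$', sid)
    base, snapshot, guid = rtsid.group(1), rtsid.group(2), rtsid.group(3)
    print(base + guid, snapshot)
# -> rt_scheduler__admin__search_abc 123
# -> rt_scheduler__admin__search_abc_A1B2C3 123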