Example #1
0
 def writeevents(self,
                 index='summary',
                 host=None,
                 source=None,
                 fext='common_action_model'):
     """ The purpose of this method is to create arbitrary splunk events
     from the list of events in the ModularAction instance.

     Please use addevent() for populating the list of events in
     the ModularAction instance.

     @param index:  The index to write the events to.
                    Defaults to "summary".
     @param host:   The value of host the events should take on.
                    Defaults to None (auto).
     @param source: The value of source the events should take on.
                    Defaults to None (auto).
     @param fext:   The extension of the file to write out.
                    Files are written to $SPLUNK_HOME/var/spool/splunk.
                    File extensions can only contain word characters,
                    dash, and have a 200 char max.
                    "stash_" is automatically prepended to all extensions.
                    Defaults to "common_action_model" ("stash_common_action_model").
                    Only override if you've set up a corresponding props.conf
                    stanza to handle the extension.

     @return bool:  Returns True if all events were successfully written
                    Returns False if any errors were encountered
     """
     if self.events:
         ## sanitize file extension
         ## (raw string literal avoids the invalid "\w" escape warning)
         if not fext or not re.match(r'^[\w-]+$', fext):
             self.logger.warning(
                 'Requested file extension was ignored due to invalid characters'
             )
             fext = 'common_action_model'
         elif len(fext) > 200:
             self.logger.warning(
                 'Requested file extension was ignored due to length')
             fext = 'common_action_model'
         ## header
         header_line = ModularAction.DEFAULT_HEADER % (
             ModularAction.get_header_item('index', index,
                                           ModularAction.DEFAULT_INDEX),
             ModularAction.get_header_item('host', host),
             ModularAction.get_header_item('source', source))
         header_line = header_line.rstrip()
         ## process event chunks
         for chunk in (self.events[x:x + ModularAction.DEFAULT_CHUNK]
                       for x in xrange(0, len(self.events),
                                       ModularAction.DEFAULT_CHUNK)):
             ## initialize output string
             default_breaker = '\n' + ModularAction.DEFAULT_BREAKER
             fout = header_line + default_breaker + (
                 default_breaker).join(chunk)
             ## write output string
             ## fp is pre-initialized so the error handler below cannot
             ## raise NameError if filename/path construction itself fails
             fp = None
             try:
                 fn = '%s_%s.stash_%s' % (mktimegm(
                     time.gmtime()), random.randint(0, 100000), fext)
                 fp = make_splunkhome_path(['var', 'spool', 'splunk', fn])
                 ## obtain fh
                 with open(fp, 'w') as fh:
                     fh.write(fout)
             except Exception:
                 ## catch Exception (not bare except) so SystemExit and
                 ## KeyboardInterrupt still propagate
                 signature = 'Error obtaining file handle during makeevents'
                 self.message(signature, level=logging.ERROR, file_path=fp)
                 self.logger.exception(signature + ' file_path=%s' % fp)
                 return False
         self.message('Successfully created splunk events',
                      event_count=len(self.events))
         return True
     return False
    def writeevents(self, index='summary', host=None, source=None, fext='common_action_model'):
        """ The purpose of this method is to create arbitrary splunk events
        from the list of events in the ModularAction instance.

        Please use addevent() for populating the list of events in
        the ModularAction instance.

        @param index:  The index to write the events to.
                       Defaults to "summary".
        @param host:   The value of host the events should take on.
                       Defaults to None (auto).
        @param source: The value of source the events should take on.
                       Defaults to None (auto).
        @param fext:   The extension of the file to write out.
                       Files are written to $SPLUNK_HOME/var/spool/splunk.
                       File extensions can only contain word characters,
                       dash, and have a 200 char max.
                       "stash_" is automatically prepended to all extensions.
                       Defaults to "common_action_model" ("stash_common_action_model").
                       Only override if you've set up a corresponding props.conf
                       stanza to handle the extension.

        @return bool:  Returns True if all events were successfully written
                       Returns False if any errors were encountered
        """
        ## internal makeevents method for normalizing strings
        ## that will be used in the various headers we write out
        ## ("value" avoids shadowing the builtin input())
        def get_string(value, default):
            try:
                return value.replace('"', '_')
            except AttributeError:
                ## non-string (e.g. None) falls back to the default
                return default

        if self.events:
            ## sanitize file extension
            ## (raw string literal avoids the invalid "\w" escape warning)
            if not fext or not re.match(r'^[\w-]+$', fext):
                self.logger.warning('Requested file extension was ignored due to invalid characters')
                fext = 'common_action_model'
            elif len(fext) > 200:
                self.logger.warning('Requested file extension was ignored due to length')
                fext = 'common_action_model'
            ## header
            header_line = ModularAction.DEFAULT_HEADER % (
                            get_string(index, ModularAction.DEFAULT_INDEX),
                            get_string(host, ''),
                            get_string(source, ''))
            ## process event chunks
            for chunk in (self.events[x:x + ModularAction.DEFAULT_CHUNK]
                          for x in xrange(0, len(self.events), ModularAction.DEFAULT_CHUNK)):
                ## initialize output string
                default_breaker = '\n' + ModularAction.DEFAULT_BREAKER
                fout = header_line + default_breaker + (default_breaker).join(chunk)
                ## write output string
                ## fp is pre-initialized so the error handler below cannot
                ## raise NameError if filename/path construction itself fails
                fp = None
                try:
                    fn = '%s_%s.stash_%s' % (mktimegm(time.gmtime()), random.randint(0, 100000), fext)
                    fp = make_splunkhome_path(['var', 'spool', 'splunk', fn])
                    ## obtain fh
                    with open(fp, 'w') as fh:
                        fh.write(fout)
                except Exception:
                    ## catch Exception (not bare except) so SystemExit and
                    ## KeyboardInterrupt still propagate
                    signature = 'Error obtaining file handle during makeevents'
                    self.message(signature, level=logging.ERROR, file_path=fp)
                    self.logger.exception(signature + ' file_path=%s' % fp)
                    return False
            self.message('Successfully created splunk events', event_count=len(self.events))
            return True
        return False
Example #3
0
    def result2stash(self,
                     result,
                     dropexp=DEFAULT_DROPEXP,
                     mapexp=DEFAULT_MAPEXP,
                     addinfo=False):
        """ The purpose of this method is to formulate an event in stash format
        
        @param result:  The result dictionary to generate a stash event for.
        @param dropexp: A lambda expression used to determine whether a field
                        should be dropped or not.
                        Defaults to DEFAULT_DROPEXP.
        @param mapexp:  A lambda expression used to determine whether a field
                        should be mapped (prepended with "orig_") or not.
                        Defaults to DEFAULT_MAPEXP.
        @param addinfo: Whether or not to add search information to the event.
                        "info" includes search_now, info_min_time, info_max_time,
                        and info_search_time fields.
                        Requires that information was loaded into the ModularAction
                        instance via addinfo()
                        
        @return _raw:   Returns a string which represents the result in stash format.
        
        The following example has been broken onto multiple lines for readability:
        06/21/2016 10:00:00 -0700,
        search_name="Access - Brute Force Access Behavior Detected - Rule",
        search_now=0.000, info_min_time=1466528400.000, info_max_time=1466532600.000, info_search_time=1465296264.179,
        key1=key1val, key2=key2val, key3=key3val, key4=key4val1, key4=key4val2, ...
        """
        ## fall back to "never drop" / "never map" predicates when None
        dropexp = dropexp or (lambda x: False)
        mapexp = mapexp or (lambda x: False)
        ## drop a pre-existing orig_<key> when <key> itself is present and
        ## about to be mapped to orig_<key>, avoiding duplicate fields
        orig_dropexp = lambda x: x.startswith('orig_') and x[
            5:] in result and mapexp(x[5:])

        ## addinfo
        if addinfo:
            result['info_min_time'] = self.info.get('_search_et', '0.000')
            info_max_time = self.info.get('_search_lt')
            ## an unset/zero latest-time denotes an unbounded search window
            if not info_max_time or info_max_time == 0 or info_max_time == '0':
                info_max_time = '+Infinity'
            result['info_max_time'] = info_max_time
            result['info_search_time'] = self.info.get('_timestamp', '')

        ## construct _raw; the event time defaults to "now" (GMT epoch)
        _raw = '%s' % result.get('_time', mktimegm(time.gmtime()))
        if self.search_name:
            _raw += ', search_name="%s"' % self.search_name

        ## keys already emitted; prevents double output when both the
        ## __mv_<key> and plain <key> forms appear in result
        processed_keys = []
        for key, val in sorted(result.items()):
            vals = []
            ## if we have a proper mv field
            ## (Splunk encodes multivalues as $v1$;$v2$ in __mv_* fields)
            if (key.startswith('__mv_') and val
                    and isinstance(val, basestring) and val.startswith('$')
                    and val.endswith('$')):
                real_key = key[5:]
                vals = val[1:-1].split('$;$')
            ## if proper sv field
            elif val and not key.startswith('__mv_'):
                real_key = key
                vals = [val]

            ## if we have vals and key hasn't been processed
            ## and key is not to be dropped...
            ## (the "vals" truthiness check also guards real_key, which is
            ## only bound when one of the branches above matched)
            if (vals and (real_key not in processed_keys)
                    and not dropexp(real_key) and not orig_dropexp(real_key)):
                ## iterate vals
                for val in vals:
                    ## format literal '$' ($$ is the mv escape for $)
                    if key.startswith('__mv'):
                        val = val.replace('$$', '$')
                    ## escape quotes
                    if isinstance(val, basestring):
                        val = val.replace('"', r'\"')
                    ## check map
                    if mapexp(real_key):
                        _raw += ', %s="%s"' % ('orig_' + real_key.lstrip('_'),
                                               val)
                    else:
                        _raw += ', %s="%s"' % (real_key, val)
                processed_keys.append(real_key)

        return _raw
Example #4
0
File: ping.py  Project: reza/es_eventgens
    ## defaults
    ping = None
    if os.name == 'nt':
        ping_switch = '-n 4'
    else:
        ping_switch = '-c 4'

    orig_sid = None
    orig_rid = None
    host = None
    host_field = None
    MAX_RESULTS = 1
    max_results = 1
    host_validation = '^([A-Za-z0-9\.\_\-]+)$'

    the_time = util.mktimegm(time.gmtime())

    ## retrieve results and settings
    results, dummyresults, settings = splunk.Intersplunk.getOrganizedResults()
    logger.debug(settings)
    ## modular action hooks
    modaction_payload = {
        'sid': settings.get('sid', ''),
        'owner': settings.get('owner'),
        'app': settings.get('namespace')
    }
    modaction = ModularAction(json.dumps(modaction_payload),
                              logger,
                              action_name="ping")

    ## override defaults w/ opts below
    def result2stash(self, result, dropexp=DEFAULT_DROPEXP, mapexp=DEFAULT_MAPEXP, addinfo=False):
        """ The purpose of this method is to formulate an event in stash format
        
        @param result:  The result dictionary to generate a stash event for.
        @param dropexp: A lambda expression used to determine whether a field
                        should be dropped or not.
                        Defaults to DEFAULT_DROPEXP.
        @param mapexp:  A lambda expression used to determine whether a field
                        should be mapped (prepended with "orig_") or not.
                        Defaults to DEFAULT_MAPEXP.
        @param addinfo: Whether or not to add search information to the event.
                        "info" includes search_now, info_min_time, info_max_time,
                        and info_search_time fields.
                        Requires that information was loaded into the ModularAction
                        instance via addinfo()
                        
        @return _raw:   Returns a string which represents the result in stash format.
        
        The following example has been broken onto multiple lines for readability:
        06/21/2016 10:00:00 -0700,
        search_name="Access - Brute Force Access Behavior Detected - Rule",
        search_now=0.000, info_min_time=1466528400.000, info_max_time=1466532600.000, info_search_time=1465296264.179,
        key1=key1val, key2=key2val, key3=key3val, key4=key4val1, key4=key4val2, ...
        """
        ## fall back to "never drop" / "never map" predicates when None;
        ## orig_dropexp drops a pre-existing orig_<key> that is about to be
        ## overwritten by a mapped <key>
        dropexp      = dropexp or (lambda x: False)
        mapexp       = mapexp  or (lambda x: False)
        orig_dropexp = lambda x: x.startswith('orig_') and x[5:] in result and mapexp(x[5:])

        ## addinfo
        if addinfo:
            result['info_min_time']    = self.info.get('_search_et', '0.000')
            info_max_time              = self.info.get('_search_lt')
            ## an unset/zero latest-time denotes an unbounded search window
            if not info_max_time or info_max_time==0 or info_max_time=='0':
                info_max_time = '+Infinity'
            result['info_max_time']    = info_max_time
            result['info_search_time'] = self.info.get('_timestamp', '')

        ## construct _raw; the event time defaults to "now" (GMT epoch)
        _raw = '%s' % result.get('_time', mktimegm(time.gmtime()))
        if self.search_name:
            _raw += ', search_name="%s"' % self.search_name

        ## keys already emitted; prevents double output when both the
        ## __mv_<key> and plain <key> forms appear in result
        processed_keys = []
        for key, val in sorted(result.items()):
            vals = []
            ## if we have a proper mv field
            ## (Splunk encodes multivalues as $v1$;$v2$ in __mv_* fields)
            if (key.startswith('__mv_')
                and val and isinstance(val, basestring)
                and val.startswith('$') and val.endswith('$')):
                real_key = key[5:]
                vals     = val[1:-1].split('$;$')
            ## if proper sv field
            elif val and not key.startswith('__mv_'):
                real_key = key
                vals     = [val]

            ## if we have vals and key hasn't been processed
            ## and key is not to be dropped...
            ## (the "vals" truthiness check also guards real_key, which is
            ## only bound when one of the branches above matched)
            if (vals
                and (real_key not in processed_keys)
                and not dropexp(real_key)
                and not orig_dropexp(real_key)):
                ## iterate vals
                for val in vals:
                    ## format literal '$' ($$ is the mv escape for $)
                    if key.startswith('__mv'):
                        val = val.replace('$$', '$')
                    ## escape quotes
                    if isinstance(val, basestring):
                        val = val.replace('"', r'\"')
                    ## check map
                    if mapexp(real_key):
                        _raw += ', %s="%s"' % ('orig_' + real_key.lstrip('_'), val)
                    else:
                        _raw += ', %s="%s"' % (real_key, val)
                processed_keys.append(real_key)

        return _raw
Example #6
0
    def writeevents(self,
                    index="summary",
                    host=None,
                    source=None,
                    fext="common_action_model"):
        """The purpose of this method is to create arbitrary splunk events
        from the list of events in the ModularAction instance.

        Please use addevent() for populating the list of events in
        the ModularAction instance.

        @param index:  The index to write the events to.
                       Defaults to "summary".
        @param host:   The value of host the events should take on.
                       Defaults to None (auto).
        @param source: The value of source the events should take on.
                       Defaults to None (auto).
        @param fext:   The extension of the file to write out.
                       Files are written to $SPLUNK_HOME/var/spool/splunk.
                       File extensions can only contain word characters,
                       dash, and have a 200 char max.
                       "stash_" is automatically prepended to all extensions.
                       Defaults to "common_action_model" ("stash_common_action_model").
                       Only override if you've set up a corresponding props.conf
                       stanza to handle the extension.

        @return bool:  Returns True if all events were successfully written
                       Returns False if any errors were encountered
        """

        ## internal makeevents method for normalizing strings
        ## that will be used in the various headers we write out
        ## ("value" avoids shadowing the builtin input())
        def get_string(value, default):
            try:
                return value.replace('"', "_")
            except AttributeError:
                ## non-string (e.g. None) falls back to the default
                return default

        if self.events:
            ## sanitize file extension
            ## (raw string literal avoids the invalid "\w" escape warning)
            if not fext or not re.match(r"^[\w-]+$", fext):
                self.logger.warning(
                    "Requested file extension was ignored due to invalid characters"
                )
                fext = "common_action_model"
            elif len(fext) > 200:
                self.logger.warning(
                    "Requested file extension was ignored due to length")
                fext = "common_action_model"
            ## header
            header_line = ModularAction.DEFAULT_HEADER % (
                get_string(index, ModularAction.DEFAULT_INDEX),
                get_string(host, ""),
                get_string(source, ""),
            )
            ## process event chunks
            for chunk in (self.events[x:x + ModularAction.DEFAULT_CHUNK]
                          for x in range(0, len(self.events),
                                         ModularAction.DEFAULT_CHUNK)):
                ## initialize output string
                default_breaker = "\n" + ModularAction.DEFAULT_BREAKER
                fout = header_line + default_breaker + (
                    default_breaker).join(chunk)
                ## write output string
                ## fp is pre-initialized so the error handler below cannot
                ## raise NameError if filename/path construction itself fails
                fp = None
                try:
                    fn = "%s_%s.stash_%s" % (
                        mktimegm(time.gmtime()),
                        random.randint(0, 100000),
                        fext,
                    )
                    fp = make_splunkhome_path(["var", "spool", "splunk", fn])
                    ## obtain fh
                    with open(fp, "w") as fh:
                        fh.write(fout)
                except Exception:
                    ## catch Exception (not bare except) so SystemExit and
                    ## KeyboardInterrupt still propagate
                    signature = "Error obtaining file handle during makeevents"
                    self.message(signature, level=logging.ERROR, file_path=fp)
                    self.logger.exception(signature + " file_path=%s" % fp)
                    return False
            self.message("Successfully created splunk events",
                         event_count=len(self.events))
            return True
        return False
Example #7
0
def make_uba_alarm(modaction, result):
    """Transform a Splunk result row into a UBA alarm dictionary.

    @param modaction: ModularAction instance; supplies configuration
                      (dataformat, severity fallback) and search_name.
    @param result:    The result dictionary to transform.  Multivalued
                      (__mv_*) fields are collapsed to their first value,
                      mutating result in place.

    @return dict:     A new dictionary in UBA alarm format.
    """
    new_result = {}

    ## severity names map onto UBA's 1-10 numeric scale
    severity_map = {
        'informational': '1',
        'low': '3',
        'medium': '5',
        'high': '7',
        'critical': '9'
    }

    ## CIM field name -> UBA field name
    map_keys = {
        'bytes_in': 'bytesReceived',
        'bytes_out': 'bytesSent',
        'src_port': 'sourcePort',
        'src_user': '******',
        'dest_port': 'destinationPort',
        'user': '******',
        'duration': 'sessionDuration',
        'app': 'application',
        'file_name': 'filename',
        'file_path': 'filepath',
        'file_size': 'filesize',
        'process_name': 'process',
        'process_id': 'processId'
    }

    asset_keys = {'src': 'source', 'dest': 'destination', 'dvc': 'server'}

    ## handle mv by setting the non-mvkey to multival[0]
    ## (Splunk encodes multivalues as $v1$;$v2$ in __mv_* fields)
    mvkeys = [
        x for x in result if x.startswith('__mv_')
        and result[x].startswith('$') and result[x].endswith('$')
    ]
    for mvkey in mvkeys:
        key = mvkey[5:]
        result[key] = result[mvkey][1:-1].split('$;$')[0]

    ## timestamp (GMT epoch "now")
    new_result['timestamp'] = '%s' % mktimegm(time.gmtime())
    ## dataformat
    new_result['dataformat'] = modaction.configuration.get(
        'dataformat', 'unknown')
    ## evcls
    new_result['evcls'] = modaction.search_name or result.get(
        'search_name') or 'AdHoc UBA Alarm'
    ## evsubctg
    if result.get('signature'):
        new_result['evsubctg'] = result['signature']
    ## severity: named level -> numeric, else clamp to 1..10, else '5'
    severity = result.get('severity') or modaction.configuration.get(
        'severity') or '5'
    try:
        if severity in severity_map:
            severity = severity_map[severity]
        elif int(severity) < 1 or int(severity) > 10:
            severity = '5'
    except (ValueError, TypeError):
        ## non-numeric / unhashable severity falls back to medium
        severity = '5'
    new_result['severity'] = severity
    ## map_keys
    for key in map_keys:
        if key in result:
            new_result[map_keys[key]] = result[key]
    ## src/dest/dvc: prefer the first source in priority order
    for asset_key in asset_keys:
        ## IP: <asset>_ip wins over <asset>
        ip_sources = ['%s_ip' % asset_key, asset_key]
        for ip_key in ip_sources:
            possible_ip = result.get(ip_key)
            if possible_ip and (ipv4_re.match(possible_ip)
                                or ipv6_re.match(possible_ip)):
                new_result['%sIp' % asset_keys[asset_key]] = possible_ip
                ## first match wins; stop scanning further sources
                break

        ## DNS: <asset>_dns, then <asset>_nt_host, then <asset>;
        ## values that look like an IP or MAC address are rejected
        dns_sources = [
            '%s_dns' % asset_key,
            '%s_nt_host' % asset_key, asset_key
        ]
        for dns_key in dns_sources:
            possible_dns = result.get(dns_key)
            if possible_dns:
                ip_match = (ipv4_re.match(possible_dns)
                            or ipv6_re.match(possible_dns))
                mac_match = mac_re.match(possible_dns)
                if not ip_match and not mac_match:
                    new_result['%sDns' %
                               asset_keys[asset_key]] = possible_dns
                    ## first match wins; stop scanning further sources
                    break

    return new_result
Example #8
0
    def queuework(self, exit_strategy=None, tries=3, raise_all=False):
        """ This method will queue an action (instead of performing it)
        by inserting a row into the cam_queue kvstore collection

        For action authors wanting to support remote workers it is important
        that this method be called immediately after class initialization
        and prior to processing results_file and calling dowork().

        Whether or not queuework() returns or exits is determined by exit_strategy
        which can be specified explicitly or determined dynamically based on
        the list of _cam_workers.  For instance, if exit_strategy is None and
        "local" is present in _cam_worker, queuework() will return (not exit).

        @param exit_strategy:
            Whether or not queuework should exit the script
            None (dynamic), True, False

        @param tries:
            The number of times to try the queue operation

        @param raise_all:
            Whether or not to raise certain exceptions we would
            normally WARN for.
        """
        # 1. Load the common action model.
        # If we can't load _cam, "continue" unless raise_all=True
        try:
            _cam = json.loads(self.configuration.get('_cam') or '{}')
        except Exception:
            signature = 'Failed to load _cam'
            if raise_all:
                raise Exception(signature)
            else:
                self.message(signature, level=logging.WARN)
                return

        # 2. Determine if the action supports workers.
        # If it does not; return.
        supports_workers = normalizeBoolean(
            _cam.get('supports_workers', False))

        # the lack of worker support is not a "raisable" offense
        # action_name must be valid as well
        if (supports_workers is not True
                or not self.action_name
                or self.action_name == 'unknown'):
            self.message(
                'Action does not support workers',
                level=logging.WARN
            )
            return

        # 3. Load _cam_workers (workers list).
        # If we can't load _cam_workers, "continue" unless raise_all=True
        # (set() de-duplicates the worker names)
        try:
            _cam_workers = list(set(
                json.loads(self.configuration.get('_cam_workers') or '[]')
            ))
        except Exception:
            signature = 'Failed to load _cam_workers'
            if raise_all:
                raise Exception(signature)
            else:
                self.message(signature, level=logging.WARN)
                return

        # 4. Determine if queuework will exit or keep going
        # If exit is None, determine exit based on _cam_workers (dynamically)
        exit_strategy = normalizeBoolean(exit_strategy)

        # if exit_strategy is None and work to be done locally
        if (exit_strategy is None
                and ('local' in _cam_workers)):
            exit_strategy = False
        # if exit_strategy is None and work not to be done locally
        elif exit_strategy is None:
            exit_strategy = True
        # if exit_strategy is True, take at face value
        elif exit_strategy is True:
            pass
        # otherwise exit_strategy is False
        else:
            exit_strategy = False

        self.logger.debug('exit_strategy={0}'.format(exit_strategy))

        # Remove local (queueing only targets remote workers)
        _cam_workers = sorted(list(
            set(_cam_workers) - set(['local'])
        ))

        # If no workers or local-only workers; return
        if not _cam_workers:
            self.message('No workers specified')
            return

        # use unaltered sid from payload
        sid = str(self.settings.get('sid'))

        # 5. Get info
        # info is the base64-encoded addinfo() payload carried in the queue row
        info = ''
        info_data = self.addinfo()
        info = base64.b64encode(info_data.encode('utf-8')).decode('utf-8')

        # 6. Queue action
        # one record per remote worker, sharing the same sid/info/settings
        records = []
        record = {
            'time': mktimegm(time.gmtime()),
            'action_name': self.action_name,
            'sid': sid,
            'info': info,
            'settings': json.dumps(self.settings)
        }

        for worker in _cam_workers:
            new_record = record.copy()
            new_record['worker'] = worker
            records.append(new_record)

        # c holds the last response content; e is a sentinel exception that
        # is replaced by the real one if any attempt raises
        c = None
        e = Exception('Exception Impossible')

        for unused_x in range(0, tries):
            try:
                r, c = rest.simpleRequest(
                    self.CAM_QUEUE_URI,
                    sessionKey=self.session_key,
                    jsonargs=json.dumps(records)
                )
                c = c.decode('utf-8')

                if r.status == http_client.OK:
                    # generate pending messages on behalf
                    # of each worker
                    for worker in _cam_workers:
                        self.message(
                            'Successfully queued action',
                            status='pending',
                            worker=worker
                        )
                    if exit_strategy:
                        sys.exit(0)
                    else:
                        break

            except Exception as tmp_e:
                e = tmp_e
                if self.logger.isEnabledFor(logging.DEBUG):
                    self.logger.exception(tmp_e)

        # for/else: this branch runs only when every try was exhausted
        # without a break (i.e. the queue operation never succeeded)
        else:
            if exit_strategy:
                if c is not None:
                    raise Exception(c)
                else:
                    raise e
            else:
                if c is not None:
                    self.logger.error(c)
                # if we're in debug don't keep barking
                elif not self.logger.isEnabledFor(logging.DEBUG):
                    self.logger.error(e)

                for worker in _cam_workers:
                    self.message(
                        'Unable to queue action',
                        status='failure',
                        worker=worker
                    )
Example #9
0
    def result2stash(
            self,
            result,
            dropexp=default_dropexp,
            mapexp=default_mapexp,
            addinfo=False):
        """ The purpose of this method is to formulate an event in stash format

        @param result:  The result dictionary to generate a stash event for.
        @param dropexp: A lambda expression used to determine whether a field
                        should be dropped or not.
                        Defaults to default_dropexp.
        @param mapexp:  A lambda expression used to determine whether a field
                        should be mapped (prepended with "orig_") or not.
                        Defaults to default_mapexp.
        @param addinfo: Whether or not to add search information to the event.
                        "info" includes search_now, info_min_time, info_max_time,
                        and info_search_time fields.
                        Requires that information was loaded into the ModularAction
                        instance via addinfo()

        @return _raw:   Returns a string which represents the result in stash format.

        The following example has been broken onto multiple lines for readability:
        06/21/2016 10:00:00 -0700,
        search_name="Access - Brute Force Access Behavior Detected - Rule",
        search_now=0.000, info_min_time=1466528400.000,
        info_max_time=1466532600.000, info_search_time=1465296264.179,
        key1=key1val, key2=key2val, key3=key3val, key4=key4val1, key4=key4val2, ...
        """
        ## fall back to "never drop" / "never map" predicates when None
        dropexp = dropexp or (lambda x: False)
        mapexp = mapexp or (lambda x: False)

        def orig_dropexp(x):
            """ The purpose of this method is to determine whether we should
            drop an orig_key that will be overwritten by a key to be mapped.

            For instance, if we have orig_foo and foo in result, and foo is a
            mapped key, then orig_foo will be dropped.
            """
            return x.startswith('orig_') and x[5:] in result and mapexp(x[5:])

        # addinfo
        if addinfo:
            result['info_min_time'] = self.info.get('_search_et', '0.000')
            info_max_time = self.info.get('_search_lt')
            # an unset/zero latest-time denotes an unbounded search window
            if not info_max_time or info_max_time == 0 or info_max_time == '0':
                info_max_time = '+Infinity'
            result['info_max_time'] = info_max_time
            result['info_search_time'] = self.info.get('_timestamp', '')

        # construct _raw; the event time defaults to "now" (GMT epoch)
        _raw = '%s' % result.get('_time', mktimegm(time.gmtime()))
        if self.search_name:
            _raw += ', search_name="%s"' % self.search_name.replace('"', r'\"')

        # get key names, collapsing __mv_<key> and <key> to a single <key>
        result_keys = set([
            x[5:] if x.startswith('__mv_') else x
            for x in result
        ])

        # iterate keys
        for key in sorted(list(result_keys)):
            if dropexp(key) or orig_dropexp(key):
                continue

            # if key is MV
            # (Splunk encodes multivalues as $v1$;$v2$ in __mv_* fields;
            # the MV form takes precedence over the plain field)
            mv_key = '__mv_{0}'.format(key)
            if (mv_key in result
                    and isinstance(result[mv_key], basestring)  # string
                    and result[mv_key].startswith('$')  # prefix
                    and result[mv_key].endswith('$')):  # suffix
                vals = parse_mv(result[mv_key])
            # if key is SV
            elif key in result:
                vals = [result[key]]
            else:
                vals = []

            # iterate vals
            for v in vals:
                if isinstance(v, basestring) and v:
                    # escape slashes
                    v = v.replace('\\', '\\\\')
                    # escape quotes
                    v = v.replace('"', '\\"')
                elif isinstance(v, basestring) and len(vals) == 1:
                    # skip an empty single value entirely; empty members of
                    # a true multivalue still get emitted as ""
                    continue

                # check map
                if mapexp(key):
                    _raw += ', %s="%s"' % ('orig_' + key.lstrip('_'), v)
                else:
                    _raw += ', %s="%s"' % (key, v)

        return _raw
Example #10
0
def _nbtstat_cmd(host, is_ip, modaction):
    """Return the platform-appropriate netbios lookup command for host.

    @param host:      The hostname or IP address to look up.
    @param is_ip:     True if host matched the dotted-quad IP pattern.
    @param modaction: ModularAction instance used to report the one case
                      where no lookup is possible.

    @return list|None: argv-style command list, or None when the platform
                       cannot perform the requested lookup (reverse lookup
                       on macOS), in which case a failure message has
                       already been emitted via modaction.
    """
    if os.name == 'nt':
        ## Windows nbtstat: -A takes an IP address, -a takes a name
        return ['nbtstat', '-A', host] if is_ip else ['nbtstat', '-a', host]
    if sys.platform == 'darwin':
        if is_ip:
            ## smbutil cannot perform a reverse (IP -> name) lookup
            modaction.message('Unable to perform reverse netbios lookup',
                              status='failure',
                              level=logging.WARN)
            return None
        return ['smbutil', 'lookup', host]
    ## other POSIX platforms: use samba's nmblookup
    return ['nmblookup', '-A', host] if is_ip else ['nmblookup', host]


def do_nbtstat(argv,
               input_str=None,
               outputfile=sys.stdout,
               logger=logging.getLogger('dummy')):
    """Run a netbios lookup (nbtstat/smbutil/nmblookup) as a Splunk
    modular action and emit the results back to Splunk.

    @param argv:       Option tokens of the form name=value. Recognized:
                       host=/dest=, host_field=/dest_field=, orig_sid=,
                       orig_rid=, max_results=.
    @param input_str:  Raw intersplunk input passed to
                       getOrganizedResults. Defaults to None (stdin).
    @param outputfile: Stream for intersplunk output. Defaults to stdout.
    @param logger:     Logger instance. Defaults to a dummy logger.

    @return None. Results are written via outputfile; errors are
            reported through errorHandler and cause an early return.
    """
    ## defaults
    orig_sid = None
    orig_rid = None
    host = None
    host_field = None
    max_results_cap = 1
    max_results = 1
    ## raw strings: '\.' '\_' '\-' are escapes the re engine tolerates
    ## but Python 3 flags as invalid string escapes without the r-prefix
    host_validation = r'^([A-Za-z0-9\.\_\-]+)$'
    ip_rex = re.compile(r'^(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})$')

    the_time = util.mktimegm(time.gmtime())

    ## retrieve results and settings
    results, dummyresults, settings = splunk.Intersplunk.getOrganizedResults(
        input_str)
    logger.debug(settings)
    ## modular action hooks
    modaction_payload = {
        'sid': settings.get('sid', ''),
        'owner': settings.get('owner'),
        'app': settings.get('namespace')
    }
    modaction = ModularAction(json.dumps(modaction_payload),
                              logger,
                              action_name="nbtstat")

    ## override defaults w/ opts below
    if len(argv) > 1:
        for a in argv:
            ## split on the first '=' only so values may contain '='
            if a.startswith('host=') or a.startswith('dest='):
                host = a.split('=', 1)[1]
            elif a.startswith('host_field=') or a.startswith('dest_field='):
                host_field = a.split('=', 1)[1]
            elif a.startswith('orig_sid='):
                orig_sid = a.split('=', 1)[1]
            elif a.startswith('orig_rid='):
                orig_rid = a.split('=', 1)[1]
            elif a.startswith('max_results='):
                max_results = a.split('=', 1)[1]
    try:
        if int(max_results) > 0:
            max_results_cap = int(max_results)
    except (TypeError, ValueError):
        ## non-numeric max_results silently falls back to the default cap
        pass
    logger.info('max_results setting determined: %s', max_results_cap)

    handleError = errorHandler(modaction, outputfile, logger)
    ## validate presence of host/host_field
    if not host and not host_field:
        handleError('Must specify either host or host_field')
        return
    ## an explicit host overrides the search results with a single result
    if host:
        host_field = 'host'
        result = {'host': host}
        if orig_sid and orig_rid:
            result.update({'orig_sid': orig_sid, 'orig_rid': orig_rid})
        results = [result]
    ## process result(s)
    new_results = []
    rids = []
    results_processed = 0
    for num, result in enumerate(results):
        if results_processed >= max_results_cap:
            break
        ## set result id
        result.setdefault('rid', str(num))
        ## update and invoke
        modaction.update(result)
        modaction.invoke()
        ## validate host_field is present in result
        if host_field not in result:
            handleError('host_field not present in result set')
            return
        ## handle MV: multivalue fields arrive newline-delimited
        hosts = result[host_field].split('\n')
        ## iterate hosts (as MV is a possibility)
        for host in hosts:
            if results_processed >= max_results_cap:
                break
            results_processed += 1
            ## validate host value but don't exit
            if not re.match(host_validation, host):
                modaction.message('Invalid characters detected in host input',
                                  status='failure',
                                  level=logging.WARN)
                continue
            ip_match = ip_rex.match(host)
            ## set up new result which will be sent back to splunk
            new_result = {
                '_time': the_time,
                'sid': modaction.sid,
                'rid': modaction.rid,
                'dest': host
            }
            if modaction.orig_sid and modaction.orig_rid:
                new_result.update({
                    'orig_sid': modaction.orig_sid,
                    'orig_rid': modaction.orig_rid
                })
            ## determine and run the platform-specific lookup command
            nbtstat_cmd = _nbtstat_cmd(host, bool(ip_match), modaction)
            if nbtstat_cmd:
                try:
                    ## list argv (shell=False) avoids shell injection
                    proc = subprocess.Popen(nbtstat_cmd,
                                            stdout=subprocess.PIPE)
                    new_result['_raw'] = proc.communicate()[0]
                except Exception:
                    handleError('Exception when executing nbtstat command')
                    return
                ## add to successful rid list
                rids.append(
                    modaction.rid_ntuple(modaction.orig_sid, modaction.rid,
                                         modaction.orig_rid))
                ## add result for intersplunk output
                new_results.append(new_result)
                ## add result for event creation
                modaction.addevent(new_result['_raw'], 'nbtstat')

    if new_results:
        if modaction.writeevents(index='main', source='nbtstat'):
            modaction.message('Successfully created splunk event',
                              status='success',
                              rids=rids)
        else:
            modaction.message('Failed to create splunk event',
                              status='failure',
                              rids=rids,
                              level=logging.ERROR)

    splunk.Intersplunk.outputResults(new_results, outputfile=outputfile)