def __init__(self, sample):
    """Splunk stream outputter: resolve the Splunk URL for this sample and,
    if no session key exists yet, log in over the REST API to obtain one.

    Raises IOError when authentication fails.
    """
    OutputPlugin.__init__(self, sample)

    # Logger already setup by config, just get an instance
    logger = logging.getLogger('eventgen')
    from eventgenconfig import EventgenAdapter
    adapter = EventgenAdapter(logger, {'module': 'SplunkStreamOutputPlugin', 'sample': sample.name})
    globals()['logger'] = adapter

    from eventgenconfig import Config
    globals()['c'] = Config()

    self._splunkUrl, self._splunkMethod, self._splunkHost, self._splunkPort = c.getSplunkUrl(self._sample)
    self._splunkUser = self._sample.splunkUser
    self._splunkPass = self._sample.splunkPass

    if self._sample.sessionKey is None:  # fixed: identity test, was "== None"
        try:
            myhttp = httplib2.Http(disable_ssl_certificate_validation=True)
            # NOTE(review): this logs the Splunk password at debugv level
            logger.debugv("Getting session key from '%s' with user '%s' and pass '%s'" % (self._splunkUrl + '/services/auth/login', self._splunkUser, self._splunkPass))
            response = myhttp.request(self._splunkUrl + '/services/auth/login', 'POST',
                                      headers={},
                                      body=urllib.urlencode({'username': self._splunkUser,
                                                             'password': self._splunkPass}))[1]
            self._sample.sessionKey = minidom.parseString(response).getElementsByTagName('sessionKey')[0].childNodes[0].nodeValue
            logger.debug("Got new session for splunkstream, sessionKey '%s'" % self._sample.sessionKey)
        except Exception:
            # Narrowed from a bare "except:" so KeyboardInterrupt/SystemExit propagate.
            logger.error("Error getting session key for non-SPLUNK_EMBEEDED for sample '%s'. Credentials are missing or wrong" % self._sample.name)
            raise IOError("Error getting session key for non-SPLUNK_EMBEEDED for sample '%s'. Credentials are missing or wrong" % self._sample.name)
    logger.debug("Retrieved session key '%s' for Splunk session for sample %s'" % (self._sample.sessionKey, self._sample.name))
def __init__(self, sample):
    """Output dispatcher for one sample: wires up logging/config singletons,
    records the sample's output settings and sizes the event queue."""
    self.__plugins = {}

    # Logger already setup by config, just get an instance
    root_logger = logging.getLogger('eventgen')
    from eventgenconfig import EventgenAdapter
    globals()['logger'] = EventgenAdapter(root_logger, {'module': 'Output', 'sample': sample.name})

    from eventgenconfig import Config
    globals()['c'] = Config()

    self._sample = sample
    self._app = sample.app
    self._outputMode = sample.outputMode
    self._queue = deque([])
    self._workers = []

    # A per-sample maxQueueLength of 0 means "defer to the output plugin's default".
    if self._sample.maxQueueLength == 0:
        self.MAXQUEUELENGTH = c.getPlugin(self._sample.name).MAXQUEUELENGTH
    else:
        self.MAXQUEUELENGTH = self._sample.maxQueueLength
def __init__(self, sample):
    """Battlecat outputter: validate the server list and build the
    connection pool.

    Raises ValueError when battlecatServers is not configured.
    """
    OutputPlugin.__init__(self, sample)
    # disable any "requests" warnings
    requests.packages.urllib3.disable_warnings()
    # Setup loggers from the root eventgen
    logger = logging.getLogger('eventgen')
    from eventgenconfig import EventgenAdapter
    adapter = EventgenAdapter(logger, {'module': 'BattlecatOutputPlugin', 'sample': sample.name})
    globals()['logger'] = adapter
    from eventgenconfig import Config
    globals()['c'] = Config()
    # Bind passed in samples to the outputter.
    # Fixed idiom: "not hasattr(...)" instead of comparing the result to False.
    if not hasattr(sample, 'battlecatServers'):
        logger.error('outputMode battlecat but battlecatServers not specified for sample %s' % self._sample.name)
        raise ValueError('outputMode battlecat but battlecatServers not specified for sample %s' % self._sample.name)
    self.battlecatServers = sample.battlecatServers
    logger.debug("Setting up the connection pool for %s in %s" % (self._sample.name, self._app))
    self.createConnections()
    logger.debug("Pool created.")
def __init__(self, sample):
    """Minimal generator plugin setup: expose the shared eventgen logger
    and Config singleton at module scope."""
    GeneratorPlugin.__init__(self, sample)
    # Logger already setup by config, just get an instance
    eventgen_logger = logging.getLogger('eventgen')
    globals()['logger'] = eventgen_logger
    from eventgenconfig import Config
    globals()['c'] = Config()
def __init__(self, sample):
    """HTTP Event Collector outputter: pick the output mode and payload
    size, validate the server list and build the connection pool.

    Raises NoServers when httpeventServers is not configured.
    """
    OutputPlugin.__init__(self, sample)
    # disable any "requests" warnings
    requests.packages.urllib3.disable_warnings()
    # set default output mode to round robin
    # Setup loggers from the root eventgen
    logger = logging.getLogger('eventgen')
    from eventgenconfig import EventgenAdapter
    adapter = EventgenAdapter(logger, {'module': 'HTTPEventOutputPlugin', 'sample': sample.name})
    globals()['logger'] = adapter
    from eventgenconfig import Config
    globals()['c'] = Config()
    # Bind passed in samples to the outputter.
    logger.debug("Outputmode: %s" % sample.httpeventOutputMode)
    self.lastsourcetype = None
    try:
        # Fixed idiom: "not hasattr(...)" instead of comparing to False.
        if not hasattr(sample, 'httpeventServers'):
            logger.error('outputMode httpevent but httpeventServers not specified for sample %s' % self._sample.name)
            raise NoServers('outputMode httpevent but httpeventServers not specified for sample %s' % self._sample.name)
        self.httpeventoutputmode = sample.httpeventOutputMode if hasattr(sample, 'httpeventOutputMode') and sample.httpeventOutputMode else 'roundrobin'
        self.httpeventmaxsize = sample.httpeventMaxPayloadSize if hasattr(sample, 'httpeventMaxPayloadSize') and sample.httpeventMaxPayloadSize else 10000
        logger.debug("Currentmax size: %s " % self.httpeventmaxsize)
        self.httpeventServers = sample.httpeventServers
        logger.debug("Setting up the connection pool for %s in %s" % (self._sample.name, self._app))
        self.createConnections()
        logger.debug("Pool created.")
        logger.debug("Finished init of httpevent plugin.")
    except NoServers:
        # Fixed: the missing-server error was previously swallowed by the
        # generic handler below even though it was explicitly raised above.
        raise
    except Exception as e:
        logger.exception(e)
def __init__(self, time, sample=None, interruptcatcher=None):
    """Timer thread that drives a sample's generation schedule.

    time -- interval in seconds between firings
    sample -- the sample to generate for, or None for an interrupt-catcher timer
    interruptcatcher -- when set, this timer only exists to catch signals
    """
    # Logger already setup by config, just get an instance
    logobj = logging.getLogger('eventgen')
    from eventgenconfig import EventgenAdapter
    if sample is None:  # fixed: identity test, was "== None"
        adapter = EventgenAdapter(logobj, {'module': 'Timer', 'sample': 'null'})
    else:
        adapter = EventgenAdapter(logobj, {'module': 'Timer', 'sample': sample.name})
    self.logger = adapter
    globals()['c'] = Config()
    # Fixed operator precedence: the conditional previously applied to the whole
    # formatted string, so a None sample logged the bare string "None".
    self.logger.debug('Initializing timer for %s' % (sample.name if sample is not None else "None"))
    self.time = time
    self.stopping = False
    self.interruptcatcher = interruptcatcher
    self.countdown = 0
    self.sample = sample
    if self.sample is not None:  # fixed: identity test, was "!= None"
        self.rater = c.getPlugin('rater.' + self.sample.rater)(self.sample)
    threading.Thread.__init__(self)
def __init__(self, sample):
    """Weblog generator: load the IP/host/useragent/status corpora once at
    init and cache their lengths for fast random indexing."""
    GeneratorPlugin.__init__(self, sample)
    # Logger already setup by config, just get an instance
    logger = logging.getLogger('eventgen')
    from eventgenconfig import EventgenAdapter
    adapter = EventgenAdapter(logger, {'module': 'WeblogGenerator', 'sample': sample.name})
    globals()['logger'] = adapter
    from eventgenconfig import Config
    globals()['c'] = Config()

    def _load(path):
        # Context manager guarantees the handle is closed even if readlines()
        # raises (the original open/close pairs leaked the handle on error).
        with open(path) as f:
            return [x.strip() for x in f.readlines()]

    self.external_ips = _load('tests/perf/weblog/external_ips.sample')
    self.external_ips_len = len(self.external_ips)
    self.webhosts = _load('tests/perf/weblog/webhosts.sample')
    self.webhosts_len = len(self.webhosts)
    self.useragents = _load('tests/perf/weblog/useragents.sample')
    self.useragents_len = len(self.useragents)
    self.webserverstatus = _load('tests/perf/weblog/webserverstatus.sample')
    self.webserverstatus_len = len(self.webserverstatus)
def __init__(self, sample):
    """File outputter: open the target file for append and record rotation
    settings.

    Raises ValueError when fileName is not configured.
    """
    OutputPlugin.__init__(self, sample)
    # Logger already setup by config, just get an instance
    logger = logging.getLogger('eventgen')
    from eventgenconfig import EventgenAdapter
    adapter = EventgenAdapter(logger, {'module': 'FileOutputPlugin', 'sample': sample.name})
    globals()['logger'] = adapter
    from eventgenconfig import Config
    globals()['c'] = Config()
    if sample.fileName is None:  # fixed: identity test, was "== None"
        logger.error('outputMode file but file not specified for sample %s' % self._sample.name)
        raise ValueError('outputMode file but file not specified for sample %s' % self._sample.name)
    self._file = sample.pathParser(sample.fileName)
    self._fileMaxBytes = sample.fileMaxBytes
    self._fileBackupFiles = sample.fileBackupFiles
    # Handle is held open for the plugin's lifetime; rotation/close happen elsewhere.
    self._fileHandle = open(self._file, 'a')
    self._fileLength = os.stat(self._file).st_size
    logger.debug("Configured to log to '%s' with maxBytes '%s' with backupCount '%s'" %
                 (self._file, self._fileMaxBytes, self._fileBackupFiles))
def __init__(self, sample):
    """Replay generator: load the sample file, drop events whose timestamps
    cannot be parsed, ensure chronological order, then set up backfill."""
    GeneratorPlugin.__init__(self, sample)
    self._sample = sample
    # Logger already setup by config, just get an instance
    logger = logging.getLogger('eventgen')
    from eventgenconfig import EventgenAdapter
    adapter = EventgenAdapter(logger, {'module': 'ReplayGenerator', 'sample': sample.name})
    globals()['logger'] = adapter
    from eventgenconfig import Config
    globals()['c'] = Config()
    self._currentevent = 0
    self._timeSinceSleep = datetime.timedelta()
    self._times = [ ]
    s = self._sample
    # Load sample from a file, using cache if possible, from superclass GeneratorPlugin
    s.loadSample()
    self._rpevents = s.sampleDict
    self._currentevent = 0
    # 8/18/15 CS Because this is not a queueable plugin, we can in a threadsafe way modify these data structures at init
    # Iterate through events and remove any events which do not match a configured timestamp,
    # log it and then continue on
    # NOTE(review): self._rpevents is rebound (not mutated) inside the loop, so
    # iteration safely continues over the original list.
    for e in self._rpevents:
        try:
            s.getTSFromEvent(e[s.timeField])
        except ValueError:
            # Drop every event whose raw text matches the unparseable one.
            self._rpevents = [x for x in self._rpevents if x['_raw'] != e['_raw']]
    # Quick check to see if we're sorted in time order, if not reverse
    if len(self._rpevents) > 1:
        ts1 = s.getTSFromEvent(self._rpevents[0][s.timeField])
        ts2 = s.getTSFromEvent(self._rpevents[1][s.timeField])
        td = ts2 - ts1
        x = 2
        # Make sure we're not all zero — scan forward until two events differ in time
        while td.days == 0 and td.seconds == 0 and td.microseconds == 0 and x < len(self._rpevents):
            ts2 = s.getTSFromEvent(self._rpevents[x][s.timeField])
            td = ts2 - ts1
            x += 1
        self.logger.debug("Testing timestamps ts1: %s ts2: %s" % (ts1.strftime('%Y-%m-%d %H:%M:%S'), ts2.strftime('%Y-%m-%d %H:%M:%S')))
        if td.days < 0:
            self.logger.debug("Timestamp order seems to be reverse chronological, reversing")
            self._rpevents.reverse()
    try:
        self.setupBackfill()
    except ValueError as e:
        self.logger.error("Exception during backfill for sample '%s': '%s'" % (s.name, str(e)))
def __init__(self, sample):
    """Per-day-volume generator: only needs the shared logger adapter and
    the Config singleton."""
    GeneratorPlugin.__init__(self, sample)
    # Logger already setup by config, just get an instance
    root_logger = logging.getLogger('eventgen')
    from eventgenconfig import EventgenAdapter
    globals()['logger'] = EventgenAdapter(root_logger, {'module': 'PerDayVolumeGenerator', 'sample': sample.name})
    from eventgenconfig import Config
    globals()['c'] = Config()
def __init__(self, sample):
    """Rater that converts a per-day volume target into per-interval counts."""
    # Logger already setup by config, just get an instance
    logger = logging.getLogger('eventgen')
    from eventgenconfig import EventgenAdapter
    adapter = EventgenAdapter(logger, {'module': 'PerDayVolume', 'sample': sample.name})
    self.logger = adapter
    from eventgenconfig import Config
    globals()['c'] = Config()
    # Fixed operator precedence: the conditional previously applied to the whole
    # formatted string rather than to sample.name alone.
    self.logger.debug('Starting PerDayVolumeRater for %s' % (sample.name if sample is not None else "None"))
    self._sample = sample
def __init__(self, sample):
    """Spool outputter: resolve the spool directory and file for this sample."""
    OutputPlugin.__init__(self, sample)
    # Logger already setup by config, just get an instance
    root_logger = logging.getLogger('eventgen')
    from eventgenconfig import EventgenAdapter
    globals()['logger'] = EventgenAdapter(root_logger, {'module': 'SpoolOutputPlugin', 'sample': sample.name})
    from eventgenconfig import Config
    globals()['c'] = Config()
    self._spoolDir = sample.pathParser(sample.spoolDir)
    self._spoolFile = sample.spoolFile
def __init__(self, name):
    """Sample definition: record the name and initialize token/setting state.

    9/2/15 CS Can't make logger an attribute of the object like we do in
    other classes because it borks deepcopy of the sample object.
    """
    root_logger = logging.getLogger('eventgen')
    from eventgenconfig import EventgenAdapter
    globals()['logger'] = EventgenAdapter(root_logger, {'module': 'Sample', 'sample': name})

    self.name = name
    self.tokens = []
    self._lockedSettings = []
    self.backfilldone = False

    # Import config
    from eventgenconfig import Config
    globals()['c'] = Config()
def __init__(self, sample, output_counter=None):
    """Splunk stream outputter: resolve the Splunk URL and, if needed, log
    in over the REST API to obtain a session key.

    Raises IOError when authentication fails.
    """
    OutputPlugin.__init__(self, sample, output_counter)
    from eventgenconfig import Config
    globals()['c'] = Config()
    self._splunkUrl, self._splunkMethod, self._splunkHost, self._splunkPort = c.getSplunkUrl(self._sample)  # noqa
    self._splunkUser = self._sample.splunkUser
    self._splunkPass = self._sample.splunkPass
    if not self._sample.sessionKey:
        try:
            myhttp = httplib2.Http(disable_ssl_certificate_validation=True)
            # NOTE(review): this logs the Splunk password at debug level
            self.logger.debug(
                "Getting session key from '%s' with user '%s' and pass '%s'" %
                (self._splunkUrl + '/services/auth/login', self._splunkUser, self._splunkPass))
            response = myhttp.request(self._splunkUrl + '/services/auth/login', 'POST', headers={},
                                      body=urllib.urlencode({'username': self._splunkUser,
                                                             'password': self._splunkPass}))[1]
            self._sample.sessionKey = minidom.parseString(response).getElementsByTagName('sessionKey')[0].childNodes[0].nodeValue
        except Exception:
            # Narrowed from a bare "except:" so KeyboardInterrupt/SystemExit propagate.
            self.logger.error(
                "Error getting session key for non-SPLUNK_EMBEEDED for sample '%s'." % self._sample.name
                + " Credentials are missing or wrong")
            raise IOError(
                "Error getting session key for non-SPLUNK_EMBEEDED for sample '%s'." % self._sample.name
                + "Credentials are missing or wrong")
    self.logger.debug("Retrieved session key '%s' for Splunk session for sample %s'" % (self._sample.sessionKey, self._sample.name))
def __init__(self, num, q1, q2, stop): from eventgenconfig import Config # Logger already setup by config, just get an instance logger = logging.getLogger('eventgen') from eventgenconfig import EventgenAdapter adapter = EventgenAdapter(logger, { 'module': 'GeneratorRealWorker', 'sample': 'null' }) globals()['logger'] = adapter globals()['c'] = Config() self.stopping = False self.working = False self._pluginCache = {} self.num = num c.generatorQueue = q1 c.outputQueue = q2 self.stop = stop # 10/9/15 CS Prime plugin cache to avoid concurrency bugs when creating local copies of samples time.sleep(random.randint(0, 100) / 1000) logger.debug("Priming plugin cache for GeneratorWorker%d" % num) with c.copyLock: while c.pluginsStarting.value() > 0: logger.debug( "Waiting for exclusive lock to start for GeneratorWorker%d" % num) time.sleep(random.randint(0, 100) / 1000) c.pluginsStarting.increment() for sample in c.samples: plugin = c.getPlugin('generator.' + sample.generator, sample) if plugin.queueable: p = plugin(sample) self._pluginCache[sample.name] = p c.pluginsStarting.decrement() c.pluginsStarted.increment()
def __init__(self, num):
    """Output worker: attach the shared logger/config and record its number."""
    from eventgenconfig import Config
    # Logger already setup by config, just get an instance
    root_logger = logging.getLogger('eventgen')
    from eventgenconfig import EventgenAdapter
    globals()['logger'] = EventgenAdapter(root_logger, {'module': 'OutputRealWorker', 'sample': 'null'})
    globals()['c'] = Config()
    self.stopping = False
    logger.debug("Starting OutputWorker %d" % num)
    self.num = num
def __init__(self, sample):
    """Base generator plugin: attach a logger adapter, bind the Config
    singleton, create this sample's Output, and take thread-safe copies of
    every sample's settings and tokens."""
    # Logger already setup by config, just get an instance
    logger = logging.getLogger('eventgen')
    from eventgenconfig import EventgenAdapter
    adapter = EventgenAdapter(logger, {'module': 'GeneratorPlugin', 'sample': sample.name})
    self.logger = adapter
    from eventgenconfig import Config
    globals()['c'] = Config()
    # # 2/10/14 CS Make a threadsafe copy of all of the samples for us to work on
    # with c.copyLock:
    # # 10/9/15 CS Moving this to inside the lock, in theory, there should only be one thread
    # # trying to start at once, going to try to ensure this is the case and hoping for no deadlocks
    # while c.pluginsStarting.value() > 0:
    # self.logger.debug("Waiting for exclusive lock to start for GeneratorPlugin '%s'" % sample.name)
    # time.sleep(0.1)
    # c.pluginsStarting.increment()
    self.logger.debug("GeneratorPlugin being initialized for sample '%s'" % sample.name)
    self._out = Output(sample)
    # # 9/6/15 Don't do any work until all the timers have started
    # while c.timersStarted.value() < len(c.sampleTimers):
    # self.logger.debug("Not all timers started, sleeping for GeneratorPlugin '%s'" % sample.name)
    # time.sleep(1.0)
    self._samples = { }
    # Shallow-copy each sample and its token list so this plugin can mutate
    # them independently; JSON-backed settings are re-applied explicitly
    # (copy.copy shares nested objects, hence the per-token copies above).
    for s in c.samples:
        news = copy.copy(s)
        news.tokens = [ copy.copy(t) for t in s.tokens ]
        for setting in c._jsonSettings:
            if setting in s.__dict__:
                setattr(news, setting, getattr(s, setting))
        self._samples[news.name] = news
    # self._samples = dict((s.name, copy.deepcopy(s)) for s in c.samples)
    self._sample = sample
def __init__(self, sample):
    """Base initializer for output plugins: record the sample's output
    settings, attach a logger adapter, bind Config and create the queue."""
    self._app = sample.app
    self._sample = sample
    self._outputMode = sample.outputMode

    # Logger already setup by config, just get an instance
    root_logger = logging.getLogger('eventgen')
    from eventgenconfig import EventgenAdapter
    self.logger = EventgenAdapter(root_logger, {'module': 'OutputPlugin', 'sample': sample.name})

    from eventgenconfig import Config
    globals()['c'] = Config()

    self.logger.debug("Starting OutputPlugin for sample '%s' with output '%s'" % (self._sample.name, self._sample.outputMode))
    self._queue = deque([])
# validate_conf(config, "secret_key") # validate_conf(config, "checkpoint_dir") except Exception, e: raise Exception, "Error getting Splunk configuration via STDIN: %s" % str( e) return config if __name__ == '__main__': if len(sys.argv) > 1: if sys.argv[1] == "--scheme": do_scheme() sys.exit(0) c = Config() # Logger is setup by Config, just have to get an instance logobj = logging.getLogger('eventgen') logobj.propagate = False # Prevent the log messages from being duplicated in the python.log file logobj.setLevel(logging.INFO) formatter = logging.Formatter('%(levelname)s %(message)s') streamHandler = logging.StreamHandler(sys.stderr) streamHandler.setFormatter(formatter) logobj.handlers = [] logobj.addHandler(streamHandler) from eventgenconfig import EventgenAdapter adapter = EventgenAdapter(logobj, {'sample': 'null', 'module': 'main'}) logger = adapter logobj.info('Starting eventgen')
def __init__(self, sample):
    """Legacy monolithic Output initializer: validates the sample's output
    mode (spool/file/splunkstream/stormstream) and sets up the matching
    sink — spool paths, a rotating file logger, or a Splunk session.

    Fixes over the original: 'is None' identity tests, bare excepts
    narrowed to 'except Exception', JSON splunkHost detection uses
    startswith('['), and the final session-key message now goes to the
    eventgen logger instead of the root logging module.
    """
    from eventgenconfig import Config
    self._c = Config()
    self._app = sample.app
    self._sample = sample.name
    self._outputMode = sample.outputMode
    self._queue = deque([])
    self._workers = []
    # Logger already setup by config, just get an instance
    logger = logging.getLogger('eventgen')
    globals()['logger'] = logger
    if self._outputMode in ('splunkstream', 'stormstream'):
        self._index = sample.index
        self._source = sample.source
        self._sourcetype = sample.sourcetype
        self._host = sample.host
        self._hostRegex = sample.hostRegex
    if self._outputMode == 'spool':
        self._spoolDir = sample.pathParser(sample.spoolDir)
        self._spoolFile = sample.spoolFile
    elif self._outputMode == 'file':
        if sample.fileName is None:
            logger.error('outputMode file but file not specified for sample %s' % self._sample)
            raise ValueError('outputMode file but file not specified for sample %s' % self._sample)
        self._file = sample.fileName
        self._fileMaxBytes = sample.fileMaxBytes
        self._fileBackupFiles = sample.fileBackupFiles
        # Dedicated rotating logger per output file; '%(message)s' writes raw events.
        self._fileLogger = logging.getLogger('eventgen_realoutput_' + self._file)
        formatter = logging.Formatter('%(message)s')
        handler = logging.handlers.RotatingFileHandler(filename=self._file, maxBytes=self._fileMaxBytes,
                                                       backupCount=self._fileBackupFiles)
        handler.setFormatter(formatter)
        self._fileLogger.addHandler(handler)
        self._fileLogger.setLevel(logging.DEBUG)
        logger.debug("Configured to log to '%s' with maxBytes '%s' with backupCount '%s'" %
                     (self._file, self._fileMaxBytes, self._fileBackupFiles))
    elif self._outputMode == 'splunkstream':
        if self._c.splunkEmbedded:
            try:
                import splunk.auth
                self._splunkUrl = splunk.auth.splunk.getLocalServerInfo()
                results = re.match(r'(http|https)://([^:/]+):(\d+).*', self._splunkUrl)
                self._splunkMethod = results.groups()[0]
                self._splunkHost = results.groups()[1]
                self._splunkPort = results.groups()[2]
            except Exception:
                import traceback
                trace = traceback.format_exc()
                logger.error('Error parsing host from splunk.auth.splunk.getLocalServerInfo() for sample %s. Stacktrace: %s' % (self._sample, trace))
                raise ValueError('Error parsing host from splunk.auth.splunk.getLocalServerInfo() for sample %s' % self._sample)
        else:
            if sample.splunkHost is None:
                logger.error('outputMode splunkstream but splunkHost not specified for sample %s' % self._sample)
                raise ValueError('outputMode splunkstream but splunkHost not specified for sample %s' % self._sample)
            elif sample.splunkHost.startswith('['):
                # JSON list of hosts; the original compared the whole string to
                # '[' which could never match a real JSON value.
                try:
                    sample.splunkHost = json.loads(sample.splunkHost)
                except Exception:
                    logger.error('splunkHost configured as JSON, but unparseable for sample %s' % self._sample)
                    raise ValueError('splunkHost configured as JSON, but unparseable for sample %s' % self._sample)
            if sample.splunkUser is None:
                logger.error('outputMode splunkstream but splunkUser not specified for sample %s' % self._sample)
                raise ValueError('outputMode splunkstream but splunkUser not specified for sample %s' % self._sample)
            if sample.splunkPass is None:
                logger.error('outputMode splunkstream but splunkPass not specified for sample %s' % self._sample)
                raise ValueError('outputMode splunkstream but splunkPass not specified for sample %s' % self._sample)
            self._splunkHost = sample.splunkHost
            self._splunkPort = sample.splunkPort
            self._splunkMethod = sample.splunkMethod
            self._splunkUser = sample.splunkUser
            self._splunkPass = sample.splunkPass
            self._splunkUrl = '%s://%s:%s' % (self._splunkMethod, self._splunkHost, self._splunkPort)
            try:
                myhttp = httplib2.Http(disable_ssl_certificate_validation=True)
                response = myhttp.request(self._splunkUrl + '/services/auth/login', 'POST', headers={},
                                          body=urllib.urlencode({'username': self._splunkUser,
                                                                 'password': self._splunkPass}))[1]
                self._c.sessionKey = minidom.parseString(response).getElementsByTagName('sessionKey')[0].childNodes[0].nodeValue
            except Exception:
                logger.error('Error getting session key for non-SPLUNK_EMBEEDED for sample %s' % self._sample)
                raise IOError('Error getting session key for non-SPLUNK_EMBEEDED for sample %s' % self._sample)
        # Fixed: was logging.debug (root logger) instead of the eventgen logger
        logger.debug("Retrieved session key '%s' for Splunk session for sample %s'" % (self._c.sessionKey, self._sample))
    elif self._outputMode == 'stormstream':
        self._accessToken = sample.accessToken
        self._projectID = sample.projectID
    logger.debug("Output init completed. Output: %s" % self)
# NOTE(review): fragment — the leading "else:" belongs to an if-statement in a
# signal-setup function that begins outside this chunk; indentation reconstructed.
    else:
        import signal
        # func is the shutdown handler supplied by the enclosing scope
        signal.signal(signal.SIGTERM, func)
        signal.signal(signal.SIGINT, func)


def handle_exit(sig=None, func=None):
    # Stop every sample timer cleanly on Ctrl-C / kill, then exit.
    print '\n\nCaught kill, exiting...'
    for sampleTimer in sampleTimers:
        sampleTimer.stop()
    sys.exit(0)


if __name__ == '__main__':
    debug = False
    c = Config()
    # Logger is setup by Config, just have to get an instance
    logger = logging.getLogger('eventgen')
    logger.info('Starting eventgen')
    # 5/6/12 CS use select to listen for input on stdin
    # if we timeout, assume we're not splunk embedded
    # Only support standalone mode on Unix due to limitation with select()
    if os.name != "nt":
        rlist, _, _ = select([sys.stdin], [], [], 5)
        if rlist:
            sessionKey = sys.stdin.readline().strip()
        else:
            sessionKey = ''
    else:
        sessionKey = sys.stdin.readline().strip()
# validate_conf(config, "name") # validate_conf(config, "key_id") # validate_conf(config, "secret_key") # validate_conf(config, "checkpoint_dir") except Exception, e: raise Exception, "Error getting Splunk configuration via STDIN: %s" % str(e) return config if __name__ == '__main__': if len(sys.argv) > 1: if sys.argv[1] == "--scheme": do_scheme() sys.exit(0) c = Config() # Logger is setup by Config, just have to get an instance logobj = logging.getLogger('eventgen') logobj.propagate = False # Prevent the log messages from being duplicated in the python.log file logobj.setLevel(logging.INFO) formatter = logging.Formatter('%(levelname)s %(message)s') streamHandler = logging.StreamHandler(sys.stderr) streamHandler.setFormatter(formatter) logobj.handlers = [ ] logobj.addHandler(streamHandler) from eventgenconfig import EventgenAdapter adapter = EventgenAdapter(logobj, {'sample': 'null', 'module': 'main'}) logger = adapter logobj.info('Starting eventgen')
args = parser.parse_args() # Allow passing of a Splunk app on the command line and expand the full path before passing up the chain if not os.path.exists(args.configfile): if 'SPLUNK_HOME' in os.environ: if os.path.isdir( os.path.join(os.environ['SPLUNK_HOME'], 'etc', 'apps', args.configfile)): args.configfile = os.path.join(os.environ['SPLUNK_HOME'], 'etc', 'apps', args.configfile) return args if __name__ == '__main__': args = parse_args() c = Config(args) # Logger is setup by Config, just have to get an instance logobj = logging.getLogger('eventgen') from eventgenconfig import EventgenAdapter adapter = EventgenAdapter(logobj, {'sample': 'null', 'module': 'main'}) logger = adapter logger.info('Starting eventgen') c.parse() t = Timer(1.0, interruptcatcher=True) for s in c.samples: if s.interval > 0 or s.mode == 'replay': logger.info("Creating timer object for sample '%s' in app '%s'" % (s.name, s.app))
# NOTE(review): fragment — tail of a config-override line parser, the
# update_config helper, and the start of __main__; indentation reconstructed
# and the final if/else is truncated in this chunk.
        value = match.group(3)
        # Coerce numeric override values; leave strings untouched on failure
        try:
            value = float(value)
        except:
            pass
        return {'section': section, 'key': key, 'value': value}
    return None


def update_config(replacements, config):
    # Apply each parsed [section]/key override onto the matching sample object.
    for replacement in replacements:
        for sample in config.samples:
            if replacement['section'] == sample.name:
                setattr(sample, replacement['key'], replacement['value'])


if __name__ == '__main__':
    c = Config()
    # Logger is setup by Config, just have to get an instance
    logobj = logging.getLogger('eventgen')
    from eventgenconfig import EventgenAdapter
    adapter = EventgenAdapter(logobj, {'sample': 'null', 'module': 'main'})
    logger = adapter
    logger.info('Starting eventgen')
    # 5/6/12 CS use select to listen for input on stdin
    # if we timeout, assume we're not splunk embedded
    # Only support standalone mode on Unix due to limitation with select()
    if os.name != "nt":
        rlist, _, _ = select([sys.stdin], [], [], 1)
        if rlist:
            sessionKey = sys.stdin.readline().strip()
        else:
# NOTE(review): fragment — tail of parse_args() argument definitions plus the
# start of __main__; indentation reconstructed, loop truncated in this chunk.
    group.add_argument("--disableOutputQueue", action="store_true", help="Disable reducer step")
    group.add_argument("--multiprocess", action="store_true", help="Use multiprocesing instead of threading")
    group.add_argument("--profiler", action="store_true", help="Turn on cProfiler")
    args = parser.parse_args()
    # Allow passing of a Splunk app on the command line and expand the full path before passing up the chain
    if not os.path.exists(args.configfile):
        if 'SPLUNK_HOME' in os.environ:
            if os.path.isdir(os.path.join(os.environ['SPLUNK_HOME'], 'etc', 'apps', args.configfile)):
                args.configfile = os.path.join(os.environ['SPLUNK_HOME'], 'etc', 'apps', args.configfile)
    return args


if __name__ == '__main__':
    args = parse_args()
    c = Config(args)
    # Logger is setup by Config, just have to get an instance
    logobj = logging.getLogger('eventgen')
    from eventgenconfig import EventgenAdapter
    adapter = EventgenAdapter(logobj, {'sample': 'null', 'module': 'main'})
    logger = adapter
    logger.info('Starting eventgen')
    c.parse()
    # Interrupt-catcher timer so signals are handled even before samples start
    t = Timer(1.0, interruptcatcher=True)
    for s in c.samples:
        if s.interval > 0 or s.mode == 'replay':
            logger.info("Creating timer object for sample '%s' in app '%s'" % (s.name, s.app) )
            t = Timer(1.0, s)
# NOTE(review): fragment — signal-handler registration from a function whose
# def is outside this chunk; indentation reconstructed.
    import signal
    # func is the shutdown handler supplied by the enclosing scope
    signal.signal(signal.SIGTERM, func)
    signal.signal(signal.SIGINT, func)


def handle_exit(sig=None, func=None):
    # Stop every sample timer cleanly on Ctrl-C / kill, then exit.
    print "\n\nCaught kill, exiting..."
    for sampleTimer in sampleTimers:
        sampleTimer.stop()
    sys.exit(0)


if __name__ == "__main__":
    debug = False
    c = Config()
    # Logger is setup by Config, just have to get an instance
    logger = logging.getLogger("eventgen")
    logger.info("Starting eventgen")
    # 5/6/12 CS use select to listen for input on stdin
    # if we timeout, assume we're not splunk embedded
    # Only support standalone mode on Unix due to limitation with select()
    if os.name != "nt":
        rlist, _, _ = select([sys.stdin], [], [], 5)
        if rlist:
            sessionKey = sys.stdin.readline().strip()
        else:
            sessionKey = ""
    else:
        sessionKey = sys.stdin.readline().strip()
def __init__(self, sample):
    """Transaction generator: load the customer and item corpora and
    precompute the lookup tables used when generating events.

    Fixes over the original: file handles closed via context managers
    (they were leaked), and the ten copy-then-delete key renames replaced
    by a single rename map with identical resulting keys.
    """
    GeneratorPlugin.__init__(self, sample)
    # Logger already setup by config, just get an instance
    logger = logging.getLogger('eventgen')
    from eventgenconfig import EventgenAdapter
    adapter = EventgenAdapter(logger, {'module': 'WindbagGenerator', 'sample': sample.name})
    globals()['logger'] = adapter
    from eventgenconfig import Config
    globals()['c'] = Config()

    samples_dir = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'samples')

    # Pull customers into a dictionary. The CSV uses vendor column names;
    # map them onto the lowercase keys the generator expects.
    renames = {'Address': 'address', 'Age': 'age', 'Sex': 'sex',
               'customerCity': 'city', 'customerMDN': 'phone',
               'customerState': 'state', 'customerZip': 'zip'}
    wanted = ('Address', 'Age', 'Sex', 'accountNumber', 'customerCity',
              'customerMDN', 'customerState', 'customerZip', 'firstName', 'lastName')
    self.customers = []
    with open(os.path.join(samples_dir, 'customer_master.sample'), 'r') as fh:
        for line in csv.DictReader(fh):
            self.customers.append(dict((renames.get(k, k), line[k]) for k in wanted))

    # Bring items into a dictionary
    self.items = []
    with open(os.path.join(samples_dir, 'items.sample'), 'r') as fh:
        for line in csv.reader(fh):
            self.items.append({'category': line[0], 'itemid': line[1],
                               'description': line[2], 'price': float(line[3])})

    # Weighted list: purchases are six times as likely as sales
    self.transType = ['purchase', 'purchase', 'purchase', 'purchase', 'purchase', 'purchase', 'sale']
    self.characterType = ['Milk Maid', 'Masked Mouse', 'Curd Cobbler', 'Whey Warrior', 'Fermented Friar']
    self.regions = ['Gorgonzolia', 'Camemberalot', 'Jarlsberg', 'Swiss Alps', 'Limburgerland']
    # 1-12 randomly-numbered hosts per server prefix
    self.servers = []
    for a in ['ace', 'bubbles', 'cupcake', 'dash']:
        for b in xrange(0, random.randint(1, 12)):
            self.servers.append('%s.%s.woc.com' % (a, b))
    self.typeRate = {'purchase': 1.0, 'sale': 0.2}
    self.maxItems = 12
    self.tps = 5.0
    # Cache lengths for fast random indexing at generation time
    self.customerslen = len(self.customers)
    self.itemslen = len(self.items)
    self.transtypelen = len(self.transType)
    self.charactertypelen = len(self.characterType)
    self.serverslen = len(self.servers)
    self.regionslen = len(self.regions)