def __init__(self, servers, bulk_amount=100, bulk_refresh_time=30):
    """Open an Elasticsearch connection and set up bulk-write buffering.

    Args:
        servers: host list/spec passed straight through to `Elasticsearch(...)`.
        bulk_amount (int): number of queued documents that triggers a flush
            (becomes the BulkQueue `threshold`).
        bulk_refresh_time (int): max seconds between flushes regardless of
            queue size (becomes the BulkQueue `flush_time`).
    """
    self.es_connection = Elasticsearch(servers)
    # NOTE(review): ping() returns a bool but the result is discarded here,
    # so a dead cluster is not detected at construction time — confirm
    # whether this is intentional best-effort behavior.
    self.es_connection.ping()
    # Queue batches writes; it holds a back-reference to this connection
    # wrapper so it can flush through it.
    self.bulk_queue = BulkQueue(self, threshold=bulk_amount, flush_time=bulk_refresh_time)
    # presumably sets up module-level logging as a side effect of creating
    # each connection object — TODO confirm this is meant to run per-instance
    initLogger()
# seems to cause memory leaks.. # regular updates are disabled for now, # though we set the frequency anyway. options.plugincheckfrequency = getConfig('plugincheckfrequency', 120, options.configfile) # This is the full ARN that the s3 bucket lives under options.cloudtrail_arn = getConfig('cloudtrail_arn', 'cloudtrail_arn', options.configfile) if __name__ == '__main__': # configure ourselves parser = OptionParser() parser.add_option("-c", dest='configfile', default=sys.argv[0].replace('.py', '.conf'), help="configuration file to use") (options, args) = parser.parse_args() initConfig() initLogger(options) # open ES connection globally so we don't waste time opening it per message es = esConnect() # force a check for plugins and establish the plugin list pluginList = list() lastPluginCheck = datetime.now() - timedelta(minutes=60) pluginList, lastPluginCheck = checkPlugins(pluginList, lastPluginCheck) main()