Example no. 1
 def __init__(self, time, sample=None, config=None, genqueue=None, outputqueue=None, loggingqueue=None):
     # Logger already setup by config, just get an instance
     # setup default options
     self.profiler = config.profiler
     self.config = config
     self.sample = sample
     self.end = getattr(self.sample, "end", -1)
     self.endts = getattr(self.sample, "endts", None)
     self.generatorQueue = genqueue
     self.outputQueue = outputqueue
     self.time = time
     self.stopping = False
     self.countdown = 0
     self.executions = 0
     self.interval = getattr(self.sample, "interval", config.interval)
     logger.debug('Initializing timer for %s' % (sample.name if sample is not None else "None"))
     # load plugins
     if self.sample is not None:
         rater_class = self.config.getPlugin('rater.' + self.sample.rater, self.sample)
         self.rater = rater_class(self.sample)
         self.generatorPlugin = self.config.getPlugin('generator.' + self.sample.generator, self.sample)
         self.outputPlugin = self.config.getPlugin('output.' + self.sample.outputMode, self.sample)
         if self.sample.timeMultiple < 0:
             logger.error("Invalid setting for timeMultiple: {}, value should be positive".format(
                 self.sample.timeMultiple))
         elif self.sample.timeMultiple != 1:
             self.interval = self.sample.interval * self.sample.timeMultiple
             logger.debug("Adjusting interval {} with timeMultiple {}, new interval: {}".format(
                 self.sample.interval, self.sample.timeMultiple, self.interval))
     logger.info(
         "Starting '%s' generatorWorkers for sample '%s'" % (self.sample.config.generatorWorkers, self.sample.name))
 def startElement(self, name, attrs):
     if name == "title":
         logger.debug("in title")
         self.IN_TITLE = True
     if name == "username":
         logger.debug("in username")
         self.IN_USERNAME = True
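A minimal sketch of the timeMultiple adjustment in __init__ above, using a hypothetical Sample stub (attribute names taken from the snippet):

class Sample:
    interval = 60       # seconds between generator runs
    timeMultiple = 2    # replay events 2x slower

sample = Sample()
interval = sample.interval
if sample.timeMultiple < 0:
    raise ValueError("timeMultiple must be positive")
elif sample.timeMultiple != 1:
    interval = sample.interval * sample.timeMultiple
print(interval)  # 60 * 2 = 120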
Example no. 3
def index():
    """Index page."""
    logger.debug('FUNC::::::: app.auth.route.index')
    form = LoginForm()
    logger.debug('** Leaving FUNC::::::: app.auth.route.index')
    return render_template('auth/login.html', form=form)
Example no. 4
    def __init__(self,
                 population_size: int = 3,
                 model_reruns: int = 2,
                 number_of_models_tobe_changed_based_on_training_time: int = 1):
        _, __, self.input_shape, self.output_size = load_data(1)
        self.number_of_models_tobe_changed_based_on_training_time = number_of_models_tobe_changed_based_on_training_time
        self.number_of_models_per_generation = population_size
        self.model_reruns = model_reruns
        self.current_generation = 0
        self.metrics = pd.DataFrame(
            columns=['test_Accuracy', 'train_Accuracy', 'training_time', 'prev_model', 'generation'])
        logger.debug('Creating the initial models (generation #0)')
        self.models = dict()
        self.current_generation_models = []

        for i in range(self.number_of_models_per_generation):
            modelg0hp = create_random_hyper_parameter(output_size=self.output_size,
                                                      number_of_cnn_layers=1,
                                                      number_of_ann_layers=1)
            modelg0hp = CNN.change_name_to(modelg0hp, f'model_gen0_{i}')
            modelg0hp['prev_model'] = 'new'
            self.current_generation_models.append(modelg0hp)
            logger.debug(f'New hyper-parameter created:\n{pp.pformat(modelg0hp)}')
        self.train_current_generation()
        print('Done')
 def endElement(self, name):
     if name == "title":
         logger.debug("out title")
         self.IN_TITLE = False
     if name == "username":
         logger.debug("out title")
         self.IN_USERNAME = False
 def startElement(self, name, attrs):
     if name == "title":
         logger.debug("in title")
         self.IN_TITLE = True
     if name == "username":
         logger.debug("in username")
         self.IN_USERNAME = True
 def endElement(self, name):
     if name == "title":
         logger.debug("out title")
         self.IN_TITLE = False
     if name == "username":
         logger.debug("out title")
         self.IN_USERNAME = False
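For context, a self-contained sketch of how startElement/endElement handlers like the ones above are driven by the standard library's SAX parser (the handler class name is hypothetical):

import xml.sax
from io import StringIO

class PageHandler(xml.sax.ContentHandler):  # hypothetical wrapper for the methods above
    def __init__(self):
        super().__init__()
        self.IN_TITLE = False
        self.IN_USERNAME = False

    def startElement(self, name, attrs):
        if name == "title":
            self.IN_TITLE = True
        if name == "username":
            self.IN_USERNAME = True

    def endElement(self, name):
        if name == "title":
            self.IN_TITLE = False
        if name == "username":
            self.IN_USERNAME = False

xml.sax.parse(StringIO("<page><title>t</title><username>u</username></page>"),
              PageHandler())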
Example no. 8
    def __init__(self, sample, output_counter=None):

        # Override maxQueueLength to EventPerKey so that each flush
        # will generate one aws key
        if sample.awsS3EventPerKey:
            sample.maxQueueLength = sample.awsS3EventPerKey

        OutputPlugin.__init__(self, sample, output_counter)

        if not boto_imported:
            logger.error("There is no boto3 or botocore library available")
            return

        # disable any "requests" warnings
        requests.packages.urllib3.disable_warnings()

        # Bind passed in samples to the outputter.
        # getattr-with-fallback replaces the verbose hasattr/ternary chains
        self.awsS3compressiontype = getattr(sample, 'awsS3CompressionType', None) or None
        self.awsS3eventtype = getattr(sample, 'awsS3EventType', None) or 'syslog'
        self.awsS3objectprefix = getattr(sample, 'awsS3ObjectPrefix', None) or ''
        self.awsS3objectsuffix = getattr(sample, 'awsS3ObjectSuffix', None) or ''
        self.awsS3bucketname = sample.awsS3BucketName
        logger.debug("Setting up the connection pool for %s in %s" %
                     (self._sample.name, self._app))
        self._client = None
        self._createConnections(sample)
        logger.debug("Finished init of awsS3 plugin.")
Example no. 9
 def build_events(self,
                  eventsDict,
                  startTime,
                  earliest,
                  latest,
                  ignore_tokens=False):
     """Ready events for output by replacing tokens and updating the output queue"""
     # Replace tokens first so that perDayVolume evaluates the correct event length
     send_objects = self.replace_tokens(eventsDict,
                                        earliest,
                                        latest,
                                        ignore_tokens=ignore_tokens)
     try:
         self._out.bulksend(send_objects)
         self._sample.timestamp = None
     except Exception as e:
         logger.exception("Exception {} happened.".format(type(e)))
         raise
     try:
         # TODO: Change this logic so that we don't lose all events if an exception is hit (try/except/break?)
         endTime = datetime.datetime.now()
         timeDiff = endTime - startTime
         timeDiffFrac = "%d.%06d" % (timeDiff.seconds,
                                     timeDiff.microseconds)
         logger.debug("Interval complete, flushing feed")
         self._out.flush(endOfInterval=True)
         logger.debug(
             "Generation of sample '%s' in app '%s' completed in %s seconds."
             % (self._sample.name, self._sample.app, timeDiffFrac))
     except Exception as e:
         logger.exception("Exception {} happened.".format(type(e)))
         raise
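Aside: the "%d.%06d" formatting above drops timeDiff.days, so very long intervals would under-report; timedelta.total_seconds() is the simpler equivalent (a sketch):

import datetime

start = datetime.datetime.now()
# ... build and send events ...
elapsed = datetime.datetime.now() - start
print("completed in %.6f seconds" % elapsed.total_seconds())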
Example no. 10
 def _load_user(self):
     user = User.query.filter(
         (User.login_name == self.form.username.data)
         | (User.mobile == self.form.username.data)).first()
     if not user:
         logger.debug(u"{} not exist".format(self.form.username.data))
         raise UserNotExistedError()
     return user
Example no. 11
 def __init__(self, sample, output_counter=None):
     self._app = sample.app
     self._sample = sample
     self._outputMode = sample.outputMode
     self.events = None
     logger.debug("Starting OutputPlugin for sample '%s' with output '%s'" %
                  (self._sample.name, self._sample.outputMode))
     self._queue = deque()
     self.output_counter = output_counter
Example no. 12
 def save_model(self, model_hp: Dict):
     model_name = model_hp['name']
     self.models[model_name] = model_hp
     file_path = os.path.join('models', f'{model_name}.xml')
     with open(file_path, 'w') as f:
         f.write(dict2xml(
             model_hp,
             wrap='model_hyper_parameters',
             indent="\t"))
     logger.debug(f'{model_name} was saved in {file_path}')
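A quick sketch of what dict2xml (the PyPI package of that name) produces for a small hyper-parameter dict; the exact output shape is approximate:

from dict2xml import dict2xml

print(dict2xml({'name': 'model_gen0_0', 'learning_rate': 0.01},
               wrap='model_hyper_parameters', indent='\t'))
# <model_hyper_parameters>
#     <learning_rate>0.01</learning_rate>
#     <name>model_gen0_0</name>
# </model_hyper_parameters>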
Example no. 13
 def update_throughput(self, timestamp):
     # B/s, count/s
     delta_time = timestamp - self.current_time
     if delta_time <= 0:
         # Guard against division by zero when updates land on the same tick
         delta_time = 1
     self.throughput_volume = self.event_size_1_min / delta_time
     self.throughput_count = self.event_count_1_min / delta_time
     self.current_time = timestamp
     self.event_count_1_min = 0
     self.event_size_1_min = 0
     logger.debug("Current throughput is {} B/s, {} count/s".format(
         self.throughput_volume, self.throughput_count))
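Usage sketch; the owning class and its counter attributes are assumed from the method body (OutputCounter is a hypothetical name):

tracker = OutputCounter()
tracker.event_count_1_min = 600     # events accumulated since the last update
tracker.event_size_1_min = 60000    # bytes accumulated since the last update
tracker.update_throughput(tracker.current_time + 60)
# throughput_volume -> 60000 / 60 = 1000.0 B/s
# throughput_count  -> 600 / 60   = 10.0 count/s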
Example no. 14
    def flush(self, q):
        if len(q) > 0:
            logger.debug(
                "Flushing output for sample '%s' in app '%s' for queue '%s'" %
                (self._sample.name, self._app, self._sample.source))

            # Loop through all the messages and build the long string, write once for each flush
            # This may cause the file exceed the maxFileBytes a little bit but will greatly improve the performance
            msglist = ""
            try:
                for metamsg in q:
                    msg = metamsg['_raw']
                    if not msg.endswith('\n'):
                        msg += '\n'
                    msglist += msg

                self._fileHandle.write(msglist)
                self._fileLength += len(msglist)

                # If we're at the end of the max allowable size, shift all files
                # up a number and create a new one
                if self._fileLength > self._fileMaxBytes:
                    self._fileHandle.flush()
                    self._fileHandle.close()
                    if os.path.exists(self._file + '.' +
                                      str(self._fileBackupFiles)):
                        logger.debug('File Output: Removing file: %s.%s' %
                                     (self._file, self._fileBackupFiles))
                        os.unlink(self._file + '.' +
                                  str(self._fileBackupFiles))
                    for x in range(self._fileBackupFiles - 1, 0, -1):
                        logger.debug('File Output: Checking for file: %s.%s' %
                                     (self._file, x))
                        if os.path.exists(self._file + '.' + str(x)):
                            logger.debug(
                                'File Output: Renaming file %s to %s' %
                                (self._file + '.' + str(x),
                                 self._file + '.' + str(x + 1)))
                            os.rename(self._file + '.' + str(x),
                                      self._file + '.' + str(x + 1))
                    os.rename(self._file, self._file + '.1')
                    self._fileHandle = open(self._file, 'w')
                    self._fileLength = 0
            except IndexError:
                logger.warning(
                    "IndexError when writting for app '%s' sample '%s'" %
                    (self._app, self._sample.name))

            if not self._fileHandle.closed:
                self._fileHandle.flush()
            logger.debug("Queue for app '%s' sample '%s' written" %
                         (self._app, self._sample.name))

            self._fileHandle.close()
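The rotation logic above, isolated as a standalone helper for readability (a sketch under the same file.1..file.N naming scheme, not the plugin's API):

import os

def rotate(path, backups):
    # Drop the oldest backup, shift path.1..path.(backups-1) up one slot,
    # then move the live file to path.1 so a fresh one can be opened.
    oldest = '%s.%d' % (path, backups)
    if os.path.exists(oldest):
        os.unlink(oldest)
    for x in range(backups - 1, 0, -1):
        src = '%s.%d' % (path, x)
        if os.path.exists(src):
            os.rename(src, '%s.%d' % (path, x + 1))
    os.rename(path, path + '.1')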
Example no. 15
    def _validateSeed(self, value):
        """Callback to set random seed"""
        logger.debug("Validating random seed {}".format(value))
        try:
            value = int(value)
        except (TypeError, ValueError):
            logger.error("Could not parse int for seed {}".format(value))
            raise ValueError("Could not parse int for seed {}".format(value))

        logger.info("Using random seed {}".format(value))
        random.seed(value)
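Why seeding matters, in a few lines (standard library only):

import random

random.seed(42)
first = [random.random() for _ in range(3)]
random.seed(42)
second = [random.random() for _ in range(3)]
assert first == second  # same seed, same sequence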
Example no. 16
 def predict_event_size(self):
     try:
         self.sample.loadSample()
         logger.debug("File sample loaded successfully.")
     except TypeError:
         logger.error("Error loading sample file for sample '%s'" % self.sample.name)
         return 0
     total_len = sum(len(e['_raw']) for e in self.sample.sampleDict)
     sample_count = len(self.sample.sampleDict)
     if sample_count == 0:
         return 0
     return total_len / sample_count
Example no. 17
    def createConnections(self):
        self.serverPool = []
        if self.httpeventServers:
            for server in self.httpeventServers.get('servers'):
                if not server.get('address'):
                    msg = ('requested a connection to a httpevent server, '
                           'but no address specified for sample %s' % self._sample.name)
                    logger.error(msg)
                    raise ValueError(msg)
                if not server.get('port'):
                    msg = ('requested a connection to a httpevent server, '
                           'but no port specified for server %s' % server)
                    logger.error(msg)
                    raise ValueError(msg)
                if not server.get('key'):
                    msg = ('requested a connection to a httpevent server, '
                           'but no key specified for server %s' % server)
                    logger.error(msg)
                    raise ValueError(msg)
                if server.get('protocol') not in ('http', 'https'):
                    msg = ('requested a connection to a httpevent server, '
                           'but an invalid protocol is specified for server %s' % server)
                    logger.error(msg)
                    raise ValueError(msg)
                logger.debug(
                    "Validation passed, creating a requests object for server: %s"
                    % server.get('address'))

                setserver = {}
                setserver['url'] = "%s://%s:%s/services/collector" % (
                    server.get('protocol'), server.get('address'),
                    server.get('port'))
                setserver['header'] = "Splunk %s" % server.get('key')
                logger.debug("Adding server set to pool, server: %s" %
                             setserver)
                self.serverPool.append(setserver)
        else:
            raise NoServers(
                'outputMode %s but httpeventServers not specified for sample %s'
                % (self.name, self._sample.name))
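For reference, a sketch of the httpeventServers structure the validation above expects (all values are placeholders):

httpeventServers = {
    'servers': [{
        'protocol': 'https',
        'address': 'localhost',
        'port': '8088',
        'key': '00000000-0000-0000-0000-000000000000',  # HEC token placeholder
    }]
}
# yields url    'https://localhost:8088/services/collector'
# and    header 'Splunk 00000000-0000-0000-0000-000000000000'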
Example no. 18
    def _normal_login(self):
        login_failed_times_key = LOGIN_FAILED_TIMES_FORMAT.format(
            self.form.username.data)
        failed_times_raw = self.connection.get(login_failed_times_key)
        failed_times = int(failed_times_raw) if failed_times_raw else 0
        if failed_times >= settings.LOGIN_FAILED_MAXIMAL_TIMES:
            logger.debug(u"{} has been locked. source: {}".format(
                self.form.username.data, self.form.source.data))
            raise UserBannedError()

        info = self._get_user_info()
        if not info:
            raise UsernamePasswordError()
        return info
Example no. 19
 def run(self, output_counter=None):
     if output_counter is not None and hasattr(
             self.config, 'outputCounter') and self.config.outputCounter:
         # Use output_counter to calculate throughput
         self._out.setOutputCounter(output_counter)
     self.gen(count=self.count,
              earliest=self.start_time,
              latest=self.end_time,
              samplename=self._sample.name)
     # TODO: Make this somehow handle an output queue and support intervals and a master queue
     # Just double check to see if there's something in queue to flush out at the end of run
     if len(self._out._queue) > 0:
         logger.debug(
             "Queue is not empty, flush out at the end of each run")
         self._out.flush()
Example no. 20
 def flush(self, q):
     if len(q) > 0:
         logger.debug(
             "Flushing output for sample '%s' in app '%s' for queue '%s'" %
             (self._sample.name, self._app, self._sample.source))
         # Keep trying to open destination file as it might be touched by other processes
         data = ''.join(event['_raw'] for event in q if event.get('_raw'))
         while True:
             try:
                 with open(self.spoolPath, 'a') as dst:
                     dst.write(data)
                 break
             except (IOError, OSError):
                 time.sleep(0.1)
         logger.debug("Queue for app '%s' sample '%s' written" %
                      (self._app, self._sample.name))
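A bounded variant of the retry loop above (a sketch; the original retries forever, which can hang the output thread if the spool path never becomes writable):

import time

def append_with_retry(path, data, attempts=50, delay=0.1):
    # Retry transient open/write failures a fixed number of times (sketch).
    for _ in range(attempts):
        try:
            with open(path, 'a') as dst:
                dst.write(data)
            return True
        except (IOError, OSError):
            time.sleep(delay)
    return False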
Example no. 21
    def flush(self, endOfInterval=False):
        """
        Flushes output buffer, unless endOfInterval called, and then only flush if we've been called
        more than maxIntervalsBeforeFlush tunable.
        """
        # TODO: Fix interval flushing somehow with a queue, not sure I even want to support this feature anymore.
        '''if endOfInterval:
            logger.debugv("Sample calling flush, checking increment against maxIntervalsBeforeFlush")
            c.intervalsSinceFlush[self._sample.name].increment()
            if c.intervalsSinceFlush[self._sample.name].value() >= self._sample.maxIntervalsBeforeFlush:
                logger.debugv("Exceeded maxIntervalsBeforeFlush, flushing")
                flushing = True
                c.intervalsSinceFlush[self._sample.name].clear()
            else:
                logger.debugv("Not enough events to flush, passing flush routine.")
        else:
            logger.debugv("maxQueueLength exceeded, flushing")
            flushing = True'''

        # TODO: This is set this way just for the time being while I decide if we want this feature.
        flushing = True
        if flushing:
            q = self._queue
            logger.debug("Flushing queue for sample '%s' with size %d" % (self._sample.name, len(q)))
            self._queue = []
            outputer = self.outputPlugin(self._sample, self.output_counter)
            outputer.updateConfig(self.config)
            outputer.set_events(q)
            # When an outputQueue is used, the outputer needs to run single threaded, so it is put back
            # into the outputQueue for a single worker thread to execute. When an outputQueue is not used,
            # it can be run by multiple processes or threads, so there is no need to put the outputer back
            # into the queue; just execute it.
            # If the outputPlugin requires an output queue, use it regardless of the user's useOutputQueue setting:
            if self.outputPlugin.useOutputQueue or self.config.useOutputQueue:
                try:
                    self.outputQueue.put(outputer)
                except Full:
                    logger.warning("Output Queue full, looping again")
            else:
                if self.config.splunkEmbedded:
                    tmp = [len(s['_raw']) for s in q]
                    if len(tmp) > 0:
                        metrics_logger.info({
                            'timestamp': datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d %H:%M:%S'),
                            'sample': self._sample.name,
                            'events': len(tmp),
                            'bytes': sum(tmp)})
                    tmp = None
                outputer.run()
Example no. 22
    def _buildConfDict(self):
        """Build configuration dictionary that we will use """

        # Abstracts grabbing configuration from Splunk or directly from Configuration Files
        if self.splunkEmbedded and not STANDALONE:
            logger.info('Retrieving eventgen configurations from /configs/eventgen')
            import splunk.entity as entity
            self._confDict = entity.getEntities('configs/conf-eventgen', count=-1, sessionKey=self.sessionKey)
        else:
            logger.info('Retrieving eventgen configurations with ConfigParser()')
            # We assume we're in a bin directory and that there are default and local directories
            conf = ConfigParser()
            # Make case sensitive
            conf.optionxform = str
            conffiles = []
            # 2/1/15 CS  Moving to argparse way of grabbing command line parameters
            if self.configfile:
                if os.path.exists(self.configfile):
                    # 2/1/15 CS Adding a check to see whether we're instead passed a directory
                    # In which case we'll assume it's a splunk app and look for config files in
                    # default and local
                    if os.path.isdir(self.configfile):
                        conffiles = [
                            os.path.join(self.grandparentdir, 'default', 'eventgen.conf'),
                            os.path.join(self.configfile, 'default', 'eventgen.conf'),
                            os.path.join(self.configfile, 'local', 'eventgen.conf')]
                    else:
                        conffiles = [os.path.join(self.grandparentdir, 'default', 'eventgen.conf'), self.configfile]
            if len(conffiles) == 0:
                conffiles = [
                    os.path.join(self.grandparentdir, 'default', 'eventgen.conf'),
                    os.path.join(self.grandparentdir, 'local', 'eventgen.conf')]

            logger.debug('Reading configuration files for non-splunkembedded: %s' % conffiles)
            conf.read(conffiles)

            sections = conf.sections()
            ret = {}
            for section in sections:
                ret[section] = dict(conf.items(section))
                # For compatibility with Splunk's configs, need to add the app name to an eai:acl key
                ret[section]['eai:acl'] = {'app': self.grandparentdir.split(os.sep)[-1]}
            self._confDict = ret

        logger.debug("ConfDict returned %s" % pprint.pformat(dict(self._confDict)))
Example no. 23
    def setOutputMetadata(self, event):
        if self._sample.sampletype == 'csv' and (
                event['index'] != self._sample.index
                or event['host'] != self._sample.host
                or event['source'] != self._sample.source
                or event['sourcetype'] != self._sample.sourcetype):
            self._sample.index = event['index']
            self._sample.host = event['host']
            # Allow randomizing the host:
            if self._sample.hostToken:
                self._sample.host = self._sample.hostToken.replace(self._sample.host)

            self._sample.source = event['source']
            self._sample.sourcetype = event['sourcetype']
            logger.debug(
                "Setting CSV parameters. index: '%s' host: '%s' source: '%s' sourcetype: '%s'"
                % (self._sample.index, self._sample.host, self._sample.source,
                   self._sample.sourcetype))
Example no. 24
 def _validateTimezone(self, value):
     """Callback for complexSetting timezone which will parse and validate the timezone"""
     logger.debug("Parsing timezone {}".format(value))
     if value.find('local') >= 0:
         value = datetime.timedelta(days=1)
     else:
         try:
             # Separate the hours and minutes (note: minutes = the int value - the hour portion)
             if int(value) > 0:
                 mod = 100
             else:
                 mod = -100
             value = datetime.timedelta(hours=int(int(value) / 100.0), minutes=int(value) % mod)
         except (TypeError, ValueError):
             logger.error("Could not parse timezone {}".format(value))
             raise ValueError("Could not parse timezone {}".format(value))
     logger.debug("Parsed timezone {}".format(value))
     return value
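Worked example of the hour/minute arithmetic above for a negative offset (pure stdlib, mirroring the snippet's logic):

import datetime

value = "-0530"                      # UTC-05:30 as an HHMM offset string
mod = 100 if int(value) > 0 else -100
tz = datetime.timedelta(hours=int(int(value) / 100.0),  # int(-5.3) -> -5
                        minutes=int(value) % mod)       # -530 % -100 -> -30
print(tz)                            # -1 day, 18:30:00, i.e. minus 5h30m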
Example no. 25
    def __init__(self, sample, output_counter=None):
        OutputPlugin.__init__(self, sample, output_counter)

        if sample.fileName is None:
            msg = 'outputMode file but file not specified for sample %s' % self._sample.name
            logger.error(msg)
            raise ValueError(msg)

        self._file = sample.pathParser(sample.fileName)
        self._fileMaxBytes = sample.fileMaxBytes
        self._fileBackupFiles = sample.fileBackupFiles

        self._fileHandle = open(self._file, 'a')
        self._fileLength = os.stat(self._file).st_size
        logger.debug(
            "Configured to log to '%s' with maxBytes '%s' with backupCount '%s'"
            % (self._file, self._fileMaxBytes, self._fileBackupFiles))
Example no. 26
 def earliestTime(self):
     # First optimization, we need only store earliest and latest
     # as an offset of now if they're relative times
     if self._earliestParsed is not None:
         earliestTime = self.now() - self._earliestParsed
         logger.debug("Using cached earliest time: %s" % earliestTime)
     else:
         if self.earliest.strip().startswith(('+', '-')) or self.earliest == 'now':
             tempearliest = timeParser(self.earliest,
                                       timezone=self.timezone)
             temptd = self.now(realnow=True) - tempearliest
             self._earliestParsed = datetime.timedelta(
                 days=temptd.days, seconds=temptd.seconds)
             earliestTime = self.now() - self._earliestParsed
             logger.debug(
                 "Calulating earliestParsed as '%s' with earliestTime as '%s' and self.sample.earliest as '%s'"
                 % (self._earliestParsed, earliestTime, tempearliest))
         else:
             earliestTime = timeParser(self.earliest,
                                       timezone=self.timezone)
             logger.debug("earliestTime as absolute time '%s'" %
                          earliestTime)
     return earliestTime
Example no. 27
 def _transmitEvents(self, payloadstring):
     targetServer = []
     logger.debug("Transmission called with payloadstring: %s " %
                  payloadstring)
     if self.httpeventoutputmode == "mirror":
         targetServer = self.serverPool
     else:
         targetServer.append(random.choice(self.serverPool))
     for server in targetServer:
         logger.debug("Selected targetServer object: %s" % targetServer)
         url = server['url']
         headers = {}
         headers['Authorization'] = server['header']
         headers['content-type'] = 'application/json'
         try:
             payloadsize = len(payloadstring)
             # response = requests.post(url, data=payloadstring, headers=headers, verify=False)
             self.active_sessions.append(
                 self.session.post(url=url,
                                   data=payloadstring,
                                   headers=headers,
                                   verify=False))
         except Exception as e:
             logger.error("Failed for exception: %s" % e)
             logger.error(
                 "Failed sending events to url: %s  sourcetype: %s  size: %s"
                 % (url, self.lastsourcetype, payloadsize))
             logger.debug(
                 "Failed sending events to url: %s  headers: %s payload: %s"
                 % (url, headers, payloadstring))
             raise
Example no. 28
    def __init__(self, sample, output_counter=None):
        OutputPlugin.__init__(self, sample, output_counter)

        from eventgenconfig import Config
        globals()['c'] = Config()

        self._splunkUrl, self._splunkMethod, self._splunkHost, self._splunkPort = c.getSplunkUrl(self._sample)  # noqa
        self._splunkUser = self._sample.splunkUser
        self._splunkPass = self._sample.splunkPass

        if not self._sample.sessionKey:
            try:
                myhttp = httplib2.Http(disable_ssl_certificate_validation=True)
                logger.debug("Getting session key from '%s' with user '%s' and pass '%s'" %
                                  (self._splunkUrl + '/services/auth/login', self._splunkUser, self._splunkPass))
                response = myhttp.request(
                    self._splunkUrl + '/services/auth/login', 'POST', headers={}, body=urllib.urlencode({
                        'username':
                        self._splunkUser, 'password':
                        self._splunkPass}))[1]
                self._sample.sessionKey = minidom.parseString(response).getElementsByTagName(
                    'sessionKey')[0].childNodes[0].nodeValue
                logger.debug("Got new session for splunkstream, sessionKey '%s'" % self._sample.sessionKey)
            except Exception:
                logger.error("Error getting session key for non-SPLUNK_EMBEDDED for sample '%s'. "
                             "Credentials are missing or wrong" % self._sample.name)
                raise IOError("Error getting session key for non-SPLUNK_EMBEDDED for sample '%s'. "
                              "Credentials are missing or wrong" % self._sample.name)

        logger.debug("Retrieved session key '%s' for Splunk session for sample %s'" % (self._sample.sessionKey,
                                                                                            self._sample.name))
Example no. 29
    def getPlugin(self, name, s=None):
        """Return a reference to a Python object (not an instance) referenced by passed name"""
        '''
        APPPERF-263:
        make sure we look in __outputPlugins as well. For some reason we
        keep 2 separate dicts of plugins.
        '''
        plugintype = name.split(".")[0]
        if name not in self.plugins and name not in self.outputPlugins:
            # 2/1/15 CS If we haven't already seen the plugin, try to load it
            # Note, this will only work for plugins which do not specify config validation
            # parameters.  If they do, configs may not validate for user provided plugins.
            if s:
                if plugintype in ('generator', 'rater'):
                    plugin = getattr(s, plugintype)
                else:
                    plugin = getattr(s, 'outputMode')
                if plugin is not None:
                    logger.debug("Attempting to dynamically load plugintype '%s' named '%s' for sample '%s'" %
                                      (plugintype, plugin, s.name))
                    bindir = os.path.join(s.sampleDir, os.pardir, 'bin')
                    libdir = os.path.join(s.sampleDir, os.pardir, 'lib')
                    plugindir = os.path.join(libdir, 'plugins', plugintype)
                    targetplugin = PluginNotLoaded(bindir=bindir, libdir=libdir, plugindir=plugindir, name=plugin,
                                                   type=plugintype)
                    if targetplugin.name not in self.extraplugins:
                        self.extraplugins.append(targetplugin.name)
                        raise targetplugin
                    else:
                        raise FailedLoadingPlugin(name=plugin)

        # APPPERF-263: consult both __outputPlugins and __plugins
        if name not in self.plugins and name not in self.outputPlugins:
            raise KeyError('Plugin ' + name + ' not found')

        # return in order of precedence:  __plugins, __outputPlugins, None
        # Note: because of the above KeyError Exception we should never return
        # None, but it is the sane behavior for a getter method
        return self.plugins.get(name, self.outputPlugins.get(name, None))
Example no. 30
    def _createConnections(self, sample):
        try:
            if hasattr(sample, 'awsKeyId') and hasattr(sample, 'awsSecretKey'):
                self._client = boto3.client(
                    "s3",
                    region_name=sample.awsRegion,
                    aws_access_key_id=sample.awsKeyId,
                    aws_secret_access_key=sample.awsSecretKey)
                if self._client is None:
                    msg = '''
                    [your_eventgen_stanza]
                    awsKeyId = YOUR_ACCESS_KEY
                    awsSecretKey = YOUR_SECRET_KEY
                    '''

                    logger.error(
                        "Failed to init boto3 client; you should define correct "
                        "'awsKeyId' and 'awsSecretKey' in eventgen conf: %s" % msg)
                    raise Exception(msg)
            else:
                self._client = boto3.client('s3', region_name=sample.awsRegion)
        except Exception as e:
            logger.error("Failed for init boto3 client: exception =  %s" % e)
            raise
        # Try list bucket method to validate if the connection works
        try:
            self._client.list_buckets()
        except botocore.exceptions.NoCredentialsError:
            msg = '''
            [default]
            aws_access_key_id = YOUR_ACCESS_KEY
            aws_secret_access_key = YOUR_SECRET_KEY
            '''

            logger.error("Failed for init boto3 client, you should create "
                         "'~/.aws/credentials' with credential info %s" % msg)
            raise
        logger.debug("Init conn done, conn = %s" % self._client)
Example no. 31
    def getSplunkUrl(self, s):
        """
        If we're embedded in Splunk, get it from Splunk's Python libraries, otherwise get it from config.

        Returns a tuple of ( splunkUrl, splunkMethod, splunkHost, splunkPort )
        """
        if self.splunkEmbedded:
            try:
                import splunk.auth
                splunkUrl = splunk.auth.splunk.getLocalServerInfo()
                results = re.match(r'(http|https)://([^:/]+):(\d+).*', splunkUrl)
                splunkMethod = results.groups()[0]
                splunkHost = results.groups()[1]
                splunkPort = results.groups()[2]
            except Exception:
                import traceback
                trace = traceback.format_exc()
                logger.error(
                    'Error parsing host from splunk.auth.splunk.getLocalServerInfo() for sample %s.  Stacktrace: %s' %
                    (s.name, trace))
                raise ValueError(
                    'Error parsing host from splunk.auth.splunk.getLocalServerInfo() for sample %s' % s.name)
        else:
            # splunkMethod and splunkPort are defaulted so only check for splunkHost
            if s.splunkHost is None:
                logger.error("Splunk URL Requested but splunkHost not set for sample '%s'" % s.name)
                raise ValueError("Splunk URL Requested but splunkHost not set for sample '%s'" % s.name)

            splunkUrl = '%s://%s:%s' % (s.splunkMethod, s.splunkHost, s.splunkPort)
            splunkMethod = s.splunkMethod
            splunkHost = s.splunkHost
            splunkPort = s.splunkPort

        logger.debug(
            "Getting Splunk URL: %s Method: %s Host: %s Port: %s" % (splunkUrl, splunkMethod, splunkHost, splunkPort))
        return (splunkUrl, splunkMethod, splunkHost, splunkPort)
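The URL-parsing regex above in isolation, as a worked example:

import re

m = re.match(r'(http|https)://([^:/]+):(\d+).*', 'https://localhost:8089')
print(m.groups())  # ('https', 'localhost', '8089')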
Example no. 32
 def updateConfig(self, config):
     OutputPlugin.updateConfig(self, config)
     try:
         if not hasattr(self.config, 'httpeventServers'):
             if hasattr(self._sample, 'httpeventServers'):
                 self.config.httpeventServers = self._sample.httpeventServers
             else:
                 logger.error(
                     'outputMode %s but httpeventServers not specified for sample %s'
                     % (self.name, self._sample.name))
                 raise NoServers(
                     'outputMode %s but httpeventServers not specified for sample %s'
                     % (self.name, self._sample.name))
         # Default output mode (round robin) and max payload size, preferring
         # config over the sample, via getattr fallback chains
         self.httpeventoutputmode = (getattr(self.config, 'httpeventOutputMode', None)
                                     or getattr(self._sample, 'httpeventOutputMode', None)
                                     or 'roundrobin')
         self.httpeventmaxsize = (getattr(self.config, 'httpeventMaxPayloadSize', None)
                                  or getattr(self._sample, 'httpeventMaxPayloadSize', None)
                                  or 10000)
         logger.debug("Current max size: %s" % self.httpeventmaxsize)
         if isinstance(config.httpeventServers, str):
             self.httpeventServers = json.loads(config.httpeventServers)
         else:
             self.httpeventServers = config.httpeventServers
         logger.debug("Setting up the connection pool for %s in %s" %
                      (self._sample.name, self._app))
         self.createConnections()
         logger.debug("Pool created.")
         logger.debug("Finished init of %s plugin." % self.name)
     except Exception as e:
         logger.exception(str(e))
Example no. 33
 def _transmitEvents(self, payloadstring):
     logger.debug(
         "Transmission called with payloadstring event number: %d" %
         len(payloadstring))
     records = "".join(payloadstring)
     # Different key prefix for different log type
     now = datetime.datetime.utcnow()
     if self.awsS3eventtype == 'elbaccesslog':
         s3keyname = (self.awsS3objectprefix +
                      now.strftime("%Y%m%dT%H%MZ") + '_' +
                      str(uuid.uuid1()) + self.awsS3objectsuffix)
     elif self.awsS3eventtype == 's3accesslog':
         s3keyname = (self.awsS3objectprefix +
                      now.strftime("%Y-%m-%d-%H-%M-%S") + '-' +
                      str(uuid.uuid1()).replace('-', '').upper()[0:15] +
                      self.awsS3objectsuffix)
     else:
         s3keyname = (self.awsS3objectprefix + now.isoformat() +
                      str(uuid.uuid1()) + self.awsS3objectsuffix)
     logger.debug("Uploading %d bytes into s3 key: %s" %
                  (len(records), s3keyname))
     if self.awsS3compressiontype == 'gz':
         import StringIO
         import gzip
         out = StringIO.StringIO()
         with gzip.GzipFile(fileobj=out, mode="w") as f:
             f.write(records)
         records = out.getvalue()
     try:
         response = self._client.put_object(Bucket=self.awsS3bucketname,
                                            Key=s3keyname,
                                            Body=records)
         logger.debug("response = %s" % response)
     except Exception as e:
         logger.error("Failed for exception: %s" % e)
         logger.debug("Failed sending events to payload: %s" %
                      (payloadstring))
         raise
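The gzip branch above is Python 2 (StringIO.StringIO); a hedged Python 3 equivalent would use io.BytesIO and encode the records first:

import gzip
import io

records = "event one\nevent two\n"
buf = io.BytesIO()
with gzip.GzipFile(fileobj=buf, mode="wb") as f:
    f.write(records.encode("utf-8"))
compressed = buf.getvalue()  # bytes, suitable as Body= for put_object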
Example no. 34
def login(error=None):
    logger.debug('FUNC::::::: auth.route.login::: request Method is ::' + request.method)

    form = LoginForm()
    # if request.method == 'POST':
    if form.validate_on_submit():
        logger.debug('Performing logon')
        session['username'] = request.form['username']
        session['password'] = request.form['password']

        user = User.query.filter_by(username=request.form['username']).first()
        if user is not None and user.verify_password(request.form['password']):
            login_user(user)
            session['emaSession'] = ema.emaLogin()
            session['transaction_id'] = '2222222'
            return redirect(url_for('main.subscribers'))
        flash('Invalid Username and Password')
    logger.debug('** Leaving FUNC::::::: app.route.login')
    return render_template('auth/login.html', form=form)
Example no. 35
import os
import shutil

import docker
from docker.utils import create_host_config

from logging_config import logger
from redis_connection import db
from server_status import status

doc_user = status.docker_user
doc_pass = status.docker_password

SRC_DIR = os.path.dirname(__file__) or '.'
DOC_DIR = os.path.abspath(os.path.join(SRC_DIR, '..', 'build/docker'))
BUILD_DIR = os.path.abspath(os.path.join(DOC_DIR, '..'))
logger.debug([('SRC_DIR', SRC_DIR), ('DOC_DIR', DOC_DIR), ('BUILD_DIR', BUILD_DIR)])


# Initiate communication with build daemon
try:
    doc = docker.Client(base_url='unix://var/run/docker.sock', version='auto')
    # doc.build(path=DOC_DIR, tag="arch-devel", quiet=False, timeout=None)
except Exception as err:
    logger.error("Cant connect to Docker daemon. Error msg: %s", err)


def create_pkgs_host_config(cache, pkgbuild_dir, result):
    pkgs_hconfig = create_host_config(
        binds={
            cache:
                {
Example no. 36
def load_user(user_id):
    logger.debug('***FUNC::::::: app.models.load_user(user_id)')
    return User.query.get(int(user_id))  # required callback to load a user from its id
 def startElement(self, name, attrs):
     if name == "title":
         logger.debug("[ START ]: <title>")
         self.IN_TITLE = True
 def endElement(self, name):
     if name == "title":
         logger.debug("[ END ]: <title>\n")
         self.IN_TITLE = False