def work(self):
    """Drain up to ``batch_size`` docs from the queue and bulk-index them.

    Each doc is tagged with the dated index name and type before being
    submitted via the elasticsearch ``bulk`` helper.  Sleeps 1s when the
    queue ran dry (fewer than a full batch was available).
    """
    logger.info("ElasticSearch Queue size is: %s " % self.queue.qsize())
    docs = []
    for i in xrange(self.batch_size):
        try:
            # Only the non-blocking get() can raise Empty; keep the
            # try body minimal.
            res = self.queue.get(block=False)
        except eventlet.queue.Empty:
            break
        format_date = self.date_from_context(res)
        res.update(
            _index='%s.%s' % (self.index_prefix, format_date),
            _type=self.index_type
        )
        docs.append(res)
    logger.info("Got %s elasticsearch docs" % len(docs))
    if docs:
        try:
            logger.debug(docs)
            start = utils.time_in_ms()
            count, extra = bulk(self.client, docs)
            time_taken = (utils.time_in_ms() - start)
            logger.info("Processed %s %s entries to elasticsearch. "
                        "Took %s ms." % (count, extra, time_taken))
        except elasticsearch.exceptions.RequestError:
            logger.exception("Error submitting to elasticsearch")
        finally:
            # The entries are already off the queue; mark them done even
            # on failure so queue.join() cannot block forever.  This is
            # consistent with the blueflood workers in this file.
            for i in xrange(len(docs)):
                self.queue.task_done()
    # Back off briefly when the queue did not yield a full batch.
    if len(docs) < self.batch_size:
        time.sleep(1)
def work(self):
    """Drain up to ``batch_size`` entries from the queue and ingest them
    into blueflood.

    Sleeps 1s when the queue ran dry (fewer than a full batch).
    """
    data = []
    logger.info("Blueflood Queue size is: %s " % self.queue.qsize())
    for i in xrange(self.batch_size):
        try:
            entry = self.queue.get(block=False)
        except eventlet.queue.Empty:
            break
        data.append(entry)
    logger.info("Got %s docs" % len(data))
    if data:
        try:
            start = utils.time_in_ms()
            self.client.ingest(data)
            time_taken = (utils.time_in_ms() - start)
            logger.info("Processed %s entries to blueflood. "
                        "Took %s ms" % (len(data), time_taken))
        except Exception:
            # Best-effort: log and drop the batch rather than crash the
            # worker loop.
            logger.exception("Error submitting to blueflood")
        finally:
            # task_done must run even when ingest fails: the entries are
            # already off the queue, so leaving them "unfinished" would
            # make queue.join() block forever.
            for i in xrange(len(data)):
                self.queue.task_done()
    # Sleep if we hit the empty queue
    if len(data) < self.batch_size:
        time.sleep(1)
def work(self):
    """Drain up to ``batch_size`` entries, ingest them into blueflood,
    then wait until the queue refills past a full batch or ``self.timeout``
    seconds have elapsed since this cycle started.
    """
    data = []
    logger.info("Blueflood Queue size is: %s " % self.queue.qsize())
    start_work = utils.time_in_s()
    for i in xrange(self.batch_size):
        try:
            entry = self.queue.get(block=False)
        except eventlet.queue.Empty:
            break
        data.append(entry)
    logger.info("Got %s docs" % len(data))
    if data:
        try:
            start = utils.time_in_ms()
            self.client.ingest(data)
            time_taken = (utils.time_in_ms() - start)
            logger.info("Processed %s entries to blueflood. "
                        "Took %s ms" % (len(data), time_taken))
        except Exception:
            # Best-effort: log and drop the batch rather than crash the
            # worker loop.
            logger.exception("Error submitting to blueflood")
        finally:
            # task_done must run even when ingest fails: the entries are
            # already off the queue, so leaving them "unfinished" would
            # make queue.join() block forever.
            for i in xrange(len(data)):
                self.queue.task_done()
    # Sleep till queue is filled enough or time out is reached
    while True:
        work_time_taken = (utils.time_in_s() - start_work)
        if (self.queue.qsize() > self.batch_size) or work_time_taken > self.timeout:
            break
        time.sleep(1)
def handle(self, data):
    """Convert incoming metric dicts into blueflood entries and enqueue them.

    Every metric in ``data['metrics']`` becomes a dict with a fixed 1-day
    TTL and a shared collection timestamp.  A payload without a 'metrics'
    key is treated as empty instead of raising TypeError.
    """
    # One timestamp for the whole payload, taken once up front.
    ms = utils.time_in_ms()
    # .get() may return None when the key is absent; fall back to an
    # empty list so the loop is a no-op rather than a crash.
    for d in data.get('metrics') or []:
        entry = {
            "ttlInSeconds": 86400,  # 1 day
            "collectionTime": ms,
            "metricName": d['name'],
            "metricValue": d['value']
        }
        # 'units' is optional on incoming metrics; only evaluate once.
        units = d.get('units')
        if units:
            entry.update(unit=units)
        self.queue.put(entry)