def get_analytics_db_purge_status(self, redis_list):
    """Look up the ANALYTICS_DB_PURGE hash across a list of redis instances.

    Walks redis_list (sequence of (ip, port) entries) and returns the
    purge-status hash from the first instance where the key exists.
    On a connection or auth failure the scan stops immediately and a
    {'status': 'failed', 'reason': ...} dict is returned.  Returns None
    when no instance holds purge state.
    """
    for endpoint in redis_list:
        try:
            conn = StrictRedisWrapper(endpoint[0], endpoint[1], db=0,
                                      password=self._redis_password)
            if conn.exists('ANALYTICS_DB_PURGE'):
                return conn.hgetall('ANALYTICS_DB_PURGE')
        except redis.exceptions.ConnectionError:
            self._logger.error("Exception: "
                               "Failure in connection to redis-server")
            return {
                'status': 'failed',
                'reason': 'Failure in connection to redis-server: ' +
                          endpoint[0]
            }
        except redis.exceptions.ResponseError:
            self._logger.error("Exception: "
                               "Redis authentication failed")
            return {
                'status': 'failed',
                'reason': 'Redis authentication failed'
            }
    return None
def set_analytics_db_purge_status(self, purge_id, purge_cutoff):
    """Record a newly started analytics-db purge in the local redis.

    Writes status, purge input, start timestamp and purge id into the
    ANALYTICS_DB_PURGE hash on 127.0.0.1:<redis_query_port>.  Returns
    None on success, or a {'status': 'failed', 'reason': ...} dict when
    the redis connection or authentication fails.
    """
    try:
        conn = StrictRedisWrapper(db=0, host='127.0.0.1',
                                  port=self._redis_query_port,
                                  password=self._redis_password)
        purge_fields = (
            ('status', 'running'),
            ('purge_input', str(purge_cutoff)),
            ('purge_start_time', UTCTimestampUsec()),
            ('purge_id', purge_id),
        )
        for field_name, field_value in purge_fields:
            conn.hset('ANALYTICS_DB_PURGE', field_name, field_value)
    except redis.exceptions.ConnectionError:
        self._logger.error("Exception: "
                           "Failure in connection to redis-server")
        return {
            'status': 'failed',
            'reason': 'Failure in connection to redis-server'
        }
    except redis.exceptions.ResponseError:
        self._logger.error("Exception: "
                           "Redis authentication failed")
        return {
            'status': 'failed',
            'reason': 'Redis authentication failed'
        }
    return None
def delete_db_purge_status(self):
    """Remove the ANALYTICS_DB_PURGE hash from the local redis.

    Best-effort cleanup: connection/auth failures are logged and
    swallowed, nothing is returned.
    """
    try:
        conn = StrictRedisWrapper(db=0, host='127.0.0.1',
                                  port=self._redis_query_port,
                                  password=self._redis_password)
        conn.delete('ANALYTICS_DB_PURGE')
    except redis.exceptions.ConnectionError:
        self._logger.error("Exception: "
                           "Failure in connection to redis-server")
    except redis.exceptions.ResponseError:
        self._logger.error("Exception: "
                           "Redis authentication failed")
def _get_agg_redis_instance(self, ip, port): agg_redis = self._agg_redis_map.get((ip, port)) if not agg_redis: agg_redis = StrictRedisWrapper(host=ip, port=port, password=self._rpass, db=7, socket_timeout=30) self._agg_redis_map[(ip, port)] = agg_redis return agg_redis
def run(self):
    """Periodic redis/collector health-check loop.

    Every self._freq seconds, for each redis UVE instance in
    self._redis_uve_map: (re)connect if needed, verify the known
    contrail-collector generator is still registered in the NGENERATORS
    set, and publish UP/DOWN transitions via ConnectionState.  Exits
    only on gevent greenlet kill.
    """
    exitrun = False
    while not exitrun:
        for rkey in self._redis_uve_map.keys():
            # rkey is indexed both as a tuple (rkey[0]/rkey[1]) and by
            # attribute (rkey.ip/rkey.port) — presumably a namedtuple;
            # TODO(review) confirm against the map's key type.
            rinst = self._redis_uve_map[rkey]
            # Remember the pid seen before this pass so the finally
            # clause can detect an UP/DOWN transition.
            old_pid = rinst.collector_pid
            try:
                # check if it is marked as deleted during sighup handling
                if rinst.deleted == True:
                    r_ip = rkey[0]
                    r_port = rkey[1]
                    del self._redis_uve_map[rkey]
                    ConnectionState.delete(ConnectionType.REDIS_UVE,\
                        r_ip+":"+str(r_port))
                    continue
                # Lazily (re)establish the redis connection; a fresh
                # handle invalidates any previously cached collector pid.
                if rinst.redis_handle is None:
                    rinst.redis_handle = StrictRedisWrapper(
                        host=rkey.ip, port=rkey.port,
                        password=self._redis_password, db=1,
                        socket_timeout=30)
                    rinst.collector_pid = None
                # check for known collector pid string
                # if there's a mismatch, we must read it again
                if rinst.collector_pid is not None:
                    if not rinst.redis_handle.sismember(
                            "NGENERATORS", rinst.collector_pid):
                        rinst.collector_pid = None
                # read the collector pid string
                # (generator ids look like "<ip>:<node-type>:<module>:..."
                #  — field index 2 is the module name)
                if rinst.collector_pid is None:
                    for gen in rinst.redis_handle.smembers("NGENERATORS"):
                        module = gen.split(':')[2]
                        if module == "contrail-collector":
                            rinst.collector_pid = gen
            except gevent.GreenletExit:
                self._logger.error('UVEServer Exiting on gevent-kill')
                exitrun = True
                break
            except Exception as e:
                # Any redis failure marks this instance unhealthy; the
                # next pass will reconnect from scratch.
                self._logger.error("redis/collector healthcheck failed %s for %s" \
                    % (str(e), str(rkey)))
                rinst.redis_handle = None
                rinst.collector_pid = None
            finally:
                # Update redis/collector health
                # Report only on edge transitions (pid appeared/vanished),
                # not on steady state.
                if old_pid is None and rinst.collector_pid is not None:
                    ConnectionState.update(ConnectionType.REDIS_UVE,\
                        rkey.ip + ":" + str(rkey.port),
                        ConnectionStatus.UP,
                        [rkey.ip+":"+str(rkey.port)])
                if old_pid is not None and rinst.collector_pid is None:
                    ConnectionState.update(ConnectionType.REDIS_UVE,\
                        rkey.ip + ":" + str(rkey.port),
                        ConnectionStatus.DOWN,
                        [rkey.ip+":"+str(rkey.port)])
        if not exitrun:
            gevent.sleep(self._freq)
def _get_agg_redis_instance(self, ip, port, redis_agg_db): agg_redis = self._agg_redis_map.get((ip, port, redis_agg_db)) if not agg_redis: agg_redis = StrictRedisWrapper(host=ip, port=port, password=self._rpass, db=redis_agg_db, socket_timeout=30, **self._redis_ssl_params) self._agg_redis_map[(ip, port, redis_agg_db)] = agg_redis return agg_redis
def _run(self):
    """Partition stream worker: subscribe to this partition's AGPARTPUB
    channel, replay existing state via syncpart(), then deliver each
    published UVE update (optionally with its content) to self._cb.

    Reconnects with a 2s pause after any error; exits only on greenlet
    kill.  Non-connection exceptions are rate-limited to one full
    traceback per 60 seconds.
    """
    lredis = None
    pb = None
    pause = False
    self.redis_prev_time = 0
    while True:
        try:
            if pause:
                gevent.sleep(2)
                pause = False
            lredis = StrictRedisWrapper(host=self._pi.ip_address,
                                        port=self._pi.port,
                                        password=self._rpass,
                                        db=self._pi.redis_agg_db,
                                        socket_timeout=30)
            pb = lredis.pubsub()
            inst = self._pi.instance_id
            part = self._partno
            pb.subscribe('AGPARTPUB:%s:%d' % (inst, part))
            # Replay current partition contents before consuming the
            # live pubsub stream.
            self.syncpart(lredis)
            while True:
                message = pb.get_message()
                if not message:
                    gevent.sleep(0.001)
                    continue
                # Ignore subscribe/unsubscribe confirmations etc.
                if message["type"] != "message":
                    gevent.sleep(0)
                    continue
                dataline = message["data"]
                try:
                    elems = json.loads(dataline)
                except:
                    self._logger.error("AggUVE Parsing failed: %s" %
                        str(message))
                    gevent.sleep(0)
                    continue
                else:
                    self._logger.info("AggUVE loading: %s" % str(elems))
                # Pipeline reads are only needed when we track UVE
                # contents (streaming case).
                if self._content:
                    ppe = lredis.pipeline()
                lelems = []
                for elem in elems:
                    table, barekey = elem["key"].split(":", 1)
                    # Apply table / key-pattern / type filters; elems
                    # failing any filter are dropped before queuing reads.
                    if self._tablefilt:
                        if not table in self._tablefilt:
                            gevent.sleep(0)
                            continue
                    if self._patterns:
                        kfilter_match = False
                        for pattern in self._patterns:
                            if pattern.match(barekey):
                                kfilter_match = True
                                break
                        if not kfilter_match:
                            gevent.sleep(0)
                            continue
                    if self._cfilter:
                        if elem["type"] not in self._cfilter:
                            gevent.sleep(0)
                            continue
                    lelems.append(elem)
                    if self._content:
                        # This UVE was deleted
                        if elem["type"] is None:
                            ppe.exists("AGPARTVALUES:%s:%d:%s" % \
                                (inst, part, elem["key"]))
                        else:
                            ppe.hget("AGPARTVALUES:%s:%d:%s" % \
                                (inst, part, elem["key"]), elem["type"])
                # We need to execute this pipeline read only if we are
                # keeping track of UVE contents (streaming case)
                if self._content:
                    pperes = ppe.execute()
                idx = 0
                for elem in lelems:
                    key = elem["key"]
                    typ = elem["type"]
                    vdata = None
                    if not typ is None:
                        if self._content:
                            if not key in self._uvecache:
                                self._uvecache[key] = {}
                            # pperes[idx] is the hget result queued for
                            # this elem above.
                            vjson = pperes[idx]
                            if vjson is None:
                                # Value vanished between publish and read:
                                # treat as a delete of this type.
                                vdata = None
                                if typ in self._uvecache[key]:
                                    del self._uvecache[key][typ]
                            else:
                                vdata = json.loads(vjson)
                                self._uvecache[key][typ] = vjson
                                if self._token is not None:
                                    if not \
                                            self.is_uve_read_permitted(\
                                            self._uvecache[key]):
                                        # NOTE(review): this `continue`
                                        # skips the `idx += 1` below, so
                                        # subsequent elems would read the
                                        # wrong pipeline slot — confirm
                                        # whether idx should be advanced
                                        # before skipping.
                                        gevent.sleep(0)
                                        continue
                        else:
                            # Not tracking content: signal "changed" with
                            # an empty dict.
                            vdata = {}
                    self._cb(self._partno, self._pi, key, typ, vdata)
                    idx += 1
                gevent.sleep(0)
        except gevent.GreenletExit:
            break
        except (redis.exceptions.ConnectionError,
                redis.exceptions.TimeoutError):
            # Expected transient failures: silently reconnect via finally.
            pass
        except Exception as ex:
            # Rate-limit full tracebacks to one per 60 seconds.
            self.redis_cur_time = time.time()
            if self.redis_prev_time == 0 or \
                    self.redis_cur_time - self.redis_prev_time > 60:
                self.redis_prev_time = self.redis_cur_time
                template = "Exception {0} in uve stream proc. Arguments:\n{1!r}"
                messag = template.format(type(ex).__name__, ex.args)
                self._logger.error("[%s:%d] AlarmGen %s,%d %s : traceback %s" % \
                    (self._pi.ip_address, self._pi.port, \
                     self._pi.instance_id, self._partno, \
                     messag, traceback.format_exc()))
            else:
                template = "Exception {0} in uve stream proc. Arguments:\n{1!r}"
                messag = template.format(type(ex).__name__, ex.args)
                self._logger.error("[%s:%d] AlarmGen %s,%d %s" % \
                    (self._pi.ip_address, self._pi.port, \
                     self._pi.instance_id, self._partno, \
                     messag))
        finally:
            # Drop connection state and force a pause before reconnecting.
            lredis = None
            if pb is not None:
                pb.close()
            pb = None
            pause = True
    return None
def _get_uve_content(self, table, barekeys, tfilter, ackfilter, keysonly):
    """Fetch UVE contents for the given bare keys of one table.

    Groups barekeys by the partition recorded in self._uvedb, reads
    each partition's AGPARTVALUES hashes from its aggregate redis in a
    single pipeline, applies the type filter (tfilter: {type: attr
    filter list}) and alarm ack filter, and returns
    {barekey: uve-struct} (values are None when keysonly is set).
    """
    brsp = {}
    uveparts = {}
    # Bucket the requested keys by owning partition so each partition's
    # redis is contacted once.
    for barekey in barekeys:
        part = self._uvedb[table][barekey]["__SOURCE__"]["partition"]
        if not part in uveparts:
            uveparts[part] = set()
        uveparts[part].add(barekey)
    for pkey, pvalue in uveparts.iteritems():
        # self._agp maps partition -> partition-info (redis endpoint).
        pi = self._agp[pkey]
        lredis = StrictRedisWrapper(host=pi.ip_address, port=pi.port,
                                    password=self._rpass, db=7,
                                    socket_timeout=30)
        ppe = lredis.pipeline()
        luves = list(uveparts[pkey])
        # Queue one read per UVE: hmget of just the filtered types, or
        # hgetall when no type filter is given.
        for elem in luves:
            if len(tfilter) != 0:
                ltypes = tfilter.keys()
                ppe.hmget("AGPARTVALUES:%s:%d:%s:%s" % \
                    (pi.instance_id, pkey, table, elem), *ltypes)
            else:
                ppe.hgetall("AGPARTVALUES:%s:%d:%s:%s" % \
                    (pi.instance_id, pkey, table, elem))
        pperes = ppe.execute()
        # pperes[uidx] lines up with luves[uidx] by pipeline order.
        for uidx in range(0, len(luves)):
            uvestruct = {}
            if len(tfilter) != 0:
                for tidx in range(0, len(ltypes)):
                    if not pperes[uidx][tidx]:
                        continue
                    # Empty attribute filter list means "whole struct";
                    # otherwise keep only the listed attributes.
                    afilter_list = tfilter[ltypes[tidx]]
                    ppeval = json.loads(pperes[uidx][tidx])
                    if len(afilter_list) == 0:
                        uvestruct[ltypes[tidx]] = ppeval
                    else:
                        for akey, aval in ppeval.iteritems():
                            if akey not in afilter_list:
                                continue
                            else:
                                if not ltypes[tidx] in uvestruct:
                                    uvestruct[ltypes[tidx]] = {}
                                uvestruct[ltypes[tidx]][akey] = aval
            else:
                for tk, tv in pperes[uidx].iteritems():
                    uvestruct[tk] = json.loads(tv)
            # Apply alarm acknowledgement filter ("true"/"false"): keep
            # only matching alarms, dropping UVEAlarms entirely when none
            # remain.
            if ackfilter is not None:
                if "UVEAlarms" in uvestruct and \
                        "alarms" in uvestruct["UVEAlarms"]:
                    alarms = []
                    for alarm in uvestruct["UVEAlarms"]["alarms"]:
                        ack = "false"
                        if "ack" in alarm:
                            if alarm["ack"]:
                                ack = "true"
                            else:
                                ack = "false"
                        if ack == ackfilter:
                            alarms.append(alarm)
                    if not len(alarms):
                        del uvestruct["UVEAlarms"]
                    else:
                        uvestruct["UVEAlarms"]["alarms"] = alarms
            if len(uvestruct) != 0:
                if keysonly:
                    brsp[luves[uidx]] = None
                else:
                    brsp[luves[uidx]] = uvestruct
    return brsp
def _run(self):
    """Partition stream worker: subscribe to this partition's AGPARTPUB
    channel (aggregate redis, db 7), replay existing state via
    syncpart(), then deliver each published UVE update (optionally with
    its content) to self._cb.

    Reconnects with a 2s pause after any error; exits only on greenlet
    kill.  Non-connection exceptions are rate-limited to one full
    traceback per 60 seconds.
    """
    lredis = None
    pb = None
    pause = False
    self.redis_prev_time = 0
    while True:
        try:
            if pause:
                gevent.sleep(2)
                pause = False
            lredis = StrictRedisWrapper(
                host=self._pi.ip_address,
                port=self._pi.port,
                password=self._rpass,
                db=7, socket_timeout=30)
            pb = lredis.pubsub()
            inst = self._pi.instance_id
            part = self._partno
            pb.subscribe('AGPARTPUB:%s:%d' % (inst, part))
            # Replay current partition contents before consuming the
            # live pubsub stream.
            self.syncpart(lredis)
            while True:
                message = pb.get_message()
                if not message:
                    gevent.sleep(0.001)
                    continue
                # Ignore subscribe/unsubscribe confirmations etc.
                if message["type"] != "message":
                    gevent.sleep(0)
                    continue
                dataline = message["data"]
                try:
                    elems = json.loads(dataline)
                except:
                    self._logger.error("AggUVE Parsing failed: %s" %
                        str(message))
                    gevent.sleep(0)
                    continue
                else:
                    self._logger.info("AggUVE loading: %s" % str(elems))
                # Pipeline reads are only needed when we track UVE
                # contents (streaming case).
                if self._content:
                    ppe = lredis.pipeline()
                lelems = []
                for elem in elems:
                    table, barekey = elem["key"].split(":",1)
                    # Apply table / key-pattern / type filters; elems
                    # failing any filter are dropped before queuing reads.
                    if self._tablefilt:
                        if not table in self._tablefilt:
                            gevent.sleep(0)
                            continue
                    if self._patterns:
                        kfilter_match = False
                        for pattern in self._patterns:
                            if pattern.match(barekey):
                                kfilter_match = True
                                break
                        if not kfilter_match:
                            gevent.sleep(0)
                            continue
                    if self._cfilter:
                        if elem["type"] not in self._cfilter:
                            gevent.sleep(0)
                            continue
                    lelems.append(elem)
                    if self._content:
                        # This UVE was deleted
                        if elem["type"] is None:
                            ppe.exists("AGPARTVALUES:%s:%d:%s" % \
                                (inst, part, elem["key"]))
                        else:
                            ppe.hget("AGPARTVALUES:%s:%d:%s" % \
                                (inst, part, elem["key"]), elem["type"])
                # We need to execute this pipeline read only if we are
                # keeping track of UVE contents (streaming case)
                if self._content:
                    pperes = ppe.execute()
                idx = 0
                for elem in lelems:
                    key = elem["key"]
                    typ = elem["type"]
                    vdata = None
                    if not typ is None:
                        if self._content:
                            if not key in self._uvecache:
                                self._uvecache[key] = {}
                            # pperes[idx] is the hget result queued for
                            # this elem above.
                            vjson = pperes[idx]
                            if vjson is None:
                                # Value vanished between publish and read:
                                # treat as a delete of this type.
                                vdata = None
                                if typ in self._uvecache[key]:
                                    del self._uvecache[key][typ]
                            else:
                                vdata = json.loads(vjson)
                                self._uvecache[key][typ] = vjson
                                if self._token is not None:
                                    if not self.is_uve_read_permitted(\
                                            self._uvecache[key]):
                                        # NOTE(review): this `continue`
                                        # skips the `idx += 1` below, so
                                        # subsequent elems would read the
                                        # wrong pipeline slot — confirm
                                        # whether idx should be advanced
                                        # before skipping.
                                        gevent.sleep(0)
                                        continue
                        else:
                            # Not tracking content: signal "changed" with
                            # an empty dict.
                            vdata = {}
                    self._cb(self._partno, self._pi, key, typ, vdata)
                    idx += 1
                gevent.sleep(0)
        except gevent.GreenletExit:
            break
        except (redis.exceptions.ConnectionError,
                redis.exceptions.TimeoutError):
            # Expected transient failures: silently reconnect via finally.
            pass
        except Exception as ex:
            # Rate-limit full tracebacks to one per 60 seconds.
            self.redis_cur_time = time.time()
            if self.redis_prev_time == 0 or \
                    self.redis_cur_time - self.redis_prev_time > 60:
                self.redis_prev_time = self.redis_cur_time
                template = "Exception {0} in uve stream proc. Arguments:\n{1!r}"
                messag = template.format(type(ex).__name__, ex.args)
                self._logger.error("[%s:%d] AlarmGen %s,%d %s : traceback %s" % \
                    (self._pi.ip_address, self._pi.port, \
                     self._pi.instance_id, self._partno, \
                     messag, traceback.format_exc()))
            else:
                template = "Exception {0} in uve stream proc. Arguments:\n{1!r}"
                messag = template.format(type(ex).__name__, ex.args)
                self._logger.error("[%s:%d] AlarmGen %s,%d %s" % \
                    (self._pi.ip_address, self._pi.port, \
                     self._pi.instance_id, self._partno, \
                     messag))
        finally:
            # Drop connection state and force a pause before reconnecting.
            lredis = None
            if pb is not None:
                pb.close()
            pb = None
            pause = True
    return None
def _get_uve_content(self, table, barekeys, tfilter, ackfilter, keysonly):
    """Fetch UVE contents for the given bare keys of one table.

    Groups barekeys by the partition recorded in self._uvedb, reads
    each partition's AGPARTVALUES hashes from its aggregate redis
    (db 7) in a single pipeline, applies the type filter (tfilter:
    {type: attr filter list}) and alarm ack filter, and returns
    {barekey: uve-struct} (values are None when keysonly is set).
    """
    brsp = {}
    uveparts = {}
    # Bucket the requested keys by owning partition so each partition's
    # redis is contacted once.
    for barekey in barekeys:
        part = self._uvedb[table][barekey]["__SOURCE__"]["partition"]
        if not part in uveparts:
            uveparts[part] = set()
        uveparts[part].add(barekey)
    for pkey,pvalue in uveparts.iteritems():
        # self._agp maps partition -> partition-info (redis endpoint).
        pi = self._agp[pkey]
        lredis = StrictRedisWrapper(
            host=pi.ip_address, port=pi.port,
            password=self._rpass, db=7,
            socket_timeout=30)
        ppe = lredis.pipeline()
        luves = list(uveparts[pkey])
        # Queue one read per UVE: hmget of just the filtered types, or
        # hgetall when no type filter is given.
        for elem in luves:
            if len(tfilter) != 0:
                ltypes = tfilter.keys()
                ppe.hmget("AGPARTVALUES:%s:%d:%s:%s" % \
                    (pi.instance_id, pkey, table, elem), *ltypes)
            else:
                ppe.hgetall("AGPARTVALUES:%s:%d:%s:%s" % \
                    (pi.instance_id, pkey, table, elem))
        pperes = ppe.execute()
        # pperes[uidx] lines up with luves[uidx] by pipeline order.
        for uidx in range(0,len(luves)):
            uvestruct = {}
            if len(tfilter) != 0:
                for tidx in range(0,len(ltypes)):
                    if not pperes[uidx][tidx]:
                        continue
                    # Empty attribute filter list means "whole struct";
                    # otherwise keep only the listed attributes.
                    afilter_list = tfilter[ltypes[tidx]]
                    ppeval = json.loads(pperes[uidx][tidx])
                    if len(afilter_list) == 0:
                        uvestruct[ltypes[tidx]] = ppeval
                    else:
                        for akey, aval in ppeval.iteritems():
                            if akey not in afilter_list:
                                continue
                            else:
                                if not ltypes[tidx] in uvestruct:
                                    uvestruct[ltypes[tidx]] = {}
                                uvestruct[ltypes[tidx]][akey] = aval
            else:
                for tk,tv in pperes[uidx].iteritems():
                    uvestruct[tk] = json.loads(tv)
            # Apply alarm acknowledgement filter ("true"/"false"): keep
            # only matching alarms, dropping UVEAlarms entirely when none
            # remain.
            if ackfilter is not None:
                if "UVEAlarms" in uvestruct and \
                        "alarms" in uvestruct["UVEAlarms"]:
                    alarms = []
                    for alarm in uvestruct["UVEAlarms"]["alarms"]:
                        ack = "false"
                        if "ack" in alarm:
                            if alarm["ack"]:
                                ack = "true"
                            else:
                                ack = "false"
                        if ack == ackfilter:
                            alarms.append(alarm)
                    if not len(alarms):
                        del uvestruct["UVEAlarms"]
                    else:
                        uvestruct["UVEAlarms"]["alarms"] = alarms
            if len(uvestruct) != 0:
                if keysonly:
                    brsp[luves[uidx]] = None
                else:
                    brsp[luves[uidx]] = uvestruct
    return brsp
def run(self):
    """Periodic redis/collector health-check loop (SSL-aware variant).

    Every self._freq seconds, for each redis UVE instance in
    self._redis_uve_map: (re)connect if needed, verify the known
    contrail-collector generator is still registered in the NGENERATORS
    set, and publish UP/DOWN state via ConnectionState — reporting a
    redis failure as DOWN only when its collector is still listed in
    self._active_collectors.  Exits only on gevent greenlet kill.
    """
    exitrun = False
    while not exitrun:
        for rkey in self._redis_uve_map.keys():
            # rkey is indexed both as a tuple (rkey[0]/rkey[1]) and by
            # attribute (rkey.ip/rkey.port) — presumably a namedtuple;
            # TODO(review) confirm against the map's key type.
            rinst = self._redis_uve_map[rkey]
            old_pid = rinst.collector_pid
            try:
                # check if it is marked as deleted during sighup handling
                if rinst.deleted == True:
                    r_ip = rkey[0]
                    r_port = rkey[1]
                    del self._redis_uve_map[rkey]
                    ConnectionState.delete(ConnectionType.REDIS_UVE,\
                        r_ip+":"+str(r_port))
                    continue
                # Lazily (re)establish the redis connection; a fresh
                # handle invalidates any previously cached collector pid.
                if rinst.redis_handle is None:
                    rinst.redis_handle = StrictRedisWrapper(
                        host=rkey.ip, port=rkey.port,
                        password=self._redis_password, db=1,
                        socket_timeout=30, **self._redis_ssl_params)
                    rinst.collector_pid = None
                # check for known collector pid string
                # if there's a mismatch, we must read it again
                if rinst.collector_pid is not None:
                    if not rinst.redis_handle.sismember(
                            "NGENERATORS", rinst.collector_pid):
                        rinst.collector_pid = None
                # read the collector pid string
                # (generator ids look like "<ip>:<node-type>:<module>:..."
                #  — field index 2 is the module name)
                if rinst.collector_pid is None:
                    for gen in rinst.redis_handle.smembers("NGENERATORS"):
                        module = gen.split(':')[2]
                        if module == "contrail-collector":
                            rinst.collector_pid = gen
            except gevent.GreenletExit:
                self._logger.error('UVEServer Exiting on gevent-kill')
                exitrun = True
                break
            except Exception as e:
                # Any redis failure marks this instance unhealthy; the
                # next pass will reconnect from scratch.
                self._logger.debug("redis/collector healthcheck failed %s for %s" \
                    % (str(e), str(rkey)))
                rinst.redis_handle = None
                rinst.collector_pid = None
            finally:
                # Update redis/collector health
                '''
                when rinst.redis_handle is none, redis down
                when rkey.ip not in collectors, collector down
                if redis and collector are up or down, state should be up
                if redis is up, state should be up
                if redis is down but collector is up, the state shoue be down
                '''
                # Skip instances removed by the deleted-handling branch
                # above.
                if rkey in self._redis_uve_map.keys():
                    if rinst.redis_handle is None:
                        # Resolve this redis's host to an FQDN so it can
                        # be matched against the active-collector list.
                        if rkey.ip != '127.0.0.1':
                            rkey_fqdn = socket.getfqdn(rkey.ip)
                        else:
                            rkey_fqdn = socket.getfqdn()
                        if (self._active_collectors is not None and
                                rkey_fqdn not in self._active_collectors):
                            # Redis is unreachable but its collector is
                            # not expected to be active, so report UP.
                            ConnectionState.update(ConnectionType.REDIS_UVE,\
                                rkey.ip + ":" + str(rkey.port),
                                ConnectionStatus.UP,
                                [rkey.ip+":"+str(rkey.port)],
                                "Redis Instance is Up")
                        else:
                            ConnectionState.update(ConnectionType.REDIS_UVE,\
                                rkey.ip + ":" + str(rkey.port),
                                ConnectionStatus.DOWN,
                                [rkey.ip+":"+str(rkey.port)],
                                "Redis Instance is Down")
                    else:
                        ConnectionState.update(ConnectionType.REDIS_UVE,\
                            rkey.ip + ":" + str(rkey.port),
                            ConnectionStatus.UP,
                            [rkey.ip+":"+str(rkey.port)])
        if not exitrun:
            gevent.sleep(self._freq)