def check_internal_tasks():
    """Periodic housekeeping of the internal MessageQueue (crontab job).

    Only runs on the master MongoDB node. Two passes:
      1. Delete "done" tasks older than 30 days.
      2. Any node still holding month-old tasks in "new" state is assumed
         dead and is removed from the MongoDB replica set.

    :raises Exception: re-raises anything unexpected after logging it.
    """
    try:
        # Run this crontab only on master node
        node = Cluster.get_current_node()
        if not node.is_master_mongo:
            return

        # Deleting done internal tasks older than a month
        last_month_date = make_aware(datetime.datetime.now() - datetime.timedelta(days=30))
        MessageQueue.objects.filter(status="done", date_add__lte=last_month_date).delete()

        # Checking if a node has not been executing its duty for a while.
        # If so, remove it from the cluster.
        message_queue_not_finished = MessageQueue.objects.filter(date_add__lt=last_month_date,
                                                                 status="new")
        node_to_remove = []
        for message in message_queue_not_finished:
            if message.node not in node_to_remove:
                node_to_remove.append(message.node)
            message.delete()

        if node_to_remove:
            # Hoisted out of the loop: one primary connection serves
            # every removal (the original reconnected per dead node).
            c = MongoBase()
            c.connect_primary()
            for n in node_to_remove:
                logger.info('[REMOVING DEAD NODE FROM CLUSTER] Node: {}'.format(n.name))
                c.repl_remove(n.name + ":9091")

    except Exception as e:
        logger.error("Crontab::check_internal_tasks: {}".format(e), exc_info=1)
        raise
def reconcile():
    """Drain pending Darwin alerts from Redis and attach them to log entries.

    Atomically-ish pops the whole ``logs_darwin`` Redis list (read with
    LRANGE then discard with LTRIM), and for each JSON alert carrying an
    ``evt_id`` updates the matching document in the Mongo ``logs``
    collection with the alert details.

    :return: False if MongoDB is unreachable, True otherwise.
    """
    # MONGO #
    m = MongoBase()
    if not m.connect():
        return False
    m.connect_primary()

    # REDIS #
    r = RedisBase()
    master_node = r.get_master()
    r = RedisBase(node=master_node)

    redis_list_name = "logs_darwin"

    # Snapshot the current list content, then trim it away so the next
    # run does not reprocess the same alerts.
    rangeLen = r.redis.llen(redis_list_name)
    alerts = r.redis.lrange(redis_list_name, "0", str(rangeLen - 1))
    r.redis.ltrim(redis_list_name, str(rangeLen), "-1")

    for alert in alerts:
        a = json.loads(str(alert, "utf-8"))
        evt_id = a.get("evt_id")
        if evt_id is None:
            # No correlation id: nothing to update in the logs collection.
            # (The original accumulated these in an unused list; dropped.)
            continue

        query = {"darwin_id": evt_id}
        newvalue = {
            "$set": {
                "darwin_alert_details": a,
                "darwin_is_alert": True
            }
        }
        m.update_one("logs", query, newvalue)

    return True
def remove_session_and_logs_filters(apps, schema_editor):
    # Manually delete all Darwin filters to prevent migration issue,
    # they will be re-created in loaddata
    m = MongoBase()
    m.connect_primary()

    # If the node is not yet installed, no need to drop collections
    if m.db and m.db['vulture']:
        for collection_name in ('darwin_filterpolicy', 'darwin_darwinfilter'):
            collection = m.db['vulture'][collection_name]
            if collection:
                collection.delete_many({})
def is_standalone(self):
    """
    Check if the current Node is a member of mongoDB
    :return: True / False, or None in case of a failure
    """
    client = MongoBase()
    if not client.connect():
        # Mongo unreachable: report the node as standalone, as the
        # original implementation did.
        return True
    client.connect_primary()
    replset_config = client.db.admin.command("replSetGetConfig")['config']
    # Standalone means the replica set holds exactly one member.
    return len(replset_config['members']) == 1
def post(self, request, object_id, **kwargs):
    """Handle a confirmed node deletion: detach the node from MongoDB
    and Redis replication, then delete the Django object."""
    confirm = request.POST.get('confirm')
    if confirm == 'yes':
        try:
            obj_inst = self.obj.objects.get(pk=object_id)
        except ObjectDoesNotExist:
            return HttpResponseForbidden("Injection detected")

        # Before deleting the node we need to remove it from mongoDB
        mongo = MongoBase()
        mongo.connect()
        mongo.connect_primary()
        mongo.repl_remove(obj_inst.name + ":9091")

        # Before deleting the node we need to remove it from Redis
        redis = RedisBase(obj_inst.management_ip)
        redis.slave_of('NO', 'ONE')
        # Fixme: Cleanup Sentinel ?

        # Let's rock
        obj_inst.delete()

    return HttpResponseRedirect(self.redirect_url)
def update_repo_attributes(apps, schema_editor):
    # Rewrite each portal's legacy 'repo_attributes' entries into the new
    # condition/action dictionary format, directly in MongoDB.
    m = MongoBase()
    m.connect_primary()

    # If the node is not yet installed, there is nothing to migrate
    if m.db and m.db['vulture']:
        coll = m.db['vulture']['authentication_userauthentication']
        if coll:
            for portal in coll.find():
                new_attributes = []
                for r in portal.get('repo_attributes', []):
                    # Each legacy entry becomes an always-true ("1" equals "1")
                    # conditional action carrying the same key/source.
                    new_attributes.append({
                        'condition_var_kind': "constant",
                        'condition_var_name': "1",
                        'condition_criterion': "equals",
                        'condition_match': "1",
                        'action_var_name': r['key'],
                        'action_var_kind': r['source_attr'],
                        'action_var': r['key'],
                    })
                coll.update_one({'id': portal['id']},
                                {'$set': {'repo_attributes': new_attributes}})
                print("Portal {} updated".format(portal['name']))
def reconcile(self):
    """Consume Darwin alerts until shutdown is requested.

    First drains the backlog file once (alerts produced while vulture
    was down), then subscribes to the Redis alert channel and pops on
    every published message.

    :return: False if not on the mongo master or mongo is unreachable,
             True when exiting on the shutdown flag.
    """
    node = Cluster.get_current_node()
    if not node.is_master_mongo:
        return False

    mongo = MongoBase()
    if not mongo.connect():
        return False
    mongo.connect_primary()

    # Always talk to the current Redis master.
    redis = RedisBase()
    redis = RedisBase(node=redis.get_master())

    filepath = ALERTS_FILE

    # Pops alerts produced when vulture was down
    # Do not retry, as there is likely no cache for remaining alerts in current Redis
    self.pops(mongo, redis, filepath, max_tries=1)
    if self.shutdown_flag.is_set():
        return True

    redis_channel = REDIS_CHANNEL
    listener = redis.redis.pubsub()
    listener.subscribe([redis_channel])
    logger.info(
        "Reconcile: start listening {} channel.".format(redis_channel))

    while not self.shutdown_flag.is_set():
        # get_message returns None when nothing arrived within the timeout.
        message = listener.get_message(ignore_subscribe_messages=True, timeout=2)
        if message:
            # Only use the channel to trigger popping alerts
            self.pops(mongo, redis, filepath)

    return True
def set_enrichment_tags(apps, schema_editor):
    # Give every Darwin filter policy document an (empty) enrichment_tags list.
    db = MongoBase()

    if not db.connect():
        print("[ERROR] could not connect to mongo to update data !!")
        return
    if not db.connect_primary():
        print("[ERROR] could not connect to mongo primary, please reload migration")
        return

    db.update_many('vulture', 'darwin_filterpolicy', {},
                   {"$set": {"enrichment_tags": []}})