def cluster_stepdown(request, object_id, api=False):
    """
    Ask the primary node to become secondary so that a promotion can occur
    This is an API request
    :param request:
    :param object_id:
    :param api:
    :return:
    """
    if not request.is_ajax() and not api:
        return HttpResponseBadRequest()

    try:
        node_model = Node.objects.get(pk=object_id)
    except ObjectDoesNotExist:
        if api:
            return JsonResponse({'error': _("Object does not exist.")}, status=404)
        return HttpResponseForbidden("Injection detected")

    c = MongoBase()
    c.connect()

    # If the requested node is not primary, return an error
    if c.get_primary() != node_model.name + ":9091":
        return JsonResponse({
            'status': False,
            'error': _("Cannot step down a non-primary node.")
        })

    # Automagically connect to the primary node
    status, message = c.repl_set_step_down()
    return JsonResponse({'status': status, 'message': message})
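# Hedged sketch -- MongoBase.repl_set_step_down() is not shown in this file.
# Assuming it wraps the MongoDB "replSetStepDown" admin command, a raw pymongo
# equivalent could look like this (URI and step-down window are illustrative):
from pymongo import MongoClient
from pymongo.errors import AutoReconnect


def step_down_primary_sketch(primary_uri="mongodb://127.0.0.1:9091", seconds=60):
    """Ask the current primary to step down for `seconds`; return (status, message)."""
    client = MongoClient(primary_uri)
    try:
        # The primary closes client connections while stepping down, so pymongo
        # often raises AutoReconnect even though the command succeeded.
        client.admin.command("replSetStepDown", seconds)
        return True, "Step down initiated"
    except AutoReconnect:
        return True, "Step down initiated (connection reset by primary)"
    except Exception as e:
        return False, str(e)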
def cluster_add(request):
    slave_ip = request.POST.get('slave_ip')
    slave_name = request.POST.get('slave_name')

    # FIXME: improve security check (valid IPv4 / IPv6 and valid name)
    if not slave_name or not slave_ip:
        return JsonResponse({'status': False, 'message': 'Invalid call'})

    # Make the slave_name resolvable
    node = Cluster.get_current_node()
    node.api_request("toolkit.network.network.make_hostname_resolvable", (slave_name, slave_ip))

    # Now the slave should be in the cluster: add its management IP
    node = Node()
    node.name = slave_name
    node.management_ip = slave_ip
    node.internet_ip = slave_ip
    node.save()

    # We need to wait for the VultureD daemon to reload the PF configuration
    time.sleep(6)

    # Add the NEW node into the REPLICASET, as a pending member
    c = MongoBase()
    c.connect()

    cpt = 0
    response = None
    while not response:
        try:
            logger.debug("Adding {} to replicaset".format(slave_name))
            response = c.repl_add(slave_name + ':9091')
        except Exception as e:
            logger.error("Cannot connect to slave for the moment: {}".format(e))
            cpt += 1
            if cpt > 10:
                logger.error("Failed to connect to the slave 10 times, aborting.")
                return JsonResponse({
                    'status': False,
                    'message': 'Error during repl_add. Check logs'
                })
            logger.info("Waiting for next connection to slave ...")
            time.sleep(1)

    if response:
        node.api_request('toolkit.network.network.refresh_nic')
        return JsonResponse({'status': True, 'message': 'ok'})
    else:
        return JsonResponse({
            'status': False,
            'message': 'Error during repl_add. Check logs'
        })
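# Hedged sketch for the FIXME near the top of cluster_add -- stricter validation
# of slave_ip / slave_name. This helper is illustrative only (not part of the
# existing code base): it accepts any valid IPv4/IPv6 address and RFC 1123-style
# hostnames.
import ipaddress
import re

_HOSTNAME_RE = re.compile(
    r"^(?=.{1,253}$)"
    r"([a-zA-Z0-9]([a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?\.)*"
    r"[a-zA-Z0-9]([a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?$"
)


def is_valid_slave(slave_name, slave_ip):
    """Return True if slave_ip is a valid IP address and slave_name a valid hostname."""
    try:
        ipaddress.ip_address(slave_ip)
    except ValueError:
        return False
    return bool(_HOSTNAME_RE.match(slave_name or ""))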
def reconcile():
    # MONGO #
    m = MongoBase()
    if not m.connect():
        return False
    m.connect_primary()

    # REDIS #
    r = RedisBase()
    master_node = r.get_master()
    r = RedisBase(node=master_node)

    redis_list_name = "logs_darwin"
    ignored_alerts = list()

    # Fetch all pending Darwin alerts, then trim the consumed entries from the list
    range_len = r.redis.llen(redis_list_name)
    alerts = r.redis.lrange(redis_list_name, 0, range_len - 1)
    r.redis.ltrim(redis_list_name, range_len, -1)

    for alert in alerts:
        alert = str(alert, "utf-8")
        a = json.loads(alert)
        evt_id = a.get("evt_id")
        if evt_id is None:
            ignored_alerts.append(a)
            continue

        # Attach the Darwin alert details to the matching log line
        query = {"darwin_id": evt_id}
        newvalue = {
            "$set": {
                "darwin_alert_details": a,
                "darwin_is_alert": True
            }
        }
        m.update_one("logs", query, newvalue)

    return True
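# Hedged sketch -- how an alert could be injected into the "logs_darwin" Redis list
# to exercise reconcile(). Everything except the "evt_id" field, and the use of
# RPUSH, are assumptions rather than the actual Darwin producer behaviour.
import json
import redis


def push_test_alert_sketch(evt_id, host="127.0.0.1", port=6379):
    alert = {"evt_id": evt_id, "details": "illustrative payload"}
    # reconcile() reads the whole list with LRANGE, then trims what it consumed.
    redis.Redis(host=host, port=port).rpush("logs_darwin", json.dumps(alert))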
def cluster_join(request, object_id, api=False):
    """
    Join a node into the MongoDB replicaset
    This is an API request
    """
    if not request.is_ajax() and not api:
        return HttpResponseBadRequest()

    try:
        node_model = Node.objects.get(pk=object_id)
    except ObjectDoesNotExist:
        return HttpResponseForbidden("Injection detected")

    c = MongoBase()
    c.connect()

    # Automagically connect to the primary node
    status, message = c.repl_add(node_model.name + ':9091')
    return JsonResponse({'status': status, 'message': message})
def post(self, request, object_id, **kwargs):
    confirm = request.POST.get('confirm')
    if confirm == 'yes':
        try:
            obj_inst = self.obj.objects.get(pk=object_id)
        except ObjectDoesNotExist:
            return HttpResponseForbidden("Injection detected")

        # Before deleting the node we need to remove it from the MongoDB replicaset
        c = MongoBase()
        c.connect()
        c.connect_primary()
        c.repl_remove(obj_inst.name + ":9091")

        # Before deleting the node we need to remove it from Redis
        c = RedisBase(obj_inst.management_ip)
        c.slave_of('NO', 'ONE')
        # FIXME: Cleanup Sentinel?

        # Let's rock
        obj_inst.delete()

    return HttpResponseRedirect(self.redirect_url)
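# Hedged sketch -- RedisBase.slave_of('NO', 'ONE') is assumed to wrap the raw
# "SLAVEOF NO ONE" command; with redis-py, calling slaveof() without arguments
# turns the node back into a standalone master. Host/port below are illustrative.
import redis


def detach_redis_replica_sketch(management_ip, port=6379):
    r = redis.Redis(host=management_ip, port=port)
    r.slaveof()  # no arguments == SLAVEOF NO ONE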
def is_standalone(self):
    """
    Check if the current Node is the only member of the mongoDB replicaset
    :return: True / False (True is also returned if the connection fails)
    """
    c = MongoBase()
    ok = c.connect()
    if ok:
        c.connect_primary()
        config = c.db.admin.command("replSetGetConfig")['config']
        return len(config['members']) == 1

    return True
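# Hedged sketch -- the same membership check with raw pymongo, assuming the node
# listens on the illustrative URI below. replSetGetConfig returns a document whose
# config.members list holds one entry per replicaset member.
from pymongo import MongoClient


def is_standalone_sketch(uri="mongodb://127.0.0.1:9091"):
    members = MongoClient(uri).admin.command("replSetGetConfig")["config"]["members"]
    return len(members) == 1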
def set_logs_ttl(self):
    """ Set keep-time of internal logs database
        by setting MongoDB indexes on PF, messageQueues and Internal logs
    """
    # Connect to mongodb
    mongo = MongoBase()
    mongo.connect()

    # Call the current node, it will connect to primary automatically
    res, mess = mongo.set_index_ttl("logs", "pf", "time", self.logs_ttl)
    if not res:
        return res, mess
    res, mess = mongo.set_index_ttl("logs", "internal", "timestamp", self.logs_ttl)
    if not res:
        return res, mess
    res, mess = mongo.set_index_ttl("vulture", "system_messagequeue", "modified", self.logs_ttl)
    if not res:
        return res, mess
    res, mess = mongo.set_index_ttl("vulture", "system_messagequeue", "date_add", self.logs_ttl)
    if not res:
        return res, mess
    return True, ""
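# Hedged sketch -- MongoBase.set_index_ttl() is assumed to create a MongoDB TTL
# index; with raw pymongo that is a single-field index carrying the
# expireAfterSeconds option, as below (database/collection/field names are taken
# from the calls above, the URI is illustrative).
from pymongo import ASCENDING, MongoClient


def set_index_ttl_sketch(database, collection, field, ttl_seconds, uri="mongodb://127.0.0.1:9091"):
    client = MongoClient(uri)
    # The TTL monitor removes documents once `field` is older than ttl_seconds.
    client[database][collection].create_index(
        [(field, ASCENDING)], expireAfterSeconds=int(ttl_seconds)
    )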
def set_enrichment_tags(apps, schema_editor):
    mongo = MongoBase()
    if not mongo.connect():
        print("[ERROR] could not connect to mongo to update data !!")
        return
    if not mongo.connect_primary():
        print("[ERROR] could not connect to mongo primary, please reload migration")
        return

    mongo.update_many('vulture', 'darwin_filterpolicy', {}, {"$set": {"enrichment_tags": []}})
def is_master_mongo(self):
    """
    Check if the current Node is master or not
    :return: True / False, or None in case of a failure
    """
    c = MongoBase()
    ok = c.connect()
    if ok:
        primary_node = c.get_primary()
    else:
        return None

    if ok and primary_node == self.name + ':9091':
        return True
    elif ok:
        return False

    return None
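# Hedged sketch -- MongoBase.get_primary() is assumed to return "<host>:9091".
# With raw pymongo, the replicaset primary can be read from the "isMaster"
# command ("hello" on recent servers); the URI below is illustrative.
from pymongo import MongoClient


def is_master_sketch(node_name, uri="mongodb://127.0.0.1:9091"):
    status = MongoClient(uri).admin.command("isMaster")
    # For replicaset members, "primary" holds the current primary as "host:port".
    return status.get("primary") == node_name + ":9091"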
def reconcile(self):
    node = Cluster.get_current_node()
    if not node.is_master_mongo():
        return False

    mongo = MongoBase()
    if not mongo.connect():
        return False
    mongo.connect_primary()

    redis = RedisBase()
    master_node = redis.get_master()
    redis = RedisBase(node=master_node)

    filepath = ALERTS_FILE

    # Pop alerts produced while vulture was down
    # Do not retry, as there is likely no cache for remaining alerts in current Redis
    self.pops(mongo, redis, filepath, max_tries=1)
    if self.shutdown_flag.is_set():
        return True

    redis_channel = REDIS_CHANNEL
    listener = redis.redis.pubsub()
    listener.subscribe([redis_channel])

    logger.info("Reconcile: start listening {} channel.".format(redis_channel))

    while not self.shutdown_flag.is_set():
        alert = listener.get_message(ignore_subscribe_messages=True, timeout=2)
        # If we have no messages, alert is None
        if alert:
            # Only use the channel to trigger popping alerts
            self.pops(mongo, redis, filepath)

    return True
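# Hedged sketch -- the listener above only uses a pub/sub message as a wake-up
# signal, so any producer can trigger a pop by publishing on REDIS_CHANNEL.
# The channel name and payload below are illustrative, not the real constants.
import redis


def notify_reconcile_sketch(channel="darwin_alerts", host="127.0.0.1"):
    redis.Redis(host=host).publish(channel, "new-alert")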
old_hostname = ""
new_hostname = ""

# Check args
if len(sys.argv) != 1 and len(sys.argv) != 3:
    print("Replica_rename:: Usage: {} [old_hostname new_hostname]".format(sys.argv[0]))
    exit(1)
elif len(sys.argv) == 3:
    old_hostname = sys.argv[1]
    new_hostname = sys.argv[2]

# MongoDB - rename of node
c = MongoBase()

# Connect to the current renamed node
c.connect(node=new_hostname + ":9091", primary=False)
print("Connected to {}".format(new_hostname))

# Automagically connect to the primary node
res = c.repl_rename(old_hostname, new_hostname)
print("Node renamed.")

node = Node.objects.get(name=old_hostname)
node.name = new_hostname
node.save()

logfwd = LogOMMongoDB.objects.get(internal=True)
logfwd.uristr = c.get_replicaset_uri()
logfwd.save()

# Update node name in services config