def defender_policy_edit(request, object_id=None):
    policy = None
    if object_id:
        try:
            policy = DefenderPolicy.objects.get(pk=object_id)
        except ObjectDoesNotExist:
            return HttpResponseForbidden("Injection detected")

    form = DefenderPolicyForm(request.POST or None, instance=policy, error_class=DivErrorList)

    if request.method == "POST" and form.is_valid():
        # Save the form to get an id if there is not already one
        policy = form.save(commit=False)

        # Save the policy before writing the configuration files
        policy.save()

        # If it is a new object, write the backend Defender config
        if not object_id:
            Cluster.api_request("darwin.defender_policy.policy.write_defender_backend_conf", policy.id)

        Cluster.api_request("darwin.defender_policy.policy.write_defender_conf", policy.id)

        # If everything succeeded, redirect to the list view
        return HttpResponseRedirect('/darwin/defender_policy/')

    return render(request, 'defender_policy_edit.html', {'form': form})
def save_conf(self):
    params = [self.get_filename(), self.rulebase, RSYSLOG_OWNER, RSYSLOG_PERMS]
    try:
        Cluster.api_request('system.config.models.write_conf', config=params)
    except Exception as e:
        # Log the original error before raising
        logger.error(e, exc_info=1)
        raise VultureSystemConfigError("on cluster.\nRequest failure to write_conf()")
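# Note: the payload handed to 'system.config.models.write_conf' follows the
# same convention everywhere in this codebase, as inferred from the call
# sites in this section:
#
#     params = [<destination path>, <file content>, <owner>, <permissions>]
#
# e.g. params = [self.get_filename(), rendered_content, RSYSLOG_OWNER, RSYSLOG_PERMS]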
def post(self, request, object_id, **kwargs):
    confirm = request.POST.get('confirm')
    if confirm == 'yes':
        try:
            obj_inst = self.obj.objects.get(pk=object_id)
        except ObjectDoesNotExist:
            return HttpResponseForbidden('Injection detected.')

        logger.info("Deleting filter policy configuration files associated with Darwin policy...")

        # Capture the conf paths before the delete, they are unreachable afterwards
        filter_confs = [filter_policy.conf_path for filter_policy in obj_inst.filterpolicy_set.all()]

        try:
            obj_inst.delete()
            for filter_policy_conf in filter_confs:
                Cluster.api_request("services.darwin.darwin.delete_policy_conf", filter_policy_conf)
            Cluster.api_request("services.darwin.darwin.build_conf")
        except ProtectedError as e:
            logger.error("Policy is still used, cannot remove it: {}".format(e))

    return HttpResponseRedirect(self.redirect_url)
def delete(self, delete=True):
    """ Delete file on disk on all nodes """
    from system.cluster.models import Cluster
    if delete:
        Cluster.api_request("system.config.models.delete_conf", self.absolute_filename)
    super().delete()
def acme_update():
    """ Run acme.sh to automatically renew Let's Encrypt certificates """
    subprocess.check_output([
        "/usr/local/sbin/acme.sh", "--cron",
        "--home", "/var/db/acme/.acme.sh"
    ])

    # Now update the certificate database
    need_restart = False
    for cert in X509Certificate.objects.filter(is_vulture_ca=False, is_external=True):
        tmp_crt = X509.load_cert_string(cert.cert)
        cn = str(tmp_crt.get_subject()).replace("/CN=", "")
        cert_path = "/var/db/acme/.acme.sh/{}/{}.cer".format(cn, cn)
        if os.path.isfile(cert_path):
            with open(cert_path) as file_cert:
                pem_cert = file_cert.read()
            cert.cert = pem_cert
            cert.save()
            # Update cert on cluster
            cert.save_conf()
            need_restart = True

    if need_restart:
        Cluster.api_request("services.haproxy.haproxy.reload_service")
def netif_refresh(request):
    """ Read the system configuration and refresh the NIC list
    :param request: Django request
    :return: redirect to the netif list view
    """
    Cluster.api_request('toolkit.network.network.refresh_nic')
    return HttpResponseRedirect('/system/netif/')
def post(self, request, object_id, **kwargs):
    confirm = request.POST.get('confirm')
    if confirm == 'yes':
        try:
            DefenderPolicy.objects.get(pk=object_id).delete()
        except ObjectDoesNotExist:
            return HttpResponseForbidden('Injection detected.')
        Cluster.api_request("darwin.defender_policy.policy.delete_defender_conf", object_id)
    return HttpResponseRedirect(self.redirect_url)
def _create_or_update_filters(policy, filters_list):
    new_filters = []
    bufferings = []
    current_filters = FilterPolicy.objects.filter(policy_id=policy.pk)

    for filt in filters_list:
        try:
            filt['policy'] = policy
            filt['filter_type'] = DarwinFilter.objects.get(id=filt.get('filter_type', 0))
        except DarwinFilter.DoesNotExist:
            logger.error(f"Error while creating/updating filter for darwin policy: DarwinFilter id '{filt.get('filter_type', 0)}' does not exist")
            return f"unknown filter type {filt.get('filter_type', 0)}"

        buffering_opts = filt.pop('buffering', None)

        try:
            filt_id = filt.pop('id', 0)
            if filt_id != 0:
                filter_instance, _ = FilterPolicy.objects.update_or_create(id=filt_id, defaults=filt)
            else:
                # New filter: initialize its per-node status and validate it
                filter_instance = FilterPolicy(**filt)
                filter_instance.status = {node.name: "STARTING" for node in Node.objects.all().only('name')}
                filter_instance.full_clean()

            new_filters.append(filter_instance)
            if buffering_opts:
                bufferings.append((filter_instance, buffering_opts))
        except (ValidationError, ValueError, TypeError) as e:
            logger.error(str(e), exc_info=1)
            return str(e)

    for filter_instance in new_filters:
        filter_instance.save()

    # Remove the filters that are no longer part of the policy
    filters_delete = set(current_filters) - set(new_filters)
    for filter_delete in filters_delete:
        Cluster.api_request("services.darwin.darwin.delete_filter_conf", filter_delete.conf_path)
        filter_delete.delete()

    try:
        for filter_instance, buffering_opts in bufferings:
            DarwinBuffering.objects.update_or_create(
                destination_filter=filter_instance,
                defaults={
                    'interval': buffering_opts.get('interval'),
                    'required_log_lines': buffering_opts.get('required_log_lines'),
                }
            )
    except Exception as e:
        logger.error(e, exc_info=1)
        return 'error while creating darwin buffering: {}'.format(e)

    return ""
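# Illustrative only: a minimal `filters_list` payload accepted by
# _create_or_update_filters(), inferred from the keys the function reads.
# Any other FilterPolicy field names are passed through to the model as-is:
#
#     example_filters_list = [{
#         'id': 0,                  # 0 or absent: create a new filter
#         'filter_type': 1,         # pk of an existing DarwinFilter
#         'buffering': {            # optional, creates/updates a DarwinBuffering
#             'interval': 300,
#             'required_log_lines': 10,
#         },
#     }]
#     error = _create_or_update_filters(policy, example_filters_list)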
def save_conf(self):
    """ Write configuration on disk """
    if not self.configuration:
        return
    params = [self.get_filename(), self.configuration, BACKEND_OWNER, BACKEND_PERMS]
    try:
        Cluster.api_request('system.config.models.write_conf', config=params)
    except Exception as e:
        # Log the original error before raising
        logger.error(e, exc_info=1)
        raise VultureSystemConfigError("on cluster.\nRequest failure to write_conf()")
def netdata_edit(request, api=False, update=False):
    """ Netdata view: edit the history settings of the netdata config

    Args:
        request (Django Request)
        api (bool): whether this is a Django API call

    Returns:
        TYPE: Django view
    """
    service = NetdataService()
    status = {}
    for node in Node.objects.all():
        try:
            status[node.name] = service.last_status()[0]
        except AttributeError:
            status[node.name] = "Error"

    try:
        netdata_model = NetdataSettings.objects.get()
    except NetdataSettings.DoesNotExist:
        netdata_model = NetdataSettings()

    if hasattr(request, "JSON") and api:
        if update:
            request.JSON = {**netdata_model.to_dict(), **request.JSON}
        netdata_form = NetdataForm(request.JSON or None, instance=netdata_model, error_class=DivErrorList)
    else:
        netdata_form = NetdataForm(request.POST or None, instance=netdata_model, error_class=DivErrorList)

    if request.method in ("POST", "PUT", "PATCH"):
        if netdata_form.is_valid():
            netdata_model = netdata_form.save(commit=False)
            netdata_model.save()
            Cluster.api_request(action="services.netdata.netdata.reload_conf")
        elif api:
            return JsonResponse(netdata_form.errors.get_json_data(), status=400)

    if api:
        return build_response_netdata("services.netdata.api", COMMAND_LIST)

    return render(request, 'services/netdata.html', {
        'netdata_form': netdata_form,
        'status': status
    })
def delete(self, request, object_id=None):
    try:
        if not object_id:
            return JsonResponse({"error": _("You must provide an id")}, status=400)

        try:
            policy = DarwinPolicy.objects.get(pk=object_id)
        except DarwinPolicy.DoesNotExist:
            return JsonResponse({'error': _('Object does not exist')}, status=404)

        # Capture related objects before the delete, they are unreachable afterwards
        filter_conf_paths = [obj.conf_path for obj in policy.filterpolicy_set.all()]
        frontends = list(policy.frontend_set.all())

        try:
            policy.delete()
        except ProtectedError as e:
            logger.error("Error trying to delete Darwin policy '{}': policy is still being used".format(policy.name))
            logger.exception(e)
            return JsonResponse({
                "error": _("Object is still used with the following objects: {}".format([str(obj) for obj in e.protected_objects]))
            }, status=409)

        for filter_conf_path in filter_conf_paths:
            Cluster.api_request("services.darwin.darwin.delete_filter_conf", filter_conf_path)

        for frontend in frontends:
            for node in frontend.get_nodes():
                node.api_request("services.rsyslogd.rsyslog.build_conf", frontend.pk)

        Cluster.api_request("services.darwin.darwin.reload_conf")

        return JsonResponse({'status': True}, status=200)
    except Exception as e:
        logger.critical(e, exc_info=1)
        error = _("An error has occurred")

        if settings.DEV_MODE:
            error = str(e)

        return JsonResponse({'error': error}, status=500)
def save_conf(self):
    """ Write configuration on disk """
    params = [self.absolute_filename, self.download_file(), DATABASES_OWNER, DATABASES_PERMS]
    try:
        Cluster.api_request('system.config.models.write_conf', config=params)
    except Exception as e:
        # Log the original error before raising
        logger.error(e, exc_info=1)
        raise VultureSystemConfigError("on cluster.\n"
                                       "Request failure to write conf of Reputation context '{}'".format(self.name))
def save_policy_file(self):
    params = [self.get_full_filename(), self.generate_content(), DARWIN_OWNERS, DARWIN_PERMS]
    try:
        logger.debug("InspectionPolicy::save_policy_file:: calling api to save inspection policy file")
        Cluster.api_request('system.config.models.write_conf', config=params)
    except Exception as e:
        # Log the original error before raising
        logger.error(e, exc_info=1)
        raise VultureSystemConfigError("InspectionPolicy::save_policy_file:: failure to save inspection policy file")
def save_portal_template(data, instance):
    form = PortalTemplateForm(data, instance=instance)

    if not form.is_valid():
        return JsonResponse({"error": form.errors}, status=400)

    obj = form.save()

    # Reload templates for all portals that use this template
    for portal in obj.userauthentication_set.all().only('pk'):
        Cluster.api_request("authentication.user_portal.api.write_templates", portal.id)

    return JsonResponse({
        "message": _("Portal Template saved"),
        "object": obj.to_dict()
    })
def to_template(self):
    """ Dictionary used to create the configuration file
    :return: Dictionary of configuration parameters
    """
    return {'global_config': Cluster.get_global_config()}
def rss_fetch():
    if not Cluster.get_current_node().is_master_mongo:
        logger.debug("Crontab::rss_fetch: Not the master node, passing RSS fetch")
        return

    proxy = get_proxy()
    try:
        rss_uri = "https://predator.vultureproject.org/news.json"
        infos = requests.get(rss_uri, proxies=proxy).json()
        logger.debug("Crontab::rss_fetch: Received {} RSS feed entries".format(len(infos)))
        for info in infos:
            try:
                RSS.objects.get(title=info['title'])
            except RSS.DoesNotExist:
                RSS.objects.create(
                    title=info['title'],
                    date=timezone.make_aware(datetime.datetime.strptime(info['timestamp'], "%d/%m/%Y %H:%M:%S")),
                    level=info['level'],
                    content=info["content"]
                )
    except Exception as e:
        logger.error("Crontab::rss_fetch: {}".format(e), exc_info=1)
        raise
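# For reference, a feed entry shape that rss_fetch() can ingest, inferred
# from the keys and the "%d/%m/%Y %H:%M:%S" timestamp format used above
# (values are illustrative, not real feed content):
#
#     {
#         "title": "Example advisory",
#         "timestamp": "01/01/2020 12:00:00",
#         "level": "info",
#         "content": "..."
#     }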
def netdata_reload(request, api=False):
    if not request.is_ajax() and not api:
        return HttpResponseBadRequest()

    try:
        res = Cluster.api_request("services.netdata.netdata.reload_conf")
        if not res.get('status'):
            res['error'] = res.pop('message', "")
            status = 500
        else:
            status = 202
            if not res.get('message'):
                res['message'] = _("Reloading service. Please wait a moment...")
        return JsonResponse(res, status=status)
    except Exception as e:
        # If the API request fails, bring up the error
        logger.error("Error trying to reload netdata config: '{}'".format(str(e)))
        return JsonResponse({
            'status': False,
            'error': "API request failure: {}".format(str(e)),
            'error_details': str.join('', format_exception(*exc_info()))
        }, status=500)
def check_internal_tasks():
    try:
        # Run this crontab only on the master node
        node = Cluster.get_current_node()
        if not node.is_master_mongo:
            return

        # Delete internal tasks marked "done" that are older than a month
        last_month_date = make_aware(datetime.datetime.now() - datetime.timedelta(days=30))
        MessageQueue.objects.filter(status="done", date_add__lte=last_month_date).delete()

        # Check whether a node has failed to execute its tasks for a while.
        # If so, remove it from the cluster.
        message_queue_not_finished = MessageQueue.objects.filter(date_add__lt=last_month_date, status="new")

        node_to_remove = []
        for message in message_queue_not_finished:
            if message.node not in node_to_remove:
                node_to_remove.append(message.node)
            message.delete()

        for n in node_to_remove:
            logger.info('[REMOVING DEAD NODE FROM CLUSTER] Node: {}'.format(n.name))
            c = MongoBase()
            c.connect_primary()
            c.repl_remove(n.name + ":9091")
    except Exception as e:
        logger.error("Crontab::check_internal_tasks: {}".format(e), exc_info=1)
        raise
def inner(cls_or_request, *args, **kwargs):
    request = None
    if not isinstance(cls_or_request, HttpRequest):
        if not isinstance(args[0], HttpRequest):
            logger.error("API call without request object: {} and {}".format(cls_or_request, request))
            return HttpResponseForbidden()
        else:
            request = args[0]
    else:
        request = cls_or_request

    global_config = Cluster().get_global_config()
    api_key = request.META.get("HTTP_" + key_name.upper())
    if getattr(global_config, key_name.replace('-', '_')) and \
            getattr(global_config, key_name.replace('-', '_')) == api_key:
        return func(request, *args, **kwargs)

    logger.error('API call without valid API key. Method (%s): %s',
                 request.method, request.path,
                 extra={'status_code': 405, 'request': request})
    return HttpResponseForbidden()
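# A minimal sketch of the decorator factory that `inner` is the body of,
# reconstructed from the closure variables it uses (`func` and `key_name`).
# The factory and decorator names below are assumptions, not the project's:
#
#     from functools import wraps
#
#     def api_need_key(key_name):
#         def decorator(func):
#             @wraps(func)
#             def inner(cls_or_request, *args, **kwargs):
#                 ...  # body above
#             return inner
#         return decorator
#
# With key_name = "cluster_api_key" (hypothetical), a caller would send the
# header "Cluster-Api-Key: <key>", which Django exposes as
# request.META["HTTP_CLUSTER_API_KEY"], matching the lookup above.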
def netif_refresh(request):
    """ Read the system configuration and refresh the NIC list (API version)
    :param request: Django request
    :return: JsonResponse with the request status
    """
    if request.method != "POST":
        return JsonResponse({'status': False, 'message': 'waiting for POST request'})

    try:
        Cluster.api_request('toolkit.network.network.refresh_nic')
    except Exception as e:
        return JsonResponse({'error': str(e)}, status=500)

    return JsonResponse({'status': True})
def configure_pstats(node_logger):
    """ Pstats configuration """
    node = Cluster.get_current_node()
    jinja2_env = Environment(loader=FileSystemLoader(JINJA_PATH))
    pstats_template = jinja2_env.get_template("pstats.conf")
    write_conf(node_logger, [
        "{}/pstats.conf".format(RSYSLOG_PATH),
        pstats_template.render({'node': node}),
        RSYSLOG_OWNER, RSYSLOG_PERMS
    ])
    return "Rsyslog configuration 'pstats.conf' written.\n"
def update_crl():
    """ Update the internal Vulture CRLs """
    if Cluster.get_current_node().is_master_mongo:
        for cert in X509Certificate.objects.filter(status='V'):
            cert.gen_crl()
    return True
def save_keytab(self):
    """ Write the keytab on the host so it can be used
    This function raises VultureSystemSaveError on failure
    """
    # API request
    api_res = Cluster.api_request('toolkit.auth.kerberos_client.write_keytabs')
    if not api_res.get('status'):
        raise VultureSystemSaveError("keytab. API request failure ",
                                     traceback=api_res.get('message'))
def cluster_add(request):
    slave_ip = request.POST.get('slave_ip')
    slave_name = request.POST.get('slave_name')

    # FIXME: improve security check (valid IPv4 / IPv6 and valid name)
    if not slave_name or not slave_ip:
        return JsonResponse({'status': False, 'message': 'Invalid call'})

    # Make the slave_name resolvable
    node = Cluster.get_current_node()
    node.api_request("toolkit.network.network.make_hostname_resolvable", (slave_name, slave_ip))

    # Now the slave should be in the cluster: add its management IP
    node = Node()
    node.name = slave_name
    node.management_ip = slave_ip
    node.internet_ip = slave_ip
    node.save()

    # We need to wait for the VultureD daemon to reload the PF conf
    time.sleep(6)

    # Add the NEW node into the REPLICASET, as a pending member
    c = MongoBase()
    c.connect()
    cpt = 0
    response = None
    while not response:
        try:
            logger.debug("Adding {} to replicaset".format(slave_name))
            response = c.repl_add(slave_name + ':9091')
        except Exception as e:
            logger.error("Cannot connect to slave for the moment: {}".format(e))
            cpt += 1
            if cpt > 10:
                logger.error("Failed to connect to the slave 10 times, aborting.")
                return JsonResponse({'status': False, 'message': 'Error during repl_add. Check logs'})
            logger.info("Waiting for next connection to slave ...")
            time.sleep(1)

    node.api_request('toolkit.network.network.refresh_nic')
    return JsonResponse({'status': True, 'message': 'ok'})
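# Illustrative usage (only the POST field names 'slave_name' and 'slave_ip'
# come from the view above; the URL path and values are assumptions):
#
#     from django.test import Client
#     Client().post('/cluster/add', {'slave_name': 'node2.example.local',
#                                    'slave_ip': '192.168.1.2'})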
def configure_node(node_logger):
    """ Generate and write the rsyslog conf files for this node """
    result = ""

    node = Cluster.get_current_node()
    global_config = Cluster.get_global_config()

    # For each Jinja template ...
    jinja2_env = Environment(loader=FileSystemLoader(JINJA_PATH))
    for template_name in jinja2_env.list_templates():
        # ... process only the "rsyslog_template_*.conf" templates
        match = re_search(r"^rsyslog_template_([^\.]+)\.conf$", template_name)
        if not match:
            continue

        template = jinja2_env.get_template(template_name)
        template_path = "{}/05-tpl-01-{}.conf".format(RSYSLOG_PATH, match.group(1))

        # Generate and write the conf depending on all nodes, and on the current node
        write_conf(node_logger, [
            template_path,
            template.render({'node': node, 'global_config': global_config}),
            RSYSLOG_OWNER, RSYSLOG_PERMS
        ])
        result += "Rsyslog template '{}' written.\n".format(template_path)

    # PF configuration for rsyslog
    pf_template = jinja2_env.get_template("pf.conf")
    write_conf(node_logger, [
        "{}/pf.conf".format(RSYSLOG_PATH),
        pf_template.render({'mongodb_uri': MongoBase.get_replicaset_uri()}),
        RSYSLOG_OWNER, RSYSLOG_PERMS
    ])
    result += "Rsyslog template 'pf.conf' written.\n"

    # If this method has been called, there is a reason: a Node has been
    # modified, so we need to restart rsyslog because at least the PF conf
    # has changed.
    # if Frontend.objects.filter(enable_logging=True).count() > 0:
    #     node_logger.debug("Logging enabled, reload of Rsyslog needed.")
    restart_service(node_logger)
    node_logger.info("Rsyslog service restarted.")
    result += "Rsyslogd service restarted."

    return result
def refresh_nic(logger):
    """ Used by API calls to update mongodb with the new system NICs / addresses
    :param logger: node logger instance
    :return: the result of the node's NIC synchronization
    """
    from system.cluster.models import Cluster
    node = Cluster.get_current_node()
    return node.synchronizeNICs()
def delete_conf(self):
    for error_code in [400, 403, 405, 408, 425, 429, 500, 502, 503, 504]:
        api_res = Cluster.api_request("system.config.models.delete_conf",
                                      self.get_base_filename(error_code))
        # If error, return it
        if not api_res.get('status'):
            return api_res
    # Return True if no API error
    return {'status': True}
def to_template(self):
    """ Dictionary used to create the configuration file
    :return: Dictionary of configuration parameters
    """
    return {
        'nodes': Node.objects.exclude(name=get_hostname()),
        'global_config': Cluster.get_global_config(),
        'jail_addresses': JAIL_ADDRESSES,
        'databases_path': DATABASES_PATH,
        'defender_policy_list': DefenderPolicy.objects.all(),
        'proxy': get_sanitized_proxy()
    }
def reload_conf(self):
    """ Write new PF configuration, if needed
    :return: True / False
    """
    conf_reloaded = super().reload_conf()

    config_model = Cluster.get_global_config()

    # Check that the firehol and vulture netsets exist; create empty ones otherwise
    for netset in ("firehol_level1.netset", "vulture-v4.netset", "vulture-v6.netset"):
        filepath = "{}/{}".format(DATABASES_PATH, netset)
        if not os_path.isfile(filepath):
            write_conf(logger, [filepath, "", DATABASES_OWNER, DATABASES_PERMS])

    # Check if the whitelist and blacklist have changed
    wl_bl = {
        'pf.whitelist.conf': config_model.pf_whitelist,
        'pf.blacklist.conf': config_model.pf_blacklist,
    }

    for filename, liste in wl_bl.items():
        file_path = '{}{}'.format(PF_PATH, filename)
        config = "\n".join(liste.split(','))
        md5_config = md5(config.encode('utf-8')).hexdigest().strip()
        md5sum = ""

        try:
            result = check_output(['/sbin/md5', file_path], stderr=PIPE).decode('utf8')
            md5sum = result.strip().split('= ')[1]
        except CalledProcessError as e:
            stderr = e.stderr.decode('utf8')
            logger.error("Failed to md5sum file '{}': {}".format(filename, stderr))

        # If the hashes differ (or the file could not be read, e.g. because
        # of bad permissions), rewrite the file with the expected content
        # and the correct ownership/permissions
        if md5_config != md5sum:
            conf_reloaded = True
            logger.info('Packet Filter {} needs to be rewritten'.format(filename))
            write_conf(logger, [file_path, config, PF_OWNERS, PF_PERMS])

    return conf_reloaded
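# Note on the md5 comparison in reload_conf(): FreeBSD's /sbin/md5 prints
# "MD5 (<file>) = <digest>", hence result.strip().split('= ')[1] to recover
# the digest. A pure-Python equivalent (sketch, bypassing the external tool):
#
#     from hashlib import md5
#     with open(file_path, 'rb') as f:
#         md5sum = md5(f.read()).hexdigest()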
def write_conf(self):
    api_res = {'status': True}
    for error_code in [400, 403, 405, 408, 425, 429, 500, 502, 503, 504]:
        mode = getattr(self, "error_{}_mode".format(error_code))
        if mode == "display":
            api_res = Cluster.api_request("system.config.models.write_conf", [
                self.get_filename(error_code),
                getattr(self, "error_{}_html".format(error_code)),
                TEMPLATE_OWNER, TEMPLATE_PERMS
            ])
    # Return the last API request result
    return api_res