def save_conf(self):
    params = [self.get_filename(), self.rulebase, RSYSLOG_OWNER, RSYSLOG_PERMS]
    try:
        Cluster.api_request('system.config.models.write_conf', config=params)
    except Exception as e:
        # Log the original error before raising a config error
        logger.error(e, exc_info=1)
        raise VultureSystemConfigError("on cluster.\nRequest failure to write_conf()")
def acme_update():
    """ Run acme.sh to automatically renew Let's Encrypt certificates """
    subprocess.check_output([
        "/usr/local/sbin/acme.sh", "--cron",
        "--home", "/var/db/acme/.acme.sh"
    ])
    # Now update the certificate database
    need_restart = False
    for cert in X509Certificate.objects.filter(is_vulture_ca=False, is_external=True):
        tmp_crt = X509.load_cert_string(cert.cert)
        cn = str(tmp_crt.get_subject()).replace("/CN=", "")
        cert_path = "/var/db/acme/.acme.sh/{}/{}.cer".format(cn, cn)
        if os.path.isfile(cert_path):
            with open(cert_path) as file_cert:
                cert.cert = file_cert.read()
            cert.save()
            # Update cert on cluster
            cert.save_conf()
            need_restart = True

    if need_restart:
        Cluster.api_request("services.haproxy.haproxy.reload_service")
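# acme_update() is meant to run periodically. A minimal scheduling sketch,
# assuming APScheduler is available (the project may well use its own crontab
# mechanism instead -- this is only an illustration):
#
#     from apscheduler.schedulers.background import BackgroundScheduler
#     scheduler = BackgroundScheduler()
#     scheduler.add_job(acme_update, 'cron', hour=3)  # attempt renewal daily at 03:00
#     scheduler.start()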
def delete(self, delete=True):
    """ Delete file on disk on all nodes """
    from system.cluster.models import Cluster
    if delete:
        Cluster.api_request("system.config.models.delete_conf", self.absolute_filename)
    super().delete()
def post(self, request, object_id, **kwargs):
    confirm = request.POST.get('confirm')
    if confirm == 'yes':
        try:
            obj_inst = self.obj.objects.get(pk=object_id)
        except ObjectDoesNotExist:
            return HttpResponseForbidden('Injection detected.')

        logger.info("Deleting filter policy configuration files associated with Darwin policy...")
        filter_confs = [filter_policy.conf_path for filter_policy in obj_inst.filterpolicy_set.all()]
        try:
            obj_inst.delete()
            for filter_policy_conf in filter_confs:
                Cluster.api_request("services.darwin.darwin.delete_policy_conf", filter_policy_conf)
            Cluster.api_request("services.darwin.darwin.build_conf")
        except ProtectedError as e:
            logger.error("Policy is still used, cannot remove it: {}".format(e))
    return HttpResponseRedirect(self.redirect_url)
def defender_policy_edit(request, object_id=None):
    policy = None
    if object_id:
        try:
            policy = DefenderPolicy.objects.get(pk=object_id)
        except ObjectDoesNotExist:
            return HttpResponseForbidden("Injection detected")

    form = DefenderPolicyForm(request.POST or None, instance=policy, error_class=DivErrorList)

    if request.method == "POST" and form.is_valid():
        # Save the form to get an id if there is not already one
        policy = form.save(commit=False)
        # Save the policy before writing its configuration
        policy.save()
        if not object_id:
            # New object: write the backend defender config
            Cluster.api_request("darwin.defender_policy.policy.write_defender_backend_conf", policy.id)
        Cluster.api_request("darwin.defender_policy.policy.write_defender_conf", policy.id)
        # If everything succeeded, redirect to the list view
        return HttpResponseRedirect('/darwin/defender_policy/')

    return render(request, 'defender_policy_edit.html', {'form': form})
def post(self, request, object_id, **kwargs):
    confirm = request.POST.get('confirm')
    if confirm == 'yes':
        DefenderPolicy.objects.get(pk=object_id).delete()
        Cluster.api_request("darwin.defender_policy.policy.delete_defender_conf", object_id)
    return HttpResponseRedirect(self.redirect_url)
def netif_refresh(request):
    """ Read system configuration and refresh the NIC list
    :param request: Django request
    :return: Redirect to the netif list view
    """
    Cluster.api_request('toolkit.network.network.refresh_nic')
    return HttpResponseRedirect('/system/netif/')
def _create_or_update_filters(policy, filters_list):
    new_filters = []
    bufferings = []
    current_filters = FilterPolicy.objects.filter(policy_id=policy.pk)

    for filt in filters_list:
        try:
            filt['policy'] = policy
            filt['filter_type'] = DarwinFilter.objects.get(id=filt.get('filter_type', 0))
        except DarwinFilter.DoesNotExist:
            logger.error("Error while creating/updating filter for Darwin policy: "
                         f"DarwinFilter id '{filt.get('filter_type', 0)}' does not exist")
            return f"unknown filter type {filt.get('filter_type', 0)}"

        buffering_opts = filt.pop('buffering', None)
        try:
            filt_id = filt.pop('id', 0)
            if filt_id != 0:
                filter_instance, _ = FilterPolicy.objects.update_or_create(id=filt_id, defaults=filt)
            else:
                filter_instance = FilterPolicy(**filt)
                filter_instance.status = {node.name: "STARTING" for node in Node.objects.all().only('name')}
                filter_instance.full_clean()
            new_filters.append(filter_instance)
            if buffering_opts:
                bufferings.append((filter_instance, buffering_opts))
        except (ValidationError, ValueError, TypeError) as e:
            logger.error(str(e), exc_info=1)
            return str(e)

    for filter_instance in new_filters:
        filter_instance.save()

    # Remove filters that are no longer part of the policy
    filters_delete = set(current_filters) - set(new_filters)
    for filter_delete in filters_delete:
        Cluster.api_request("services.darwin.darwin.delete_filter_conf", filter_delete.conf_path)
        filter_delete.delete()

    try:
        for filter_instance, buffering_opts in bufferings:
            DarwinBuffering.objects.update_or_create(
                destination_filter=filter_instance,
                defaults={
                    'interval': buffering_opts.get('interval'),
                    'required_log_lines': buffering_opts.get('required_log_lines'),
                }
            )
    except Exception as e:
        logger.error(e, exc_info=1)
        return 'error while creating darwin buffering: {}'.format(e)

    return ""
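# A sketch of the `filters_list` payload _create_or_update_filters() expects,
# inferred from the keys read above (field values are illustrative assumptions,
# not taken from the source):
#
#     example_filters_list = [{
#         "id": 0,                 # 0 or absent => create a new FilterPolicy
#         "filter_type": 1,        # pk of an existing DarwinFilter
#         "enabled": True,         # remaining keys become FilterPolicy fields
#         "buffering": {"interval": 300, "required_log_lines": 10},
#     }]
#     error = _create_or_update_filters(policy, example_filters_list)
#     if error:
#         logger.error(error)  # a non-empty string means the update failed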
def save_conf(self): """ Write configuration on disk """ if not self.configuration: return params = [self.get_filename(), self.configuration, BACKEND_OWNER, BACKEND_PERMS] try: Cluster.api_request('system.config.models.write_conf', config=params) except Exception as e: # e used by VultureSystemConfigError logger.error(e, exc_info=1) raise VultureSystemConfigError("on cluster.\nRequest failure to write_conf()")
def netdata_edit(request, api=False, update=False):
    """ Netdata view: allow to edit the history settings of the netdata config

    Args:
        request (Django Request)
        api: Is this a Django API call

    Returns:
        TYPE: Django view
    """
    service = NetdataService()
    status = {}
    for node in Node.objects.all():
        try:
            status[node.name] = service.last_status()[0]
        except AttributeError:
            status[node.name] = "Error"

    try:
        netdata_model = NetdataSettings.objects.get()
    except NetdataSettings.DoesNotExist:
        netdata_model = NetdataSettings()

    if hasattr(request, "JSON") and api:
        if update:
            request.JSON = {**netdata_model.to_dict(), **request.JSON}
        netdata_form = NetdataForm(request.JSON or None, instance=netdata_model, error_class=DivErrorList)
    else:
        netdata_form = NetdataForm(request.POST or None, instance=netdata_model, error_class=DivErrorList)

    if request.method in ("POST", "PUT", "PATCH"):
        if netdata_form.is_valid():
            netdata_model = netdata_form.save(commit=False)
            netdata_model.save()
            Cluster.api_request(action="services.netdata.netdata.reload_conf")
        elif api:
            return JsonResponse(netdata_form.errors.get_json_data(), status=400)

    if api:
        return build_response_netdata("services.netdata.api", COMMAND_LIST)

    return render(request, 'services/netdata.html', {
        'netdata_form': netdata_form,
        'status': status
    })
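# Partial-update sketch for the API path above: with update=True the JSON body
# is overlaid on the current settings before validation, so a PATCH may carry
# only the changed fields (the `history` field name is an assumption used for
# illustration):
#
#     request.JSON = {"history": 7200}
#     response = netdata_edit(request, api=True, update=True)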
def delete(self, request, object_id=None):
    try:
        if object_id:
            try:
                policy = DarwinPolicy.objects.get(pk=object_id)
            except DarwinPolicy.DoesNotExist:
                return JsonResponse({'error': _('Object does not exist')}, status=404)

            filter_conf_paths = [obj.conf_path for obj in policy.filterpolicy_set.all()]
            # Capture related frontends before deleting the policy
            frontends = list(policy.frontend_set.all())

            try:
                policy.delete()
            except ProtectedError as e:
                logger.error("Error trying to delete Darwin policy '{}': policy is still being used".format(policy.name))
                logger.exception(e)
                return JsonResponse({
                    "error": _("Object is still used with the following objects: {}").format(
                        [str(obj) for obj in e.protected_objects])
                }, status=409)

            for filter_conf_path in filter_conf_paths:
                Cluster.api_request("services.darwin.darwin.delete_filter_conf", filter_conf_path)

            for frontend in frontends:
                for node in frontend.get_nodes():
                    node.api_request("services.rsyslogd.rsyslog.build_conf", frontend.pk)

            Cluster.api_request("services.darwin.darwin.reload_conf")
        else:
            return JsonResponse({"error": _("You must provide an id")}, status=400)

        return JsonResponse({'status': True}, status=200)
    except Exception as e:
        logger.critical(e, exc_info=1)
        error = _("An error has occurred")
        if settings.DEV_MODE:
            error = str(e)
        return JsonResponse({'error': error}, status=500)
def save_policy_file(self):
    params = [self.get_full_filename(), self.generate_content(), DARWIN_OWNERS, DARWIN_PERMS]
    try:
        logger.debug("InspectionPolicy::save_policy_file:: calling api to save inspection policy file")
        Cluster.api_request('system.config.models.write_conf', config=params)
    except Exception as e:
        logger.error(e, exc_info=1)
        raise VultureSystemConfigError("InspectionPolicy::save_policy_file:: failure to save inspection policy file")
def save_conf(self): """ Write configuration on disk """ params = [ self.absolute_filename, self.download_file(), DATABASES_OWNER, DATABASES_PERMS ] try: Cluster.api_request('system.config.models.write_conf', config=params) except Exception as e: # e used by VultureSystemConfigError raise VultureSystemConfigError( "on cluster.\n" "Request failure to write conf of Reputation context '{}'". format(self.name))
def save_portal_template(data, instance):
    form = PortalTemplateForm(data, instance=instance)
    if not form.is_valid():
        return JsonResponse({"error": form.errors}, status=400)

    obj = form.save()
    # Reload templates for all portals that use this template
    for portal in obj.userauthentication_set.all().only('pk'):
        Cluster.api_request("authentication.user_portal.api.write_templates", portal.id)

    return JsonResponse({
        "message": _("Portal Template saved"),
        "object": obj.to_dict()
    })
def netdata_reload(request, api=False):
    if not request.is_ajax() and not api:
        return HttpResponseBadRequest()
    try:
        res = Cluster.api_request("services.netdata.netdata.reload_conf")
        if not res.get('status'):
            res['error'] = res.pop('message', "")
            status = 500
        else:
            status = 202
            if not res.get('message'):
                res['message'] = _("Reloading service. Please wait a moment...")
        return JsonResponse(res, status=status)
    except Exception as e:
        # If the API request fails, bring up the error
        logger.error("Error trying to reload netdata config: '{}'".format(str(e)))
        return JsonResponse({
            'status': False,
            'error': "API request failure: {}".format(str(e)),
            'error_details': str.join('', format_exception(*exc_info()))
        }, status=500)
def netif_refresh(request):
    """ Read system configuration and refresh the NIC list
    :param request: Django request
    :return: JsonResponse with the operation status
    """
    if request.method != "POST":
        return JsonResponse({
            'status': False,
            'message': 'waiting for POST request'
        })
    try:
        Cluster.api_request('toolkit.network.network.refresh_nic')
    except Exception as e:
        return JsonResponse({'error': str(e)}, status=500)
    return JsonResponse({'status': True})
def save_keytab(self):
    """ Write the keytab on the host so it can be used
    This function raises VultureSystemSaveError on failure
    """
    # API request
    api_res = Cluster.api_request('toolkit.auth.kerberos_client.write_keytabs')
    if not api_res.get('status'):
        raise VultureSystemSaveError("keytab. API request failure ",
                                     traceback=api_res.get('message'))
def delete_conf(self):
    for error_code in [400, 403, 405, 408, 425, 429, 500, 502, 503, 504]:
        api_res = Cluster.api_request("system.config.models.delete_conf",
                                      self.get_base_filename(error_code))
        if not api_res.get('status'):
            # If an error occurred, return it immediately
            return api_res
    # No API error occurred
    return {'status': True}
def write_conf(self):
    api_res = {'status': True}
    for error_code in [400, 403, 405, 408, 425, 429, 500, 502, 503, 504]:
        mode = getattr(self, "error_{}_mode".format(error_code))
        if mode == "display":
            api_res = Cluster.api_request("system.config.models.write_conf", [
                self.get_filename(error_code),
                getattr(self, "error_{}_html".format(error_code)),
                TEMPLATE_OWNER,
                TEMPLATE_PERMS
            ])
    # Return the last API request result
    return api_res
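# Usage sketch for the pair above: only codes whose mode is "display" get a
# custom page written; delete_conf() later removes every generated file.
# (The template instance and HTML value are illustrative assumptions.)
#
#     template.error_403_mode = "display"
#     template.error_403_html = "<html><body><h1>Forbidden</h1></body></html>"
#     template.write_conf()   # pushes the custom 403 page to every node
#     template.delete_conf()  # removes all error pages for this template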
def delete_conf(self): """ Delete all format of the current certificate :return True if success raise VultureSystemConfigError if failure """ # Firstly try to delete the conf, if it fails the object will not be deleted extensions = self.get_extensions() for extension in extensions.keys(): api_res = Cluster.api_request("system.config.models.delete_conf", self.get_base_filename() + extension) if not api_res.get('status'): raise VultureSystemConfigError( ". API request failure.", traceback=api_res.get('message')) return True
def save_defender_raw_rule_set(request, object_id):
    try:
        if object_id is None:
            return JsonResponse({'status': False, 'error': 'Missing Mod Defender ruleset ID'})
        try:
            defender_ruleset = DefenderRuleset.objects.get(pk=object_id)
        except DefenderRuleset.DoesNotExist:
            error_message = 'Mod Defender ruleset with ID "{}" not found'.format(object_id)
            logger.error(error_message)
            return JsonResponse({'status': False, 'error': error_message}, status=404)

        # If the user wants to get the list of raw rules (via the GET HTTP method)
        if request.method == 'GET':
            return JsonResponse({'status': True, 'raw_rules': defender_ruleset.raw_rules}, status=200)

        # Otherwise, save the raw rules that were sent
        try:
            raw_rules = request.POST['raw_rules']
        except KeyError:
            return JsonResponse({'status': False, 'error': 'Missing raw rules'})

        defender_ruleset.raw_rules = raw_rules
        defender_ruleset.save()

        policy_list = DefenderPolicy.objects.filter(defender_ruleset=defender_ruleset)
        for policy in policy_list:
            Cluster.api_request('darwin.defender_policy.policy.write_defender_conf', policy.pk)

        return JsonResponse({'status': True, 'message': 'Mod Defender ruleset correctly updated'})
    except Exception as error:
        logger.exception(error)
        return JsonResponse({'status': False, 'error': str(error)})
def save_conf(self): """ Write cert as all formats currently supported This function raise VultureSystemConfigError if failure """ extensions = self.get_extensions() # Retrieve and stock variable to improve loop perf base_filename = self.get_base_filename() """ For each extensions to be written """ for extension, buffer in extensions.items(): params = [ base_filename + extension, buffer, CERT_OWNER, CERT_PERMS ] """ API request """ api_res = Cluster.api_request('system.config.models.write_conf', config=params, internal=True) if not api_res.get('status'): raise VultureSystemConfigError( ". API request failure ", traceback=api_res.get('message'))
def save_conf(self): """ :return A message of what has been done """ if not self.authentication: return "No authentication activated, no need to write portal conf." params = [ self.get_filename(), self.generate_conf(), WORKFLOW_OWNER, WORKFLOW_PERMS ] try: api_res = Cluster.api_request("system.config.models.write_conf", config=params) if not api_res.get('status'): raise VultureSystemConfigError( ". API request failure ", traceback=api_res.get('message')) except Exception: raise VultureSystemConfigError("API request failure.") return "Workflow configuration written."
def delete_conf(self):
    Cluster.api_request("system.config.models.delete_conf", self.get_filename())
def submit_defender_wl(request):
    try:
        logger.debug("A new Mod Defender whitelist will be saved")
        if not request.is_ajax():
            return HttpResponseBadRequest()

        try:
            rules = json.loads(request.POST['rules'])
            assert isinstance(rules, list)
        except KeyError:
            return JsonResponse({'status': False, 'error': _('Missing rules parameter')}, status=400)
        except AssertionError:
            return JsonResponse({'status': False, 'error': _('Rules parameter must be a list')}, status=400)

        if len(rules) <= 0:
            return JsonResponse({'status': False, 'error': _('Rule list given is empty')}, status=400)

        try:
            save_type = request.POST['save_type']
            save_type_list = ['create', 'edit', 'replace']
            assert isinstance(save_type, str)
            if save_type not in save_type_list:
                return JsonResponse({
                    'status': False,
                    'error': _('The save_type has to be one of the following:') + " {}".format(', '.join(save_type_list))
                }, status=400)
        except KeyError:
            return JsonResponse({'status': False, 'error': _('You must provide a save type')}, status=400)
        except AssertionError:
            return JsonResponse({'status': False, 'error': _('The save type has to be a string')}, status=400)

        if save_type == "create":
            try:
                name = request.POST['name']
                assert isinstance(name, str)
            except KeyError:
                return JsonResponse({'status': False, 'error': _('You must provide a name for the ruleset')}, status=400)
            except AssertionError:
                return JsonResponse({'status': False, 'error': _('Ruleset name has to be a string')}, status=400)

            name = name.replace(' ', '_')
            existing_rule_set_number = DefenderRuleset.objects.filter(name=name).count()
            if existing_rule_set_number > 0:
                return JsonResponse({
                    'status': False,
                    'error': 'A ruleset with the name "{}" already exists'.format(name)
                }, status=400)

            ruleset_obj = DefenderRuleset(name=name)
            ruleset_obj.raw_rules = ""
        else:
            try:
                ruleset_id = int(request.POST['ruleset_id'])
            except KeyError:
                return JsonResponse({'status': False, 'error': _('You must provide an ID for the existing ruleset')}, status=400)
            except ValueError:
                return JsonResponse({'status': False, 'error': _('The ruleset ID has to be an integer')}, status=400)

            try:
                ruleset_obj = DefenderRuleset.objects.get(pk=ruleset_id)
            except DefenderRuleset.DoesNotExist:
                return JsonResponse({
                    'status': False,
                    'error': 'The ruleset with the provided ID ({id}) does not exist'.format(id=ruleset_id)
                }, status=400)

            if save_type == "replace":
                logger.debug('Deleting existing rules for ruleset with ID {id}'.format(id=ruleset_obj.pk))
                for rule in ruleset_obj.rules.all():
                    ruleset_obj.rules.remove(rule)
                    rule.delete()
                ruleset_obj.raw_rules = ""

        for rule in rules:
            logger.debug("Processing rule {}".format(rule))
            try:
                new_rule = DefenderRule.objects.create(
                    zone=rule['zone'],
                    ids=rule['ids'],
                    key=rule['key'],
                    value=rule['value'],
                    url=rule['url'],
                    matched_type=rule['matched_type'],
                )
                logger.debug('New rule created: {}'.format(new_rule.to_dict()))
            except KeyError as error:
                error_message = 'Missing key for rule "{}" : {}'.format(rule, error)
                logger.error(error_message)
                return JsonResponse({'status': False, 'error': error_message}, status=400)

            ruleset_obj.rules.add(new_rule)
            ruleset_obj.raw_rules += '{}\n'.format(new_rule.generate_rule())

        ruleset_obj.save()
        logger.debug('New set created: {}'.format(ruleset_obj.to_dict()))

        if save_type in ['edit', 'replace']:
            logger.info('Reloading configuration for existing ruleset with ID {id}'.format(id=ruleset_obj.pk))
            defender_policy_list = DefenderPolicy.objects.filter(defender_ruleset=ruleset_obj)
            for defender_policy in defender_policy_list:
                Cluster.api_request("darwin.defender_policy.policy.write_defender_conf", defender_policy.id)

        return JsonResponse({'status': True, 'message': _('WAF ruleset saved')}, status=201)
    except Exception as error:
        logger.exception(error)
        return JsonResponse({'status': False, 'error': "Unknown error: " + str(error)}, status=500)
def reputation_ctx_delete(request, object_id, api=False):
    """ Delete a ReputationContext and its related configuration """
    error = ""
    try:
        reputation_ctx = ReputationContext.objects.get(pk=object_id)
    except ObjectDoesNotExist:
        if api:
            return JsonResponse({'error': _("Object does not exist.")}, status=404)
        return HttpResponseForbidden("Injection detected")

    if ((request.method == "POST" and request.POST.get('confirm', "").lower() == "yes")
            or (api and request.method == "DELETE" and request.JSON.get('confirm', "").lower() == "yes")):
        # Save the reputation_ctx filename before deleting the object
        ctx_filename = reputation_ctx.get_filename()
        try:
            # Delete the file
            Cluster.api_request("system.config.models.delete_conf", ctx_filename)
            # Disable the reputation context in frontends, to reload the rsyslog conf
            for frontendreputationcontext in reputation_ctx.frontendreputationcontext_set.all():
                frontendreputationcontext.enabled = False
                frontendreputationcontext.save()
                logger.info("Reputation '{}' disabled.".format(reputation_ctx.name))
            # And reload the frontends' conf
            reputation_ctx.reload_frontend_conf()
            # If everything went fine, delete the object
            reputation_ctx.delete()
            logger.info("Reputation '{}' deleted.".format(ctx_filename))

            if api:
                return JsonResponse({'status': True}, status=204)
            return HttpResponseRedirect(reverse('applications.reputation_ctx.list'))
        except Exception as e:
            # If the API request fails, bring up the error
            logger.error("Error trying to delete reputation_ctx '{}': API|Database failure. "
                         "Details:".format(reputation_ctx.name))
            logger.exception(e)
            error = "API or Database request failure: {}".format(str(e))

            if api:
                return JsonResponse({'status': False, 'error': error}, status=500)

    if api:
        return JsonResponse({'error': _("Please confirm with confirm=yes in JSON body.")}, status=400)

    # If GET request, or POST request with API/delete failure
    return render(request, 'generic_delete.html', {
        'menu_name': _("Applications -> Context Tags -> Delete"),
        'redirect_url': reverse("applications.reputation_ctx.list"),
        'delete_url': reverse("applications.reputation_ctx.delete", kwargs={'object_id': reputation_ctx.id}),
        'obj_inst': reputation_ctx,
        'error': error,
        'used_by': reputation_ctx.frontend_set.all(),
    })
def backend_delete(request, object_id, api=False):
    """ Delete a Backend and its related Listeners """
    error = ""
    try:
        backend = Backend.objects.get(pk=object_id)
    except ObjectDoesNotExist:
        if api:
            return JsonResponse({'error': _("Object does not exist.")}, status=404)
        return HttpResponseForbidden("Injection detected")

    if ((request.method == "POST" and request.POST.get('confirm', "").lower() == "yes")
            or (api and request.method == "DELETE" and request.JSON.get('confirm', "").lower() == "yes")):
        # Save the backend filename before deleting the object
        backend_filename = backend.get_base_filename()
        try:
            # If POST request and no error: delete the backend
            backend.delete()
            # API request to delete the backend configuration file
            Cluster.api_request('services.haproxy.haproxy.delete_conf', backend_filename)
            # And reload the HAProxy service
            Cluster.api_request('services.haproxy.haproxy.reload_service')

            if api:
                return JsonResponse({'status': True}, status=204)
            return HttpResponseRedirect(reverse('applications.backend.list'))
        except ProtectedError as e:
            logger.error("Error trying to delete Backend '{}': Object is currently used:".format(backend.name))
            logger.exception(e)
            error = "Object is currently used by a Workflow, cannot be deleted"
        except Exception as e:
            # If the API request fails, bring up the error
            logger.error("Error trying to delete backend '{}': API|Database failure. Details:".format(backend.name))
            logger.exception(e)
            error = "API or Database request failure: {}".format(str(e))

        if api:
            return JsonResponse({'status': False, 'error': error}, status=500)

    if api:
        return JsonResponse({'error': _("Please confirm with confirm=yes in JSON body.")}, status=400)

    # If GET request, or POST request with API/delete failure
    return render(request, 'generic_delete.html', {
        'menu_name': _("Applications -> Applications -> Delete"),
        'redirect_url': reverse("applications.backend.list"),
        'delete_url': reverse("applications.backend.delete", kwargs={'object_id': backend.id}),
        'obj_inst': backend,
        'error': error,
    })
def backend_edit(request, object_id=None, api=False):
    backend = None
    server_form_list = []
    header_form_list = []
    httpchk_header_form_list = []
    api_errors = []

    if object_id:
        try:
            backend = Backend.objects.get(pk=object_id)
        except ObjectDoesNotExist:
            if api:
                return JsonResponse({'error': _("Object does not exist.")}, status=404)
            return HttpResponseForbidden("Injection detected")

    # Create the form with the object if it exists, and request.POST (or JSON) if it exists
    if hasattr(request, "JSON") and api:
        form = BackendForm(request.JSON or None, instance=backend, error_class=DivErrorList)
    else:
        form = BackendForm(request.POST or None, instance=backend, error_class=DivErrorList)

    def render_form(back, **kwargs):
        save_error = kwargs.get('save_error')
        if api:
            if len(api_errors) > 0 or form.errors:
                api_errors.append(form.errors.as_json())
                return JsonResponse({"errors": api_errors}, status=400)
            if save_error:
                return JsonResponse({'error': save_error[0]}, status=500)

        available_sockets = get_darwin_sockets()
        if not server_form_list and back:
            for l_tmp in back.server_set.all():
                server_form_list.append(ServerForm(instance=l_tmp))
        if not header_form_list and back:
            for h_tmp in back.headers.all():
                header_form_list.append(HeaderForm(instance=h_tmp))
        if not httpchk_header_form_list and back:
            for k, v in back.http_health_check_headers.items():
                httpchk_header_form_list.append(HttpHealthCheckHeaderForm({
                    'check_header_name': k,
                    'check_header_value': v
                }))

        return render(request, 'apps/backend_edit.html', {
            'form': form,
            'servers': server_form_list,
            'net_server_form': ServerForm(mode='net'),
            'unix_server_form': ServerForm(mode='unix'),
            'headers': header_form_list,
            'header_form': HeaderForm(),
            'sockets_choice': available_sockets,
            'http_health_check_headers': httpchk_header_form_list,
            'http_health_check_headers_form': HttpHealthCheckHeaderForm(),
            **kwargs
        })

    if request.method in ("POST", "PUT"):
        # Handle JSON-formatted servers
        try:
            if api:
                server_ids = request.JSON.get('servers', [])
                assert isinstance(server_ids, list), "Servers field must be a list."
            else:
                server_ids = json_loads(request.POST.get('servers', "[]"))
        except Exception as e:
            return render_form(backend, save_error=[
                "Error in Servers field : {}".format(e),
                str.join('', format_exception(*exc_info()))
            ])

        header_objs = []
        httpchk_headers_dict = {}
        if form.data.get('mode') == "http":
            # Handle JSON-formatted request headers
            try:
                if api:
                    header_ids = request.JSON.get('headers', [])
                    assert isinstance(header_ids, list), "Headers field must be a list."
                else:
                    header_ids = json_loads(request.POST.get('headers', "[]"))
            except Exception as e:
                return render_form(backend, save_error=[
                    "Error in Request-headers field : {}".format(e),
                    str.join('', format_exception(*exc_info()))
                ])

            # Handle JSON-formatted HTTP health check headers
            try:
                if api:
                    httpchk_header_ids = request.JSON.get('http_health_check_headers', [])
                    assert isinstance(httpchk_header_ids, list), "Health check headers field must be a list."
                else:
                    httpchk_header_ids = json_loads(request.POST.get('http_health_check_headers', "[]"))
            except Exception as e:
                return render_form(backend, save_error=[
                    "Error in Http-health-check-headers field : {}".format(e),
                    str.join('', format_exception(*exc_info()))
                ])

            # For each health check header in the list
            for header in httpchk_header_ids:
                httpchkform = HttpHealthCheckHeaderForm(header, error_class=DivErrorList)
                if not httpchkform.is_valid():
                    if api:
                        api_errors.append({"health_check": httpchkform.errors.get_json_data()})
                    else:
                        form.add_error('enable_http_health_check', httpchkform.errors.as_ul())
                    continue
                # Save the forms in case we re-render the page
                httpchk_header_form_list.append(httpchkform)
                httpchk_headers_dict[header.get('check_header_name')] = header.get('check_header_value')

            # For each request header in the list
            for header in header_ids:
                # If an id is given, retrieve the object from mongo
                try:
                    instance_h = Header.objects.get(pk=header['id']) if header.get('id') else None
                except ObjectDoesNotExist:
                    form.add_error(None, "Request-header with id {} not found. Injection detected ?".format(header.get('id')))
                    continue
                # And instantiate the form with the object, or None
                header_f = HeaderForm(header, instance=instance_h)
                if not header_f.is_valid():
                    if api:
                        api_errors.append({"headers": header_f.errors.get_json_data()})
                    else:
                        form.add_error('headers', header_f.errors.as_ul())
                    continue
                # Save the forms in case we re-render the page
                header_form_list.append(header_f)
                # And keep the objects list, to save them later, when the Backend is saved
                header_objs.append(header_f.save(commit=False))

        server_objs = []
        # For each server in the list
        for server in server_ids:
            # If an id is given, retrieve the object from mongo
            try:
                instance_s = Server.objects.get(pk=server['id']) if server['id'] else None
            except ObjectDoesNotExist:
                form.add_error(None, "Server with id {} not found.".format(server['id']))
                continue
            # And instantiate the form with the object, or None
            server_f = ServerForm(server, instance=instance_s)
            if not server_f.is_valid():
                if api:
                    api_errors.append({'server': server_f.errors.get_json_data()})
                else:
                    form.add_error(None, server_f.errors.as_ul())
                continue
            server_form_list.append(server_f)
            server_obj = server_f.save(commit=False)
            server_objs.append(server_obj)

        # If errors have been added to the form
        if not form.is_valid():
            logger.error("Form errors: {}".format(form.errors.as_json()))
            return render_form(backend)

        # Save the form to get an id if there is not already one
        backend = form.save(commit=False)
        backend.configuration = ""
        backend.http_health_check_headers = httpchk_headers_dict

        # At least one server is required if the backend is enabled
        if not server_objs and backend.enabled:
            form.add_error(None, "At least one server is required if backend is enabled.")
            return render_form(backend)

        # Generate the not-yet-saved object's conf
        try:
            logger.debug("Generating conf of backend '{}'".format(backend.name))
            backend.configuration = backend.generate_conf(header_list=header_objs, server_list=server_objs)
            # Save the conf on disk, and test it with `haproxy -c`
            logger.debug("Writing/Testing conf of backend '{}'".format(backend.name))
            backend.test_conf()
        except ServiceError as e:
            logger.exception(e)
            return render_form(backend, save_error=[str(e), e.traceback])
        except Exception as e:
            logger.exception(e)
            return render_form(backend, save_error=[
                "No referenced error",
                str.join('', format_exception(*exc_info()))
            ])

        # If the conf is OK, save the Backend object
        # Is that object already in db or not
        first_save = not backend.id
        try:
            logger.debug("Saving backend")
            backend.save()
            logger.debug("Backend '{}' (id={}) saved in MongoDB.".format(backend.name, backend.id))

            # And all the servers created earlier
            for s in server_objs:
                s.backend = backend
                logger.debug("Saving server {}".format(str(s)))
                s.save()

            # Delete the servers removed in the form
            for s in backend.server_set.exclude(pk__in=[l.id for l in server_objs]):
                s.delete()
                logger.info("Deleting server {}".format(s))

            # If mode is HTTP
            if backend.mode == "http":
                # Remove deleted request-headers
                for header in backend.headers.all():
                    if header not in header_objs:
                        backend.headers.remove(header)
                # Associate added request-headers
                for header in header_objs:
                    new_object = not header.id
                    header.save()
                    if new_object:
                        backend.headers.add(header)
                        logger.debug("HTTP Headers {} associated to Backend {}".format(header, backend))

            # If the Backend is updated and its name was changed
            if not first_save and "name" in form.changed_data:
                logger.info("Backend name changed, looking for associated frontends...")
                workflow_list = backend.workflow_set.all()
                for workflow in workflow_list:
                    logger.info("Reloading frontend '{}' haproxy configuration".format(workflow.frontend))
                    workflow.frontend.reload_conf()

            # Re-generate the config AFTER save, to get the ID
            backend.configuration = backend.generate_conf()
            # Asynchronous API request to save the conf on nodes
            # Save the conf first, to raise if there is an error
            backend.save_conf()
            logger.debug("Write conf of backend '{}' asked on cluster".format(backend.name))

            # Reload the HAProxy service - after rsyslog, to prevent a logging crash
            api_res = Cluster.api_request("services.haproxy.haproxy.reload_service")
            if not api_res.get('status'):
                raise ServiceReloadError("on cluster\n API request error.", "haproxy",
                                         traceback=api_res.get('message'))

            for node in Node.objects.all():
                backend.status[node.name] = "WAITING"
            backend.save()
        except (VultureSystemError, ServiceError) as e:
            # Error while saving the configuration file
            # The object has been saved: delete it if needed
            if first_save:
                for server in backend.server_set.all():
                    server.delete()
                backend.delete()
            logger.exception(e)
            return render_form(backend, save_error=[str(e), e.traceback])
        except Exception as e:
            # If we arrive here, the object has not been saved
            logger.exception(e)
            return render_form(backend, save_error=[
                "Failed to save object in database :\n{}".format(e),
                str.join('', format_exception(*exc_info()))
            ])

        if api:
            return build_response(backend.id, "applications.backend.api", COMMAND_LIST)
        return HttpResponseRedirect('/apps/backend/')

    return render_form(backend)
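# Shape of the payload backend_edit() parses above. The nested field names
# follow the ServerForm/HeaderForm/HttpHealthCheckHeaderForm fields, which are
# not shown here; the values below are illustrative assumptions:
#
#     {
#         "name": "my-backend",
#         "mode": "http",
#         "enabled": true,
#         "servers": [{"id": null, ...ServerForm fields...}],
#         "headers": [{"id": null, ...HeaderForm fields...}],
#         "http_health_check_headers": [
#             {"check_header_name": "Host", "check_header_value": "www.example.com"}
#         ]
#     }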
def cluster_join(master_hostname, master_ip, secret_key, ca_cert=None, cert=None, key=None):
    """ Join an existing cluster

    :param master_hostname: Master hostname
    :param master_ip: Master management IP address
    :param secret_key: The node's secret key
    :param ca_cert: The CA Certificate (optional: can be retrieved automagically)
    :param cert: The node certificate (optional: can be retrieved automagically)
    :param key: The node private key (optional: can be retrieved automagically)
    :return: True / False
    """
    # We are coming from the CLI interface
    try:
        infos = requests.get(
            "https://{}:8000/api/v1/system/cluster/info".format(master_ip),
            headers={'Cluster-api-key': secret_key},
            verify=False
        ).json()
        if not infos['status']:
            raise Exception('Error at API Request Cluster Info: {}'.format(infos['data']))
        time_master = infos['data'][master_hostname]['unix_timestamp']
        time_now = time.time()
        if abs(time_now - time_master) > 60:
            logger.info('Nodes are not at the same date. Please sync with an NTP server')
            print('Nodes are not at the same date. Please sync with an NTP server')
            return False
    except Exception as e:
        logger.error("Error at API Request Cluster Info: {} Invalid API KEY ?".format(e))
        return False

    if not ca_cert:
        try:
            infos = requests.post(
                "https://{}:8000/api/system/pki/get_ca".format(master_ip),
                headers={'Cluster-api-key': secret_key},
                verify=False
            ).json()
            ca_cert = infos.get('ca_cert')
        except Exception as e:
            logger.error("Unable to retrieve CA certificate: {}".format(e))
            return False

    if not cert or not key:
        try:
            infos = requests.post(
                "https://{}:8000/api/system/pki/get_cert/".format(master_ip),
                headers={'Cluster-api-key': secret_key},
                data={'node_name': get_hostname()},
                verify=False
            ).json()
            cert = infos.get('cert')
            key = infos.get('key')
        except Exception as e:
            logger.error("Unable to retrieve Node certificate: {}".format(e))
            return False

    if cert and key:
        bundle = cert + key
    else:
        logger.error("Unable to retrieve Node certificate and key, check the secret key")
        return False

    with open("/var/tmp/ca.pem", "w") as f:
        f.write(ca_cert)
    with open("/var/tmp/node.cert", "w") as f:
        f.write(cert)
    with open("/var/tmp/node.key", "w") as f:
        f.write(key)
    with open("/var/tmp/node.pem", "w") as f:
        f.write(bundle)

    # At this point we should have valid certificates: save them on the system
    subprocess.check_output(['/home/vlt-os/scripts/write_cert.sh'])

    # Certificates have been overwritten => we need to destroy the replicaset & restart mongoDB
    logger.info("replDestroy: Restart Mongodb with new certificates")
    mongo = MongoBase()
    mongo.repl_destroy()

    # Ask the primary to join us on our management IP
    # TODO: set verify to True
    infos = requests.post(
        "https://{}:8000/api/system/cluster/add/".format(master_ip),
        headers={'Cluster-api-key': secret_key},
        data={'slave_ip': get_management_ip(), 'slave_name': get_hostname()},
        verify=False
    )
    if infos.status_code != 200:
        raise Exception("Error at API Call on /system/cluster/add/ Response code: {}".format(infos.status_code))
    infos = infos.json()
    if not infos.get('status'):
        logger.error("Error during API Call to add node to cluster: {}".format(infos.get('message')))
        return False

    # Join our redis server to the redis master
    c = RedisBase()
    redis_master_node = c.get_master(master_ip)
    c.slave_of(redis_master_node, 6379)

    # Tell the local sentinel to monitor the local redis server
    c = RedisBase(get_management_ip(), 26379)
    c.sentinel_monitor()

    # Sleep a few seconds for the replication to occur
    time.sleep(3)

    # Retrieve the local node (created by the master during /cluster/add/)
    try:
        node = Node.objects.get(name=get_hostname(), management_ip=get_management_ip())
    except Exception:
        logger.error("cluster_join:: Unable to find slave node !")
        return False

    # Update the uri of the internal Log Forwarder
    logfwd = LogOMMongoDB.objects.get()
    logfwd.uristr = mongo.get_replicaset_uri()
    logfwd.save()

    # Save certificates on the new node
    for cert in X509Certificate.objects.exclude(is_vulture_ca=True):
        cert.save_conf()

    # Read the network config and store it into mongo
    # (no rights to do that in jail - API request)
    node.api_request('toolkit.network.network.refresh_nic')
    # Read ZFS file systems
    node.api_request('system.zfs.zfs.refresh')
    # Download reputation databases before crontab
    node.api_request("gui.crontab.feed.security_update")

    # And configure + restart netdata
    logger.debug("API call to netdata configure_node")
    # API call to the whole Cluster - to refresh nodes in each node's conf
    Cluster.api_request('services.netdata.netdata.configure_node')
    logger.debug("API call to restart netdata service")
    node.api_request('services.netdata.netdata.restart_service')

    logger.debug("API call to configure HAProxy")
    node.api_request("services.haproxy.haproxy.configure_node")

    logger.debug("API call to write Darwin policies conf")
    for policy in DarwinPolicy.objects.all():
        node.api_request("services.darwin.darwin.write_policy_conf", policy.pk)
    logger.debug("API call to configure Darwin")
    node.api_request("services.darwin.darwin.build_conf")

    # API call to the whole Cluster - to refresh the Nodes list in conf
    logger.debug("API call to update configuration of Apache GUI")
    Cluster.api_request("services.apache.apache.reload_conf")

    # The configure method restarts rsyslog if needed
    logger.debug("API call to configure rsyslog")
    # API call to the whole Cluster - to refresh the mongodb uri in pf logs
    Cluster.api_request("services.rsyslogd.rsyslog.configure_node")

    logger.debug("API call to configure logrotate")
    node.api_request("services.logrotate.logrotate.reload_conf")

    return True
def save_workflow(request, workflow_obj, object_id=None):
    """ WARNING: Apply changes to the API as well ! """
    workflow_acls = []
    before_policy = True
    order = 1
    old_defender_policy_id = None
    if workflow_obj.defender_policy:
        old_defender_policy_id = deepcopy(workflow_obj.defender_policy.pk)

    try:
        workflow = json.loads(request.POST['workflow'])
        workflow_name = request.POST['workflow_name']
        workflow_enabled = request.POST['workflow_enabled'] == "true"

        if workflow_name == "":
            raise InvalidWorkflowError(_("A name is required"))

        workflow_obj.enabled = workflow_enabled
        workflow_obj.name = workflow_name
        workflow_obj.workflow_json = workflow
        workflow_obj.save()

        old_workflow_acls = WorkflowACL.objects.filter(workflow=workflow_obj)
        old_workflow_acls.delete()

        for step in workflow:
            if step['data']['type'] == "frontend":
                frontend = Frontend.objects.get(pk=step['data']['object_id'])
                workflow_obj.frontend = frontend
                if frontend.mode == "http":
                    workflow_obj.fqdn = step['data']['fqdn']
                    if not validators.domain(workflow_obj.fqdn):
                        raise InvalidWorkflowError(_("This FQDN is not valid."))
                    workflow_obj.public_dir = step['data']['public_dir']
                    if workflow_obj.public_dir and len(workflow_obj.public_dir):
                        if workflow_obj.public_dir[0] != '/':
                            workflow_obj.public_dir = '/' + workflow_obj.public_dir
                        if workflow_obj.public_dir[-1] != '/':
                            workflow_obj.public_dir += '/'
                workflow_obj.save()
            elif step['data']['type'] == 'backend':
                backend = Backend.objects.get(pk=step['data']['object_id'])
                workflow_obj.backend = backend
                workflow_obj.save()
            elif step['data']['type'] == "acl":
                access_control = AccessControl.objects.get(pk=step['data']['object_id'])
                workflow_acl = WorkflowACL(
                    access_control=access_control,
                    action_satisfy=step['data']['action_satisfy'],
                    action_not_satisfy=step['data']['action_not_satisfy'],
                    redirect_url_satisfy=step['data']['redirect_url_satisfy'],
                    redirect_url_not_satisfy=step['data']['redirect_url_not_satisfy'],
                    before_policy=before_policy,
                    workflow=workflow_obj,
                    order=order
                )
                order += 1
                workflow_acls.append(workflow_acl)
                workflow_acl.save()
            elif step['data']['type'] == "waf":
                if step['data']['object_id']:
                    defender_policy = DefenderPolicy.objects.get(pk=step['data']['object_id'])
                    workflow_obj.defender_policy = defender_policy
                else:
                    workflow_obj.defender_policy = None
                workflow_obj.save()
                before_policy = False
                order = 1

        if not workflow_obj.backend:
            raise InvalidWorkflowError(_("You need to select a backend"))

        # Reload the configuration
        nodes = workflow_obj.frontend.reload_conf()
        workflow_obj.backend.reload_conf()

        # We need to reload the new defender policy configuration if:
        # we set a defender policy and (we created the object, or we updated the
        # policy, the FQDN or the public directory)
        if workflow_obj.defender_policy:
            logger.info("Need to reload the Defender Policy SPOE configuration")
            Cluster.api_request("darwin.defender_policy.policy.write_defender_backend_conf",
                                workflow_obj.defender_policy.pk)
            Cluster.api_request("darwin.defender_policy.policy.write_defender_spoe_conf",
                                workflow_obj.defender_policy.pk)

        # We need to reload the old defender policy configuration if:
        # a defender policy was set previously, and it differs from the new one
        # (guarded in case the policy was removed)
        if old_defender_policy_id and (not workflow_obj.defender_policy
                                       or old_defender_policy_id != workflow_obj.defender_policy.pk):
            logger.info("Old Defender policy {} SPOE configuration will be reloaded".format(old_defender_policy_id))
            Cluster.api_request("darwin.defender_policy.policy.write_defender_spoe_conf",
                                old_defender_policy_id)

        # Reload HAProxy on the concerned nodes
        for node in nodes:
            api_res = node.api_request("services.haproxy.haproxy.restart_service")
            if not api_res.get('status'):
                logger.error("Workflow::edit: API error while trying to "
                             "restart HAProxy service : {}".format(api_res.get('message')))
                raise InvalidWorkflowError(api_res.get('message'))

        return JsonResponse({'status': True})
    except InvalidWorkflowError as e:
        if not object_id:
            for workflow_acl in workflow_acls:
                workflow_acl.delete()
            try:
                workflow_obj.delete()
            except Exception:
                pass
        return JsonResponse({'status': False, 'error': str(e)})
    except Exception as e:
        if settings.DEV_MODE:
            raise
        logger.critical(e, exc_info=1)
        return JsonResponse({'status': False, 'error': _('An error has occurred')})
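# Sketch of the `workflow` JSON structure parsed above, one dict per step;
# only the keys actually read in the loop are shown (values are examples):
#
#     [
#         {"data": {"type": "frontend", "object_id": 1,
#                   "fqdn": "www.example.com", "public_dir": "/"}},
#         {"data": {"type": "acl", "object_id": 2,
#                   "action_satisfy": "200", "action_not_satisfy": "403",
#                   "redirect_url_satisfy": "", "redirect_url_not_satisfy": ""}},
#         {"data": {"type": "waf", "object_id": 3}},
#         {"data": {"type": "backend", "object_id": 4}},
#     ]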