Example No. 1
    def write_backend_conf(self):
        logger.info("write_backend_conf")
        new_conf = self.generate_haproxy_backend_conf()

        if not new_conf:
            return False

        filename = self.get_backend_filename()

        try:
            with open(filename, 'r') as conf_file:
                current_conf = conf_file.read()
        except Exception as error:
            logger.warning(error)
            current_conf = None

        # If we don't have any current configuration, or if our new configuration differs
        if current_conf is None or new_conf != current_conf:
            try:
                write_conf(logger,
                           [filename, new_conf, HAPROXY_OWNER, HAPROXY_PERMS])
                return True
            except Exception as e:
                raise VultureSystemConfigError(
                    "log viewer: error while writing haproxy configuration (backend) for DefenderPolicy"
                )

        return False
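
Every example on this page funnels file writes through the same write_conf(logger, [path, content, owner, perms]) helper. The snippet below is only a minimal sketch of what such a helper might look like, reconstructed from the calling convention above; the body, the "user:group" owner format and the octal permission string are assumptions, and the real implementation (privilege handling, error wrapping, remote execution) is not shown here.

import grp
import logging
import os
import pwd


def write_conf(logger: logging.Logger, args: list) -> None:
    """Hypothetical sketch: write `content` to `path`, then apply owner and permissions.

    `args` mirrors the calling convention seen in the examples:
    [path, content, "user:group", "644"].
    """
    path, content, owner, perms = args
    logger.info("Writing configuration file %s", path)
    with open(path, "w") as conf_file:
        conf_file.write(content)
    user, group = owner.split(":")
    os.chown(path, pwd.getpwnam(user).pw_uid, grp.getgrnam(group).gr_gid)
    os.chmod(path, int(perms, 8))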
Example No. 2
def build_conf(node_logger, frontend_id=None):
    """ Generate conf of rsyslog inputs, based on all frontends LOG
    ruleset of frontend
    outputs of all frontends
    :param node_logger: Logger sent to all API requests
    :param frontend_id: The name of the frontend in conf file
    :return: 
    """
    result = ""
    """ Firstly, try to retrieve Frontend with given id """
    if frontend_id:
        try:
            frontend = Frontend.objects.get(pk=frontend_id)
            """ Generate ruleset conf of asked frontend """
            frontend_conf = frontend.generate_rsyslog_conf()
            """ And write-it """
            write_conf(node_logger, [frontend.get_rsyslog_filename(), frontend_conf, RSYSLOG_OWNER, RSYSLOG_PERMS])
            result += "Frontend '{}' conf written.\n".format(frontend_id)
        except ObjectDoesNotExist:
            raise VultureSystemError("Frontend with id {} not found, failed to generate conf.".format(frontend_id),
                                     "build rsyslog conf", traceback=" ")

    """ Generate inputs configutation """
    service = RsyslogService()
    """ If frontend was given we cannot check if its conf has changed to restart service
     and if reload_conf is True, conf has changed so restart service
    """
    if service.reload_conf() or frontend_id:
        result += "Rsyslog conf updated. Restarting service."
        result += service.restart()
    else:
        result += "Rsyslog conf hasn't changed."
    return result
Example No. 3
    def write_template(self, tpl_name, **kwargs):
        """ This method has to be called by api_request (Vultured) """
        # Resolve the template filename once (including the optional portal id)
        # so the written path and the returned message always match.
        filename = self.tpl_filename(tpl_name, kwargs.get('portal_id'))
        write_conf(logger, [
            filename,
            HAPROXY_HEADER + self.render_template(tpl_name, **kwargs),
            HAPROXY_OWNER, HAPROXY_PERMS
        ])
        return "Template {} successfully written".format(filename)
Example No. 4
def configure_pstats(node_logger):
    """ Pstats configuration """
    node = Cluster.get_current_node()
    jinja2_env = Environment(loader=FileSystemLoader(JINJA_PATH))
    pstats_template = jinja2_env.get_template("pstats.conf")
    write_conf(node_logger, ["{}/pstats.conf".format(RSYSLOG_PATH),
                             pstats_template.render({'node': node}),
                             RSYSLOG_OWNER, RSYSLOG_PERMS])
    return "Rsyslog configuration 'pstats.conf' written.\n"
Example No. 5
    def set_rc_conf(self, yes_or_no, service_name=""):
        """
        Set service_enable="YES" or "NO" in /RC_CONF_DIR/service
        :param yes_or_no: True for "YES", False for "NO"
        :param service_name: Service name to use if different from self.service_name
        """
        service_name = service_name or self.service_name
        filename = "{}/{}".format(RC_CONF_DIR, service_name)
        write_conf(logger, [
            filename,
            "{}_enable=\"{}\"".format(service_name, "YES" if yes_or_no else "NO"),
            RC_CONF_OWNERS, RC_CONF_PERMS
        ])
        return "{} successfully written.".format(filename)
Example No. 6
def build_conf(node_logger, frontend_id):
    """ Generate conf of rsyslog inputs, based on all frontends LOG
    ruleset of frontend
    outputs of all frontends
    :param node_logger: Logger sent to all API requests
    :param frontend_id: The name of the frontend in conf file
    :return:
    """
    result = ""
    node = Cluster.get_current_node()
    reload = False
    """ Firstly, try to retrieve Frontend with given id """
    from services.frontend import models  # because of circular imports

    try:
        frontend = models.Frontend.objects.get(pk=frontend_id)
        """ Generate ruleset conf of asked frontend """
        tmp = frontend.generate_conf()
        if frontend.configuration[node.name] != tmp:
            frontend.configuration[node.name] = tmp
            reload = True
        """ And write-it """

        write_conf(node_logger, [
            frontend.get_filename(), frontend.configuration[node.name],
            models.FRONTEND_OWNER, models.FRONTEND_PERMS
        ])
        result += "Frontend '{}' conf written.\n".format(frontend_id)
    except ObjectDoesNotExist:
        raise VultureSystemError(
            "Frontend with id {} not found, failed to generate conf.".format(
                frontend_id),
            "build HAProxy conf",
            traceback=" ")
    """ Generate inputs configuration """
    service = HaproxyService()
    """ If frontend was given we cannot check if its conf has changed to restart service
     and if reload_conf is True, conf has changed so restart service
    """
    if reload:
        result = "HAProxy conf updated. Restarting service."
        result += service.restart()
    else:
        result += "HAProxy conf hasn't changed."
    return result
Example No. 7
def write_policy_conf(node_logger, policy_id):
    logger.info("writing policy conf for filterpolicy id {}".format(policy_id))
    policy = FilterPolicy.objects.get(pk=policy_id)

    logger.info("writing policy '{}' conf".format(policy.filter.name))

    conf_path = "{darwin_path}/f{filter_name}/f{filter_name}_{darwin_policy_id}.conf".format(
        darwin_path=DARWIN_PATH,
        filter_name=policy.filter.name,
        darwin_policy_id=policy.policy.pk)

    write_conf(node_logger, [
        conf_path, "{}\n".format(
            json_dumps(policy.config, sort_keys=True, indent=4)),
        DARWIN_OWNERS, DARWIN_PERMS
    ])

    return "{} successfully written.".format(conf_path)
Example No. 8
def configure_node(node_logger):
    """ Generate and write netdata conf files """
    result = ""

    node = Cluster.get_current_node()
    global_config = Cluster.get_global_config()
    """ For each Jinja templates """
    jinja2_env = Environment(loader=FileSystemLoader(JINJA_PATH))
    for template_name in jinja2_env.list_templates():
        """ Perform only "rsyslog_template_*.conf" templates """
        match = re_search("^rsyslog_template_([^\.]+)\.conf$", template_name)
        if not match:
            continue
        template = jinja2_env.get_template(template_name)
        template_path = "{}/05-tpl-01-{}.conf".format(RSYSLOG_PATH,
                                                      match.group(1))
        """ Generate and write the conf depending on all nodes, and current node """
        write_conf(node_logger, [
            template_path,
            template.render({
                'node': node,
                'global_config': global_config
            }), RSYSLOG_OWNER, RSYSLOG_PERMS
        ])
        result += "Rsyslog template '{}' written.\n".format(template_path)
    """ PF configuration for Rsyslog """
    pf_template = jinja2_env.get_template("pf.conf")
    write_conf(node_logger, [
        "{}/pf.conf".format(RSYSLOG_PATH),
        pf_template.render({'mongodb_uri': MongoBase.get_replicaset_uri()}),
        RSYSLOG_OWNER, RSYSLOG_PERMS
    ])
    result += "Rsyslog template 'pf.conf' written.\n"
    """ If this method has been called, there is a reason - a Node has been modified
          so we need to restart Rsyslog because at least PF conf has been changed 
    """
    # if Frontend.objects.filter(enable_logging=True).count() > 0:
    #    node_logger.debug("Logging enabled, reload of Rsyslog needed.")
    restart_service(node_logger)
    node_logger.info("Rsyslog service restarted.")
    result += "Rsyslogd service restarted."

    return result
Example No. 9
    def reload_conf(self):
        """
        Write new PF configuration, if needed
        :return: True / False
        """
        conf_reloaded = super().reload_conf()

        config_model = Cluster.get_global_config()
        """ Check if firehol and vulture netsets exist """
        filepath = DATABASES_PATH + "/firehol_level1.netset"
        if not os_path.isfile(filepath):
            write_conf(logger,
                       [filepath, "", DATABASES_OWNER, DATABASES_PERMS])

        filepath = DATABASES_PATH + "/vulture-v4.netset"
        if not os_path.isfile(filepath):
            write_conf(logger,
                       [filepath, "", DATABASES_OWNER, DATABASES_PERMS])

        filepath = DATABASES_PATH + "/vulture-v6.netset"
        if not os_path.isfile(filepath):
            write_conf(logger,
                       [filepath, "", DATABASES_OWNER, DATABASES_PERMS])
        """ Check if Whitelist and Blacklist has changed """
        wl_bl = {
            'pf.whitelist.conf': config_model.pf_whitelist,
            'pf.blacklist.conf': config_model.pf_blacklist,
        }

        for filename, liste in wl_bl.items():
            file_path = '{}{}'.format(PF_PATH, filename)
            config = "\n".join(liste.split(','))
            md5_config = md5(config.encode('utf-8')).hexdigest().strip()
            md5sum = ""

            try:
                result = check_output(['/sbin/md5', file_path],
                                      stderr=PIPE).decode('utf8')
                md5sum = result.strip().split('= ')[1]

            except CalledProcessError as e:
                stderr = e.stderr.decode('utf8')
                logger.error("Failed to md5sum file '{}' : {}".format(
                    filename, stderr))
            """ If there was an error, bad permissions on file, rewrite-it with correct ones """
            if md5_config != md5sum:
                conf_reloaded = True
                logger.info(
                    'Packet Filter {} needs to be rewritten'.format(filename))
                write_conf(logger, [file_path, config, PF_OWNERS, PF_PERMS])

        return conf_reloaded
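
Shelling out to /sbin/md5 and parsing its output is BSD-specific and needs extra error handling when the file is missing. As a portable alternative (a sketch, not the code used above), the same comparison can be done entirely with hashlib:

import hashlib
import os


def file_needs_rewrite(file_path: str, config: str) -> bool:
    """Return True when file_path is absent or its MD5 digest differs from config's."""
    wanted = hashlib.md5(config.encode("utf-8")).hexdigest()
    if not os.path.isfile(file_path):
        return True
    with open(file_path, "rb") as conf_file:
        current = hashlib.md5(conf_file.read()).hexdigest()
    return current != wanted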
Example No. 10
def configure_node(node_logger):
    """ Generate and write netdata conf files """
    node = Cluster.get_current_node()
    nodes = Node.objects.all()
    """ For each Jinja templates """
    jinja2_env = Environment(loader=FileSystemLoader(JINJA_PATH))
    for template_name in jinja2_env.list_templates():
        template = jinja2_env.get_template(template_name)
        conf_path = CONF_PATH
        """ fping.conf has different directory """
        if template_name != "fping.conf":
            """ Other files than fping are in python.d """
            conf_path += "python.d/"
        """ Generate and write the conf depending on all nodes, and current node """
        write_conf(node_logger, [
            conf_path + template_name,
            template.render({
                'nodes': nodes,
                'node': node
            }), NETDATA_OWNER, NETDATA_PERMS
        ])
Example No. 11
def write_policy_conf(node_logger, policy_id):
    policy = DarwinPolicy.objects.get(pk=policy_id)
    logger.info("writing policy conf '{}'".format(policy.name))

    for filter in policy.filterpolicy_set.all():

        if filter.enabled:
            logger.info("writing filter '{}' conf".format(filter.name))

            conf_path = get_darwin_conf_path(policy.id, filter.filter.name)

            try:
                write_conf(node_logger, [
                    conf_path, "{}\n".format(
                        json_dumps(filter.config, sort_keys=True, indent=4)),
                    DARWIN_OWNERS, DARWIN_PERMS
                ])
            except Exception as e:
                logger.error(
                    "Darwin::write_policy_conf:: error while writing conf: {}".
                    format(e))
                continue

        else:
            logger.info(
                "filter '{}' not enabled, deleting potential conf".format(
                    filter.name))

            try:
                delete_conf_file(node_logger, filter.conf_path)
            except (VultureSystemConfigError, VultureSystemError):
                continue
            except Exception as e:
                logger.error(
                    "Darwin::write_policy_conf:: error while removing disabled filter config: {}"
                    .format(e))
Example No. 12
    def write_conf(self, content, owners=None, perms=None):
        """ Write `content` to this template's path, defaulting to root:wheel / 644 """
        write_conf(logger, [
            self.jinja_template['tpl_path'], content,
            owners or "root:wheel", perms or "644"
        ])