def check_internal_tasks():
    try:
        # Run this crontab only on the master node
        node = Cluster.get_current_node()
        if not node.is_master_mongo:
            return

        # Delete internal tasks marked "done" that are older than a month
        last_month_date = make_aware(datetime.datetime.now() - datetime.timedelta(days=30))
        MessageQueue.objects.filter(status="done", date_add__lte=last_month_date).delete()

        # Check whether a node has stopped executing its duties for a while.
        # If so, remove it from the cluster
        message_queue_not_finished = MessageQueue.objects.filter(date_add__lt=last_month_date, status="new")

        node_to_remove = []
        for message in message_queue_not_finished:
            if message.node not in node_to_remove:
                node_to_remove.append(message.node)

            message.delete()

        for n in node_to_remove:
            logger.info('[REMOVING DEAD NODE FROM CLUSTER] Node: {}'.format(n.name))
            c = MongoBase()
            c.connect_primary()
            c.repl_remove(n.name + ":9091")

    except Exception as e:
        logger.error("Crontab::check_internal_tasks: {}".format(e), exc_info=1)
        raise
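Note: each of these scheduled jobs repeats the same "run only on the MongoDB master" guard. A minimal sketch of factoring that check into a decorator — the master_only name is hypothetical; only Cluster.get_current_node() and is_master_mongo come from the examples:

import functools
import logging

logger = logging.getLogger(__name__)

def master_only(job):
    """Hypothetical helper: run the wrapped job only on the master MongoDB node."""
    @functools.wraps(job)
    def wrapper(*args, **kwargs):
        from system.cluster.models import Cluster
        node = Cluster.get_current_node()
        if not node or not node.is_master_mongo:
            logger.debug("%s: not the master node, skipping", job.__name__)
            return None
        return job(*args, **kwargs)
    return wrapper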
Example #2
def rss_fetch():
    if not Cluster.get_current_node().is_master_mongo:
        logger.debug(
            "Crontab::rss_fetch: Not the master node, skipping RSS fetch")
        return

    proxy = get_proxy()
    try:
        rss_uri = "https://predator.vultureproject.org/news.json"
        infos = requests.get(rss_uri, proxies=proxy).json()
        logger.debug("Crontab::rss_fetch: Received {} RSS feed".format(
            len(infos)))
        for info in infos:
            try:
                RSS.objects.get(title=info['title'])
            except RSS.DoesNotExist:
                RSS.objects.create(title=info['title'],
                                   date=timezone.make_aware(
                                       datetime.datetime.strptime(
                                           info['timestamp'],
                                           "%d/%m/%Y %H:%M:%S")),
                                   level=info['level'],
                                   content=info["content"])

    except Exception as e:
        logger.error("Crontab::rss_fetch: {}".format(e), exc_info=1)
        raise
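The get()/DoesNotExist pattern above can be collapsed with Django's get_or_create, which only creates the row when no RSS entry with that title exists; a sketch under the same model assumptions:

RSS.objects.get_or_create(
    title=info['title'],
    defaults={
        'date': timezone.make_aware(
            datetime.datetime.strptime(info['timestamp'], "%d/%m/%Y %H:%M:%S")),
        'level': info['level'],
        'content': info["content"],
    })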
Example #3
def configure_pstats(node_logger):
    """ Pstats configuration """
    node = Cluster.get_current_node()
    jinja2_env = Environment(loader=FileSystemLoader(JINJA_PATH))
    pstats_template = jinja2_env.get_template("pstats.conf")
    write_conf(node_logger, ["{}/pstats.conf".format(RSYSLOG_PATH),
                             pstats_template.render({'node': node}),
                             RSYSLOG_OWNER, RSYSLOG_PERMS])
    return "Rsyslog configuration 'pstats.conf' written.\n"
Example #4
def update_crl():
    """
    :return: Update internal vulture's CRL
    """
    if Cluster.get_current_node().is_master_mongo:
        for cert in X509Certificate.objects.filter(status='V'):
            cert.gen_crl()

    return True
Example #5
def cluster_add(request):

    slave_ip = request.POST.get('slave_ip')
    slave_name = request.POST.get('slave_name')

    # FIXME: improve security check (valid IPv4 / IPv6 and valid name)
    if not slave_name or not slave_ip:
        return JsonResponse({'status': False, 'message': 'Invalid call'})
    """ Make the slave_name resolvable """
    node = Cluster.get_current_node()
    node.api_request("toolkit.network.network.make_hostname_resolvable",
                     (slave_name, slave_ip))
    """ Now the slave should be in the cluster:
        Add it's management IP """
    node = Node()
    node.name = slave_name
    node.management_ip = slave_ip
    node.internet_ip = slave_ip
    node.save()

    # We need to wait for the VultureD daemon to reload PF Conf
    time.sleep(6)
    """ Add NEW node into the REPLICASET, as a pending member """
    c = MongoBase()
    c.connect()
    cpt = 0
    response = None
    while not response:
        try:
            logger.debug("Adding {} to replicaset".format(slave_name))
            response = c.repl_add(slave_name + ':9091')
        except Exception as e:
            logger.error(
                "Cannot connect to slave for the moment : {}".format(e))
            cpt += 1
            if cpt > 10:
                logger.error(
                    "Failed to connect to the slave 10 times, aborting.")
                return JsonResponse({
                    'status': False,
                    'message': 'Error during repl_add. Check logs'
                })
            logger.info("Waiting for next connection attempt to slave ...")
            time.sleep(1)

    # The loop above only exits once repl_add succeeded (failures return early)
    node.api_request('toolkit.network.network.refresh_nic')
    return JsonResponse({'status': True, 'message': 'ok'})
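The connection loop above interleaves counting, sleeping and logging; the same bounded-retry idea can be isolated in a small helper. A sketch, with all names hypothetical:

import time

def retry(action, attempts=10, delay=1, logger=None):
    """Call action() until it returns a truthy value or the attempts run out."""
    for attempt in range(1, attempts + 1):
        try:
            result = action()
            if result:
                return result
        except Exception as e:
            if logger:
                logger.error("Attempt {}/{} failed: {}".format(attempt, attempts, e))
        time.sleep(delay)
    return None

# Usage, reusing the MongoBase client from the example:
# response = retry(lambda: c.repl_add(slave_name + ':9091'), logger=logger)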
Example #6
def refresh_nic(logger):
    """
    Used by API calls to update mongodb with new system NIC / addresses
    :param node: node
    :return:
    """

    from system.cluster.models import Cluster
    node = Cluster.get_current_node()
    return node.synchronizeNICs()
Example #7
def fetch_rules(request):
    node = Cluster.get_current_node()
    if node:
        res = node.api_request("toolkit.yara.yara.fetch_yara_rules")
        if res.get('status'):
            return JsonResponse({
                'status': res.get('status'),
                'message': "successfully started update task"
            })
        else:
            logger.error(res.get('message'))
            return JsonResponse({
                'status': False,
            })
    else:
        return JsonResponse({'status': False, 'error': _('An error occurred')})
Example #8
def forwards_func(apps, schema_editor):
    logomhiredis_model = apps.get_model("applications", "LogOMHIREDIS")
    db_alias = schema_editor.connection.alias
    node = Cluster.get_current_node()
    # If the node is not bootstrapped, there is nothing to do
    if not node:
        return
    logomhiredis_objects = logomhiredis_model.objects.using(db_alias)

    try:
        redis_internal_dashboard = logomhiredis_objects.get(
            name="Internal_Dashboard", internal=True)
        node.pstats_forwarders.add(redis_internal_dashboard)
        node.save()
    except logomhiredis_model.DoesNotExist:
        print("Internal_Dashboard forwarder not found.")
Example #9
def create_snapshot(logger, dataset):
    """
        Destroy the ZFS Snapshot identifier by the given dataset name
    """

    this_node = Cluster.get_current_node()
    now = datetime.datetime.now()
    snapshot = dataset+'@'+this_node.name+'-'+str(now.year)+str(now.month)+str(now.day)+'.'+str(now.hour)+str(now.minute)

    proc = Popen(['/sbin/zfs', 'snapshot', snapshot], stdout=PIPE, stderr=PIPE)
    success, error = proc.communicate()
    if not error:
        logger.info("ZFS Snapshot {} created".format(snapshot))
        return True
    else:
        logger.error("ZFS Snapshot {} NOT created. Error is: {}".format(snapshot, error))
        return False
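The snapshot name concatenation can be written with strftime; note that this variant zero-pads month, day, hour and minute, unlike the plain str() concatenation above:

import datetime

now = datetime.datetime.now()
snapshot = "{}@{}-{}".format("tank/data", "node1", now.strftime("%Y%m%d.%H%M"))
# e.g. tank/data@node1-20240105.0907 (dataset and node name are illustrative)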
Example #10
def vm_update_status():

    proc = Popen(['/usr/local/bin/sudo', '/usr/local/sbin/vm', 'list'],
                 stdout=PIPE,
                 stderr=PIPE)
    success, error = proc.communicate()
    if not error:

        for s in success.decode('utf-8').split('\n'):
            #['Kali', 'default', 'grub', '2', '2G', '-', 'Yes', '[1]', 'Running', '(29774)']
            tmp = " ".join(s.split()).split(" ")
            if tmp[0] == "NAME" or tmp[0] == "":
                continue

            logger.debug("Updating VM {}".format(tmp[0]))
            vm, created = VM.objects.get_or_create(
                node=Cluster.get_current_node(), name=tmp[0])
            vm.name = tmp[0]
            vm.datastore = tmp[1]
            vm.loader = tmp[2]
            vm.cpu = tmp[3]
            vm.ram = tmp[4]
            vm.vnc = tmp[5]
            vm.autostart = tmp[6]
            if tmp[6] == "No":
                vm.status = tmp[7]
            else:
                vm.status = tmp[8]

            vm.save()

        # Delete VMs that no longer exist on the system
        for vm in VM.objects.all():
            is_good = False
            for s in success.decode('utf-8').split('\n'):
                tmp = " ".join(s.split()).split(" ")
                if tmp[0] == "NAME" or tmp[0] == "":
                    continue

                if vm.name == tmp[0]:
                    is_good = True
                    break

            if not is_good:
                logger.info("VM has disapear: {}".format(vm.name))
                vm.delete()
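The deletion pass rescans the whole vm list output for every VM object; collecting the names once into a set keeps it linear. A sketch under the same output-format assumptions:

existing = set()
for s in success.decode('utf-8').split('\n'):
    tmp = " ".join(s.split()).split(" ")
    if tmp[0] not in ("NAME", ""):
        existing.add(tmp[0])

for vm in VM.objects.all():
    if vm.name not in existing:
        logger.info("VM has disappeared: {}".format(vm.name))
        vm.delete()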
Example #11
def build_conf(node_logger, frontend_id):
    """ Generate conf of rsyslog inputs, based on all frontends LOG
    ruleset of frontend
    outputs of all frontends
    :param node_logger: Logger sent to all API requests
    :param frontend_id: The name of the frontend in conf file
    :return:
    """
    result = ""
    node = Cluster.get_current_node()
    reload = False
    """ Firstly, try to retrieve Frontend with given id """
    from services.frontend import models  # because of circular imports

    try:
        frontend = models.Frontend.objects.get(pk=frontend_id)
        """ Generate ruleset conf of asked frontend """
        tmp = frontend.generate_conf()
        if frontend.configuration[node.name] != tmp:
            frontend.configuration[node.name] = tmp
            reload = True
        """ And write-it """

        write_conf(node_logger, [
            frontend.get_filename(), frontend.configuration[node.name],
            models.FRONTEND_OWNER, models.FRONTEND_PERMS
        ])
        result += "Frontend '{}' conf written.\n".format(frontend_id)
    except ObjectDoesNotExist:
        raise VultureSystemError(
            "Frontend with id {} not found, failed to generate conf.".format(
                frontend_id),
            "build HAProxy conf",
            traceback=" ")
    """ Generate inputs configuration """
    service = HaproxyService()
    """ If frontend was given we cannot check if its conf has changed to restart service
     and if reload_conf is True, conf has changed so restart service
    """
    if reload:
        result = "HAProxy conf updated. Restarting service."
        result += service.restart()
    else:
        result += "HAProxy conf hasn't changed."
    return result
Example #12
def configure_node(node_logger):
    """ Generate and write netdata conf files """
    result = ""

    node = Cluster.get_current_node()
    global_config = Cluster.get_global_config()
    """ For each Jinja templates """
    jinja2_env = Environment(loader=FileSystemLoader(JINJA_PATH))
    for template_name in jinja2_env.list_templates():
        """ Perform only "rsyslog_template_*.conf" templates """
        match = re_search("^rsyslog_template_([^\.]+)\.conf$", template_name)
        if not match:
            continue
        template = jinja2_env.get_template(template_name)
        template_path = "{}/05-tpl-01-{}.conf".format(RSYSLOG_PATH,
                                                      match.group(1))
        """ Generate and write the conf depending on all nodes, and current node """
        write_conf(node_logger, [
            template_path,
            template.render({
                'node': node,
                'global_config': global_config
            }), RSYSLOG_OWNER, RSYSLOG_PERMS
        ])
        result += "Rsyslog template '{}' written.\n".format(template_path)
    """ PF configuration for Rsyslog """
    pf_template = jinja2_env.get_template("pf.conf")
    write_conf(node_logger, [
        "{}/pf.conf".format(RSYSLOG_PATH),
        pf_template.render({'mongodb_uri': MongoBase.get_replicaset_uri()}),
        RSYSLOG_OWNER, RSYSLOG_PERMS
    ])
    result += "Rsyslog template 'pf.conf' written.\n"
    """ If this method has been called, there is a reason - a Node has been modified
          so we need to restart Rsyslog because at least PF conf has been changed 
    """
    # if Frontend.objects.filter(enable_logging=True).count() > 0:
    #    node_logger.debug("Logging enabled, reload of Rsyslog needed.")
    restart_service(node_logger)
    node_logger.info("Rsyslog service restarted.")
    result += "Rsyslogd service restarted."

    return result
Example #13
def configure_node(node_logger):
    """ Generate and write netdata conf files """
    node = Cluster.get_current_node()
    nodes = Node.objects.all()
    """ For each Jinja templates """
    jinja2_env = Environment(loader=FileSystemLoader(JINJA_PATH))
    for template_name in jinja2_env.list_templates():
        template = jinja2_env.get_template(template_name)
        conf_path = CONF_PATH
        """ fping.conf has different directory """
        if template_name != "fping.conf":
            """ Other files than fping are in python.d """
            conf_path += "python.d/"
        """ Generate and write the conf depending on all nodes, and current node """
        write_conf(node_logger, [
            conf_path + template_name,
            template.render({
                'nodes': nodes,
                'node': node
            }), NETDATA_OWNER, NETDATA_PERMS
        ])
Example #14
    def get_conf(self):
        """ Generate conf from mongo object and Jinja template
        :return     Generated configuration, to write into file
                    If error, raise ServiceConfigError
        """
        try:
            path_config = os_path.join(settings.BASE_DIR, 'services', 'config')
            jinja2_env = Environment(loader=FileSystemLoader(path_config))

            template = jinja2_env.get_template(self.jinja_template['tpl_name'])

            return template.render({
                'node': Cluster.get_current_node(),
                **self.get_dict_conf()
            })
        except ServiceExit:
            raise
        except Exception as e:
            logger.error(e)
            raise ServiceConfigError(
                "Failed to generate jinja template: {}".format(str(e)),
                self.service_name)
Example #15
def refresh(self):
    """ Refresh ZFS datasets """
    proc = Popen(['/sbin/zfs', 'list', '-t', 'all'], stdout=PIPE)
    proc2 = Popen(['/usr/bin/grep', '-v', '-e', 'MOUNTPOINT', '-e', 'none'], stdin=proc.stdout, stdout=PIPE, stderr=PIPE)
    proc.stdout.close()
    success, error = proc2.communicate()

    if not error:
        for s in success.decode('utf-8').split('\n'):
            tmp = " ".join(s.split()).split(" ")
            # Skip empty or truncated lines (the last line of the output is empty)
            if len(tmp) < 5:
                continue

            zfs_dataset, created = ZFS.objects.get_or_create(node=Cluster.get_current_node(), name=tmp[0])
            zfs_dataset.used = tmp[1]
            zfs_dataset.avail = tmp[2]
            zfs_dataset.refer = tmp[3]
            zfs_dataset.mount = tmp[4]
            zfs_dataset.save()
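The grep subprocess can be dropped by asking zfs for script-friendly output: zfs list -H prints tab-separated columns with no header line. A sketch of the same refresh with Python-side filtering only:

from subprocess import Popen, PIPE

proc = Popen(['/sbin/zfs', 'list', '-t', 'all', '-H'], stdout=PIPE, stderr=PIPE)
out, err = proc.communicate()
if not err:
    for line in out.decode('utf-8').splitlines():
        fields = line.split('\t')
        # Columns: NAME, USED, AVAIL, REFER, MOUNTPOINT
        if len(fields) < 5 or fields[4] == 'none':
            continue
        name, used, avail, refer, mount = fields[:5]
        # ...update the ZFS model objects as in the example above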
Example #16
    def reconcile(self):
        node = Cluster.get_current_node()
        if not node.is_master_mongo:
            return False

        mongo = MongoBase()
        if not mongo.connect():
            return False
        mongo.connect_primary()

        redis = RedisBase()
        master_node = redis.get_master()
        redis = RedisBase(node=master_node)

        filepath = ALERTS_FILE

        # Pops alerts produced when vulture was down
        # Do not retry, as there is likely no cache for remaining alerts in current Redis
        self.pops(mongo, redis, filepath, max_tries=1)
        if self.shutdown_flag.is_set():
            return True

        redis_channel = REDIS_CHANNEL
        listener = redis.redis.pubsub()
        listener.subscribe([redis_channel])

        logger.info(
            "Reconcile: start listening {} channel.".format(redis_channel))
        while not self.shutdown_flag.is_set():
            alert = listener.get_message(ignore_subscribe_messages=True,
                                         timeout=2)
            # If we have no messages, alert is None
            if alert:
                # Only use the channel to trigger popping alerts
                self.pops(mongo, redis, filepath)
        return True
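The listener above is the standard redis-py pub/sub API; a self-contained sketch of the same polling loop, with the channel name as a placeholder:

import redis

r = redis.Redis()  # defaults to localhost:6379
listener = r.pubsub()
listener.subscribe(['vulture.alerts'])  # hypothetical channel name

while True:
    message = listener.get_message(ignore_subscribe_messages=True, timeout=2)
    if message:  # None when nothing arrived within the timeout
        print(message['data'])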
Example #17
def security_update(node_logger=None, tenant_id=None):
    """
    :return: Update Vulture's security databases
    """
    # Get proxy first
    proxies = get_proxy()
    """ Every node needs to be up2date """
    try:
        logger.info("Crontab::security_update: calling pkg update...")
        res = subprocess.check_output(
            ["/usr/local/bin/sudo", "/usr/sbin/pkg",
             "-ohttp_proxy={}".format(proxies.get('http', "")),
             "-ohttps_proxy={}".format(proxies.get('https', "")),
             "-oftp_proxy={}".format(proxies.get('ftp', "")),
             "update"],
            stderr=subprocess.PIPE).decode("utf-8")
        if "All repositories are up to date" not in res:
            logger.error("Crontab::security_update: Unable to update pkg")
        else:
            logger.info(
                "Crontab::security_update: All repositories are up to date")
    except subprocess.CalledProcessError as e:
        logger.error("Failed to update pkg packages : {}".format(
            str(e.stderr.decode('utf8'))))
    except Exception as e:
        logger.error("Failed to update pkg packages : {}".format(str(e)))
    """ Do we have something urgent to update ? """
    try:
        logger.info("Crontab::security_update: calling pkg upgrade...")
        res = subprocess.check_output(
            ["/usr/local/bin/sudo", "/usr/sbin/pkg",
             "-ohttp_proxy={}".format(proxies.get('http', "")),
             "-ohttps_proxy={}".format(proxies.get('https', "")),
             "-oftp_proxy={}".format(proxies.get('ftp', "")),
             "audit", "-F"],
            stderr=subprocess.PIPE).decode('utf8')
        if "0 problem" in res:
            logger.info("Crontab::security_update: No vulnerability found.")
        elif "is vulnerable" in res:
            logger.info(
                "Crontab::security_update: Security problem found : {}".format(
                    res))
            security_alert(
                "Security problem found on node {}".format(get_hostname()),
                "danger", res)
    except subprocess.CalledProcessError as e:
        if e.stdout.decode("utf-8").startswith("0 problem"):
            logger.info("Crontab::security_update: No vulnerability found.")
        elif "is vulnerable" in e.stdout.decode("utf-8"):
            logger.info(
                "Crontab::security_update: Security problem found : {}".format(
                    e.stdout.decode('utf-8')))
            security_alert(
                "Security problem found on node {}".format(get_hostname()),
                "danger", e.stdout.decode("utf-8"))
        else:
            logger.error(
                "Crontab::security_update: Failed to retrieve vulnerabilities : "
                "{}".format(str(e)))
    except Exception as e:
        logger.error(
            "Crontab::security_update: Failed to retrieve vulnerabilities : {}"
            .format(e))

    # If tenant id given, try to retrieve the tenant
    if tenant_id:
        try:
            tenant = Tenants.objects.get(pk=tenant_id)
        except Tenants.DoesNotExist:
            logger.error(
                "Security_update: Failed to retrieve reputation database with asked id {}"
                .format(tenant_id))
            raise Exception("Tenant not found")

    # If it is the master node, retrieve the databases
    if Cluster.get_current_node().is_master_mongo:
        # If tenant id given, retrieve the predator api key
        if tenant_id:
            predator_tokens = [tenant.predator_apikey]
        else:
            predator_tokens = Tenants.objects.mongo_distinct("predator_apikey")
        # Loop over predator api keys configured over Multi-Tenants configs
        for predator_token in predator_tokens:
            """ Download newest reputation databases list """
            try:
                logger.info(
                    "Crontab::security_update: get Vulture's ipsets...")
                infos = requests.get(IPSET_VULTURE + "index.json",
                                     headers={
                                         'Authorization': predator_token
                                     },
                                     proxies=proxies,
                                     timeout=5).json()
            except Exception as e:
                logger.error(
                    "Crontab::security_update: Unable to download Vulture's ipsets: {}"
                    .format(e))
                return False

            infos.append({
                'filename': "firehol_level1.netset",
                'label': "Firehol Level 1 netset",
                'description': "Firehol IPSET Level 1",
                'type': "ipv4_netset",
                'url': IPSET_VULTURE + "firehol_level1.netset"
            })
            infos.append({
                'filename': "vulture-v4.netset",
                'label': "Vulture Cloud IPv4",
                'description': "Vulture Cloud IPv4",
                'type': "ipv4_netset",
                'url': IPSET_VULTURE + "vulture-v4.netset"
            })
            infos.append({
                'filename': "vulture-v6.netset",
                'label': "Vulture Cloud IPv6",
                'description': "Vulture Cloud IPv6",
                'type': "ipv6_netset",
                'url': IPSET_VULTURE + "vulture-v6.netset"
            })

            for info in infos:
                label = info['label']
                description = info['description']
                entry_type = info['type']
                url = info.get('url', IPSET_VULTURE + info['filename'])
                nb_netset = info.get('nb_netset', 0)
                nb_unique = info.get('nb_unique', 0)
                # Add predator api key in filename
                encoded_token = b64encode(
                    predator_token.encode('utf8')).decode('utf8')
                filename = ".".join(info['filename'].split('.')[:-1]) + "_" + encoded_token + "." + \
                           info['filename'].split('.')[-1]
                """ Create/update object """
                try:
                    reputation_ctx = ReputationContext.objects.get(
                        filename=filename)
                except ReputationContext.DoesNotExist:
                    reputation_ctx = ReputationContext(filename=filename)
                reputation_ctx.name = label
                reputation_ctx.url = url
                reputation_ctx.db_type = entry_type
                reputation_ctx.label = label
                reputation_ctx.description = description
                reputation_ctx.nb_netset = nb_netset
                reputation_ctx.nb_unique = nb_unique
                reputation_ctx.internal = True
                # Use predator_apikey only for predator requests
                if "predator.vultureproject.org" in reputation_ctx.url:
                    reputation_ctx.custom_headers = {
                        'Authorization': predator_token
                    }
                else:
                    reputation_ctx.custom_headers = {}
                reputation_ctx.save()
                logger.info("Reputation context {} created.".format(label))

    # On ALL nodes, write databases on disk
    # All internal reputation contexts are retrieved and created if needed
    # We can now download and write all reputation contexts
    # If tenant id given, only write on disk related reputation databases
    if tenant_id:
        encoded_token = b64encode(
            tenant.predator_apikey.encode('utf8')).decode('utf8')
        reputation_ctxs = ReputationContext.mongo_find({
            "enable_hour_download": "true",
            "filename": {
                "$regex": ".*_{}.[a-z]+$".format(encoded_token)
            }
        })
    else:
        reputation_ctxs = ReputationContext.objects.filter(
            enable_hour_download=True)
    for reputation_ctx in reputation_ctxs:
        try:
            content = reputation_ctx.download()
        except VultureSystemError as e:
            if "404" in str(e) or "403" in str(e) and reputation_ctx.internal:
                logger.info(
                    "Security_update::info: Reputation context '{}' is now unavailable ({}). "
                    "Deleting it.".format(str(e), reputation_ctx))
                reputation_ctx.delete()
            else:
                logger.error(
                    "Security_update::error: Failed to download reputation database '{}' : {}"
                    .format(reputation_ctx.name, e))
            continue
        except Exception as e:
            logger.error(
                "Security_update::error: Failed to download reputation database '{}' : {}"
                .format(reputation_ctx.name, e))
            continue
        try:
            tmp_filename = "{}{}".format("/tmp/", get_random_string())
            with open(tmp_filename, "wb") as f:
                f.write(content)
            """ Immediatly reload the rsyslog service to prevent crash on MMDB access """
            # Filename is a variable of us (not injectable)
            reload_rsyslog = subprocess.run([
                '/usr/local/bin/sudo /bin/mv {} {}'
                '&& /usr/local/bin/sudo /usr/sbin/jexec '
                'rsyslog /usr/sbin/service rsyslogd reload'.format(
                    tmp_filename, reputation_ctx.absolute_filename)
            ],
                                            stdout=subprocess.PIPE,
                                            stderr=subprocess.PIPE,
                                            shell=True)
            if reload_rsyslog.returncode == 1:
                if "rsyslogd not running" in reload_rsyslog.stderr.decode(
                        'utf8'):
                    logger.info(
                        "Crontab::security_update: Database written and rsyslogd not runing."
                    )
                else:
                    logger.error(
                        "Crontab::security_update: It seems that the database cannot be written : {}"
                        .format(e))
            elif reload_rsyslog.returncode == 0:
                logger.info(
                    "Crontab::security_update: Database written and rsyslogd reloaded."
                )
            else:
                logger.error(
                    "Crontab::security_update: Database write failure : "
                    "stdout={}, stderr={}".format(
                        reload_rsyslog.stdout.decode('utf8'),
                        reload_rsyslog.stderr.decode('utf8')))
            logger.info(
                "Crontab::security_update: Reputation database named '{}' (file '{}') successfully written."
                .format(reputation_ctx.name, reputation_ctx.absolute_filename))
        except Exception as e:
            logger.error(
                "Security_update::error: Failed to write reputation database '{}' : {}"
                .format(reputation_ctx.name, e))

    logger.info("Security_update done.")

    return True
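The per-tenant filename scheme embeds the base64 of the predator API key between the stem and the extension of the original filename; a small self-contained sketch of that encoding (the token value is illustrative):

from base64 import b64encode

token = "my-predator-key"  # illustrative value
encoded = b64encode(token.encode('utf8')).decode('utf8')
stem, ext = "firehol_level1.netset".rsplit('.', 1)
per_tenant = "{}_{}.{}".format(stem, encoded, ext)
# firehol_level1_bXktcHJlZGF0b3Ita2V5.netset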
Example #18
# Django setup part
import os
import sys

sys.path.append('/home/vlt-os/vulture_os')
os.environ.setdefault("DJANGO_SETTINGS_MODULE", 'vulture_os.settings')

import django
from django.conf import settings
django.setup()

from system.cluster.models import Cluster
from darwin.policy.models import FilterPolicy


if __name__ == "__main__":

    node = Cluster.get_current_node()
    if not node:
        print("Current node not found. Maybe the cluster has not been initiated yet.")
    else:
        for filter_policy in FilterPolicy.objects.all():
            filter_policy.config["redis_socket_path"] = "/var/sockets/redis/redis.sock"
            filter_policy.config["alert_redis_list_name"] = "darwin_alerts"
            filter_policy.config["alert_redis_channel_name"] = "darwin.alerts"
            filter_policy.config["log_file_path"] = "/var/log/darwin/alerts.log"

            if filter_policy.filter.name == 'tanomaly':
                if filter_policy.config.get("redis_list_name", None):
                    del filter_policy.config['redis_list_name']
                filter_policy.mmdarwin_enabled = False
                filter_policy.mmdarwin_parameters = []

            filter_policy.save()  # persist the updated config (assumed intent)
Example #19
def security_update(node_logger=None):
    """
    :return: Update Vulture's security databases
    """
    # Get proxy first
    proxies = get_proxy()
    """ Every node needs to be up2date """
    try:
        logger.info("Crontab::security_update: calling pkg update...")
        res = subprocess.check_output(
            ["/usr/local/bin/sudo", "/usr/sbin/pkg",
             "-ohttp_proxy={}".format(proxies.get('http', "")),
             "-ohttps_proxy={}".format(proxies.get('https', "")),
             "-oftp_proxy={}".format(proxies.get('ftp', "")),
             "update"],
            stderr=subprocess.PIPE).decode("utf-8")
        if "All repositories are up to date" not in res:
            logger.error("Crontab::security_update: Unable to update pkg")
        else:
            logger.info(
                "Crontab::security_update: All repositories are up to date")
    except subprocess.CalledProcessError as e:
        logger.error("Failed to update pkg packages : {}".format(
            str(e.stderr.decode('utf8'))))
    except Exception as e:
        logger.error("Failed to update pkg packages : {}".format(str(e)))
    """ Do we have something urgent to update ? """
    try:
        logger.info("Crontab::security_update: calling pkg upgrade...")
        res = subprocess.check_output(
            ["/usr/local/bin/sudo", "/usr/sbin/pkg",
             "-ohttp_proxy={}".format(proxies.get('http', "")),
             "-ohttps_proxy={}".format(proxies.get('https', "")),
             "-oftp_proxy={}".format(proxies.get('ftp', "")),
             "audit", "-F"],
            stderr=subprocess.PIPE).decode('utf8')
        if "0 problem" in res:
            logger.info("Crontab::security_update: No vulnerability found.")
        elif "is vulnerable" in res:
            logger.info(
                "Crontab::security_update: Security problem found : {}".format(
                    res))
            security_alert(
                "Security problem found on node {}".format(get_hostname()),
                "danger", res)
    except subprocess.CalledProcessError as e:
        if e.stdout.decode("utf-8").startswith("0 problem"):
            logger.info("Crontab::security_update: No vulnerability found.")
        elif "is vulnerable" in e.stdout.decode("utf-8"):
            logger.info(
                "Crontab::security_update: Security problem found : {}".format(
                    e.stdout.decode('utf-8')))
            security_alert(
                "Security problem found on node {}".format(get_hostname()),
                "danger", e.stdout.decode("utf-8"))
        else:
            logger.error(
                "Crontab::security_update: Failed to retrieve vulnerabilities : "
                "{}".format(str(e)))
    except Exception as e:
        logger.error(
            "Crontab::security_update: Failed to retrieve vulnerabilities : {}"
            .format(e))

    # If it is the master node, retrieve the databases
    if Cluster.get_current_node().is_master_mongo:
        # Retrieve predator_token
        predator_token = Cluster.get_global_config().predator_apikey
        """ If we are the master node, download newest reputation databases """
        try:
            logger.info("Crontab::security_update: get Vulture's ipsets...")
            infos = requests.get(IPSET_VULTURE + "index.json",
                                 headers={
                                     'Authorization': predator_token
                                 },
                                 proxies=proxies,
                                 timeout=5).json()
        except Exception as e:
            logger.error(
                "Crontab::security_update: Unable to download Vulture's ipsets: {}"
                .format(e))
            return False

        infos.append({
            'filename': "GeoLite2-Country.mmdb",
            'label': "Geolite2 Country",
            'description': "Maxmind DB's Geoip country database",
            'type': "GeoIP",
            'url': "https://updates.maxmind.com/geoip/databases/GeoLite2-Country/update"
        })
        infos.append({
            'filename': "GeoLite2-City.mmdb",
            'label': "Geolite2 City",
            'description': "Maxmind DB's Geoip city database",
            'type': "GeoIP",
            'url': "https://updates.maxmind.com/geoip/databases/GeoLite2-City/update"
        })
        infos.append({
            'filename': "firehol_level1.netset",
            'label': "Firehol Level 1 netset",
            'description': "Firehol IPSET Level 1",
            'type': "ipv4_netset",
            'url': IPSET_VULTURE + "firehol_level1.netset"
        })
        infos.append({
            'filename': "vulture-v4.netset",
            'label': "Vulture Cloud IPv4",
            'description': "Vulture Cloud IPv4",
            'type': "ipv4_netset",
            'url': IPSET_VULTURE + "vulture-v4.netset"
        })
        infos.append({
            'filename': "vulture-v6.netset",
            'label': "Vulture Cloud IPv6",
            'description': "Vulture Cloud IPv6",
            'type': "ipv6_netset",
            'url': IPSET_VULTURE + "vulture-v6.netset"
        })

        for info in infos:
            filename = info['filename']
            label = info['label']
            description = info['description']
            entry_type = info['type']
            url = info.get('url', IPSET_VULTURE + filename)
            nb_netset = info.get('nb_netset', 0)
            nb_unique = info.get('nb_unique', 0)
            """ Create/update object """
            try:
                reputation_ctx = ReputationContext.objects.get(
                    filename=filename)
            except ReputationContext.DoesNotExist:
                reputation_ctx = ReputationContext(filename=filename)
            reputation_ctx.name = label
            reputation_ctx.url = url
            reputation_ctx.db_type = entry_type
            reputation_ctx.label = label
            reputation_ctx.description = description
            reputation_ctx.nb_netset = nb_netset
            reputation_ctx.nb_unique = nb_unique
            reputation_ctx.internal = True
            # Use predator_apikey only for predator requests
            if "predator.vultureproject.org" in reputation_ctx.url:
                reputation_ctx.custom_headers = {
                    'Authorization': predator_token
                }
            else:
                reputation_ctx.custom_headers = {}
            reputation_ctx.save()
            logger.info("Reputation context {} created.".format(label))

    # On ALL nodes, write databases on disk
    # All internal reputation contexts are retrieved and created if needed
    # We can now download and write all reputation contexts
    for reputation_ctx in ReputationContext.objects.all():
        try:
            content = reputation_ctx.download()
        except Exception as e:
            logger.error(
                "Security_update::error: Failed to download reputation database '{}' : {}"
                .format(reputation_ctx.name, e))
            continue
        try:
            tmp_filename = "{}{}".format("/tmp/", get_random_string())
            with open(tmp_filename, "wb") as f:
                f.write(content)
            """ Immediatly reload the rsyslog service to prevent crash on MMDB access """
            # Filename is a variable of us (not injectable)
            reload_rsyslog = subprocess.run([
                '/usr/local/bin/sudo /bin/mv {} {}'
                '&& /usr/local/bin/sudo /usr/sbin/jexec '
                'rsyslog /usr/sbin/service rsyslogd reload'.format(
                    tmp_filename, reputation_ctx.absolute_filename)
            ],
                                            stdout=subprocess.PIPE,
                                            stderr=subprocess.PIPE,
                                            shell=True)
            if reload_rsyslog.returncode == 1:
                if "rsyslogd not running" in reload_rsyslog.stderr.decode(
                        'utf8'):
                    logger.info(
                        "Crontab::security_update: Database written and rsyslogd not runing."
                    )
                else:
                    logger.error(
                        "Crontab::security_update: It seems that the database cannot be written : {}"
                        .format(e))
            elif reload_rsyslog.returncode == 0:
                logger.info(
                    "Crontab::security_update: Database written and rsyslogd reloaded."
                )
            else:
                logger.error(
                    "Crontab::security_update: Database write failure : "
                    "stdout={}, stderr={}".format(
                        reload_rsyslog.stdout.decode('utf8'),
                        reload_rsyslog.stderr.decode('utf8')))
            logger.info(
                "Crontab::security_update: Reputation database named '{}' (file '{}') successfully written."
                .format(reputation_ctx.name, reputation_ctx.absolute_filename))
        except Exception as e:
            logger.error(
                "Security_update::error: Failed to write reputation database '{}' : {}"
                .format(reputation_ctx.name, e))

    logger.info("Security_update done.")

    return True
Example #20
def address_cleanup(logger):
    """
        Call ifconfig DOWN and delete useless network interfaces

    :param logger: A logger handler
    :return: True / False
    """

    from system.cluster.models import Cluster, NetworkInterfaceCard
    node = Cluster.get_current_node()

    ret = True
    for nic in NetworkInterfaceCard.objects.filter(node=node):
        """ Do an ifconfig delete on running IP Address that
        do not exists anymore """
        for addr in nic.get_running_addresses():
            ip, prefix = addr.split("/")
            if ":" in ip:
                family = "inet6"
            else:
                family = "inet"

            found = False
            for netif in node.addresses(nic):
                if netif.ip == ip:
                    if "/" in netif.prefix_or_netmask:
                        netif.prefix = netif.prefix_or_netmask[1:]
                    else:
                        netif.prefix = netmask2prefix(netif.prefix_or_netmask)
                        if netif.prefix == 0:
                            netif.prefix = netif.prefix_or_netmask

                    if netif.family == "inet" and str(
                            netif.prefix) == str(prefix):
                        logger.debug(
                            "Node::address_cleanup(): IPv4 {}/{} has been found on {}"
                            .format(ip, prefix, nic.dev))
                        found = True
                    elif netif.family == "inet6" and str(prefix) == str(
                            netif.prefix_or_netmask):
                        logger.debug(
                            "Node::address_cleanup(): IPv6 {}/{} has been found on {}"
                            .format(ip, prefix, nic.dev))
                        found = True
            """ IP Address not found: Delete it """
            if not found:
                logger.info(
                    "Node::address_cleanup(): Deleting {}/{} on {}".format(
                        ip, prefix, nic.dev))

                logger.debug(
                    'Node::address_cleanup() /usr/local/bin/sudo /sbin/ifconfig {} {} {} delete'
                    .format(nic.dev, family,
                            str(ip) + "/" + str(prefix)))

                proc = subprocess.Popen([
                    '/usr/local/bin/sudo', '/sbin/ifconfig', nic.dev, family,
                    str(ip) + "/" + str(prefix), 'delete'
                ],
                                        stdout=subprocess.PIPE,
                                        stderr=subprocess.PIPE)

                success, error = proc.communicate()
                if error:
                    logger.error("Node::address_cleanup(): {}".format(
                        str(error)))
                """ Delete the permanent configuration file """
                for conf in glob.glob("/etc/rc.conf.d/netaliases*"):
                    with open(conf, 'r') as f:
                        delete = False
                        for line in f:
                            if nic.dev in line and str(ip) + "/" + str(
                                    prefix) in line and family in line:
                                delete = True
                                logger.debug(
                                    "Node::address_cleanup(): Line to delete {}"
                                    .format(line))

                    if delete:
                        logger.info(
                            "Node::address_cleanup(): Deleting {}".format(
                                conf))
                        proc = subprocess.Popen(
                            ['/usr/local/bin/sudo', '/bin/rm', conf],
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
                        success, error = proc.communicate()
                        if error:
                            logger.error("Node::address_cleanup(): {}".format(
                                str(error)))
                            ret = False
                            continue
                        else:
                            logger.info(
                                "Node::address_cleanup() {}: Ok".format(conf))

    return ret
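The by-hand family detection and netmask-to-prefix conversion above can be delegated to the standard library's ipaddress module, which accepts both prefix and netmask notation; a sketch:

import ipaddress

iface = ipaddress.ip_interface("192.0.2.10/255.255.255.0")  # netmask form accepted for IPv4
print(iface.network.prefixlen)  # 24
print(iface.version)            # 4 -> "inet", 6 -> "inet6"

iface6 = ipaddress.ip_interface("2001:db8::1/64")
print(iface6.network.prefixlen)  # 64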
Example #21
def write_network_config(logger):
    """ Synchronize network configuration on disk

     :param logger: A logger handler
    :param netif_id: The _id of the related Network Address we are working on
    :return: True / False
    """

    from system.cluster.models import (Cluster, NetworkInterfaceCard,
                                       NetworkAddressNIC)
    node = Cluster.get_current_node()

    ret = True

    i = 0
    has_system = False
    for nic in NetworkInterfaceCard.objects.filter(node=node):
        j = 0
        for address_nic in NetworkAddressNIC.objects.filter(nic=nic):
            address = address_nic.network_address

            # Aliases address
            if address.is_system is False:
                config = address.rc_config(nic.dev).format(j)
                args = 'netaliases{}{}'.format(i, j)
                loop = "0"
            else:
                # System address
                config = address.rc_config(nic.dev, True)
                args = 'network'
                if not has_system:
                    loop = "0"
                    has_system = True
                else:
                    loop = str(i + j)

            try:
                proc = subprocess.Popen([
                    '/usr/local/bin/sudo',
                    '/home/vlt-os/scripts/write_netconfig.sh', config, args,
                    loop
                ],
                                        stdout=subprocess.PIPE,
                                        stderr=subprocess.PIPE)
                success, error = proc.communicate()

                if error:
                    logger.error(
                        "Node::write_network_config() {}:{}: {}".format(
                            nic.dev, address.ip_cidr, str(error)))
                    ret = False
                    continue
                else:
                    j = j + 1
                    logger.info(
                        "Node::write_network_config() {}:{}: Ok".format(
                            nic.dev, address.ip_cidr))
                    continue

            except Exception as e:
                logger.error("Node::write_network_config(): {}".format(str(e)))
                ret = False
                continue

        i = i + 1
    """ Network IP address are configured: Handle routing """
    config = "_EOL"
    config += "gateway_enable=\"YES\" _EOL"
    config += "ipv6_gateway_enable=\"YES\" _EOL"
    if node.gateway:
        config += "defaultrouter=\"{}\" _EOL".format(node.gateway)

    if node.gateway_ipv6:
        config += "ipv6_defaultrouter=\"{}\" _EOL".format(node.gateway_ipv6)

    if node.static_routes:
        config += "{}_EOL".format(node.static_routes.replace("\n", "_EOL"))

    try:
        proc = subprocess.Popen([
            '/usr/local/bin/sudo', '/home/vlt-os/scripts/write_netconfig.sh',
            config, 'routing',
            str(0)
        ],
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        success, error = proc.communicate()

        if error:
            logger.error("Node::write_network_config(routing) : {}".format(
                str(error)))
            ret = False
        else:
            logger.info("Node::write_network_config(routing) : Ok")

    except Exception as e:
        logger.error("Node::write_network_config(routing): {}".format(str(e)))
        ret = False

    return ret
Example #22
def monitor():

    node = Cluster.get_current_node()

    def get_service_status(service_class):
        """ Get and persist the status of a service class (e.g. HaproxyService)
        :return: The saved ServiceStatus object """
        service_inst = service_class()
        service_status = ServiceStatus.objects.filter(name=service_inst.service_name).first() \
                         or ServiceStatus(name=service_inst.service_name)
        service_status.status = service_inst.status()[0]
        service_status.friendly_name = service_inst.friendly_name
        service_status.save()
        return service_status

    """ Initialize date and Monitor object """
    date = datetime.now().strftime('%Y-%m-%d %H:%M:00')
    date = datetime.strptime(date, '%Y-%m-%d %H:%M:00')
    mon = Monitor(date=make_aware(date), node=node)
    mon.services_id = set()

    for service in [
            HaproxyService, DarwinService, PFService, NetdataService,
            StrongswanService, OpenvpnService, RsyslogService
    ]:

        # Keep some statuses in local variables for reuse below
        if service == StrongswanService:
            strongswan_status = get_service_status(StrongswanService)
        elif service == OpenvpnService:
            openvpn_status = get_service_status(OpenvpnService)
        elif service == RsyslogService:
            rsyslogd_status = get_service_status(RsyslogService)

        mon.services.add(get_service_status(service))
    """ Get status of Redis, Mongod and Sshd """
    # Instantiate mother class to get status easily
    service = Service()
    for service_name in ("redis", "mongod", "sshd"):
        service_status = ServiceStatus.objects.filter(name=service_name).first() \
                         or ServiceStatus(name=service_name)
        service_status.status = service.status(service_name)[0]
        mon.services.add(service_status)

    mon.save()
    """ HAPROXY """
    frontends = Frontend.objects.all().only('name', 'status', 'enabled',
                                            'mode', 'listening_mode')
    backends = Backend.objects.all().only('name', 'status', 'enabled')
    if frontends.count() > 0 or backends.count() > 0:
        statuses = {}
        try:
            # Return a dict { frontend_name: frontend_status, backend_name: backend_status, ... }
            statuses = get_stats()

        except ServiceError as e:
            logger.error(str(e))
        except Exception as e:
            logger.error("Failed to retrieve status of HAProxy: {}".format(
                str(e)))
            logger.exception(e)
        """ FRONTENDS """
        for frontend in frontends:
            if node in frontend.get_nodes():
                if not frontend.enabled:
                    status = "DISABLED"
                elif frontend.rsyslog_only_conf:
                    status = {
                        'UP': "OPEN",
                        'DOWN': "STOP"
                    }.get(rsyslogd_status.status, rsyslogd_status.status)
                else:
                    status = statuses.get("FRONTEND",
                                          {}).get(frontend.name, "ERROR")
                logger.debug("Status of frontend '{}': {}".format(
                    frontend.name, status))

                if status != frontend.status.get(node.name):
                    frontend.status[node.name] = status
                    frontend.save()

            elif frontend.status.get(node.name):
                frontend.status.pop(node.name, None)
                frontend.save()
        """ BACKENDS """
        for backend in backends:
            status = "DISABLED" if not backend.enabled else statuses.get(
                "BACKEND", {}).get(backend.name, "ERROR")
            logger.debug("Status of backend '{}': {}".format(
                backend.name, status))
            if backend.status.get(node.name) != status:
                backend.status[node.name] = status
                backend.save()
    """ STRONGSWAN """
    try:
        strongswan = Strongswan.objects.get(node=node)

    except ObjectDoesNotExist:
        # If there is no IPSEC conf on that node, pass
        pass
    else:
        default = ("STOP", "")

        try:
            statusall, tunnel_statuses, ups, connectings = get_ipsec_tunnels_stats(
            )
        except ServiceError as e:
            logger.exception(e)
            default = ("ERROR", str(e))
            statusall, tunnel_statuses, ups, connectings = "ERROR", {}, 0, 0

        for network in strongswan.ipsec_rightsubnet.split(','):
            strongswan.tunnels_status[network] = tunnel_statuses.get(
                network, default)
            logger.debug("Status of IPSEC Tunnel '{}' : {}".format(
                network, strongswan.tunnels_status[network]))

        strongswan.status = strongswan_status.status
        strongswan.statusall = statusall
        strongswan.tunnels_up = ups
        strongswan.tunnels_connecting = connectings
        strongswan.save()
    """ OPENVPN """
    try:
        openvpn = Openvpn.objects.get(node=node)
    except ObjectDoesNotExist:
        # If there is no VPNSSL conf on that node, pass
        pass
    else:

        openvpn.tunnels_status = get_ssl_tunnels_stats()
        openvpn.status = openvpn_status.status
        openvpn.save()
    """ DARWIN """
    filters = FilterPolicy.objects.all()
    if filters.count() > 0:
        filter_statuses = {}
        default = "DOWN"
        try:
            filter_statuses = monitor_darwin_filters()

        except ServiceError as e:
            logger.error(str(e))
            default = "ERROR"

        for dfilter in filters:
            dfilter.status[node.name] = default

            filter_status = filter_statuses.get(dfilter.name, False)
            if not dfilter.enabled:
                dfilter.status[node.name] = "DISABLED"
            elif filter_status is None:
                dfilter.status[node.name] = "ERROR"
            elif filter_statuses.get(dfilter.name,
                                     {}).get('status') is not None:
                dfilter.status[node.name] = filter_statuses.get(
                    dfilter.name).get('status').upper()
            dfilter.save()

    # Delete old monitoring
    last_date = (timezone.now() - timedelta(days=30))
    for m in Monitor.objects.filter(date__lte=last_date):
        m.delete()

    # Update bhyve VM status
    vm_update_status()

    return True
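The filter().first() or ServiceStatus(...) pattern in get_service_status can also be expressed with Django's update_or_create, which fetches or creates and saves in one call; a sketch under the same model assumptions:

service_status, _ = ServiceStatus.objects.update_or_create(
    name=service_inst.service_name,
    defaults={
        'status': service_inst.status()[0],
        'friendly_name': service_inst.friendly_name,
    })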
Example #23
    def try_compile(self):
        node = Cluster.get_current_node()
        if node:
            node.api_request("toolkit.yara.yara.try_compile_yara_rules",
                             self.id)