Example #1
def access_control_get(request):
    try:
        log_id = request.POST.get('log_id')

        if log_id:
            client = MongoBase()
            log_line = client.execute_request("logs",
                                              "haproxy",
                                              {'_id': ObjectId(log_id)},
                                              first=True)
            log_line['_id'] = str(log_line['_id'])

            return JsonResponse({'status': True, 'log_line': log_line})

        pk = request.POST.get('pk')
        if pk:
            try:
                acl = AccessControl.objects.get(pk=pk).to_template()
            except AccessControl.DoesNotExist:
                return JsonResponse({
                    'status': False,
                    'error': _('ACL does not exist')
                })
        else:
            acl = [a.to_template() for a in AccessControl.objects.all()]

        return JsonResponse({'status': True, 'acl': acl})

    except Exception as e:
        logger.critical(e, exc_info=1)
        if settings.DEV_MODE:
            raise

        return JsonResponse({'status': False, 'error': _('An error occurred')})
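A quick way to exercise this view is Django's test client; a minimal sketch, assuming the view is routed at a hypothetical /api/acl/get URL:

# Sketch only: the URL is an assumption, the field names come from the view above.
from django.test import Client

client = Client()
# Fetch a single ACL by primary key
print(client.post('/api/acl/get', {'pk': 1}).json())
# Omit pk and log_id to list every ACL
print(client.post('/api/acl/get', {}).json())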
Example #2
def cluster_add(request):

    slave_ip = request.POST.get('slave_ip')
    slave_name = request.POST.get('slave_name')

    # FIXME: improve security check (valid IPv4 / IPv6 and valid name)
    if not slave_name or not slave_ip:
        return JsonResponse({'status': False, 'message': 'Invalid call'})
    """ Make the slave_name resolvable """
    node = Cluster.get_current_node()
    node.api_request("toolkit.network.network.make_hostname_resolvable",
                     (slave_name, slave_ip))
    """ Now the slave should be in the cluster:
        Add it's management IP """
    node = Node()
    node.name = slave_name
    node.management_ip = slave_ip
    node.internet_ip = slave_ip
    node.save()

    # We need to wait for the VultureD daemon to reload PF Conf
    time.sleep(6)
    """ Add NEW node into the REPLICASET, as a pending member """
    c = MongoBase()
    c.connect()
    cpt = 0
    response = None
    while not response:
        try:
            logger.debug("Adding {} to replicaset".format(slave_name))
            response = c.repl_add(slave_name + ':9091')
        except Exception as e:
            logger.error(
                "Cannot connect to slave for the moment : {}".format(e))
            cpt += 1
            if cpt > 10:
                logger.error(
                    "Failed to connect to the slave 10 times, aborting.")
                return JsonResponse({
                    'status':
                    False,
                    'message':
                    'Error during repl_add. Check logs'
                })
        logger.info("Waiting for next connection to slave ...")
        time.sleep(1)

    if response:
        node.api_request('toolkit.network.network.refresh_nic')

        return JsonResponse({'status': True, 'message': 'ok'})
    else:
        return JsonResponse({
            'status': False,
            'message': 'Error during repl_add. Check logs'
        })
Example #3
def remove_session_and_logs_filters(apps, schema_editor):
    # Manually delete all Darwin filters to prevent migration issues; they will be re-created by loaddata
    m = MongoBase()
    m.connect_primary()
    # If the node is not yet installed, no need to drop collections
    if m.db and m.db['vulture']:
        coll = m.db['vulture']['darwin_filterpolicy']
        if coll:
            coll.delete_many({})
        coll = m.db['vulture']['darwin_darwinfilter']
        if coll:
            coll.delete_many({})
Example #4
    def is_standalone(self):
        """
        Check if the current Node is a member of mongoDB
        :return: True / False, or None in case of a failure
        """
        c = MongoBase()
        ok = c.connect()
        if ok:
            c.connect_primary()
            config = c.db.admin.command("replSetGetConfig")['config']
            return len(config['members']) == 1

        return True
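For reference, the membership count used above can be reproduced with a plain pymongo client; a sketch, assuming a reachable replicaset member on port 9091:

# Sketch: connection details are assumptions, the command itself is standard MongoDB.
from pymongo import MongoClient

client = MongoClient("mongodb://localhost:9091")
config = client.admin.command("replSetGetConfig")["config"]
print(len(config["members"]) == 1)  # True when the replicaset has a single member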
Example #5
    def __init__(self, type_logs):
        """
        Open the specified file and use it as the stream for logging.
        """
        # keep the absolute path, otherwise derived classes which use this
        # may come a cropper when the current directory changes
        self._name = "Database Handler"
        self.filters = []
        self.lock = None

        self.database = "logs"
        self.collection = "internal"

        self.mongo = MongoBase()
Example #6
def reconcile():
    # MONGO #
    m = MongoBase()
    if not m.connect():
        return False
    m.connect_primary()

    # REDIS #
    r = RedisBase()
    master_node = r.get_master()
    r = RedisBase(node=master_node)

    redis_list_name = "logs_darwin"
    ignored_alerts = list()

    range_len = r.redis.llen(redis_list_name)
    alerts = r.redis.lrange(redis_list_name, 0, range_len - 1)
    r.redis.ltrim(redis_list_name, range_len, -1)

    for alert in alerts:
        alert = str(alert, "utf-8")
        a = json.loads(alert)
        evt_id = a.get("evt_id")
        if evt_id is None:
            ignored_alerts.append(a)
            continue
        query = {"darwin_id": evt_id}
        newvalue = {
            "$set": {
                "darwin_alert_details": a,
                "darwin_is_alert": True
            }
        }
        m.update_one("logs", query, newvalue)
    return True
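To feed this reconciliation by hand, an alert only needs an evt_id matching a darwin_id already stored in the logs; a sketch with plain redis-py, reusing the list name from the code above:

# Sketch: pushes a fake alert onto the list that reconcile() drains.
import json
import redis

r = redis.Redis(host="localhost", port=6379)
alert = {"evt_id": "some-darwin-id", "details": "test alert"}  # evt_id value is a placeholder
r.rpush("logs_darwin", json.dumps(alert))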
Example #7
    def is_master_mongo(self):
        """
        Check if the current Node is master or not
        :return: True / False, or None in case of a failure
        """
        c = MongoBase()
        ok = c.connect()
        if ok:
            primary_node = c.get_primary()
        else:
            return None

        if ok and primary_node == self.name + ':9091':
            return True
        elif ok:
            return False

        return None
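The primary lookup wrapped by get_primary() can be reproduced with pymongo directly; a sketch, connection details assumed:

# Sketch: ask any replicaset member who the current primary is.
from pymongo import MongoClient

client = MongoClient("mongodb://localhost:9091")
hello = client.admin.command("hello")  # use "isMaster" on MongoDB < 4.4.2
print(hello.get("primary"))            # e.g. "node1:9091", compared to self.name + ':9091' above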
Example #8
def cluster_join(request, object_id, api=False):
    """ Join a node into the MongoDB replicaset
    This is an API request
    """
    if not request.is_ajax() and not api:
        return HttpResponseBadRequest()

    try:
        node_model = Node.objects.get(pk=object_id)
    except ObjectDoesNotExist:
        return HttpResponseForbidden("Injection detected")

    c = MongoBase()
    c.connect()

    # Automagically connect to the primary node
    status, message = c.repl_add(node_model.name + ':9091')

    return JsonResponse({'status': status, 'message': message})
Example #9
def check_internal_tasks():
    try:
        # Run this crontab only on master node
        node = Cluster.get_current_node()
        if not node.is_master_mongo:
            return

        # Deleting done internal tasks older than a month
        last_month_date = make_aware(datetime.datetime.now() - datetime.timedelta(days=30))
        MessageQueue.objects.filter(status="done", date_add__lte=last_month_date).delete()

        # Check whether a node has not executed its duty for a while.
        # If so, remove it from the cluster
        message_queue_not_finished = MessageQueue.objects.filter(date_add__lt=last_month_date, status="new")

        node_to_remove = []
        for message in message_queue_not_finished:
            if message.node not in node_to_remove:
                node_to_remove.append(message.node)

            message.delete()

        for n in node_to_remove:
            logger.info('[REMOVING DEAD NODE FROM CLUSTER] Node: {}'.format(n.name))
            c = MongoBase()
            c.connect_primary()
            c.repl_remove(n.name + ":9091")

    except Exception as e:
        logger.error("Crontab::check_internal_tasks: {}".format(e), exc_info=1)
        raise
Example #10
def cluster_stepdown(request, object_id, api=False):
    """
    Ask the primary node to become secondary so that a promotion can occur
    This is an API request
    :param request:
    :param object_id:
    :param api:
    :return:
    """
    if not request.is_ajax() and not api:
        return HttpResponseBadRequest()

    try:
        node_model = Node.objects.get(pk=object_id)
    except ObjectDoesNotExist:
        if api:
            return JsonResponse({'error': _("Object does not exist.")},
                                status=404)
        return HttpResponseForbidden("Injection detected")

    c = MongoBase()
    c.connect()

    # If the asked node is not primary, return error
    if c.get_primary() != node_model.name + ":9091":
        return JsonResponse({
            'status': False,
            'error': _("Cannot step down a non-primary node.")
        })

    # Automagically connect to the primary node
    status, message = c.repl_set_step_down()

    return JsonResponse({'status': status, 'message': message})
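repl_set_step_down() presumably wraps MongoDB's replSetStepDown admin command; a pymongo sketch of the raw call, with an assumed 60-second step-down window:

# Sketch: must be run against the current primary.
from pymongo import MongoClient
from pymongo.errors import AutoReconnect

client = MongoClient("mongodb://localhost:9091")
try:
    client.admin.command("replSetStepDown", 60)  # seconds during which the node refuses to be primary
except AutoReconnect:
    # The server closes connections while stepping down; this is expected.
    pass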
Example #11
    def __init__(self, params):
        super().__init__()

        self.type_logs = params['type_logs']
        self.rules = params['rules']
        self.frontend_name = params.get('frontend_name')
        self.frontend = params.get('frontend')
        self.columns = params.get('columns')

        self.start = params.get('start')
        self.length = params.get('length')

        self.sorting = "time"
        self.type_sorting = 1
        if self.columns:
            self.sorting = self.columns[params['sorting']]
            self.type_sorting = self.TYPE_SORTING[params['type_sorting']]

        self.startDate = datetime.datetime.strptime(params['startDate'],
                                                    "%Y-%m-%dT%H:%M:%S%z")

        self.endDate = datetime.datetime.strptime(params['endDate'],
                                                  "%Y-%m-%dT%H:%M:%S%z")

        self.time_field = self.TIME_FIELD[self.type_logs]

        type_logs = self.type_logs
        if self.frontend:
            if self.frontend.mode == 'tcp':
                type_logs += "_" + self.frontend.mode

        if type_logs == "message_queue":
            self.DATABASE = const.MESSAGE_QUEUE_DATABASE
        else:
            self.DATABASE = const.LOGS_DATABASE

        self.COLLECTION = self.COLLECTIONS_NAME[type_logs]
        self.client = MongoBase()
Example #12
def update_repo_attributes(apps, schema_editor):
    # Rewrite each portal's repo_attributes to the new condition/action format
    m = MongoBase()
    m.connect_primary()
    # If the node is not yet installed, there is nothing to update
    if m.db and m.db['vulture']:
        coll = m.db['vulture']['authentication_userauthentication']
        if coll:
            for portal in coll.find():
                repo_attributes = [{
                    'condition_var_kind': "constant",
                    'condition_var_name': "1",
                    'condition_criterion': "equals",
                    'condition_match': "1",
                    'action_var_name': r['key'],
                    'action_var_kind': r['source_attr'],
                    'action_var': r['key']
                } for r in portal.get('repo_attributes', [])]
                coll.update_one({'id': portal['id']},
                                {'$set': {
                                    'repo_attributes': repo_attributes
                                }})
                print("Portal {} updated".format(portal['name']))
Example #13
    def reconcile(self):
        node = Cluster.get_current_node()
        if not node.is_master_mongo:
            return False

        mongo = MongoBase()
        if not mongo.connect():
            return False
        mongo.connect_primary()

        redis = RedisBase()
        master_node = redis.get_master()
        redis = RedisBase(node=master_node)

        filepath = ALERTS_FILE

        # Pops alerts produced when vulture was down
        # Do not retry, as there is likely no cache for remaining alerts in current Redis
        self.pops(mongo, redis, filepath, max_tries=1)
        if self.shutdown_flag.is_set():
            return True

        redis_channel = REDIS_CHANNEL
        listener = redis.redis.pubsub()
        listener.subscribe([redis_channel])

        logger.info(
            "Reconcile: start listening {} channel.".format(redis_channel))
        while not self.shutdown_flag.is_set():
            alert = listener.get_message(ignore_subscribe_messages=True,
                                         timeout=2)
            # If we have no messages, alert is None
            if alert:
                # Only use the channel to trigger popping alerts
                self.pops(mongo, redis, filepath)
        return True
Example #14
def configure_node(node_logger):
    """ Generate and write netdata conf files """
    result = ""

    node = Cluster.get_current_node()
    global_config = Cluster.get_global_config()
    """ For each Jinja templates """
    jinja2_env = Environment(loader=FileSystemLoader(JINJA_PATH))
    for template_name in jinja2_env.list_templates():
        """ Perform only "rsyslog_template_*.conf" templates """
        match = re_search("^rsyslog_template_([^\.]+)\.conf$", template_name)
        if not match:
            continue
        template = jinja2_env.get_template(template_name)
        template_path = "{}/05-tpl-01-{}.conf".format(RSYSLOG_PATH,
                                                      match.group(1))
        """ Generate and write the conf depending on all nodes, and current node """
        write_conf(node_logger, [
            template_path,
            template.render({
                'node': node,
                'global_config': global_config
            }), RSYSLOG_OWNER, RSYSLOG_PERMS
        ])
        result += "Rsyslog template '{}' written.\n".format(template_path)
    """ PF configuration for Rsyslog """
    pf_template = jinja2_env.get_template("pf.conf")
    write_conf(node_logger, [
        "{}/pf.conf".format(RSYSLOG_PATH),
        pf_template.render({'mongodb_uri': MongoBase.get_replicaset_uri()}),
        RSYSLOG_OWNER, RSYSLOG_PERMS
    ])
    result += "Rsyslog template 'pf.conf' written.\n"
    """ If this method has been called, there is a reason - a Node has been modified
          so we need to restart Rsyslog because at least PF conf has been changed 
    """
    # if Frontend.objects.filter(enable_logging=True).count() > 0:
    #    node_logger.debug("Logging enabled, reload of Rsyslog needed.")
    restart_service(node_logger)
    node_logger.info("Rsyslog service restarted.")
    result += "Rsyslogd service restarted."

    return result
Example #15
def set_enrichment_tags(apps, schema_editor):
    mongo = MongoBase()
    if not mongo.connect():
        print("[ERROR] could not connect to mongo to update data !!")
        return
    if not mongo.connect_primary():
        print(
            "[ERROR] could not connect to mongo primary, please reload migration"
        )
        return

    mongo.update_many('vulture', 'darwin_filterpolicy', {},
                      {"$set": {
                          "enrichment_tags": []
                      }})
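The update_many wrapper maps onto the standard pymongo operation; for reference, a sketch of the equivalent raw call:

# Sketch with plain pymongo; database and collection names come from the migration above.
from pymongo import MongoClient

client = MongoClient("mongodb://localhost:9091")
client["vulture"]["darwin_filterpolicy"].update_many(
    {},                                 # match every policy document
    {"$set": {"enrichment_tags": []}},  # initialize the new field
)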
Example #16
class DatabaseHandler(logging.StreamHandler):
    """
    A handler class which writes formatted logging records to disk files.
    """
    def __init__(self, type_logs):
        """
        Open the specified file and use it as the stream for logging.
        """
        # keep the absolute path, otherwise derived classes which use this
        # may come a cropper when the current directory changes
        self._name = "Database Handler"
        self.filters = []
        self.lock = None

        self.database = "logs"
        self.collection = "internal"

        self.mongo = MongoBase()

    def emit(self, record):
        """
        Emit a record.
        Save the log into the repository
        """

        if record.levelname != "ERROR":
            return
        try:

            return self.mongo.insert(
                self.database, self.collection, {
                    'timestamp': timezone.now(),
                    'log_level': record.levelname,
                    'filename': record.filename,
                    'message': record.msg,
                    'source': record.name,
                    'node': get_hostname()
                })

        except Exception:
            pass
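Wiring the handler into the logging tree is standard; a short usage sketch (the handler skips its parent __init__, so set a level explicitly; it persists only ERROR records, as emit() shows):

import logging

handler = DatabaseHandler(type_logs="internal")
handler.setLevel(logging.ERROR)
logger = logging.getLogger("vulture")
logger.addHandler(handler)
logger.error("stored in the logs/internal collection")  # assumes a reachable MongoBase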
Example #17
    def set_logs_ttl(self):
        """ Set the retention time of the internal logs databases
            by setting MongoDB TTL indexes on PF, messageQueue and Internal logs
        """
        # Connect to mongodb
        mongo = MongoBase()
        mongo.connect()
        # Call the current node, it will connect to primary automatically
        res, mess = mongo.set_index_ttl("logs", "pf", "time", self.logs_ttl)
        if not res:
            return res, mess
        res, mess = mongo.set_index_ttl("logs", "internal", "timestamp",
                                        self.logs_ttl)
        if not res:
            return res, mess
        res, mess = mongo.set_index_ttl("vulture", "system_messagequeue",
                                        "modified", self.logs_ttl)
        if not res:
            return res, mess
        res, mess = mongo.set_index_ttl("vulture", "system_messagequeue",
                                        "date_add", self.logs_ttl)
        if not res:
            return res, mess
        return True, ""
Example #18
class LogViewerMongo:

    COLLECTIONS_NAME = {
        'pf': 'pf',
        'internal': 'internal',
        'access': 'haproxy',
        'access_tcp': 'haproxy_tcp',
        'impcap': 'impcap',
        'darwin': DARWIN_MONGO_COLLECTION,
        'message_queue': 'system_messagequeue'
    }

    TIME_FIELD = {
        'pf': 'time',
        'access': 'time',
        'access_tcp': 'time',
        'internal': 'timestamp',
        'impcap': 'time',
        'darwin': 'alert_time',
        'message_queue': 'date_add'
    }

    TYPE_SORTING = {'asc': 1, 'desc': -1}

    def __init__(self, params):
        super().__init__()

        self.type_logs = params['type_logs']
        self.rules = params['rules']
        self.frontend_name = params.get('frontend_name')
        self.frontend = params.get('frontend')
        self.columns = params.get('columns')

        self.start = params.get('start')
        self.length = params.get('length')

        self.sorting = "time"
        self.type_sorting = 1
        if self.columns:
            self.sorting = self.columns[params['sorting']]
            self.type_sorting = self.TYPE_SORTING[params['type_sorting']]

        self.startDate = datetime.datetime.strptime(params['startDate'],
                                                    "%Y-%m-%dT%H:%M:%S%z")

        self.endDate = datetime.datetime.strptime(params['endDate'],
                                                  "%Y-%m-%dT%H:%M:%S%z")

        self.time_field = self.TIME_FIELD[self.type_logs]

        type_logs = self.type_logs
        if self.frontend:
            if self.frontend.mode == 'tcp':
                type_logs += "_" + self.frontend.mode

        if type_logs == "message_queue":
            self.DATABASE = const.MESSAGE_QUEUE_DATABASE
        else:
            self.DATABASE = const.LOGS_DATABASE

        self.COLLECTION = self.COLLECTIONS_NAME[type_logs]
        self.client = MongoBase()

    def _prepare_search(self):
        startDate = self.startDate
        endDate = self.endDate

        query = {self.time_field: {'$gte': startDate, '$lte': endDate}}

        if self.frontend_name:
            query.update({'frontend_name': self.frontend_name})

        if self.rules and (len(self.rules.get('$and', []))
                           or len(self.rules.get('$or', []))):
            query.update(self.rules)

        logger.debug(query)
        return query

    def search(self):
        self.query = self._prepare_search()

        nb_res, results = self.client.execute_request(
            database=self.DATABASE,
            collection=self.COLLECTION,
            query=self.query,
            start=self.start,
            length=self.length,
            sorting=self.sorting,
            type_sorting=self.type_sorting)

        data = []
        for res in results:
            res['_id'] = str(res['_id'])

            if 'timestamp_app' in res.keys():
                try:
                    res['timestamp_app'] = datetime.datetime.utcfromtimestamp(
                        float(res['timestamp_app']))
                except ValueError:
                    pass

            if 'unix_timestamp' in res.keys():
                res['unix_timestamp'] = datetime.datetime.utcfromtimestamp(
                    float(res['unix_timestamp']))

            # FIXME Temporary darwin details aggregation
            for darwin_filter_details in [
                    'yara_match', 'anomaly', 'connection', 'domain', 'host'
            ]:
                if darwin_filter_details in res.keys():
                    res['details'] = res[darwin_filter_details]
                    break

            # FIXME Temporary aggregation for DGA certitude
            if "dga_prob" in res.keys():
                res['certitude'] = res['dga_prob']

            for c in self.columns:
                if not res.get(c):
                    res[c] = ""

            data.append(res)

        return nb_res, data

    def graph(self):
        self.query = self._prepare_search()

        match = {"$match": self.query}

        src_ip = "$" + const.MAPPING_GRAPH[self.type_logs]['src_ip']
        dst_ip = "$" + const.MAPPING_GRAPH[self.type_logs]['dst_ip']

        agg = {
            "$group": {
                "_id": {
                    "src_ip": src_ip,
                    "dst_ip": dst_ip,
                },
                "count": {
                    '$sum': 1
                }
            }
        }

        tmp_data = self.client.execute_aggregation(database=self.DATABASE,
                                                   collection=self.COLLECTION,
                                                   agg=[match, agg])

        data = []
        for d in tmp_data:
            try:
                data.append({
                    'src_ip': d['_id']['src_ip'],
                    'dst_ip': d['_id']['dst_ip'],
                    'count': d['count'],
                })
            except KeyError:
                pass

        return data

    def timeline(self):
        delta = self.endDate - self.startDate
        nb_min = delta.total_seconds() / 60  # total_seconds() also counts whole days, unlike .seconds
        nb_hour = nb_min / 60

        logger.debug("days: {}".format(delta.days))
        logger.debug("hour: {}".format(nb_hour))
        logger.debug("min: {}".format(nb_min))
        logger.debug("seconds: {}".format(delta.total_seconds()))

        match = {"$match": self.query}

        agg = {
            '$group': {
                "_id": {
                    'year': {
                        '$year': '${}'.format(self.time_field)
                    },
                    'month': {
                        '$month': '${}'.format(self.time_field)
                    },
                    'dayOfMonth': {
                        '$dayOfMonth': '${}'.format(self.time_field)
                    }
                },
                "count": {
                    "$sum": 1
                }
            }
        }

        if delta.days > 1:
            agg_by = "day"
        else:
            if nb_min > 1000:
                agg_by = "hour"
                agg['$group']['_id']['hour'] = {
                    '$hour': '${}'.format(self.time_field)
                }
            else:
                agg_by = "minute"
                agg['$group']['_id']['hour'] = {
                    '$hour': '${}'.format(self.time_field)
                }
                agg['$group']['_id']['minute'] = {
                    '$minute': '${}'.format(self.time_field)
                }

        tmp_data = self.client.execute_aggregation(database=self.DATABASE,
                                                   collection=self.COLLECTION,
                                                   agg=[match, agg])

        data = {}
        for tmp in tmp_data:
            tmp = dict(tmp)

            if agg_by == "day":
                date = "{}-{}-{}".format(
                    tmp['_id']['year'],
                    tmp['_id']['month'],
                    tmp['_id']['dayOfMonth'],
                )

                date = datetime.datetime.strptime(
                    date, "%Y-%m-%d").strftime('%Y-%m-%dT%H:%M')
            elif agg_by == "hour":
                date = "{}-{}-{} {}".format(
                    tmp['_id']['year'],
                    tmp['_id']['month'],
                    tmp['_id']['dayOfMonth'],
                    tmp['_id']['hour'],
                )
                date = datetime.datetime.strptime(
                    date, "%Y-%m-%d %H").strftime('%Y-%m-%dT%H:%M')
            elif agg_by == "minute":
                date = "{}-{}-{} {}:{}".format(tmp['_id']['year'],
                                               tmp['_id']['month'],
                                               tmp['_id']['dayOfMonth'],
                                               tmp['_id']['hour'],
                                               tmp['_id']['minute'])
                date = datetime.datetime.strptime(
                    date, "%Y-%m-%d %H:%M").strftime('%Y-%m-%dT%H:%M')

            data[date] = tmp['count']

        return fill_data(self.startDate, self.endDate, data, agg_by), agg_by
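The params contract expected by the constructor is implicit; a sketch of a minimal search call, assuming the surrounding module's imports and a reachable MongoDB (column names and dates are placeholders):

params = {
    'type_logs': 'pf',
    'rules': {},
    'columns': ['time', 'src_ip', 'dst_ip'],
    'sorting': 0,               # index into columns
    'type_sorting': 'desc',
    'start': 0,
    'length': 50,
    'startDate': '2020-01-01T00:00:00+0000',
    'endDate': '2020-01-02T00:00:00+0000',
}
viewer = LogViewerMongo(params)
nb_res, rows = viewer.search()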
Example #19
    def post(self, request, object_id, **kwargs):
        confirm = request.POST.get('confirm')
        if confirm == 'yes':
            try:
                obj_inst = self.obj.objects.get(pk=object_id)
            except ObjectDoesNotExist:
                return HttpResponseForbidden("Injection detected")
            """ Before Deleting the node we need to remove it from mongoDB """
            c = MongoBase()
            c.connect()
            c.connect_primary()
            c.repl_remove(obj_inst.name + ":9091")
            """ Before Deleting the node we need to remove it from Redis """
            c = RedisBase(obj_inst.management_ip)
            c.slave_of('NO', 'ONE')

            # Fixme: Cleanup Sentinel ?
            """ Let's rock """
            obj_inst.delete()

        return HttpResponseRedirect(self.redirect_url)
Example #20
def cluster_join(master_hostname, master_ip, secret_key, ca_cert=None, cert=None, key=None):
    """
    Join an existing cluster

    :param master_hostname: Master hostname
    :param master_ip: Master management Ip address
    :param secret_key: The node's secret key
    :param ca_cert: The CA Certificate (optional: can be retrieved automagically)
    :param cert: The node certificate (optional: can be retrieved automagically)
    :param key: The node private key (optional: can be retrieved automagically)
    :return: True / False
    """

    """ We are coming from the CLI interface """
    try:
        infos = requests.get(
            "https://{}:8000/api/v1/system/cluster/info".format(master_ip),
            headers={'Cluster-api-key': secret_key},
            verify=False
        ).json()

        if not infos['status']:
            raise Exception('Error at API Request Cluster Info: {}'.format(infos['data']))

        time_master = infos['data'][master_hostname]['unix_timestamp']

        time_now = time.time()
        if abs(time_now - time_master) > 60:
            logger.info('Nodes are not at the same date. Please sync them with an NTP server')
            print('Nodes are not at the same date. Please sync them with an NTP server')
            return False

    except Exception as e:
        logger.error("Error at API Request Cluster Info: {} Invalid API KEY ?".format(e))
        return False

    if not ca_cert:
        try:
            infos = requests.post(
                "https://{}:8000/api/system/pki/get_ca".format(master_ip),
                headers={'Cluster-api-key': secret_key},
                verify=False
            ).json()

            ca_cert = infos.get('ca_cert')
        except Exception as e:
            logger.error("Unable to retrieve CA certificate: {}".format(e))
            return False

    """ We are coming from the CLI interface """
    if not cert or not key:
        try:
            infos = requests.post(
                "https://{}:8000/api/system/pki/get_cert/".format(master_ip),
                headers={'Cluster-api-key': secret_key},
                data={'node_name': get_hostname()},
                verify=False
            ).json()

            cert = infos.get('cert')
            key = infos.get('key')
        except Exception as e:
            logger.error("Unable to retrieve Node certificate: {}".format(e))
            return False

    if cert and key:
        bundle = cert + key
    else:
        logger.error("Unable to retrieve Node certificate and key, check secret key")
        return False

    with open("/var/tmp/ca.pem", "w") as f:
        f.write(ca_cert)

    with open("/var/tmp/node.cert", "w") as f:
        f.write(cert)

    with open("/var/tmp/node.key", "w") as f:
        f.write(key)

    with open("/var/tmp/node.pem", "w") as f:
        f.write(bundle)

    """ At this point we should have valid certificates:
    Save them on system """
    subprocess.check_output([
        '/home/vlt-os/scripts/write_cert.sh'
    ])

    """ At this point, certificates have been overwritten
        => We need to destroy replicaset & restart mongoDB
    """
    logger.info("replDestroy: Restart Mongodb with new certificates")
    mongo = MongoBase()
    mongo.repl_destroy()

    """ Ask primary to join us on our management IP """
    # TODO: set verify to True
    infos = requests.post(
        "https://{}:8000/api/system/cluster/add/".format(master_ip),
        headers={'Cluster-api-key': secret_key},
        data={'slave_ip': get_management_ip(), 'slave_name': get_hostname()},
        verify=False
    )

    if infos.status_code != 200:
        raise Exception("Error at API Call on /system/cluster/add/  Response code: {}".format(infos.status_code))

    infos = infos.json()

    if not infos.get('status'):
        logger.error("Error during API Call to add node to cluster: {}".format(infos.get('message')))
        return False

    """ Join our redis server to the redis master """
    c = RedisBase()
    redis_master_node = c.get_master(master_ip)
    c.slave_of(redis_master_node, 6379)

    """ Tell local sentinel to monitor local redis server """
    c = RedisBase(get_management_ip(), 26379)
    c.sentinel_monitor()

    """ Sleep a few seconds in order for the replication to occur """
    time.sleep(3)

    """ Create the local node """
    try:
        node = Node.objects.get(
            name=get_hostname(),
            management_ip=get_management_ip()
        )
    except Exception:
        logger.error("cluster_join:: Unable to find slave node !")
        logger.error("cluster_join:: Unable to find slave node !")
        return False

    """ Update uri of internal Log Forwarder """
    logfwd = LogOMMongoDB.objects.get()
    logfwd.uristr = mongo.get_replicaset_uri()
    logfwd.save()

    """ Save certificates on new node """
    for cert in X509Certificate.objects.exclude(is_vulture_ca=True):
        cert.save_conf()

    """ Read network config and store it into mongo """
    """ No rights to do that in jail - API request """
    node.api_request('toolkit.network.network.refresh_nic')
    """ Read ZFS file systems """
    node.api_request('system.zfs.zfs.refresh')

    """ Download reputation databases before crontab """
    node.api_request("gui.crontab.feed.security_update")

    """ And configure + restart netdata """
    logger.debug("API call to netdata configure_node")
    # API call to Cluster - to refresh nodes on each node conf
    Cluster.api_request('services.netdata.netdata.configure_node')

    logger.debug("API call to restart netdata service")
    node.api_request('services.netdata.netdata.restart_service')

    logger.debug("API call to configure HAProxy")
    node.api_request("services.haproxy.haproxy.configure_node")

    logger.debug("API call to write Darwin policies conf")
    for policy in DarwinPolicy.objects.all():
        node.api_request("services.darwin.darwin.write_policy_conf", policy.pk)

    logger.debug("API call to configure Darwin")
    node.api_request("services.darwin.darwin.build_conf")

    # API call to the whole Cluster - to refresh Nodes list in conf
    logger.debug("API call to update configuration of Apache GUI")
    Cluster.api_request("services.apache.apache.reload_conf")

    """ The method configure restart rsyslog if needed """
    logger.debug("API call to configure rsyslog")
    # API call to whole Cluster - to refresh mongodb uri in pf logs
    Cluster.api_request("services.rsyslogd.rsyslog.configure_node")

    logger.debug("API call to configure logrotate")
    node.api_request("services.logrotate.logrotate.reload_conf")

    return True
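From the CLI side, joining reduces to a single call; a sketch with placeholder hostname, IP and key:

ok = cluster_join(
    master_hostname="vulture-master",   # placeholder values
    master_ip="192.168.1.10",
    secret_key="CHANGE_ME",             # the master's cluster API key
)
print("joined" if ok else "join failed, check logs")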
Example #21
        If 0 arguments : Refresh MongoDB replicaset config
    """

    old_hostname = ""
    new_hostname = ""
    # Check args
    if len(sys.argv) != 1 and len(sys.argv) != 3:
        print("Replica_rename:: Usage: {} [old_hostname new_hostname]".format(
            sys.argv[0]))
        exit(1)
    elif len(sys.argv) == 3:
        old_hostname = sys.argv[1]
        new_hostname = sys.argv[2]

    # MongoDB - rename of node
    c = MongoBase()
    # Connect to the current renamed node
    c.connect(node=new_hostname + ":9091", primary=False)
    print("Connected to {}".format(new_hostname))
    # Automagically connect to the primary node
    res = c.repl_rename(old_hostname, new_hostname)
    print("Node renamed.")

    node = Node.objects.get(name=old_hostname)
    node.name = new_hostname
    node.save()

    logfwd = LogOMMongoDB.objects.get(internal=True)
    logfwd.uristr = c.get_replicaset_uri()
    logfwd.save()