Example 1
    def elastefy_events(self, cluster, event_list):

        cluster_id = cluster["id"]

        # Cap the number of event records logged for a single cluster.
        event_count = len(event_list)
        if event_count > MAX_EVENTS:
            log.info(
                f"Cluster {cluster_id} has {event_count} event records, logging only {MAX_EVENTS}"
            )
            event_list = event_list[:MAX_EVENTS]

        metadata_json = self.get_metadata_json(cluster)

        # Optionally persist a backup of the events and metadata before any processing.
        if self.backup_destination:
            self.save_new_backup(cluster_id, event_list, metadata_json)

        cluster_bash_data = process_metadata(metadata_json)
        event_names = get_cluster_object_names(cluster_bash_data)

        # Walk the events in reverse order, skipping records that are not worth indexing.
        for event in event_list[::-1]:
            if process.is_event_skippable(event):
                continue
            doc_id = get_doc_id(event)
            cluster_bash_data["no_name_message"] = get_no_name_message(
                event["message"], event_names)
            process_event_doc(event, cluster_bash_data)
            ret = self.log_doc(cluster_bash_data, doc_id)
            if not ret:
                # log_doc reported that nothing was written; stop processing
                # the remaining events.
                break
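
The early break hinges on what log_doc returns. A minimal sketch, assuming log_doc reports a falsy value when the document is already indexed, so a reverse walk over the events can stop at the first record that was pushed on a previous run; _SketchLogger and its in-memory store are illustrative, not the project's real logger:

class _SketchLogger:
    def __init__(self):
        self._indexed = {}  # stands in for the real document store

    def log_doc(self, doc, doc_id):
        # Assumed contract: return False for a doc that already exists so the
        # caller knows it has reached previously indexed data.
        if doc_id in self._indexed:
            return False
        self._indexed[doc_id] = dict(doc)
        return True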
Example 2
    def process_and_log_events(self,
                               cluster_bash_data,
                               event_list,
                               event_names,
                               only_new_events=True):
        for event in event_list[::-1]:
            if process.is_event_skippable(event):
                continue

            doc_id = get_doc_id(event)
            cluster_bash_data["no_name_message"] = get_no_name_message(
                event["message"], event_names)
            cluster_bash_data["inventory_url"] = self.inventory_url

            if "props" in event:
                event["event.props"] = json.loads(event["props"])

            process_event_doc(event, cluster_bash_data)
            ret = self.log_doc(cluster_bash_data, doc_id)

            # Strip the event's keys from the shared doc so the next
            # iteration starts from a clean cluster_bash_data.
            for key in event:
                _ = cluster_bash_data.pop(key, None)

            if not ret and only_new_events:
                # Nothing new was written and only new events matter, so stop.
                break
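
Example 2 reuses a single cluster_bash_data dict for every event and pops the event's keys afterwards, which only makes sense if process_event_doc copies those fields into the doc (an assumption here, since that helper is not shown). A self-contained sketch of that fold-then-clean pattern, with fold_and_clean standing in for the real helpers:

import json

def fold_and_clean(shared_doc, event):
    # Parse the optional "props" payload, copy the event's fields into the
    # shared doc, capture what would be sent to the logger, then remove the
    # event's fields again so the next event starts from a clean doc.
    if "props" in event:
        event["event.props"] = json.loads(event["props"])
    shared_doc.update(event)
    snapshot = dict(shared_doc)
    for key in event:
        shared_doc.pop(key, None)
    return snapshot

doc = {"cluster_id": "abc"}
fold_and_clean(doc, {"message": "started", "props": "{\"stage\": 1}"})
fold_and_clean(doc, {"message": "finished"})
assert "message" not in doc and "event.props" not in doc  # no leakage between events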
Example 3
    def process_and_log_events(self, cluster_bash_data, event_list, event_names, only_new_events=True):
        for event in event_list[::-1]:
            if process.is_event_skippable(event):
                continue

            doc_id = get_doc_id(event)
            cluster_bash_data["no_name_message"] = get_no_name_message(event["message"], event_names)
            cluster_bash_data["inventory_url"] = self.inventory_url

            process_event_doc(event, cluster_bash_data)
            with self.enrich_event(event, cluster_bash_data):
                ret = self.log_doc(cluster_bash_data, doc_id)
            if not ret and only_new_events:
                break
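
The with-statement implies enrich_event is a context manager, but its body is not shown in this example. A possible sketch, under the assumption that it adds the parsed "props" payload for the duration of the log call and then strips the event's fields from the doc (the cleanup Example 2 performs by popping keys manually):

import json
from contextlib import contextmanager

@contextmanager
def enrich_event(self, event, cluster_bash_data):
    # Assumed behaviour, not the project's actual implementation.
    if "props" in event:
        cluster_bash_data["event.props"] = json.loads(event["props"])
    try:
        yield
    finally:
        # Undo the enrichment and drop the event's own fields from the doc.
        cluster_bash_data.pop("event.props", None)
        for key in event:
            cluster_bash_data.pop(key, None)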
Example 4
    def does_cluster_needs_full_update(self, cluster_id, event_list):
        # Check whether the cluster is missing past events and therefore needs a full update.
        cluster_events_count = self.cache_event_count_per_cluster.get(cluster_id, None)
        relevant_event_count = len([event for event in event_list if not process.is_event_skippable(event)])

        if cluster_events_count and cluster_events_count == relevant_event_count:
            # The cached count already matches this batch; no full update needed.
            return False
        else:
            # Cache miss or stale value: query the backing store and refresh the cache.
            cluster_events_count_from_db = self.get_cluster_event_count_on_es_db(cluster_id)
            self.cache_event_count_per_cluster[cluster_id] = cluster_events_count_from_db
        if cluster_events_count_from_db < relevant_event_count:
            # Fewer events are stored than this batch's non-skippable events, so a full update is needed.
            missing_events = relevant_event_count - cluster_events_count_from_db
            logging.info(f"cluster {cluster_id} is missing {missing_events} events")
            return True
        else:
            return False
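
The per-cluster cache keeps the (presumably expensive) count query from running on every batch: it is consulted first and only refreshed when it no longer matches. A minimal, self-contained sketch of that pattern, where _CacheSketch and its stubbed store counts stand in for the real object and get_cluster_event_count_on_es_db:

class _CacheSketch:
    def __init__(self, store_counts):
        self.cache = {}                      # cluster_id -> last known count
        self._store_counts = store_counts    # stand-in for the ES count query

    def needs_full_update(self, cluster_id, relevant_event_count):
        cached = self.cache.get(cluster_id)
        if cached and cached == relevant_event_count:
            return False                     # cached count matches, skip the query
        stored = self._store_counts.get(cluster_id, 0)
        self.cache[cluster_id] = stored      # refresh the cache
        return stored < relevant_event_count

checker = _CacheSketch({"abc": 2})
assert checker.needs_full_update("abc", 3) is True    # store holds only 2 of 3 events
assert checker.needs_full_update("abc", 2) is False   # cached count matches, no re-query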