def _prepare_alert_from_event(self, event: SocEvent) -> Alert:
        """Build a TheHive Alert from a normalized SOC event.

        The alert is produced by SocEventParser and then best-effort
        enriched with raw logs fetched from HBase; if enrichment fails,
        the un-enriched alert is still returned.

        :param event: normalized SOC event to convert.
        :return: prepared (and possibly raw-enriched) Alert.
        """
        logger.info("Parse message with SocEventParser: %s", str(event.id))
        with metrics.timer("thehive_alert_preparing",
                           reservoir_type='sliding_time_window'):
            alert = SocEventParser.prepare_thehive_alert(event)

        logger.info("Complement event data with raw from HBase")
        with metrics.timer("hbase_loading_time",
                           reservoir_type='sliding_time_window'):
            try:
                # list(...) replaces the old identity comprehension.
                raw_logs = self.hbase_event_loader.get_raw_events(
                    list(event.data.rawIds))
                logger.info("Receive raw logs: %s", str(raw_logs))
                alert.customFields.update({
                    'raw': {
                        'string': ';\n'.join(raw_logs),
                        'order': len(alert.customFields)
                    }
                })
                metrics.notify("enriched_by_hbase_alerts", 1)
            except Exception as err:
                # TODO: specify type of exceptions that should be caught
                # Deliberate best-effort: enrichment failure must not block
                # the alert, so we only count and log the error.
                metrics.notify('hbase_errors', 1)
                logger.warning(
                    "Some exception have been raised by HBaseEnricher: %s",
                    str(err))
        return alert
 def create_case_from_alert(self, alert_id: str) -> None:
     """Promote an existing TheHive alert to a case.

     :param alert_id: identifier of the alert to promote.
     :raises TheHiveException, HTTPError: re-raised after the API error
         counter is incremented.
     """
     # Annotation fixed: this method returns normally, so it is `None`,
     # not `NoReturn` (which means "never returns").
     try:
         self.api.promote_alert_to_case(alert_id)
     except (TheHiveException, HTTPError) as exc:
         metrics.notify('thehive_api_errors', 1)
         logger.error("TheHive create case from alert error: %s", str(exc))
         # Bare raise preserves the original traceback.
         raise
Beispiel #3
0
def benchmark(new_fun, duration):
    """Exercise a freshly created metric for *duration* seconds.

    Creates a metric via *new_fun*, then repeatedly sleeps a random
    latency and notifies the metric with it, measuring how much wall
    time is spent inside ``metrics.notify`` itself.

    :return: tuple of (elapsed wall time, total slept time,
        notify overhead, final metric value).
    """
    name = "bench"
    new_fun(name)

    slept_total = 0.0
    notify_cost = 0.0
    start = time.time()

    # Keep notifying until the requested duration has elapsed.
    while time.time() - start < duration:
        delay = random.randint(MIN_LATENCY, MAX_LATENCY) / 1000.0
        time.sleep(delay)

        before_notify = time.time()
        metrics.notify(name, delay)
        notify_cost += time.time() - before_notify

        slept_total += delay

    elapsed_wall = time.time() - start
    final_value = metrics.get(name)
    metrics.delete_metric(name)
    return elapsed_wall, slept_total, notify_cost, final_value
Beispiel #4
0
def benchmark(new_fun, duration):
    """Benchmark ``metrics.notify`` overhead on a throwaway metric.

    Registers a metric through *new_fun*, then loops for *duration*
    seconds: sleep a random latency, report it to the metric, and track
    the time consumed by the reporting call.

    :return: (wall-clock elapsed, accumulated sleep, notify overhead,
        metric value at the end).
    """
    name = "bench"
    new_fun(name)

    accumulated_sleep = 0.0
    instrumentation_cost = 0.0
    t0 = time.time()

    while True:
        if time.time() - t0 >= duration:
            break

        pause = random.randint(MIN_LATENCY, MAX_LATENCY) / 1000.0
        time.sleep(pause)

        mark = time.time()
        metrics.notify(name, pause)
        instrumentation_cost += time.time() - mark

        accumulated_sleep += pause

    wall = time.time() - t0
    value_at_end = metrics.get(name)
    metrics.delete_metric(name)
    return wall, accumulated_sleep, instrumentation_cost, value_at_end
 def merge_alerts_in_case(self, case_id: str, alert_ids: List[str]) -> Dict:
     """Merge the given alerts into an existing TheHive case.

     :param case_id: identifier of the target case.
     :param alert_ids: alerts to merge into the case.
     :return: decoded JSON body of the API response.
     :raises TheHiveException, HTTPError: re-raised after the API error
         counter is incremented.
     """
     try:
         response = self.api.merge_alerts_into_case(case_id, alert_ids)
         response.raise_for_status()
     except (TheHiveException, HTTPError) as exc:
         metrics.notify('thehive_api_errors', 1)
         # Log message fixed: this is a merge, not "create case from alert"
         # (copy-paste from a sibling method).
         logger.error("TheHive merge alerts into case error: %s", str(exc))
         # Bare raise preserves the original traceback.
         raise
     return response.json()
 def send_alert(self, alert: Alert) -> Dict:
     """Create *alert* in TheHive and return the decoded JSON response.

     Counts and logs API failures before re-raising them.
     """
     try:
         api_response = self.api.create_alert(alert)
         api_response.raise_for_status()
     except (TheHiveException, HTTPError) as exc:
         metrics.notify('thehive_api_errors', 1)
         logger.error("TheHive create alert error: %s", str(exc))
         raise exc
     return api_response.json()
Beispiel #7
0
def metric_notify_counter(app_module, metric_name, count=None, is_init=False):
    """Increment a counter metric, creating it on first use.

    :param app_module: module prefix used to build the metric name.
    :param metric_name: base metric name.
    :param count: increment amount; defaults to 1 when omitted.
    :param is_init: when True, only ensure the counter exists.
    """
    metric_name = generate_metric_name(app_module, metric_name)
    if not metrics.REGISTRY.get(metric_name):
        metrics.new_counter(metric_name)
    if is_init:
        return
    # Fixed: the old `if not count` treated an explicit count=0 as missing
    # and incremented by 1; only a None count falls back to the default.
    metrics.notify(metric_name, 1 if count is None else count)
Beispiel #8
0
 def _get_events_from_hbase(self, event_ids: List[str],
                            full_table_name: str) -> List[bytes]:
     """Fetch the 'n:e' column for the given row keys from an HBase table.

     :param event_ids: row keys to fetch; empty input short-circuits to [].
     :param full_table_name: fully qualified HBase table name.
     :return: raw 'n:e' cell values, one per row found.
     :raises NoConnectionsAvailable, TException, socket.timeout: re-raised
         after logging and incrementing the HBase error counter.
     """
     if not event_ids:
         return []
     try:
         with self.hbase_pool.connection() as conn:
             table = conn.table(full_table_name)
             # NOTE(review): the scan() result is discarded; happybase's
             # scan() is lazy, so this call likely has no effect — confirm
             # whether it was meant as a connection warm-up or is dead code.
             table.scan()
             result = table.rows(
                 [event_id.encode() for event_id in event_ids],
                 columns=[b'n:e'])
     except (NoConnectionsAvailable, TException, socket.timeout) as err:
         logger.warning("HBase request raised an error: %s", str(err))
         metrics.notify("hbase_errors", 1)
         raise err
     return [data[b'n:e'] for _, data in result]
 def load_raw_events(self, incident: Incident) -> List[RawEvent]:
     """Load the incident's raw events from HBase (best-effort).

     :param incident: incident whose correlation event holds the raw IDs.
     :return: raw events, or an empty list when loading fails.
     """
     logger.info("Try to get raw events from HBase")
     with metrics.timer("hbase_loading_time",
                        reservoir_type='sliding_time_window'):
         try:
             # list(...) replaces the old identity comprehension.
             raw_events = self.hbase_event_loader.get_raw_events(
                 list(incident.correlationEvent.data.rawIds))
             logger.info("Receive raw events: %s", len(raw_events))
             metrics.notify("loaded_hbase_raw_events", len(raw_events))
         except Exception as err:
             # Deliberate best-effort: a loading failure degrades to an
             # empty result instead of aborting incident processing.
             metrics.notify('hbase_errors', 1)
             logger.warning(
                 "Some unknown exception have been raised by HBaseEventLoader: %s",
                 str(err))
             raw_events = []
     return raw_events
 def load_normalized_events(self, incident: Incident) -> List[SocEvent]:
     """Load the incident's normalized events from HBase (best-effort).

     :param incident: incident whose correlation lists the event IDs.
     :return: normalized events, or an empty list when loading fails.
     """
     logger.info("Try to get normalized events from HBase")
     with metrics.timer("hbase_loading_time",
                        reservoir_type='sliding_time_window'):
         try:
             event_ids = [
                 item.value
                 for item in incident.correlationEvent.correlation.eventIds
             ]
             normalized_events = self.hbase_event_loader.get_normalized_events(
                 event_ids)
             logger.info("Receive normalized events: %s",
                         len(normalized_events))
             metrics.notify("loaded_hbase_normalized_events",
                            len(normalized_events))
         except Exception as err:
             # Deliberate best-effort: a loading failure degrades to an
             # empty result instead of aborting incident processing.
             metrics.notify('hbase_errors', 1)
             logger.warning(
                 "Some unknown exception have been raised by HBaseEventLoader: %s",
                 str(err))
             normalized_events = []
     return normalized_events
def main(settings_file_path: str = 'data/settings.yaml'):
    """Application entry point: consume Kafka messages and push to TheHive.

    :param settings_file_path: path to the YAML settings file; overridden
        by the APP_CONFIG_PATH environment variable when set.
    """
    settings_file_path = os.getenv("APP_CONFIG_PATH", settings_file_path)
    settings = get_settings(settings_file_path)
    prepare_logging(settings)
    register_app_metrics()
    logger.info("Application start")
    logger.info("Load config from %s", settings_file_path)

    from modules.pusher import TheHivePusher
    pusher = TheHivePusher(settings['thehive'], settings['hbase_event_loader'])

    from modules.kafka_consumer import prepare_consumer
    consumer = prepare_consumer(settings)
    consumer.create_consumer()

    from modules.app_metrics import run_metrics_webserver
    metrics_thread = threading.Thread(target=run_metrics_webserver,
                                      daemon=True)
    metrics_thread.start()

    try:
        for message in consumer.read_topic():
            logger.info("Read message from topic %s: %s", message.topic,
                        str(message.value))
            metrics.notify('received_kafka_messages', 1)
            pusher.push(message.value)
            logger.info("Successfully processed message")
            # Commit only after the message was fully processed.
            consumer.consumer.commit()
    # Handler order fixed: StopIteration subclasses Exception, so the broad
    # Exception handler must come AFTER this one or a clean shutdown would
    # exit with status 1 instead of 0.
    except (KeyboardInterrupt, StopIteration) as err:
        logger.warning("Unexpected processing interruption: %s", str(err))
        sys.exit(0)
    except Exception as err:
        logger.error(
            "Exception, which type is %s, is detecting during consuming messages: %s",
            type(err), str(err))
        sys.exit(1)
    except BaseException as e:
        logger.error("Some wtf shit is happened: %s", str(e))
        sys.exit(42)
class TheHivePusher:
    # Pushes parsed incidents into TheHive as cases with attached alerts.

    def __init__(self, thehive_settings: Dict,
                 hbase_event_loader_settings: Dict):
        """Create the TheHive API client and the HBase event loader.

        :param thehive_settings: keyword settings forwarded verbatim to
            CustomTheHiveApi.
        :param hbase_event_loader_settings: must contain 'namespace',
            'raw_table_name' and 'normalized_table_name' keys.
        """
        logger.info("Create THive API client with settings: %s",
                    str(thehive_settings))
        self.api = CustomTheHiveApi(**thehive_settings)
        # Loader shares the module-level hbase_pool connection pool.
        self.hbase_event_loader = HbaseEventsLoader(
            hbase_pool, hbase_event_loader_settings['namespace'],
            hbase_event_loader_settings['raw_table_name'],
            hbase_event_loader_settings['normalized_table_name'])

    @retry((TheHiveException, HTTPError), tries=5, delay=2)
    @metrics.with_histogram("send_alert", reservoir_type='sliding_time_window')
    def send_alert(self, alert: Alert) -> Dict:
        """Create *alert* in TheHive and return the decoded JSON response.

        Retried up to 5 times on API/HTTP failures; each failure is
        counted and logged before being re-raised.
        """
        try:
            api_response = self.api.create_alert(alert)
            api_response.raise_for_status()
        except (TheHiveException, HTTPError) as exc:
            metrics.notify('thehive_api_errors', 1)
            logger.error("TheHive create alert error: %s", str(exc))
            raise exc
        return api_response.json()

    @retry((TheHiveException, HTTPError), tries=5, delay=2)
    def create_case_from_alert(self, alert_id: str) -> None:
        """Promote an existing TheHive alert to a case (retried on failure).

        :param alert_id: identifier of the alert to promote.
        :raises TheHiveException, HTTPError: re-raised after the API error
            counter is incremented.
        """
        # Annotation fixed: this method returns normally, so it is `None`,
        # not `NoReturn` (which means "never returns").
        try:
            self.api.promote_alert_to_case(alert_id)
        except (TheHiveException, HTTPError) as exc:
            metrics.notify('thehive_api_errors', 1)
            logger.error("TheHive create case from alert error: %s", str(exc))
            # Bare raise preserves the original traceback.
            raise

    @retry((TheHiveException, HTTPError), tries=10, delay=6)
    @metrics.with_histogram("create_case",
                            reservoir_type='sliding_time_window')
    def create_case(self, case: Case) -> Dict:
        """Create *case* in TheHive and return the decoded JSON response.

        Retried up to 10 times on API/HTTP failures; each failure is
        counted and logged before being re-raised.
        """
        try:
            resp = self.api.create_case(case)
            resp.raise_for_status()
        except (TheHiveException, HTTPError) as exc:
            metrics.notify('thehive_api_errors', 1)
            logger.error("TheHive create case error: %s", str(exc))
            raise exc
        else:
            return resp.json()
    def push(self, message: Dict):
        """Process one Kafka message end-to-end.

        Parses the message into an Incident, creates a TheHive case
        enriched with raw events, sends one alert per normalized event,
        merges successful alerts into the case and finally tags it.

        :param message: message payload as a dict; silently dropped when
            it does not parse into an Incident.
        """
        with metrics.timer("thehive_case_preparing",
                           reservoir_type='sliding_time_window'):
            try:
                ea_incident = ParseDict(message,
                                        Incident(),
                                        ignore_unknown_fields=True)
            except ParseError as err:
                # Invalid messages are logged and skipped, not retried.
                logger.warning("Message %s is not valid. Raised %s",
                               str(message), str(err))
                return
            case = SocEventParser.prepare_thehive_case(ea_incident)
            raw_events = self.load_raw_events(ea_incident)
            # NOTE(review): join assumes raw_events items are str; the HBase
            # loader appears to return bytes — confirm the element type.
            case.customFields.update({
                'raw': {
                    'string': ';\n'.join(raw_events),
                    'order': len(case.customFields)
                }
            })
        r = self.create_case(case)
        case.id = r['id']
        metrics.notify('created_thehive_cases', 1)
        logger.info("Successfully create case from event into THive: %s",
                    str(r))

        normalized_events = self.load_normalized_events(ea_incident)

        alert_ids = []
        for event in normalized_events:
            alert = self._prepare_alert_from_event(event)
            logger.info("Try to send alert to theHive: %s", str(alert))
            try:
                r = self.send_alert(alert)
            except HTTPError as err:
                # HTTP 400 (e.g. rejected/duplicate alert) skips this event;
                # any other HTTP error propagates to the caller.
                if err.response.status_code == 400:
                    continue
                raise err
            logger.info("Successfully push alert to THive: %s", str(r))
            metrics.notify('created_thehive_alerts', 1)
            alert_ids.append(r['id'])

        if alert_ids:
            self.merge_alerts_in_case(case.id, alert_ids)
        self.set_final_tag(case)

        logger.info("Successfully process ea message")
        metrics.notify("successfully_processed_messages", 1)
        except (TheHiveException, HTTPError) as exc:
            metrics.notify('thehive_api_errors', 1)
            logger.error("TheHive create case from alert error: %s", str(exc))
            raise exc
        return response.json()

    @retry((TheHiveException, HTTPError), tries=5, delay=2)
    @metrics.with_histogram("set_final_tag",
                            reservoir_type='sliding_time_window')
    def set_final_tag(self, case: Case) -> Dict:
        """Append the 'FINAL' tag to *case* and push the update to TheHive.

        Retried on API/HTTP failures; each failure is counted and logged
        before being re-raised.
        """
        case.tags.append('FINAL')
        try:
            update_response = self.api.update_case(case, fields=['tags'])
            update_response.raise_for_status()
        except (TheHiveException, HTTPError) as exc:
            metrics.notify('thehive_api_errors', 1)
            logger.error("TheHive set tag final error: %s", str(exc))
            raise exc
        return update_response.json()

    @metrics.with_histogram("full_processing_time",
                            reservoir_type='sliding_time_window')
    def push(self, message: Dict):
        with metrics.timer("thehive_case_preparing",
                           reservoir_type='sliding_time_window'):
            try:
                ea_incident = ParseDict(message,
                                        Incident(),
                                        ignore_unknown_fields=True)
            except ParseError as err:
                logger.warning("Message %s is not valid. Raised %s",
Beispiel #15
0
def metric_notify_gauge(app_module, metric_name, value):
    """Record *value* on a gauge metric, creating the gauge on first use.

    :param app_module: module prefix used to build the metric name.
    :param metric_name: base metric name.
    :param value: gauge value to report.
    """
    gauge_name = generate_metric_name(app_module, metric_name)
    # Lazily register the gauge the first time it is notified.
    if not metrics.REGISTRY.get(gauge_name):
        metrics.new_gauge(gauge_name)
    metrics.notify(gauge_name, value)