Example #1
def search_alerts(ctx, query, date_range, columns, extend):
    """Search detection engine alerts with KQL."""
    import click
    import kql
    from eql.table import Table
    from kibana.resources import Signal
    from .eswrap import MATCH_ALL, add_range_to_dsl

    kibana = ctx.obj['kibana']
    start_time, end_time = date_range
    # Convert the KQL query to Elasticsearch DSL, then constrain it to
    # the requested time window.
    kql_query = kql.to_dsl(query) if query else MATCH_ALL
    add_range_to_dsl(kql_query['bool'].setdefault('filter', []), start_time,
                     end_time)

    with kibana:
        alerts = [
            a['_source']
            for a in Signal.search({'query': kql_query})['hits']['hits']
        ]

    table_columns = [
        'host.hostname', 'signal.rule.name', 'signal.status',
        'signal.original_time'
    ]
    if columns:
        columns = list(columns)
        # --extend appends the requested columns to the defaults instead
        # of replacing them.
        table_columns = table_columns + columns if extend else columns
    click.echo(Table.from_list(table_columns, alerts))
    return alerts
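
A rough sketch of the final body this hands to Signal.search is shown below. Only the bool/filter skeleton is visible in the code above; the @timestamp field name and the gt/lte bounds are assumptions about what the project's add_range_to_dsl helper appends, not taken from its source:

# Hypothetical final query body; the range clause details are assumptions.
query_body = {
    'query': {
        'bool': {
            'filter': [
                {'range': {'@timestamp': {'gt': '2021-01-01T00:00:00',
                                          'lte': '2021-01-02T00:00:00'}}},
            ]
        }
    }
}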
Example #2
def search_rules(query, columns, language, verbose=True):
    """Use KQL or EQL to find matching rules."""
    import os

    import click
    from kql import get_evaluator
    from eql import parse_query
    from eql.build import get_engine
    from eql.pipes import CountPipe
    from eql.table import Table

    from . import rule_loader

    flattened_rules = []

    # Flatten each rule document so the KQL/EQL evaluators can address
    # metadata and rule fields as top-level keys.
    for file_name, rule_doc in rule_loader.load_rule_files().items():
        flat = {"file": os.path.relpath(file_name)}
        flat.update(rule_doc)
        flat.update(rule_doc["metadata"])
        flat.update(rule_doc["rule"])
        attacks = [
            threat for threat in rule_doc["rule"].get("threat", [])
            if threat["framework"] == "MITRE ATT&CK"
        ]
        techniques = [
            t["id"] for threat in attacks for t in threat.get("technique", [])
        ]
        tactics = [threat["tactic"]["name"] for threat in attacks]
        flat.update(techniques=techniques, tactics=tactics)
        flattened_rules.append(flat)

    flattened_rules.sort(key=lambda dct: dct["name"])

    filtered = []
    if language == "kql":
        # With no query, match everything.
        evaluator = get_evaluator(query) if query else lambda x: True
        filtered = list(filter(evaluator, flattened_rules))
    elif language == "eql":
        parsed = parse_query(query, implied_any=True, implied_base=True)
        evaluator = get_engine(parsed)
        filtered = [
            result.events[0].data for result in evaluator(flattened_rules)
        ]

        # A trailing `| count` pipe changes the output shape, so default
        # to its columns.
        if not columns and any(
                isinstance(pipe, CountPipe) for pipe in parsed.pipes):
            columns = ["key", "count", "percent"]

    if columns:
        # Accept repeated flags as well as comma-separated values.
        columns = ",".join(columns).split(",")
    else:
        columns = ["rule_id", "file", "name"]

    table = Table.from_list(columns, filtered)

    if verbose:
        click.echo(table)

    return filtered
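
The MITRE ATT&CK flattening above is easiest to follow on a toy document. A minimal, self-contained sketch with an invented rule_doc (no project imports required):

rule_doc = {
    "rule": {
        "threat": [{
            "framework": "MITRE ATT&CK",
            "tactic": {"name": "Execution"},
            "technique": [{"id": "T1059"}],
        }]
    }
}

attacks = [threat for threat in rule_doc["rule"].get("threat", [])
           if threat["framework"] == "MITRE ATT&CK"]
techniques = [t["id"] for threat in attacks
              for t in threat.get("technique", [])]
tactics = [threat["tactic"]["name"] for threat in attacks]
print(techniques, tactics)  # ['T1059'] ['Execution']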
Example #3
File: main.py Project: weizn11/eqllib
def print_table(results, columns):
    """Print a list of results as a table."""
    import re

    from eql.table import Table

    if not results:
        print("No results found")
        return

    # Accept either comma- or whitespace-separated column names.
    columns = re.split(r"[,\s]+", columns)

    # "*" expands to the union of all fields seen across the results.
    if columns == ["*"]:
        columns = list(sorted(set(k for result in results for k in result)))

    table = Table.from_list(columns, results)
    print(table)
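
Every example on this page funnels its rows through eql.table.Table.from_list, which takes a list of column names and a list of dicts and renders an aligned text table. A minimal standalone sketch (sample rows invented; assumes the eql package is installed):

from eql.table import Table

rows = [
    {"rule_id": "abc-123", "name": "Example Rule", "severity": "low"},
    {"rule_id": "def-456", "name": "Another Rule", "severity": "high"},
]
print(Table.from_list(["rule_id", "name", "severity"], rows))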
Example #4
File: ml.py Project: zha0/detection-rules
def setup_bundle(ctx, model_tag, repo, model_dir):
    """Upload ML model and dependencies to enrich data."""
    es_client: Elasticsearch = ctx.obj['es']

    if model_tag:
        dga_client = MachineLearningClient.from_release(es_client=es_client,
                                                        release_tag=model_tag,
                                                        repo=repo)
    elif model_dir:
        dga_client = MachineLearningClient.from_directory(es_client=es_client,
                                                          directory=model_dir)
    else:
        return client_error(
            'model-tag or model-dir required to download model files')

    dga_client.verify_license()
    status = dga_client.setup()

    results = []
    # Flatten the nested {file_type: {name: result}} response into rows.
    for file_type, response in status.items():
        for name, result in response.items():
            if file_type == 'model':
                # A model upload is only confirmed once Elasticsearch
                # reports a create_time for it. Use a distinct name so the
                # `status` dict being iterated is not shadowed.
                model_status = ('success' if result.get('create_time')
                                else 'potential_failure')
                results.append({
                    'file_type': file_type,
                    'name': name,
                    'status': model_status
                })
                continue
            results.append({
                'file_type': file_type,
                'name': name,
                'status': result
            })

    fields = ['file_type', 'name', 'status']
    table = Table.from_list(fields, results)
    click.echo(table)

    click.echo(
        'Associated rules and jobs can be found under ML-experimental-detections releases in the repo'
    )
    click.echo('To upload rules, run: kibana upload-rule <ml-rule.toml>')
    click.echo(
        'To upload ML jobs, run: es experimental upload-ml-job <ml-job.json>')
Example #5
File: ml.py Project: zha0/detection-rules
def remove_scripts_pipelines(ctx: click.Context, **ml_types):
    """Remove ML scripts and pipeline files."""
    # ml_types arrives as {flag_name: bool}; keep only the selected ones.
    selected_types = [k for k, v in ml_types.items() if v]
    assert selected_types, f'Specify ML types to remove: {list(ml_types)}'
    status = MachineLearningClient.remove_ml_scripts_pipelines(
        es_client=ctx.obj['es'], ml_type=selected_types)

    results = []
    for file_type, response in status.items():
        for name, result in response.items():
            results.append({
                'file_type': file_type,
                'name': name,
                'status': result
            })

    fields = ['file_type', 'name', 'status']
    table = Table.from_list(fields, results)
    click.echo(table)
    return status
Example #6
File: ml.py Project: zha0/detection-rules
def remove_model(ctx: click.Context, model_id):
    """Remove ML model files."""
    es_client = MlClient(ctx.obj['es'])
    model_ids = MachineLearningClient.get_existing_model_ids(ctx.obj['es'])

    if not model_id:
        model_id = click.prompt('Model ID to remove',
                                type=click.Choice(model_ids))

    try:
        result = es_client.delete_trained_model(model_id)
    except elasticsearch.ConflictError as e:
        # Deletion conflicts while ingest pipelines still reference the model.
        click.echo(f'{e}: try running `remove-scripts-pipelines` first')
        ctx.exit(1)

    table = Table.from_list(['model_id', 'status'], [{
        'model_id': model_id,
        'status': result
    }])
    click.echo(table)
    return result
Example #7
    def _test_interactive_shell(self):
        """Test that commands can be executed via the interactive shell."""
        class Arguments(object):
            config = None
            file = None

        actual_stdin = io.StringIO(
            to_unicode("\n".join([
                "input %s" % EVENTS_FILE,
                "table process_path parent_process_path",
                "search\nprocess where serial_event_id in (32, 33);",
            ])))

        expected_stdout_text = "\n".join([
            BANNER,
            "eql> input %s" % EVENTS_FILE,
            "Using file %s with %d events" %
            (EVENTS_FILE, len(TestEngine.get_events())),
            "eql> table process_path parent_process_path",
            "eql> search process where serial_event_id in (32, 33)",
            Table([["C:\\Windows\\System32\\sppsvc.exe",
                    "C:\\Windows\\System32\\services.exe"],
                   ["C:\\Windows\\System32\\dwm.exe",
                    "C:\\Windows\\System32\\svchost.exe"]],
                  names=["process_path", "parent_process_path"]).__unicode__()
        ])

        actual_stdout = io.StringIO()

        # Run the shell with stdin and stdout both redirected, so the
        # session transcript can be compared against the expected text.
        with mock.patch('sys.stdin', new=actual_stdin), \
                mock.patch('sys.stdout', new=actual_stdout):
            shell_main(Arguments())

        actual_stdout_lines = actual_stdout.getvalue().splitlines()
        self.assertListEqual(actual_stdout_lines,
                             expected_stdout_text.splitlines())
Example #8
File: ml.py Project: zha0/detection-rules
def check_files(ctx):
    """Check ML model files on an elasticsearch instance."""
    files = MachineLearningClient.get_all_ml_files(ctx.obj['es'])

    results = []
    # Scripts and pipelines only carry a name; models are listed
    # separately below along with the release they shipped in.
    for file_type, data in files.items():
        if file_type == 'model':
            continue
        for name in list(data):
            results.append({'file_type': file_type, 'name': name})

    for model_name, model in files['model'].items():
        results.append({
            'file_type': 'model',
            'name': model_name,
            'related_release': model['release'].tag_name
        })

    fields = ['file_type', 'name', 'related_release']
    table = Table.from_list(fields, results)
    click.echo(table)
    return files
Example #9
from typing import Dict


def search_rules(query,
                 columns,
                 language,
                 count,
                 verbose=True,
                 rules: Dict[str, dict] = None,
                 pager=False):
    """Use KQL or EQL to find matching rules."""
    import os

    import click
    from kql import get_evaluator
    from eql import parse_query
    from eql.build import get_engine
    from eql.pipes import CountPipe
    from eql.table import Table

    from . import rule_loader
    from .rule import Rule

    flattened_rules = []
    rules = rules or rule_loader.load_rule_files(verbose=verbose)

    for file_name, rule_doc in rules.items():
        flat = {"file": os.path.relpath(file_name)}
        flat.update(rule_doc)
        flat.update(rule_doc["metadata"])
        flat.update(rule_doc["rule"])

        tactic_names = []
        technique_ids = []
        subtechnique_ids = []

        # Collect ATT&CK tactics, techniques, and sub-techniques so they
        # can be filtered as flat fields.
        for entry in rule_doc['rule'].get('threat', []):
            if entry["framework"] != "MITRE ATT&CK":
                continue

            techniques = entry.get('technique', [])
            tactic_names.append(entry['tactic']['name'])
            technique_ids.extend([t['id'] for t in techniques])
            subtechnique_ids.extend([
                st['id'] for t in techniques
                for st in t.get('subtechnique', [])
            ])

        flat.update(techniques=technique_ids,
                    tactics=tactic_names,
                    subtechniques=subtechnique_ids,
                    unique_fields=Rule.get_unique_query_fields(
                        rule_doc['rule']))
        flattened_rules.append(flat)

    flattened_rules.sort(key=lambda dct: dct["name"])

    filtered = []
    if language == "kql":
        evaluator = get_evaluator(query) if query else lambda x: True
        filtered = list(filter(evaluator, flattened_rules))
    elif language == "eql":
        parsed = parse_query(query, implied_any=True, implied_base=True)
        evaluator = get_engine(parsed)
        filtered = [
            result.events[0].data for result in evaluator(flattened_rules)
        ]

        if not columns and any(
                isinstance(pipe, CountPipe) for pipe in parsed.pipes):
            columns = ["key", "count", "percent"]

    # --count short-circuits the table rendering entirely.
    if count:
        click.echo(f'{len(filtered)} rules')
        return filtered

    if columns:
        columns = ",".join(columns).split(",")
    else:
        columns = ["rule_id", "file", "name"]

    table = Table.from_list(columns, filtered)

    if verbose:
        click.echo_via_pager(table) if pager else click.echo(table)

    return filtered
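
Both versions of search_rules normalize columns with ",".join(columns).split(","), which flattens a mix of repeated flags and comma-separated values into one list of names. A quick illustration:

# e.g. a CLI invocation like: -c rule_id -c name,file
columns = ("rule_id", "name,file")
print(",".join(columns).split(","))  # ['rule_id', 'name', 'file']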
Example #10
def rule_survey(ctx: click.Context,
                query,
                date_range,
                dump_file,
                hide_zero_counts,
                hide_errors,
                elasticsearch_client: Elasticsearch = None,
                kibana_client: Kibana = None):
    """Survey rule counts."""
    from eql.table import Table
    from kibana.resources import Signal
    from . import rule_loader
    from .main import search_rules

    survey_results = []
    start_time, end_time = date_range

    if query:
        rule_paths = [
            r['file']
            for r in ctx.invoke(search_rules, query=query, verbose=False)
        ]
        rules = rule_loader.load_rules(
            rule_loader.load_rule_files(paths=rule_paths, verbose=False),
            verbose=False).values()
    else:
        rules = rule_loader.load_rules(verbose=False).values()

    click.echo(f'Running survey against {len(rules)} rules')
    click.echo(f'Saving detailed dump to: {dump_file}')

    collector = CollectEvents(elasticsearch_client)
    details = collector.search_from_rule(*rules,
                                         start_time=start_time,
                                         end_time=end_time)
    counts = collector.count_from_rule(*rules,
                                       start_time=start_time,
                                       end_time=end_time)

    # add alerts, grouped by the rule that generated them; grouping keeps
    # the per-rule alert counts accurate when a rule fires more than once
    with kibana_client:
        range_dsl = {'query': {'bool': {'filter': []}}}
        add_range_to_dsl(range_dsl['query']['bool']['filter'], start_time,
                         end_time)
        alerts = defaultdict(list)
        for a in Signal.search(range_dsl)['hits']['hits']:
            alerts[a['_source']['signal']['rule']['rule_id']].append(
                a['_source'])

    for rule_id, count in counts.items():
        alert_count = len(alerts.get(rule_id, []))
        if alert_count > 0:
            count['alert_count'] = alert_count

        details[rule_id].update(count)

        search_count = count['search_count']

        # Hide zero-hit rules only when they also produced no alerts;
        # hide errored searches (search_count == -1) unconditionally.
        if (not alert_count and hide_zero_counts and search_count == 0) or (
                hide_errors and search_count == -1):
            continue

        survey_results.append(count)

    fields = ['rule_id', 'name', 'search_count', 'alert_count']
    table = Table.from_list(fields, survey_results)

    if len(survey_results) > 200:
        click.echo_via_pager(table)
    else:
        click.echo(table)

    # Persist the full per-rule details alongside the summary table.
    os.makedirs(get_path('surveys'), exist_ok=True)
    with open(dump_file, 'w') as f:
        json.dump(details, f, indent=2, sort_keys=True)

    return survey_results