Code example #1
    def test_valid_schema_fields(self):
        """Test that schema errors are never raised for valid schema usage."""
        valid = [
            'process where process_name == "test" and command_line == "test" and pid > 0',
            'file where file_path == "abc" and data == 1',
            'file where file_path == "abc" and data == "fdata.exe"',
            'file where file_path == "abc" and data != null',
            'file where file_path == "abc" and length(data) > 0 | filter file_path == "abc"',
            'sequence [file where pid=1] [process where pid=2] | filter events[0].file_name = events[1].process_name',
            'sequence by pid [file where true] [process where true] ' +
            '| filter events[0].file_name = events[1].process_name',
            'join by pid [file where true] [process where true] | filter events[0].file_name = events[1].process_name',
            'join [file where true] by pid [process where true] by pid until [complex where false] by nested.num'
            '| filter events[0].file_name = events[1].process_name',
            'complex where string_arr[3] != null',
            'complex where wideopen.a.b[0].def == 1',
            'complex where length(nested.arr) > 0',
            'complex where nested.arr[0] == 1',
            'complex where nested.double_nested.nn == 5',
            'complex where nested.double_nested.triplenest != null',
            'complex where nested.double_nested.triplenest.m == 5',
            'complex where nested.  double_nested.triplenest.b != null',
        ]

        with Schema(self.schema):
            for query in valid:
                parse_query(query)
Code example #2
    def test_comments(self):
        """Test that comments are valid syntax but stripped from AST."""
        match = parse_query("process where pid=4 and ppid=0")

        query = parse_query(
            """process where pid = 4 /* multi\nline\ncomment */ and ppid=0""")
        self.assertEqual(match, query)

        query = parse_query(
            """process where pid = 4 // something \n and ppid=0""")
        self.assertEqual(match, query)

        query = parse_query("""process where pid
            = 4 and ppid=0
        """)
        self.assertEqual(match, query)

        query = parse_query("""process where
            // test
            //
            //line
            //comments
            pid = 4 and ppid = 0
        """)
        self.assertEqual(match, query)

        match = parse_expression("true")
        query = parse_expression(
            "true // something else \r\n /* test\r\n something \r\n*/")
        self.assertEqual(match, query)

        commented = parse_definitions(
            "macro test() pid = 4 and /* comment */ ppid = 0")
        macro = parse_definitions("macro test() pid = 4 and ppid = 0")
        self.assertEqual(commented, macro)
Code example #3
File: test_python_engine.py Project: joswr1ght/eql
    def test_special_pipes(self):
        """Make sure that the extra pipes are working as intended."""
        query = 'process where true | unique opcode | count'
        config = {'flatten': True}
        results = self.get_output(queries=[parse_query(query)], config=config)
        self.assertEqual(results[0].data['count'], 3,
                         "Expected 3 unique process opcodes")

        query = 'process where true | count opcode'
        results = self.get_output(queries=[parse_query(query)], config=config)
        opcodes = set(event.data['key'] for event in results)
        self.assertEqual(len(results), 3,
                         "Expected 3 unique process opcodes in the data set")
        self.assertEqual(opcodes, set([1, 2, 3]), "Some opcodes were missing")

        query = 'process where true | unique unique_pid | count opcode'
        results = self.get_output(queries=[parse_query(query)], config=config)
        opcodes = [event.data['key'] for event in results]
        self.assertEqual(len(results), 2, "Expected 2 opcodes")
        self.assertEqual(opcodes, [1, 3], "Received or missing opcodes")

        query = 'process where true | filter process_name == "svchost.exe"'
        results = self.get_output(queries=[parse_query(query)], config=config)
        self.assertGreater(len(results), 1, "Filter pipe failed")
        for event in results:
            self.assertEqual(event.data['process_name'].lower(), "svchost.exe")

        query = 'process where length(md5) > 0 | count md5 command_line'
        results = self.get_output(queries=[parse_query(query)], config=config)
        self.assertGreater(len(results), 1, "Count pipe returned no results")
        sorted_results = list(
            sorted(results, key=lambda e: (e.data['count'], e.data['key'])))
        self.assertListEqual(sorted_results, results,
                             "Count didn't output expected results")
Code example #4
    def test_valid_schema_fields(self):
        """Test that schema errors are being raised separately."""
        valid = [
            'process where process_name == "test" and command_line == "test" and not pid',
            'file where file_path == "abc" and data == 1',
            'file where file_path == "abc" and data == "fdata.exe"',
            'file where file_path == "abc" and not data',
            'file where file_path == "abc" and length(data) | filter file_path == "abc"',
            'sequence [file where pid=1] [process where pid=2] | filter events[0].file_name = events[1].process_name',
            'sequence by pid [file where 1] [process where 1] | filter events[0].file_name = events[1].process_name',
            'join by pid [file where 1] [process where 1] | filter events[0].file_name = events[1].process_name',
            'join [file where 1] by pid [process where 1] by pid until [complex where 0] by nested.num'
            '| filter events[0].file_name = events[1].process_name',
            'complex where string_arr[3]',
            'complex where wideopen.a.b[0].def == 1',
            'complex where nested.arr',
            'complex where nested.arr[0] == 1',
            'complex where nested.double_nested.nn == 5',
            'complex where nested.double_nested.triplenest',
            'complex where nested.double_nested.triplenest.m == 5',
            'complex where nested.  double_nested.triplenest.b',
        ]

        with Schema(self.schema):
            for query in valid:
                parse_query(query)
Code example #5
    def test_valid_schema_event(self):
        """Test that schema errors are being raised separately."""
        valid = [
            'process where true', 'file where true', 'complex where true',
            'any where true', 'generic where true'
        ]

        with Schema(self.schema, allow_generic=True, allow_any=True):
            for query in valid:
                parse_query(query)
Code example #6
    def test_strict_schema_success(self):
        """Check that fields can't be compared to null under strict schemas."""
        queries = [
            "process where command_line != 'abc.exe'",
            "process where elevated != true",
            "process where not elevated",
        ]

        with strict_field_schema, Schema(self.schema):
            for query in queries:
                parse_query(query)
Code example #7
    def test_count_schemas(self):
        """Test that schemas are updated with counts in pipes."""
        queries = [
            "process where true | count | filter key == 'total' and percent < 0.5 and count > 0",
            "process where true | unique_count process_name | filter count > 5 and process_name == '*.exe'",
            "sequence[file where 1][process where 1] | unique_count events[0].process_name"
            + " | filter count > 5 and events[1].elevated",
        ]

        with Schema(self.schema):
            for query in queries:
                parse_query(query)
Code example #8
File: test_python_engine.py Project: joswr1ght/eql
    def test_custom_functions(self):
        """Custom functions in python."""
        config = {'flatten': True}
        query = "process where echo(process_name) == \"SvcHost.*\" and command_line == \"* -k *NetworkRes*d\""
        output = self.get_output(queries=[parse_query(query)], config=config)
        event_ids = [event.data['serial_event_id'] for event in output]
        self.validate_results(event_ids, [15, 16, 25],
                              "Custom function 'echo' failed")

        query = "process where length(user_domain)>0 and reverse(echo(user_domain)) = \"YTIROHTUA TN\" | tail 3"
        output = self.get_output(queries=[parse_query(query)], config=config)
        event_ids = [event.data['serial_event_id'] for event in output]
        self.validate_results(event_ids, [43, 45, 52],
                              "Custom function 'reverse'")
Code example #9
File: test_parser.py Project: y3n11/eql
    def test_invalid_time_unit(self):
        """Test that error messages are raised and formatted when time units are missing."""
        with self.assertRaisesRegex(
                EqlSemanticError,
                "Unknown time unit. Recognized units are: ms, s, m, h, d."):
            parse_query(
                "sequence with maxspan=150 zz [foo where true] [bar where true]"
            )

        with self.assertRaisesRegex(
                EqlSemanticError,
                "Unknown time unit. Recognized units are: ms, s, m, h, d."):
            parse_query(
                "sequence with maxspan=150 hours [foo where true] [bar where true]"
            )
Code example #10
 def test_query_type(self):
     """Check eql.utils.get_query_type."""
     self.assertEqual(get_query_type(parse_query("any where true")),
                      "event")
     self.assertEqual(get_query_type(parse_query("process where true")),
                      "event")
     self.assertEqual(
         get_query_type(
             parse_query(
                 "sequence [process where true] [network where true]")),
         "sequence")
     self.assertEqual(
         get_query_type(
             parse_query("join [process where true] [network where true]")),
         "join")
Code example #11
File: test_python_engine.py Project: joswr1ght/eql
    def test_nested_data(self):
        """Test that highly structured is also searchable."""
        event_1 = {
            'top': [{
                'middle': {
                    'abc': 0
                }
            }, {
                'middle2': ['def', 'ghi']
            }]
        }
        event_2 = {
            'top': [{
                'middle': {
                    'abc': 123
                }
            }, {
                'middle2': ['tuv', 'wxyz']
            }]
        }
        events = [
            Event(EVENT_TYPE_GENERIC, 1, event_1),
            Event(EVENT_TYPE_GENERIC, 2, event_2)
        ]

        query = parse_query('generic where top[0].middle.abc == 123')
        results = self.get_output(queries=[query],
                                  events=events,
                                  config={'flatten': True})
        self.assertEqual(len(results), 1, "Missing or extra results")
        self.assertEqual(results[0].data, event_2,
                         "Failed to match on correct event")
Code example #12
    def test_strict_schema_success(self):
        """Check that fields can't be compared to null under strict schemas."""
        queries = [
            "process where command_line != 'abc.exe'",
            "process where elevated != true",
            "process where not elevated",

            # functions that may return null, even with non-null inputs can be compared
            "process where indexOf(process_name, 'foo') != null",

            # since indexOf can be passed into other functions, any can be null
            "process where substring(process_name, indexOf(process_name, 'foo')) == null",
        ]

        with strict_booleans, non_nullable_fields, Schema(self.schema):
            for query in queries:
                parse_query(query)
Code example #13
 def test_array_functions(self):
     """Test that array functions match array fields."""
     valid = [
         "complex where arrayContains(string_arr, 'thesearchstring')",
         "complex where arrayContains(string_arr, 'thesearchstring', 'anothersearchstring')",
         # this should pass until generics/templates are handled better
         "complex where arrayContains(string_arr, 1)",
         "complex where arrayContains(string_arr, 1, 2, 3)",
         "complex where arraySearch(string_arr, x, x == '*subs*')",
         "complex where arraySearch(objarray, x, x.key == 'k')",
         "complex where arraySearch(objarray, x, arraySearch(x, y, y.key == true))",
         "complex where arraySearch(nested.arr, x, x == '*subs*')",
         "complex where arrayContains(objarray, 1)",
         "complex where arrayContains(objarray, 1, 2, 3)",
     ]
     with Schema(self.schema):
         for query in valid:
             parse_query(query)
Code example #14
 def test_invalid_schema_fields(self):
     """Test that schema errors are being raised separately."""
     invalid = [
         'process where not bad_field',
         'process where file_path',
         'file where command_line',
         'process where wideopen.a.b.c',
         'any where invalid_field',
         'complex where nested.  double_nested.b',
         'file where file_path == "abc" and length(data) | unique missing_field == "abc"',
         'sequence [file where pid=1] [process where pid=2] | filter events[0].file_name = events[1].bad',
         'sequence [file where 1] by pid [process where 1] by pid until [complex where 0] by pid'
         '| unique events[0].file_name = events[1].process_name',
     ]
     with Schema(self.schema):
         for query in invalid:
             with self.assertRaises(EqlSchemaError):
                 parse_query(query)
Code example #15
    def test_custom_macro(self):
        """Test python custom macro expansion."""
        def optimize_length(args, walker):
            arg, = args  # only 1 allowed
            if isinstance(arg, String):
                return Number(len(arg.value))
            else:
                return FunctionCall('length', [arg])

        macro = CustomMacro('LENGTH', optimize_length)
        engine = PreProcessor([macro])

        example = parse_query(
            'process where LENGTH("python.exe") == LENGTH(process_name)')
        expected = parse_query('process where 10 == length(process_name)')

        output = engine.expand(example)
        self.assertEqual(output, expected,
                         "Custom macro LENGTH was not properly expanded")

        example = parse_query('process where LENGTH("abc", "def")')
        self.assertRaisesRegex(ValueError, "too many values to unpack",
                               engine.expand, example)
Code example #16
File: build.py Project: sgnls/eql
def render_query(query, engine_type, config=None):
    """Render the full script for an EQL query.

    :param str|PipedQuery query: The query text or parsed query
    :param str engine_type: The target scripting engine
    :param dict config: The configuration for PythonEngine
    """
    metadata = {}
    if not isinstance(query, PipedQuery):
        metadata['_source'] = query
        query = parse_query(query)

    analytic = EqlAnalytic(query=query, metadata=metadata)
    rendered = render_analytic(analytic, engine_type=engine_type, config=config, analytics_only=False)
    return rendered
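A minimal usage sketch for render_query as defined above; the example query and the engine_type value "python" are assumptions for illustration, not taken from the snippet.

# Hedged sketch: assumes render_query is importable from eql.build as
# shown above; the engine_type value "python" is an assumption.
from eql import parse_query
from eql.build import render_query

text = 'process where process_name == "net.exe"'
script_from_text = render_query(text, engine_type="python")
script_from_ast = render_query(parse_query(text), engine_type="python")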
Code example #17
    def test_stateless_checks(self):
        """Check that :func:`~utils.is_stateful` is identifying stateless queries."""
        stateless_queries = [
            "process where true | filter command_line='* https://*' | tail 10",
            "process where user_name='system' | unique parent_process_name | head 500",
            "file where file_name='*.txt' and (process_name='cmd.exe' or parent_process_name='net.exe')",
            "registry where length(user_name) == 500",
            "network where string(destination_port) == '500' | unique process_name",
        ]

        for query in stateless_queries:
            ast = parse_query(query)
            self.assertFalse(
                is_stateful(ast),
                "{} was not recognized as stateless".format(query))
Code example #18
    def test_stateful_checks(self):
        """Check that :func:`~utils.is_stateful` is identifying stateful queries."""
        stateful_queries = [
            "sequence [process where process_name='net.exe']  [process where process_name='net.exe']",
            "join [process where process_name='net.exe']  [process where process_name='net.exe']",
            "file where file_name='*.txt' and descendant of [process where pid=4]",
            "file where child of [process where pid=4]",
            "registry where event of [process where pid=4]",
            "process where true | unique_count process_name | filter count < 5",
            "any where true | count user_name",
        ]

        for query in stateful_queries:
            ast = parse_query(query)
            self.assertTrue(is_stateful(ast),
                            "{} was not recognized as stateful".format(query))
Code example #19
File: test_python_engine.py Project: joswr1ght/eql
    def test_raises_errors(self):
        """Confirm that exceptions are raised when expected."""
        queries = [
            # ('process where bad_field.sub_field == 100', AttributeError),
            ('process where length(0)', TypeError),
            # ('file where file_name.abc', AttributeError),
            # ('file where pid.something', AttributeError),
            ('registry where invalidFunction(pid, ppid)', KeyError),
        ]

        # Make sure that these all work as expected queries
        for query, expected_error in queries:
            parsed_query = parse_query(query)
            self.assertRaises(expected_error,
                              self.get_output,
                              queries=[parsed_query])
Code example #20
    def test_required_event_types(self):
        """Test that ancestry checks are detected."""
        self.assertSetEqual(
            get_required_event_types(parse_query("file where true")), {"file"})

        self.assertSetEqual(
            get_required_event_types(
                parse_query("any where event of [process where true]")),
            {"any", "process"})
        self.assertSetEqual(
            get_required_event_types(
                parse_query("any where descendant of [process where true]")),
            {"any", "process"})

        self.assertSetEqual(
            get_required_event_types(
                parse_query("""
        sequence
            [file where true]
            [process where true]
            [network where true]
        """)), {"file", "process", "network"})

        self.assertSetEqual(
            get_required_event_types(
                parse_query("""
        join
            [file where true]
            [process where true]
            [network where true]
        """)), {"file", "process", "network"})

        self.assertSetEqual(
            get_required_event_types(
                parse_query("""
        file where descendant of [
            dns where child of
                [registry where true]]
        """)), {"file", "dns", "registry"})

        self.assertSetEqual(
            get_required_event_types(
                parse_query("""
        sequence
            [file where descendant of [dns where child of [registry where true]]]
            [process where true]
            [network where true]
        """)), {"file", "dns", "network", "process", "registry"})
Code example #21
File: build.py Project: sgnls/eql
def get_post_processor(query, config=None, query_multiple=True):
    """Run an EQL query or analytic over a list of events and get the results.

    :param str|PipedQuery query: The query text or parsed query
    :param dict config: The configuration for PythonEngine
    :param bool query_multiple: Query over multiple events instead of just the first event
    """
    if not isinstance(query, PipedQuery):
        query = parse_query(query, implied_base=True, implied_any=True)

    def run_engine(inputs):
        results = []
        engine = PythonEngine(config)
        engine.add_post_processor(query, query_multiple=query_multiple)
        engine.add_output_hook(results.append)
        engine.reduce_events(inputs, finalize=True)
        return results

    return run_engine
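A construction-only sketch of how get_post_processor might be called; the query text is illustrative.

# Hedged sketch: build the post-processing callable returned above.
from eql.build import get_post_processor

post_process = get_post_processor("any where true | head 10", query_multiple=True)
# post_process expects the list of result events produced by earlier
# queries and returns the reduced output of the trailing pipes.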
Code example #22
File: build.py Project: sgnls/eql
def get_reducer(query, config=None):
    """Get a reducer to aggregate results from distributed EQL queries.

    :param str|dict|EqlAnalytic|PipedQuery query: The query text or parsed query
    :param dict config: The configuration for PythonEngine
    """
    if isinstance(query, dict):
        query = parse_analytic(query)
    elif is_string(query):
        query = parse_query(query, implied_base=True, implied_any=True)

    def reducer(inputs):
        results = []
        engine = PythonEngine(config)
        engine.add_reducer(query)
        engine.add_output_hook(results.append)

        engine.reduce_events(inputs, finalize=True)
        return results

    return reducer
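A construction sketch for get_reducer; per the isinstance checks above it accepts query text, a parsed PipedQuery, or an analytic dict. The query here is illustrative.

# Hedged sketch: build a reducer for the aggregation pipes of a query.
from eql.build import get_reducer

reducer = get_reducer("process where true | count user_name")
# reducer(mapped_results) -> results aggregated across all inputs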
Code example #23
    def test_uses_ancestry(self):
        """Test that ancestry checks are detected."""
        self.assertFalse(uses_ancestry(parse_query("any where true")))
        self.assertTrue(
            uses_ancestry(parse_query("any where child of [any where true]")))
        self.assertTrue(
            uses_ancestry(
                parse_query("any where descendant of [any where true]")))
        self.assertTrue(
            uses_ancestry(parse_query("any where event of [any where true]")))

        self.assertFalse(
            uses_ancestry(
                parse_query(
                    "sequence [process where true] [network where true]")))
        self.assertTrue(
            uses_ancestry(
                parse_query("""
        sequence
            [process where child of [file where true]]
            [network where true]
        """)))
        self.assertTrue(
            uses_ancestry(
                parse_query("""
        join
            [process where event of [file where true]]
            [network where true]
        """)))
        self.assertTrue(
            uses_ancestry(
                parse_query("""
        join
            [process where descendant of [file where true]]
            [network where true]
        """)))
Code example #24
File: build.py Project: sgnls/eql
def get_engine(query, config=None):
    """Run an EQL query or analytic over a list of events and get the results.

    :param str|dict|EqlAnalytic|PipedQuery query: The query text or parsed query
    :param dict config: The configuration for PythonEngine
    """
    if isinstance(query, dict):
        query = parse_analytic(query)
    elif is_string(query):
        query = parse_query(query, implied_base=True, implied_any=True)

    def run_engine(inputs):
        results = []
        engine = PythonEngine(config)
        if isinstance(query, PipedQuery):
            engine.add_query(query)
        else:
            engine.add_analytic(query)
        engine.add_output_hook(results.append)
        engine.stream_events(inputs, finalize=True)
        return results

    return run_engine
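A map/reduce sketch combining get_engine and get_reducer, modeled on test_map_reduce_queries later in this listing; the query and event data are illustrative.

# Hedged sketch: map a query over one host's events, then reduce the
# partial results, mirroring test_map_reduce_queries below.
from eql import Event
from eql.build import get_engine, get_reducer

query = "generic where true | sort a | head 5"
mapper = get_engine(query, {"flatten": True})
reducer = get_reducer(query)

host_events = [Event.from_data({"a": "host1-%d" % i}) for i in range(10)]
partial = mapper(host_events)   # map step for a single host
final = reducer(partial)        # reduce step across all hosts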
Code example #25
File: main.py Project: sgnls/eql
def query(args):
    """Query over an input file."""
    if args.file:
        stream = stream_file_events(args.file, args.format, args.encoding)
    else:
        stream = stream_stdin_events(args.format)

    config = {'print': True}
    if args.config:
        config.update(load_dump(args.config))

    engine = PythonEngine(config)
    try:
        eql_query = parse_query(args.query,
                                implied_any=True,
                                implied_base=True)
        engine.add_query(eql_query)
    except EqlError as e:
        print(e, file=sys.stderr)
        sys.exit(2)

    engine.stream_events(stream, finalize=False)
    engine.finalize()
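A hypothetical invocation sketch for query() above, using a plain namespace in place of the argparse result; the attribute names come from the function body, while the file name and format value are assumptions.

# Hedged sketch: the attributes mirror those read by query() above;
# "events.json" and the "json" format value are illustrative assumptions.
import argparse

args = argparse.Namespace(
    file="events.json",
    format="json",
    encoding="utf-8",
    config=None,
    query="process where process_name == 'cmd.exe'",
)
# query(args) would then stream events from the file through the engine
# and print matching events, since config sets {'print': True}.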
Code example #26
File: test_python_engine.py Project: joswr1ght/eql
    def test_map_reduce_queries(self):
        """Test map reduce functionality of python engines."""
        input_events = defaultdict(list)
        host_results = []

        for i, host in enumerate("abcdefghijklmnop"):
            events = []
            for event_number in range(10):
                data = {
                    'number': event_number,
                    'a': host + '-a-' + str(event_number),
                    'b': -event_number
                }
                events.append(Event.from_data(data))
            input_events[host] = events

        query_text = 'generic where true | sort a | head 5 | sort b'
        query = parse_query(query_text)
        host_engine = get_engine(query, {'flatten': True})

        # Map across multiple 'hosts'
        for hostname, host_events in input_events.items():
            for event in host_engine(host_events):
                event.data['hostname'] = hostname
                host_results.append(event)

        # Reduce across multiple 'hosts'
        reducer = get_reducer(query)
        reduced_results = reducer(host_results)

        expected_a = ['a-a-{}'.format(value) for value in range(10)][:5][::-1]
        actual_a = [
            event.data['a'] for result in reduced_results
            for event in result.events
        ]
        self.validate_results(actual_a, expected_a, query_text)
Code example #27
File: varna.py Project: sudheer-jerry/varna
def run_eql(eql_text, json_fn=None):
    """Run each EQL query in eql_text over the events in json_fn and return the matches per query."""
    engines = []
    for e in eql_text:

        def out():
            store = []

            def save_event(result):
                for event in result.events:
                    store.append(event.data)

            return (store, save_event)

        result, sa_event = out()

        config = {"print": False, "hooks": [sa_event]}

        engine = PythonEngine(config)

        eql_query = parse_query(e, implied_any=True, implied_base=True)
        engine.add_query(eql_query)
        engines.append((engine, result))

    for event in json_fn:
        if not isinstance(event, Event):
            event = Event.from_data(event)
        for engine, _results in engines:
            engine.stream_event(event)

    results = []

    for engine, result in engines:
        engine.finalize()
        results.append(result)

    return results
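A usage sketch for run_eql above; the queries and event dicts are illustrative and assume, as in other examples in this listing, that Event.from_data reads the event_type field.

# Hedged sketch: run two queries over a couple of in-memory events.
queries = [
    "process where process_name == 'cmd.exe'",
    "process where pid == 4",
]
events = [
    {"event_type": "process", "pid": 4, "process_name": "cmd.exe"},
    {"event_type": "process", "pid": 7, "process_name": "explorer.exe"},
]
matches_per_query = run_eql(queries, json_fn=events)
# matches_per_query[0] holds the data dicts that matched the first query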
Code example #28
File: test_python_engine.py Project: joswr1ght/eql
    def test_engine_load(self):
        """Check that various queries can be converted and loaded into the python engine."""
        engine = PythonEngine()
        engine.add_custom_function('myFn', lambda x, y, z: 100)
        queries = [
            'process where process_name == "net.exe" and command_line == "* user*.exe"',
            'process where command_line == "~!@#$%^&*();\'[]{}\\\\|<>?,./:\\"-= \' "',
            'process where \n\n\npid ==\t 4',
            'process where process_name in ("net.exe", "cmd.exe", "at.exe")',
            'process where command_line == "*.exe *admin*" or command_line == "* a b*"',
            'process where pid in (1,2,3,4,5,6,7,8) and abc == 100 and def == 200 and ghi == 300 and jkl == x',
            'process where ppid != pid',
            'image_load where not x != y',
            'image_load where not x == y',
            'image_load where not not not not x < y',
            'image_load where not x <= y',
            'image_load where not x >= y',
            'image_load where not x > y',
            'process where pid == 4 or pid == 5 or pid == 6 or pid == 7 or pid == 8',
            'network where pid == 0 or pid == 4 or (ppid == 0 or ppid = 4) or (abc == defgh) and process_name == "*" ',
            'network where pid = 4',
            'join \t\t\t[process where process_name == "*"] [  file where file_path == "*"\n]',
            'join by pid [process where name == "*"] [file where path == "*"] until [process where opcode == 2]',
            'sequence [process where name == "*"] [file where path == "*"] until [process where opcode == 2]',
            'sequence by pid [process where name == "*"] [file where path == "*"] until [process where opcode == 2]',
            'join [process where process_name == "*"] by process_path [file where file_path == "*"] by image_path',
            'sequence [process where process_name == "*"] by process_path [file where file_path == "*"] by image_path',
            'sequence by pid [process where process_name == "*"] [file where file_path == "*"]',
            'sequence by pid with maxspan=2s [process where process_name == "*" ] [file where file_path == "*"]',
            'sequence by pid with maxspan=2sec [process where process_name == "*" ] [file where file_path == "*"]',
            'sequence by pid with maxspan=2seconds [process where process_name == "*" ] [file where file_path == "*"]',
            'sequence with maxspan=2.5m [process where x == x] by pid [file where file_path == "*"] by ppid',
            'sequence by pid with maxspan=2.0h [process where process_name == "*"] [file where file_path == "*"]',
            'sequence by pid with maxspan=2.0h [process where process_name == "*"] [file where file_path == "*"]',
            'sequence by pid with maxspan=1.0075d [process where process_name == "*"] [file where file_path == "*"]',
            'process where descendant of [process where process_name == "lsass.exe"] and process_name == "cmd.exe"',
            'dns where pid == 100 | head 100 | tail 50 | unique pid',
            'network where pid == 100 | unique command_line | count',
            'security where user_domain == "endgame" | count user_name | tail 5',
            'process where 1==1 | count user_name, unique_pid, myFn(field2,a,bc)',
            'process where 1==1 | unique user_name, myFn(field2,a,bc), field2',
            'process where true',
            'any where topField.subField[100].subsubField == 0',
            'process where true | filter true',
            'process where 1==1 | filter abc == def',
            'process where 1==1 | filter abc == def and 1 != 2',
            'process where 1==1 | count process_name | filter percent > 0.5',
            'process where a > 100000000000000000000000000000000',
        ]
        for query in queries:
            # Make sure every query can be converted without raising any exceptions
            parsed_query = parse_query(query)
            engine.add_query(parsed_query)

            # Also try to load it as an analytic
            parsed_analytic = parse_analytic({
                'metadata': {
                    'id': uuid.uuid4()
                },
                'query': query
            })
            engine.add_analytic(parsed_analytic)
Code example #29
File: test_python_engine.py Project: joswr1ght/eql
    def test_relationship_pid_collision(self):
        """Confirm that the field used for tracking lineage can be dynamically set."""
        config = {
            'flatten': True,
            'pid_key': 'unique_pid',
            'ppid_key': 'unique_ppid'
        }
        events = [
            Event.from_data(d) for d in [{
                "event_type": "process",
                "pid": 1001,
                "ppid": 1000,
                "unique_pid": "host1-1001",
                "unique_ppid": "host1-1000",
                "process_name": "explorer.exe",
                "subtype": "create"
            }, {
                "event_type": "process",
                "pid": 1002,
                "ppid": 1001,
                "unique_pid": "host1-1002",
                "unique_ppid": "host1-1001",
                "process_name": "powershell.exe",
                "subtype": "create"
            }, {
                "event_type": "process",
                "pid": 1003,
                "ppid": 1002,
                "unique_pid": "host1-1003",
                "unique_ppid": "host1-1002",
                "process_name": "whoami.exe",
                "subtype": "create"
            }, {
                "event_type": "process",
                "pid": 1001,
                "ppid": 1000,
                "unique_pid": "host2-1001",
                "unique_ppid": "host2-1000",
                "process_name": "explorer.exe",
                "subtype": "create"
            }, {
                "event_type": "process",
                "pid": 1002,
                "ppid": 1001,
                "unique_pid": "host2-1002",
                "unique_ppid": "host2-1001",
                "process_name": "cmd.exe",
                "subtype": "create"
            }, {
                "event_type": "process",
                "pid": 1003,
                "ppid": 1002,
                "unique_pid": "host2-1003",
                "unique_ppid": "host2-1002",
                "process_name": "whoami.exe",
                "subtype": "create"
            }]
        ]

        query = "process where child of [process where process_name == 'powershell.exe']"
        output = self.get_output(queries=[parse_query(query)],
                                 config=config,
                                 events=events)
        event_ids = [event.data['unique_pid'] for event in output]
        self.validate_results(event_ids, ['host1-1003'],
                              "Relationships failed due to pid collision")
Code example #30
    def test_valid_queries(self):
        """Make sure that EQL queries are properly parsed."""
        valid = [
            'file where true',
            'file where true and true',
            'file where false or true',
            'registry where not pid',
            'process where process_name == "net.exe" and command_line == "* user*.exe"',
            'process where command_line == "~!@#$%^&*();\'[]{}\\\\|<>?,./:\\"-= \' "',
            'process where \n\n\npid ==\t 4',
            'process where process_name in ("net.exe", "cmd.exe", "at.exe")',
            'process where command_line == "*.exe *admin*" or command_line == "* a b*"',
            'process where pid in (1,2,3,4,5,6,7,8) and abc == 100 and def == 200 and ghi == 300 and jkl == x',
            'process where ppid != pid',
            'image_load where not x != y',
            'image_load where not x == y',
            'image_load where not not not not x < y',
            'image_load where not x <= y',
            'image_load where not x >= y',
            'image_load where not x > y',
            'process where _leadingUnderscore == 100',
            'network where 1 * 2 + 3 * 4 + 10 / 2 == 2 + 12 + 5',
            'file where 1 - -2',
            'file where 1 + (-2)',
            'file where 1 * (-2)',
            'file where 3 * -length(file_path)',
            'network where a * b + c * d + e / f == g + h + i',
            'network where a * (b + c * d) + e / f == g + h + i',
            'process where pid == 4 or pid == 5 or pid == 6 or pid == 7 or pid == 8',
            'network where pid == 0 or pid == 4 or (ppid == 0 or ppid = 4) or (abc == defgh) and process_name == "*" ',
            'network where pid = 4',
            'process where descendant of [process where process_name == "lsass.exe"] and process_name == "cmd.exe"',
            'join \t\t\t[process where process_name == "*"] [  file where file_path == "*"\n]',
            'join by pid [process where name == "*"] [file where path == "*"] until [process where opcode == 2]',
            'sequence [process where name == "*"] [file where path == "*"] until [process where opcode == 2]',
            'sequence by pid [process where name == "*"] [file where path == "*"] until [process where opcode == 2]',
            'join [process where process_name == "*"] by process_path [file where file_path == "*"] by image_path',
            'sequence [process where process_name == "*"] by process_path [file where file_path == "*"] by image_path',
            'sequence by pid [process where process_name == "*"] [file where file_path == "*"]',
            'sequence by pid with maxspan=200 [process where process_name == "*" ] [file where file_path == "*"]',
            'sequence by pid with maxspan=2s [process where process_name == "*" ] [file where file_path == "*"]',
            'sequence by pid with maxspan=2sec [process where process_name == "*" ] [file where file_path == "*"]',
            'sequence by pid with maxspan=2seconds [process where process_name == "*" ] [file where file_path == "*"]',
            'sequence with maxspan=2.5m [process where x == x] by pid [file where file_path == "*"] by ppid',
            'sequence by pid with maxspan=2.0h [process where process_name == "*"] [file where file_path == "*"]',
            'sequence by pid with maxspan=2.0h [process where process_name == "*"] [file where file_path == "*"]',
            'sequence by pid with maxspan=1.0075d [process where process_name == "*"] [file where file_path == "*"]',
            'dns where pid == 100 | head 100 | tail 50 | unique pid',
            'network where pid == 100 | unique command_line | count',
            'security where user_domain == "endgame" | count user_name a b | tail 5',
            'process where 1==1 | count user_name, unique_pid, concat(field2,a,bc)',
            'process where 1==1 | unique user_name, concat(field2,a,bc), field2',
            'registry where a.b',
            'registry where a[0]',
            'registry where a.b.c.d.e',
            'registry where a.b.c[0]',
            'registry where a[0].b',
            'registry where a[0][1].b',
            'registry where a[0].b[1]',
            'registry where topField.subField[100].subsubField == 0',
            'process where true | filter true',
            'process where 1==1 | filter abc == def',
            'process where 1==1 | filter abc == def and 1 != 2',
            'process where 1==1 | count process_name | filter percent > 0.5',
            'process where a > 100000000000000000000000000000000',
            'any where true | unique a b c | sort a b c | count',
            'any where true | unique a, b,   c | sort a b c | count',
            'any where true | unique a, b,   c | sort a,b,c | count',
            'file where child of [registry where true]',
            'file where event of [registry where true]',
            'file where event of [registry where true]',
            'file where descendant of [registry where true]',
            # multiple by values
            'sequence by field1  [file where true] by f1 [process where true] by f1',
            'sequence by a,b,c,d [file where true] by f1,f2 [process where true] by f1,f2',
            'sequence [file where 1] by f1,f2 [process where 1] by f1,f2 until [process where 1] by f1,f2',
            'sequence by f [file where true] by a,b [process where true] by c,d until [process where 1] by e,f',
            # sequence with named params
            'sequence by unique_pid [process where true] [file where true] fork',
            'sequence by unique_pid [process where true] [file where true] fork=true',
            'sequence by unique_pid [process where true] [file where true] fork=1',
            'sequence by unique_pid [process where true] [file where true] fork=false',
            'sequence by unique_pid [process where true] [file where true] fork=0 [network where true]',
            'sequence by unique_pid [process where true] [file where true] fork=0',
        ]

        datetime.datetime.now()

        for i, text in enumerate(valid):
            try:
                query = parse_query(text)
                rendered = query.render()
                self.assertEqual(text.split()[0], rendered.split()[0])

                # parse it again to make sure it's still valid and doesn't change
                parse_again = parse_query(rendered)
                rendered_again = parse_again.render()

                # repr + eval should also restore it properly
                # Test that eval + repr works
                actual_repr = repr(query)
                eval_actual = eval(actual_repr)

                self.assertEqual(query, parse_again,
                                 "Query didn't reparse correctly.")
                self.assertEqual(rendered, rendered_again)
                self.assertEqual(query, eval_actual)

            except (EqlSyntaxError, EqlSemanticError):
                # print_exc() already prints the active exception and its traceback
                traceback.print_exc()
                self.fail("Unable to parse query #{}: {}".format(i, text))