Example #1
def test_alert_native_graph(alert):
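    """Test building a NetworkX graph from alert entities and linking them to the alert node."""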
    alert = SecurityAlert(alert)
    graph = None
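    # Compose each entity's own sub-graph into a single combined graph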
    for ent in alert.entities:
        if graph is None:
            graph = ent.to_networkx()
        else:
            if not graph.has_node(ent):
                graph = nx.compose(graph, ent.to_networkx())
    check.greater_equal(len(graph.nodes), 1)

    alert_ent = entities.Alert(alert)
    # alert_graph = alert_ent.to_networkx()
    # nx_alert_node = next(iter(alert_graph.nodes))
    # graph = nx.compose(alert_graph, graph)

    # Get the sets of components
    connected_components = list(nx.connected_components(graph))
    for sub_graph in connected_components:
        # connect alert to most connected entities
        node_neighbors = [(node, len(list(nx.neighbors(graph, node))))
                          for node in sub_graph]
        most_connected_nodes = [
            node for node, neighbors in node_neighbors if neighbors > 1
        ]
        node_list = most_connected_nodes or [
            node for node, _ in node_neighbors
        ]
        for node in node_list:
            # if node != nx_alert_node:
            graph.add_edge(alert_ent, node, name=node.__class__.__name__)

    check.equal(len(list(nx.connected_components(graph))), 1)
Example #2
def test_pivot_funcs_df_merge(_create_pivot, join_type, test_case):
    """Test calling function with DF input attributes."""
    func = getattr(getattr(test_case.entity, test_case.provider),
                   test_case.pivot_func)
    # Test DF input
    val = enumerate(test_case.value.keys())
    in_df = pd.DataFrame(val, columns=["idx", test_case.src_df_col])
    in_df["extra_col1"] = "test1"
    in_df["extra_col2"] = "test2"
    result_no_merge_df = func(data=in_df, src_column=test_case.src_df_col)
    result_df = func(data=in_df,
                     src_column=test_case.src_df_col,
                     join=join_type)

    in_cols = in_df.shape[1]
    no_merge_cols = result_no_merge_df.shape[1]
    merge_cols = result_df.shape[1]
    # merged DF should have result + input cols - join key col
    check.greater_equal(no_merge_cols + in_cols, merge_cols)

    if join_type in ("left", "inner"):
        # inner and left joins should return at least as many rows as the input
        check.greater_equal(result_df.shape[0], in_df.shape[0])
        # all the keys from the input should be in the merged output
        for key in in_df[test_case.src_df_col]:
            check.is_in(key, result_df[test_case.key_col].values)
    if join_type == "right":
        # We don't know how many results a right join returns
        # (although it should not be zero)
        check.greater(len(result_df), 0)
        # but all of its key values should be present in input
        for key in result_df[test_case.key_col].values:
            check.is_in(key, in_df[test_case.src_df_col].values)
Example #3
def test_key_vault_editor(mp_conf_ctrl):
    """Items edit controls."""
    edit_comp = CEKeyVault(mp_controls=mp_conf_ctrl)

    check.is_not_none(edit_comp.help.html_help.value)
    check.is_not_none(edit_comp._DESCRIPTION)
    check.is_not_none(edit_comp._COMP_PATH)
    check.greater_equal(len(edit_comp._HELP_URI), 1)

    edit_comp.controls["TenantId"].value = "tenant"  # invalid UUID
    edit_comp.controls["SubscriptionId"].value = "sub"  # invalid UUID
    edit_comp.controls["ResourceGroup"].value = ""  # OK to have empty
    edit_comp.controls["AzureRegion"].value = ""  # OK to have empty
    edit_comp.controls["VaultName"].value = ""  # invalid to have empty
    edit_comp.btn_save.click()

    results = mp_conf_ctrl.validate_setting(edit_comp._COMP_PATH)
    check.equal(len(results), 3)

    edit_comp.controls["TenantId"].value = "40dcc8bf-0478-4f3b-b275-ed0a94f2c013"
    edit_comp.controls["SubscriptionId"].value = "40dcc8bf-0478-4f3b-b275-ed0a94f2c013"
    edit_comp.controls["ResourceGroup"].value = "resgroup"
    edit_comp.controls["AzureRegion"].value = "Europe"
    edit_comp.controls["VaultName"].value = "MyVault"
    edit_comp.controls["Authority"].value = "global"
    edit_comp.btn_save.click()

    results = mp_conf_ctrl.validate_setting(edit_comp._COMP_PATH)
    check.equal(len(results), 0)
Example #4
def test_mordor_download(mdr_driver: MordorDriver):
    """Test file download."""
    entry_id = "SDWIN-190319021158"
    entry = mdr_driver.mordor_data[entry_id]
    files = entry.get_file_paths()

    file_path = files[0]["file_path"]
    d_frame = download_mdr_file(file_path, save_folder=_SAVE_FOLDER2)

    check.is_instance(d_frame, pd.DataFrame)
    check.greater_equal(len(d_frame), 10)
    _cleanup_temp_files(_SAVE_FOLDER2)
Example #5
def _execute_item_editor_test(edit_comp, cases):
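    """Exercise add, save, and delete actions on an item editor component for each test case."""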
    check.is_not_none(edit_comp.help.html_help.value)
    check.is_not_none(edit_comp._DESCRIPTION)
    check.is_not_none(edit_comp._COMP_PATH)
    check.greater_equal(len(edit_comp._HELP_URI), 1)

    for test_opt in cases:
        print(f"Testing {edit_comp.__class__.__name__}, {test_opt}")
        opts = edit_comp.select_item.options
        n_opts = len(opts)

        # If this control has an options list, select the current test option
        prov_opts = getattr(edit_comp, "prov_options", None)
        if prov_opts and prov_opts.options:
            edit_comp.prov_options.value = test_opt

        # If an item with this name already exists, delete it
        if _is_current_option(test_opt, edit_comp.select_item):
            edit_comp.select_item.label = test_opt
            edit_comp.edit_buttons.btn_del.click()
            n_opts -= 1

        # Add a new one
        edit_comp.edit_buttons.btn_add.click()
        # Save the current item
        edit_comp.edit_buttons.btn_save.click()
        check.equal(len(edit_comp.select_item.options), n_opts + 1, "Item added")

        if isinstance(edit_comp, CEAzureSentinel):
            if _is_current_option("Default", edit_comp.select_item):
                edit_comp.select_item.label = "Default"
                edit_comp.edit_buttons.btn_del.click()
                n_opts -= 1

            edit_comp.btn_set_default.click()
            edit_comp.edit_buttons.btn_save.click()
            n_opts += 1
            check.equal(
                len(edit_comp.select_item.options), n_opts + 1, "AzSent default added"
            )

        if prov_opts and prov_opts.options:
            edit_comp.prov_options.value = test_opt
            edit_comp.edit_buttons.btn_add.click()
            # check that we didn't add a duplicate
            check.equal(
                len(edit_comp.select_item.options), n_opts + 1, "Dup item not added"
            )

        # delete whatever we've just added
        edit_comp.edit_buttons.btn_del.click()
        check.equal(len(edit_comp.select_item.options), n_opts, "New item deleted")
Example #6
def test_mordor_download(mdr_driver: MordorDriver):
    """Test file download."""
    global _SAVE_PATH
    entry_id = "SDWIN-190319021158"
    entry = mdr_driver.mordor_data[entry_id]
    files = entry.get_file_paths()

    file_path = files[0]["file_path"]
    d_frame = download_mdr_file(file_path, save_folder="mordor_test")
    _SAVE_PATH = file_path.split("/")[-1]

    check.is_instance(d_frame, pd.DataFrame)
    check.greater_equal(len(d_frame), 10)
Example #7
def test_pivot_providers_namespace(_create_pivot_ns, test_case):
    """
    Test pivot initialized from globals/namespace.

    Notes
    -----
    Test that the expected number of functions have been added to entities.

    """
    entity = getattr(entities, test_case.entity)
    query_contr = getattr(entity, test_case.container)
    check.is_not_none(query_contr)
    query_attrs = repr(query_contr).split("\n")
    check.greater_equal(len(query_attrs), test_case.funcs)
Example #8
def test_class_methods():
    """Test method."""
    for _, nblt in nblts.iter_classes():
        check.is_not_none(nblt.description())
        check.is_not_none(nblt.name())
        all_opts = len(nblt.all_options())
        check.greater_equal(all_opts, len(nblt.default_options()))
        check.greater(len(nblt.keywords()), 0)
        check.greater(len(nblt.entity_types()), 0)
        metadata = nblt.get_settings(print_settings=False)
        check.is_not_none(metadata)
        check.is_in("mod_name", metadata)
        check.is_in("default_options", metadata)
        check.is_in("keywords", metadata)
Example #9
def test_add_queries_to_entities(entity, expected, azure_sentinel):
    """Test query functions successfully added to entities."""
    az_qry_funcs = PivotQueryFunctions(azure_sentinel)
    add_queries_to_entities(az_qry_funcs, "data", _get_timespan)

    check.is_true(hasattr(entity, "data"))
    f_container = getattr(entity, "data")
    check.is_instance(f_container, QueryContainer)
    funcs = [func for func in dir(f_container) if not func.startswith("_")]
    check.greater_equal(len(funcs), expected)

    for func_name in funcs:
        func = getattr(f_container, func_name)
        check.equal(func.__qualname__,
                    "_param_and_call_wrapper.<locals>.wrapped_query_func")
        check.is_in("Parameters", func.__doc__)
Example #10
    def test_completion_of_(self, task_name):
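        """Check that the scripted option for the task obtains its goal item."""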
        env = MineCraftingEnv()
        task: TaskObtainItem = get_task_from_name(task_name, env.world)
        env.add_task(task, can_end=True)

        all_options = env.world.get_all_options()
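        # Select the pre-built option that obtains the task's goal item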
        option_solving_task = all_options[f"Get {task.goal_item}"]

        observation = env.reset()
        done = False
        while not done:
            action = option_solving_task(observation)
            observation, _, done, _ = env.step(action)

        item_slot = env.world.item_id_to_slot[task.goal_item.item_id]
        check.greater_equal(env.player.inventory.content[item_slot], 1,
                            f"{task_name} completed.")
Example #11
def test_read_modules():
    """Test method."""
    nbklts = discover_modules()
    check.greater_equal(len(list(nbklts.iter_classes())), 4)

    # pylint: disable=no-member
    match, m_count = nblts.azsent.host.HostSummary.match_terms(
        "host, linux, azure")
    check.is_true(match)
    check.equal(m_count, 3)

    for key, value in nbklts.iter_classes():
        check.is_instance(key, str)
        check.is_true(issubclass(value, Notebooklet))

    find_res = find("host windows azure")
    check.greater(len(find_res), 0)
    not_found = find("monkey stew")
    check.equal(len(not_found), 0)
Example #12
def test_query_functions_methods(azure_sentinel):
    """Test attributes of retrieved functions."""
    az_qry_funcs = PivotQueryFunctions(azure_sentinel)

    ip_addr_q_params = list(
        az_qry_funcs.get_queries_and_types_for_param("ip_address"))
    host_queries = list(az_qry_funcs.get_queries_for_param("host_name"))

    check.greater_equal(len(ip_addr_q_params), 4)
    check.greater_equal(len(host_queries), 20)

    func_name, func_family, func = [
        q_tup for q_tup in host_queries if q_tup[0] == "get_info_by_hostname"
    ][0]
    check.is_instance(func, partial)
    check.is_true(callable(func))

    q_params = az_qry_funcs.query_params.get(f"{func_family}.{func_name}")
    # expected results
    # all=['table', 'query_project', 'start', 'end', 'subscription_filter',
    #  'add_query_items', 'host_name', 'host_op'],
    # required=['host_name'],
    # full_required=['start', 'end', 'host_name']
    check.is_in("start", q_params.all)
    check.is_in("host_name", q_params.required)
    check.is_in("host_name", q_params.full_required)
    check.is_in("start", q_params.full_required)
    check.is_in("end", q_params.full_required)

    param_attrs = az_qry_funcs.get_param_attrs("ip_address")
    # Expected return
    # [ParamAttrs(type='str', query='get_info_by_ipaddress', family='Heartbeat',
    #  required=True),
    # ParamAttrs(type='str', query='list_logons_for_source_ip', family='LinuxSyslog',
    #  required=True),
    # ParamAttrs(type='str', query='get_host_for_ip', family='Network',
    #  required=True),
    # ParamAttrs(type='str', query='get_heartbeat_for_ip', family='Network',
    #  required=True)]
    check.is_in(param_attrs[0].type, ("str", "list", "datetime"))
    check.is_true(param_attrs[0].required)
    check.is_not_none(param_attrs[0].query)
    check.is_not_none(param_attrs[0].family)
Example #13
def test_mordor_search(mdr_driver: MordorDriver):
    """Test search functionality."""
    results = search_mdr_data(mdr_driver.mordor_data, "AWS")
    check.greater_equal(len(results), 1)

    subset = search_mdr_data(mdr_driver.mordor_data, "Empire")
    check.greater_equal(len(subset), 39)

    emp_power = search_mdr_data(mdr_driver.mordor_data, "Empire+Power")
    check.greater_equal(len(emp_power), 18)
    check.greater_equal(
        len(search_mdr_data(mdr_driver.mordor_data, "Empire, Windows")), 50
    )

    subset_search = search_mdr_data(mdr_driver.mordor_data, "Power", subset=subset)
    check.equal(len(emp_power), len(subset_search))

    result_set = list(mdr_driver.search_queries("AWS"))
    check.greater_equal(len(result_set), 1)
    check.is_true(any(hit for hit in result_set if "small.aws.collection" in hit))
Example #14
def test_read_custom_path():
    """Test method."""
    cust_nb_path = Path(TEST_DATA_PATH) / "custom_nb"
    nbklts = discover_modules(nb_path=str(cust_nb_path))
    check.greater_equal(len(list(nbklts.iter_classes())), 5)

    # pylint: disable=no-member
    match, m_count = nblts.custom_nb.host.CustomNB.match_terms("Custom")
    check.is_true(match)
    check.equal(m_count, 1)

    for key, value in nbklts.iter_classes():
        check.is_instance(key, str)
        check.is_true(issubclass(value, Notebooklet))

    find_res = find("banana")
    check.equal(len(find_res), 1)
    find_res = find("<<Test Marker>>")
    check.equal(len(find_res), 1)
    check.equal(find_res[0][0], "CustomNB")
    check.is_in("nblts.host.CustomNB", nb_index)
Example #15
def test_mordor_query_provider(qry_provider):
    """Test query functions from query provider."""
    queries = qry_provider.list_queries()
    check.greater_equal(len(queries), 50)

    check.is_true(hasattr(qry_provider, "small"))
    check.is_true(hasattr(qry_provider, queries[0]))

    q_func = getattr(qry_provider, queries[2])
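    # Calling the query function with "?" should print its help text; capture stdout to verify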
    output = io.StringIO()
    with contextlib.redirect_stdout(output):
        q_func("?")
    check.is_in("Query:", output.getvalue())
    check.is_in("Data source:  Mordor", output.getvalue())
    check.is_in("Mordor ID:", output.getvalue())
    check.is_in("Mitre Techniques:", output.getvalue())

    f_path = q_func("print")
    check.is_in("https://raw.githubusercontent.com/OTRF/mordor", f_path)

    d_frame = q_func()
    check.is_instance(d_frame, pd.DataFrame)
    check.greater_equal(len(d_frame), 10)
Example #16
def test_greater_equal():
    check.greater_equal(2, 1)
    check.greater_equal(1, 1)
Example #17
def test_generate_new_heightmap():
    environment = model.Environment(h=10, w=10)
    check.is_not_none(environment.heightmap)
    check.greater_equal(np.min(environment.heightmap), 0)
    check.less_equal(np.max(environment.heightmap), 1)
Example #18
def test_create_query_functions(azure_sentinel):
    """Test basic creation of query functions class."""
    az_qry_funcs = PivotQueryFunctions(azure_sentinel)

    check.greater_equal(len(az_qry_funcs.param_usage), 30)
    check.greater_equal(len(az_qry_funcs.query_params), 70)
Example #19
def test_pivot_funcs_df_merge(_create_pivot, join_type, test_case):
    """Test calling function with DF input attributes."""
    func = getattr(getattr(test_case.entity, test_case.provider), test_case.pivot_func)
    # Test DF input
    val = test_case.value
    in_df = pd.DataFrame(val, columns=[test_case.src_df_col])
    params = {test_case.func_param: test_case.src_df_col}
    in_df["extra_col1"] = "test1"
    in_df["extra_col2"] = "test2"
    result_no_merge_df = func(data=in_df, **params)

    if test_case.entity not in (entities.Account, entities.Host):
        # The IP test uses a list param so we cannot do index joins
        # with it
        with pytest.warns(UserWarning):
            result_df = func(data=in_df, **params, join=join_type)
        return

    # should work ok with Account and Host
    result_df = func(data=in_df, **params, join=join_type)

    in_cols = in_df.shape[1]
    no_merge_cols = result_no_merge_df.shape[1]
    merge_cols = result_df.shape[1]
    # merged DF should have result + input cols - join key col
    check.greater_equal(no_merge_cols + in_cols, merge_cols)

    if join_type in ("left", "inner"):
        # inner and left joins should return at least as many rows as the input
        check.greater_equal(result_df.shape[0], in_df.shape[0])
        # all the keys from the input should be in the merged output
        for row_val in in_df[test_case.src_df_col]:
            check.is_in(row_val, result_df[test_case.src_df_col].values)
    if join_type == "right":
        # We don't know how many results a right join returns
        # (although it should not be zero)
        check.greater(len(result_df), 0)
        # but all of its key values should be present in input
        for row_val in result_df[test_case.src_df_col].values:
            check.is_in(row_val, in_df[test_case.src_df_col].values)

    join_in_data = {
        0: "0x3e7",
        1: "0xc90e957",
        2: "0xc90ea44",
        3: "0xc912d62",
        4: "0xc913737",
        10: "0x3e3",
        14: "0x3e4",
        15: "0xaddd",
        16: "0xafff",
        17: "0x3e5",
        23: "no_match",
    }
    in_df = pd.DataFrame(
        pd.Series(join_in_data), columns=["TargetLogonId"]
    ).reset_index()
    result_no_merge_df = func(data=in_df, **params)
    result_df = func(
        data=in_df,
        **params,
        join=join_type,
        left_on="TargetLogonId",
        right_on="TargetLogonId",
    )
    check.is_not_none(result_df)

    if join_type in ("inner", "right"):
        check.equal(len(result_df), len(result_no_merge_df))
        for val in join_in_data.values():
            if val != "no_match":
                check.is_in(val, result_df["TargetLogonId"].values)
            else:
                check.is_not_in(val, result_df["TargetLogonId"].values)
    if join_type == "left":
        check.equal(len(result_df), len(result_no_merge_df) + 1)
        for val in join_in_data.values():
            check.is_in(val, result_df["TargetLogonId"].values)