def test_splunk_connect_errors(splunk_client):
    """Check connect failure errors.

    Replaces the splunk client's connect with the `cli_connect` test double,
    which presumably raises based on the `host` value passed in -- the two
    scenarios below use "AuthError" and "HTTPError" hosts.
    """
    splunk_client.connect = cli_connect
    sp_driver = SplunkDriver()
    check.is_true(sp_driver.loaded)
    print("connected", sp_driver.connected)
    # Scenario 1: authentication failure
    with pytest.raises(MsticpyConnectionError) as mp_ex:
        # [SuppressMessage("Microsoft.Security", "CS002:SecretInNextLine", Justification="Test code")]
        sp_driver.connect(host="AuthError", username="******", password=_FAKE_STRING)  # nosec
    print("connected", sp_driver.connected)
    check.is_false(sp_driver.connected)
    check.is_in("Splunk connection", mp_ex.value.args)
    # Scenario 2: fresh driver, HTTP-level failure
    sp_driver = SplunkDriver()
    print("connected", sp_driver.connected)
    with pytest.raises(MsticpyConnectionError) as mp_ex:
        # [SuppressMessage("Microsoft.Security", "CS002:SecretInNextLine", Justification="Test code")]
        sp_driver.connect(host="HTTPError", username="******", password=_FAKE_STRING)  # nosec
    print("connected", sp_driver.connected)
    check.is_false(sp_driver.connected)
    check.is_in("Splunk connection", mp_ex.value.args)
def test_call(self, mocker):
    """Calling the handler instance should delegate to ``_reward``."""
    mocker.patch("learnrl.playground.RewardHandler._reward", return_value=True)
    reward_handler = RewardHandler()
    result = reward_handler(**self.experience, logs={})
    check.is_true(result)
    check.is_true(reward_handler._reward.called)
def test_find_in_ahref_internal():
    """Searching anchors with external=False should yield no matches here."""
    page = BasePage(TEST_HTML_PAGE, TEST_HTML_URL)
    target = "Python 3.8.6 is now available"
    internal_only = page.find_in_ahref(target, external=False)
    every_match = page.find_in_ahref(target)
    check.is_true(every_match)
    check.equal(len(internal_only), 0)
def test_obfuscate_df():
    """Test obfuscation on DataFrame.

    Columns listed in OBFUS_COL_MAP must change after obfuscation; all
    others must be untouched. Also cross-checks `check_obfuscation` counts.
    """
    win_procs = pd.read_pickle(Path(TEST_DATA_PATH).joinpath("win_proc_test.pkl"))
    out_df = data_obfus.obfuscate_df(win_procs)
    # Obfuscation must not drop or add rows.
    check.equal(len(out_df), len(win_procs))
    for idx, row in win_procs.loc[:5].iterrows():
        for mapped_col in win_procs.columns:
            if data_obfus.OBFUS_COL_MAP.get(mapped_col) == "sid":
                # SIDs can be unchanged if well-known SID
                continue
            if mapped_col in data_obfus.OBFUS_COL_MAP:
                # Mapped columns must be rewritten by the obfuscator.
                check.not_equal(row[mapped_col], out_df.loc[idx][mapped_col])
            else:
                # Unmapped columns must be preserved verbatim.
                check.equal(row[mapped_col], out_df.loc[idx][mapped_col])
        comp_uc, comp_ch = data_obfus.check_obfuscation(out_df, win_procs, index=idx)
        n_changed = len([col for col in win_procs.columns if col in data_obfus.OBFUS_COL_MAP])
        n_unchanged = len(win_procs.columns) - n_changed
        # number of unchanged might be one less since some SIDs are not hashed
        check.is_true(len(comp_uc) in [n_unchanged, n_unchanged + 1])
        check.is_true(len(comp_ch) in [n_changed, n_changed - 1])
def test_normalRecharge(api, db, case_data):
    """Recharge a fuel card (充值加油卡)."""
    url = '/gasStation/process'
    data_source_id = case_data.get('data_source_id')
    card_number = case_data.get('card_number')
    cardBalance = case_data.get('card_balance')
    # Environment check: make sure the card exists before recharging.
    if db.check_card(card_number):
        print(f'卡号: {card_number} 已存在')
    else:
        db.add_card(card_number)
    # Card balance before the recharge
    balance = db.check_cardBalance(card_number)
    print("充值前卡余额:", balance)
    json_data = {
        "dataSourceId": data_source_id,
        "methodId": "03A",  # presumably the "recharge" method id -- TODO confirm
        "CardInfo": {
            "cardNumber": card_number,
            "cardBalance": cardBalance
        }
    }
    res_dict = api.post(url, json=json_data).json()
    # Response assertions
    ck.equal(200, res_dict.get("code"))
    ck.equal("充值成功", res_dict.get("msg"))
    ck.is_true(res_dict.get('success'))
    # Database assertion (balance after the recharge)
    ck.equal(balance + cardBalance, db.check_cardBalance(card_number))
def test_reward_shaping_all_useful(self, mocker: MockerFixture):
    """should give achivement value for every item in solving option unrolled graph."""
    TaskObtainItem(
        world=self.dummy_world,
        item=self.dummy_item,
        reward_shaping=RewardShaping.ALL_USEFUL,
        shaping_value=self.shaping_value,
    )
    # Track which items received a shaping achievement.
    is_called = {item.item_id: False for item in self.dummy_items}
    # presumably items [2:] form the unrolled solving graph -- TODO confirm
    expected_called_items = [item.item_id for item in self.dummy_items[2:]]
    # First recorded call is the end-task achievement on the target item.
    check.equal(
        self.add_achivement_mocker.call_args_list[0].args,
        (self.dummy_item, 10),
    )
    check.equal(
        self.add_achivement_mocker.call_args_list[0].kwargs, {"end_task": True}
    )
    # Remaining calls are shaping achievements with the configured value.
    for call in self.add_achivement_mocker.call_args_list[1:]:
        item_called: Item = call.args[0]
        is_called[item_called.item_id] = True
        check.equal(call.args[1], self.shaping_value)
    # Only the expected items may have been rewarded.
    for item in self.dummy_items:
        item_is_called = is_called[item.item_id]
        if item.item_id in expected_called_items:
            check.is_true(item_is_called, f"{item} was not called when expected.")
        else:
            check.is_false(item_is_called, f"{item} was called when not expected.")
def test_normalConsumption(api, db, case_data):
    """Fuel card consumption (加油卡消费)."""
    url = '/gasStation/process'
    data_source_id = case_data.get('data_source_id')
    card_number = case_data.get('card_number')
    user_id = case_data.get('user_id')
    cardBalance = case_data.get('card_balance')
    # Environment check: the card needs enough balance to consume from.
    balance = db.check_cardBalance(card_number)
    if balance < cardBalance:
        # NOTE(review): check_cardBalance is called with a second argument
        # here -- looks like it is meant to top up / set the balance when
        # insufficient; confirm the db helper supports this signature.
        balance = db.check_cardBalance(card_number, cardBalance)
    json_data = {
        "dataSourceId": data_source_id,
        "methodId": "04A",  # presumably the "consume" method id -- TODO confirm
        "CardUser": {
            "userId": user_id
        },
        "CardInfo": {
            "cardNumber": card_number,
            "cardBalance": cardBalance
        }
    }
    res_dict = api.post(url, json=json_data).json()
    # Response assertions
    ck.equal(200, res_dict.get("code"))
    ck.equal("消费成功!", res_dict.get("msg"))
    ck.is_true(res_dict.get('success'))
    # Database assertion (balance after the consumption)
    ck.equal(balance - cardBalance, db.check_cardBalance(card_number))
def test_reward_shaping_all(self, mocker: MockerFixture):
    """Every world item should receive the shaping achievement value."""
    TaskObtainItem(
        world=self.dummy_world,
        item=self.dummy_item,
        reward_shaping=RewardShaping.ALL,
        shaping_value=self.shaping_value,
    )
    calls = self.add_achivement_mocker.call_args_list
    # The first call ends the task on the target item itself.
    check.equal(calls[0].args, (self.dummy_item, 10))
    check.equal(calls[0].kwargs, {"end_task": True})
    # Every later call must carry the configured shaping value.
    rewarded_ids = set()
    for shaping_call in calls[1:]:
        rewarded_item: Item = shaping_call.args[0]
        rewarded_ids.add(rewarded_item.item_id)
        check.equal(shaping_call.args[1], self.shaping_value)
    # No item of the world may be left without a shaping achievement.
    check.is_true(all(item.item_id in rewarded_ids for item in self.dummy_items))
def test_logon_session_rarity_notebooklet(monkeypatch):
    """Test basic run of notebooklet."""
    # Replace the GeoIP provider with a mock to avoid external lookups.
    monkeypatch.setattr(data_providers, "GeoLiteLookup", GeoIPLiteMock)
    test_data = str(Path(TEST_DATA_PATH).absolute())
    data_providers.init(
        query_provider="LocalData",
        LocalData_data_paths=[test_data],
        LocalData_query_paths=[test_data],
    )
    d_path = Path(TEST_DATA_PATH).joinpath("processes_on_host.pkl")
    raw_data = pd.read_pickle(d_path)
    # Build the input: the first 1000 rows plus all MSTICAdmin sessions.
    filt_sess = raw_data[raw_data["Account"] == "MSTICAlertsWin1\\MSTICAdmin"]
    data = pd.concat([raw_data.iloc[:1000], filt_sess])
    check.is_true(hasattr(nblts.azsent.host, "LogonSessionsRarity"))
    if not hasattr(nblts.azsent.host, "LogonSessionsRarity"):
        # Diagnostic output if the notebooklet class failed to load.
        print(nblts.azsent.host)
    test_nb = nblts.azsent.host.LogonSessionsRarity()
    result = test_nb.run(data=data)
    # The run result should expose three DataFrame attributes.
    check.is_instance(result.process_clusters, pd.DataFrame)
    check.is_instance(result.processes_with_cluster, pd.DataFrame)
    check.is_instance(result.session_rarity, pd.DataFrame)
    # Smoke-test the display/plot helpers (no assertions on their output).
    result.list_sessions_by_rarity()
    result.plot_sessions_by_rarity()
    result.process_tree(account="MSTICAlertsWin1\\MSTICAdmin")
def test_azure_sentinel_editor(mp_conf_ctrl):
    """Items edit controls."""
    edit_comp = CEAzureSentinel(mp_controls=mp_conf_ctrl)
    n_opts = len(edit_comp.select_item.options)
    # Adding a new item should grow the options list by one.
    edit_comp.edit_buttons.btn_add.click()
    check.equal(n_opts + 1, len(edit_comp.select_item.options))
    new_ws = edit_comp.current_workspace
    # A freshly added workspace should fail validation (no IDs set yet).
    result, _ = _validate_ws(new_ws, mp_conf_ctrl, edit_comp._COMP_PATH)
    check.is_false(result)
    # Fill in the ID fields and save -- validation should then pass.
    edit_comp.edit_ctrls.children[1].value = "40dcc8bf-0478-4f3b-b275-ed0a94f2c013"
    edit_comp.edit_ctrls.children[2].value = "40dcc8bf-0478-4f3b-b275-ed0a94f2c013"
    edit_comp.edit_buttons.btn_save.click()
    result, _ = _validate_ws(new_ws, mp_conf_ctrl, edit_comp._COMP_PATH)
    check.is_true(result)
    # Save the current item
    edit_comp.edit_buttons.btn_save.click()
    check.is_not_none(mp_conf_ctrl.get_value(f"{edit_comp._COMP_PATH}.{new_ws}"))
    # Rename
    edit_comp.edit_ctrls.children[0].value = "TestWS"
    edit_comp.edit_buttons.btn_save.click()
    ren_workspace_settings = mp_conf_ctrl.get_value(f"{edit_comp._COMP_PATH}.TestWS")
    check.is_not_none(ren_workspace_settings)
    # Setting default should copy the renamed workspace to the Default key.
    edit_comp.btn_set_default.click()
    def_ws = mp_conf_ctrl.get_value(f"{edit_comp._COMP_PATH}.Default")
    check.equal(def_ws, ren_workspace_settings)
def test_guess_coords_skycord_hexa():
    """Sexagesimal RA/DEC input should yield a SkyCoord at (15 deg, 0 deg)."""
    ra = "1:00:00"
    dec = "00:00:00"
    sk = guess_coordinates(ra, dec, skycoord=True)
    check.is_instance(sk, SkyCoord)
    # FIX: the original asserted `sk.ra.degree - 15 < 1e-8`, which is
    # trivially true for ANY value below ~15 deg (the difference can be a
    # large negative number). abs() makes this an actual closeness check.
    check.is_true(abs(sk.ra.degree - 15) < 1e-8)
    check.is_true(abs(sk.dec.degree - 0) < 1e-8)
def test_silent_option():
    """Test operation of 'silent' option."""
    warnings.filterwarnings(action="ignore", category=UserWarning)
    init(query_provider="LocalData", providers=[])
    test_nb = TstNBSummary()
    # Default run should produce output.
    output = _capture_nb_run_output(test_nb)
    check.is_true(output)
    # Silent option to run
    output = _capture_nb_run_output(test_nb, silent=True)
    check.is_false(output)
    check.is_true(get_opt("silent"))
    # Silent option to init
    test_nb = TstNBSummary(silent=True)
    check.is_true(test_nb.silent)
    output = _capture_nb_run_output(test_nb)
    check.is_false(output)
    # But overridable on run
    output = _capture_nb_run_output(test_nb, silent=False)
    check.is_true(output)
    check.is_false(get_opt("silent"))
    # Silent global option
    set_opt("silent", True)
    test_nb = TstNBSummary()
    output = _capture_nb_run_output(test_nb)
    check.is_false(output)
    # But overridable on run
    output = _capture_nb_run_output(test_nb, silent=False)
    check.is_true(output)
def test_setting_uncertainty_with_array():
    """Assigning a plain ndarray as uncertainty should store it with adu unit."""
    frame = create_framedata()
    frame.uncertainty = None
    expected_uncert = np.sqrt(np.abs(frame.data))
    frame.uncertainty = expected_uncert.copy()
    np.testing.assert_array_equal(frame.uncertainty, expected_uncert)
    check.is_true(frame.uncertainty.unit is u.adu)
def test_user_config(settings, mp_settings):
    """Test user config.

    Stubs the settings module to return the test UserDefaults, then checks
    every default provider is loaded with the expected environment.
    """
    settings.get = Mock()
    settings.get.return_value = mp_settings.get("UserDefaults")
    prov_dict = user_config.load_user_defaults()
    check.is_in("qry_asi", prov_dict)
    check.is_instance(prov_dict["qry_asi"], QueryProvider)
    check.equal(prov_dict["qry_asi"].environment, "AzureSentinel")
    check.is_in("qry_soc", prov_dict)
    check.is_instance(prov_dict["qry_soc"], QueryProvider)
    # FIX: was re-checking qry_asi's environment (copy-paste);
    # this assertion is clearly meant to validate qry_soc.
    check.equal(prov_dict["qry_soc"].environment, "AzureSentinel")
    check.is_in("qry_splunk", prov_dict)
    check.is_instance(prov_dict["qry_splunk"], QueryProvider)
    check.equal(prov_dict["qry_splunk"].environment, "Splunk")
    check.is_in("qry_local", prov_dict)
    check.is_instance(prov_dict["qry_local"], QueryProvider)
    check.is_true(prov_dict["qry_local"].connected)
    check.equal(prov_dict["qry_local"].environment, "LocalData")
    # Non-query providers should also be present.
    check.is_in("ti_lookup", prov_dict)
    check.is_in("geoip", prov_dict)
    check.is_in("az_data", prov_dict)
    check.is_in("azs_api", prov_dict)
    check.is_true(hasattr(msticpy, "current_providers"))
def test_interpret_results_from_start():
    """A high score at the very start should clip from time zero."""
    job = Job(example_parameters1)
    samples = [(1.0, 0.6), (10.0, 0.2), (20.0, 0.1), (30.0, 0.08)]
    stamps = job.interpret_results(samples, cutoff=0.5)
    check.is_true(stampListsAreEqual(stamps, [(0.0, 5.5)]))
def test_user_config(mp_settings):
    """Test user config.

    Loads the test UserDefaults into a custom msticpy config context, then
    checks every default provider is loaded with the expected environment.
    """
    mpcfg_path = os.environ.get("MSTICPYCONFIG")
    with custom_mp_config(mp_path=mpcfg_path):
        settings["UserDefaults"] = mp_settings.get("UserDefaults")
        prov_dict = user_config.load_user_defaults()
        check.is_in("qry_asi", prov_dict)
        check.is_instance(prov_dict["qry_asi"], QueryProvider)
        check.equal(prov_dict["qry_asi"].environment, "AzureSentinel")
        check.is_in("qry_soc", prov_dict)
        check.is_instance(prov_dict["qry_soc"], QueryProvider)
        # FIX: was re-checking qry_asi's environment (copy-paste);
        # this assertion is clearly meant to validate qry_soc.
        check.equal(prov_dict["qry_soc"].environment, "AzureSentinel")
        check.is_in("qry_splunk", prov_dict)
        check.is_instance(prov_dict["qry_splunk"], QueryProvider)
        check.equal(prov_dict["qry_splunk"].environment, "Splunk")
        check.is_in("qry_local", prov_dict)
        check.is_instance(prov_dict["qry_local"], QueryProvider)
        check.is_true(prov_dict["qry_local"].connected)
        check.equal(prov_dict["qry_local"].environment, "LocalData")
        # Non-query providers should also be present.
        check.is_in("ti_lookup", prov_dict)
        check.is_in("geoip", prov_dict)
        check.is_in("az_data", prov_dict)
        check.is_in("azs_api", prov_dict)
        check.is_true(hasattr(msticpy, "current_providers"))
def test_simple_flat(inplace):
    """Flat correction divides the frame by the flat and flags the header."""
    # Expected: uniform 3 everywhere, except where the flat is 0.5 (3/0.5).
    expect = np.ones((20, 20))*3
    expect[0:5, 0:5] = 3/0.5
    # Checking flat division:
    frame1 = FrameData(np.ones((20, 20))*3, unit=u.adu)
    master_flat_dimless = FrameData(np.ones((20, 20)), unit=None)
    master_flat_dimless.data[0:5, 0:5] = 0.5
    res1 = flat_correct(frame1, master_flat_dimless, inplace=inplace)
    check.is_true(isinstance(res1, FrameData))
    npt.assert_array_equal(res1.data, expect)
    check.equal(res1.header['hierarch astropop flat_corrected'], True)
    # # Checking flat-corrected frame unit:
    # check.equal(res1.unit, u.Unit('adu'))
    # Check inplace statement:
    if inplace:
        # inplace=True must reuse the input frame's data buffer.
        check.is_true(res1.data is frame1.data)
    else:
        check.is_false(res1.data is frame1.data)
def test_interpret_results_zero_cutoff():
    """A cutoff of zero should keep the entire runtime as one clip."""
    job = Job(example_parameters1)
    job.settings["runtime"] = 40.0
    samples = [(1.0, 0.2), (10.0, 0.2), (20.0, 0.1), (30.0, 0.8)]
    outcome = job.interpret_results(samples, cutoff=0.0)
    check.is_true(stampListsAreEqual(outcome, [(0.0, 40.0)]))
def test_interpret_results_spanning_clip():
    """Scores crossing the cutoff should produce a single padded clip."""
    job = Job(example_parameters1)
    samples = [(0.0, 0.2), (10.0, 0.6), (20.0, 0.5), (30.0, 0.01)]
    clips = job.interpret_results(samples, cutoff=0.5)
    check.is_true(stampListsAreEqual(clips, [(5.0, 25.0)]))
def test_interpret_results_multiple_seperate_clips():
    """Two disjoint above-cutoff spans should yield two separate clips."""
    job = Job(example_parameters1)
    samples = [(0.0, 0.2), (10.0, 0.6), (20.0, 0.5), (30.0, 0.1),
               (40.0, 0.7), (50.0, 0.8), (60.0, 0.01)]
    clips = job.interpret_results(samples, cutoff=0.5)
    check.is_true(stampListsAreEqual(clips, [(5.0, 25.0), (35.0, 55.0)]))
def test_enable_disable_memmap(tmpdir):
    """Memmap toggling should create/remove the backing file consistently."""
    f = os.path.join(tmpdir, 'npn_empty.npy')
    arr = [[0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5]]
    # Created with memmap=False: nothing on disk yet.
    a = MemMapArray(arr, filename=f, dtype=None, memmap=False)
    check.is_false(a.memmap)
    check.is_false(os.path.exists(f))
    # Enabling memmap materializes the file and swaps storage to np.memmap.
    a.enable_memmap()
    check.is_true(a.memmap)
    check.is_true(os.path.exists(f))
    check.is_instance(a._contained, np.memmap)
    # First keep the file
    a.disable_memmap(remove=False)
    check.is_false(a.memmap)
    check.is_true(os.path.exists(f))
    check.is_not_instance(a._contained, np.memmap)
    # Re-enabling should work against the kept file.
    a.enable_memmap()
    check.is_true(a.memmap)
    check.is_true(os.path.exists(f))
    check.is_instance(a._contained, np.memmap)
    # Remove the file
    a.disable_memmap(remove=True)
    check.is_false(a.memmap)
    check.is_false(os.path.exists(f))
    check.is_not_instance(a._contained, np.memmap)
    with pytest.raises(ValueError):
        # raises error if name is locked
        a.enable_memmap('not_the_same_name.npy')
def test_add_fuel_card_normal(api, db, case_data):
    """Normal add of a fuel card (正常添加加油卡)."""
    url = '/gasStation/process'
    data_source_id = case_data.get('data_source_id')
    card_number = case_data.get('card_number')
    # Environment check: skip if the card already exists.
    if db.check_card(card_number):
        pytest.skip(f'卡号: {card_number} 已存在')
    json_data = {
        "dataSourceId": data_source_id,
        "methodId": "00A",  # presumably the "add card" method id -- TODO confirm
        "CardInfo": {
            "cardNumber": card_number
        }
    }
    res_dict = api.post(url, json=json_data).json()
    # Response assertions
    ck.equal(200, res_dict.get("code"))
    ck.equal("添加卡成功", res_dict.get("msg"))
    # FIX: a successful add must report success=True. The original asserted
    # is_false, contradicting the success message checked just above and the
    # sibling recharge/consumption tests which assert is_true.
    ck.is_true(res_dict.get('success'))
    # Database assertion: the card now exists.
    ck.is_true(db.check_card(card_number))
    # Environment cleanup
    db.del_card(card_number)
def test_with_handlers(self, mocker):
    """should reset the environment and handlers with true handlers."""
    # Stub env reset to return a known observation; spy on handler resets.
    mocker.patch('gym.Env.reset', lambda self: 'obs')
    mocker.patch('learnrl.playground.RewardHandler.reset')
    reward_handler = RewardHandler()
    mocker.patch('learnrl.playground.DoneHandler.reset')
    done_handler = DoneHandler()
    observation, step, done, previous = self.playground._reset(
        reward_handler, done_handler)
    check.equal(observation, 'obs')
    check.equal(step, 0)
    check.equal(done, False)
    # One empty "previous experience" record is expected per agent.
    expected_previous = [{
        'observation': None,
        'action': None,
        'reward': None,
        'done': None,
        'info': None
    } for _ in range(len(self.agents))]
    check.equal(previous, expected_previous)
    # Both handlers must have been reset as part of _reset.
    check.is_true(reward_handler.reset.called)
    check.is_true(done_handler.reset.called)
def test_mordor_load(mdr_driver: MordorDriver):
    """Check basic load of driver."""
    check.is_true(mdr_driver.loaded)
    check.is_true(mdr_driver.connected)
    check.is_false(mdr_driver.use_query_paths)
    check.is_true(mdr_driver.has_driver_queries)
    # MITRE reference data should load as DataFrames with known entries.
    check.is_instance(mdr_driver.mitre_techniques, pd.DataFrame)
    check.is_instance(mdr_driver.mitre_tactics, pd.DataFrame)
    check.is_in("T1078", mdr_driver.mitre_techniques.index)
    check.is_in("TA0001", mdr_driver.mitre_tactics.index)
    # Expect a reasonably large Mordor catalog.
    check.is_true(len(mdr_driver.mordor_data) > 50)
    # Spot-check the metadata fields of the first catalog entry.
    _, first_item = next(iter(mdr_driver.mordor_data.items()))
    check.is_instance(first_item.title, str)
    check.is_instance(first_item.id, str)
    check.is_instance(first_item.author, str)
    check.is_instance(first_item.creation_date, datetime)
    check.is_instance(first_item.files, list)
    check.is_true(len(first_item.files) > 0)
    check.is_instance(first_item.attack_mappings, list)
    # Every attack mapping must carry both technique and tactics keys.
    for attack in first_item.attack_mappings:
        check.is_in("technique", attack)
        check.is_in("tactics", attack)
def test_kql_connect_no_cs(get_ipython):
    """Connecting without a connection string should raise a Kql error."""
    get_ipython.return_value = _MockIPython()
    driver = KqlDriver()
    check.is_true(driver.loaded)
    with pytest.raises(MsticpyKqlConnectionError) as err_info:
        driver.connect()
    check.is_in("no connection string", err_info.value.args)
def test_kql_connect(get_ipython):
    """A valid connection string should leave the driver connected."""
    get_ipython.return_value = _MockIPython()
    driver = KqlDriver()
    check.is_true(driver.loaded)
    driver.connect(connection_str="la://connection")
    check.is_true(driver.connected)
def test_internal_links():
    """Every internal link should carry the page's own domain."""
    page = BasePage(TEST_HTML_PAGE, TEST_HTML_URL)
    links = page.internal_links
    domain_matches = sum(1 for link in links if TEST_HTML_URL in link.absolute_url)
    check.equal(len(links), domain_matches)
    check.is_true(links)
def test_interpret_results_cutoff_morethan_1():
    """A cutoff above every score should produce no clips at all."""
    job = Job(example_parameters1)
    samples = [(0.0, 0.2), (10.0, 0.6), (20.0, 0.5), (30.0, 0.1),
               (40.0, 0.7), (50.0, 0.8), (60.0, 0.01)]
    check.is_true(stampListsAreEqual(job.interpret_results(samples, cutoff=1.1), []))
def test_inventory_add_stacks(self, items):
    """Adding a list of ItemStack should fill the matching content slots."""
    inventory = Inventory(items)
    inventory.add_stacks([ItemStack(items[0], 15), ItemStack(items[2], 33)])
    check.is_true(np.all(inventory.content == [15, 0, 33]))
def test_mp_config_file_view_settings():
    """view_settings should repopulate an emptied text viewer."""
    cfg_ui = MpConfigFile()
    cfg_ui.txt_viewer.value = ""
    cfg_ui.view_settings()
    check.is_true(len(cfg_ui.txt_viewer.value) > 0)
    # Exercise the button handlers as a smoke test.
    cfg_ui.buttons["view"].click()
    cfg_ui.btn_close.click()