def test_import_all():
    """Test import all function."""
    ns_dict = {}
    _imp_module_all(ns_dict, module_name="datetime")
    for imp in ["date", "datetime", "time", "timedelta", "timezone", "tzinfo"]:
        check.is_in(imp, ns_dict)
def card_recharge(setup_driver, username='******', bank_type='wechat_bank', amount='80'):
    response = init.login(username)
    recharge_response = init.recharge(username, bank_type, amount, response['data']['key'])
    # The three card-transfer bank types return different responses, so each is checked separately.
    if bank_type == 'bank':
        check.equal(True, recharge_response['success'])
        check.equal(2, recharge_response['data']['payType'])
        check.equal(float(amount + '.0'), recharge_response['data']['amount'])
    elif bank_type == 'alipay_bank':
        check.equal(True, recharge_response['success'])
        check.is_in(f'{amount}', recharge_response['data']['AttachWord'])
        check.is_in(f'{amount}', recharge_response['data']['Amount'])
        check.equal(2, recharge_response['data']['MoneyInType'])
    elif bank_type == 'wechat_bank':
        check.equal(True, recharge_response['success'])
        check.is_in(f'{amount}', recharge_response['message'])
        check.equal(True, recharge_response['result']['IsDeal'])
        check.equal(10, recharge_response['result']['MoneyInType'])
        check.equal(0, recharge_response['result']['Sort'])

    # The Apollo API is not called directly; drive the UI with Selenium instead,
    # with some parameters hard-coded. (find_element_by_xpath is the Selenium 3
    # API; Selenium 4 replaces it with find_element(By.XPATH, ...).)
    time.sleep(2)
    setup_driver.find_element_by_xpath(merchant_order).click()
    time.sleep(2)
    setup_driver.find_element_by_xpath(Nike).click()
    setup_driver.find_element_by_xpath(order_reason_after_click_Nike).send_keys('qa')
    setup_driver.find_element_by_xpath(ok_after_write_order_reason).click()
    information = setup_driver.find_element_by_xpath(
        "//*[@class='ui-widget-content slick-row even active' and @style='top:0px']"
    )
    logging.debug(information.text)
    check.is_in(username, information.text)
    check.is_in(str(amount), information.text)
def test_pivot_funcs_df_merge(_create_pivot, join_type, test_case):
    """Test calling function with DF input attributes."""
    func = getattr(getattr(test_case.entity, test_case.provider), test_case.pivot_func)

    # Test DF input
    val = enumerate(test_case.value.keys())
    in_df = pd.DataFrame(val, columns=["idx", test_case.src_df_col])
    in_df["extra_col1"] = "test1"
    in_df["extra_col2"] = "test2"
    result_no_merge_df = func(data=in_df, src_column=test_case.src_df_col)
    result_df = func(data=in_df, src_column=test_case.src_df_col, join=join_type)

    in_cols = in_df.shape[1]
    no_merge_cols = result_no_merge_df.shape[1]
    merge_cols = result_df.shape[1]
    # merged DF should have result + input cols - join key col
    check.greater_equal(no_merge_cols + in_cols, merge_cols)

    if join_type in ("left", "inner"):
        # inner and left joins should have same or greater length as input
        check.greater_equal(result_df.shape[0], in_df.shape[0])
        # all the keys from the input should be in the merged output
        for key in in_df[test_case.src_df_col]:
            check.is_in(key, result_df[test_case.key_col].values)
    if join_type == "right":
        # We don't know how many results we get back from right join
        # (although should not be zero)
        check.greater(len(result_df), 0)
        # but all of its key values should be present in input
        for key in result_df[test_case.key_col].values:
            check.is_in(key, in_df[test_case.src_df_col].values)
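# Illustrative sketch (not part of the test suite): the column-count bound checked
# above, shown with a plain pandas merge. The frame and column names here are
# hypothetical and only demonstrate why no_merge_cols + in_cols >= merge_cols holds
# when the join key column is shared between the input and the result.
import pandas as pd

_left = pd.DataFrame({"key": ["a", "b"], "extra_col1": [1, 2]})
_right = pd.DataFrame({"key": ["a", "b"], "result_val": ["x", "y"]})
_merged = _left.merge(_right, on="key", how="left")
# 2 + 2 columns minus the shared join-key column == 3 columns in the merge result.
assert _merged.shape[1] == _left.shape[1] + _right.shape[1] - 1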
def test_hash_acct(test_input, expected):
    """Test hash Account function."""
    result = data_obfus.hash_account(test_input)
    check.equal(test_input != result, expected)
    if test_input != result:
        check.is_in("account-#", result)
def test_display_function(ti_results):
    """Test getting and executing the display function."""
    disp_func = ti_details_display(ti_results)
    for _, row in ti_results.iterrows():
        html = disp_func((row.Ioc, [row.Provider]))
        check.is_in("Reference:", html.data)
        check.is_in(f"Provider: {row.Provider}", html.data)
def test_notebooklet_create(monkeypatch):
    """Test method."""
    # Should run because required providers are loaded
    monkeypatch.setattr(data_providers, "GeoLiteLookup", GeoIPLiteMock)
    data_providers.init(
        query_provider="LocalData", providers=["tilookup", "geolitelookup"]
    )
    for _, nblt in nblts.iter_classes():
        new_nblt = nblt()
        check.is_instance(new_nblt, Notebooklet)
        check.is_none(new_nblt.result)

    # Should raise an error because of the unrecognized provider
    data_providers.init(query_provider="LocalData")
    with pytest.raises(MsticnbDataProviderError) as err:
        for _, nblt in nblts.iter_classes():
            curr_provs = nblt.metadata.req_providers
            bad_provs = [*curr_provs, "bad_provider"]
            try:
                nblt.metadata.req_providers = bad_provs
                new_nblt = nblt()
                check.is_instance(new_nblt, Notebooklet)
                check.is_none(new_nblt.result)
            finally:
                nblt.metadata.req_providers = curr_provs
    check.is_in("bad_provider", err.value.args[0])

    test_nb = TstNBSummary()
    check.is_not_none(test_nb.get_provider("LocalData"))
    with pytest.raises(MsticnbDataProviderError):
        test_nb.get_provider("otherprovider")
def test_notebooklet_options(monkeypatch):
    """Test option logic for notebooklet."""
    monkeypatch.setattr(data_providers, "GeoLiteLookup", GeoIPLiteMock)
    data_providers.init(
        query_provider="LocalData", providers=["tilookup", "geolitelookup"]
    )
    nb_test = TstNBSummary()

    # default options
    nb_res = nb_test.run()
    check.is_not_none(nb_res.default_property)
    check.is_none(nb_res.optional_property)

    # add optional option
    nb_res = nb_test.run(options=["+optional_opt"])
    check.is_not_none(nb_res.default_property)
    check.is_not_none(nb_res.optional_property)

    # remove default option
    nb_res = nb_test.run(options=["-default_opt"])
    check.is_none(nb_res.default_property)
    check.is_none(nb_res.optional_property)

    # specific options
    nb_res = nb_test.run(options=["heartbest", "azure_net"])
    check.is_none(nb_res.default_property)
    check.is_none(nb_res.optional_property)

    # invalid option
    f_stream = StringIO()
    with redirect_stdout(f_stream):
        nb_test.run(options=["invalid_opt"])
    output = str(f_stream.getvalue())
    check.is_in("Invalid options ['invalid_opt']", output)
def test_add_fuel_card_method_id_blank(api, data):
    card_number = data['FREE_CARD_NUMBERS'][0]
    payload = {
        "dataSourceId": "bHRz",
        "methodId": "",
        "CardInfo": {"cardNumber": card_number},
    }
    res = api.post(json=payload).json()
    check.equal(301, res['code'])
    # expected error message: "business ID must not be empty"
    check.is_in('业务ID不能为空', res['msg'])
def test_splunk_saved_searches(splunk_client):
    """Check saved searches."""
    splunk_client.connect = cli_connect
    sp_driver = SplunkDriver()

    # trying to get these before connecting should throw
    with pytest.raises(MsticpyNotConnectedError) as mp_ex:
        sp_driver._get_saved_searches()
    check.is_false(sp_driver.connected)
    check.is_none(sp_driver._saved_searches)
    check.is_in("not connected to Splunk.", mp_ex.value.args)

    # [SuppressMessage("Microsoft.Security", "CS002:SecretInNextLine", Justification="Test code")]
    sp_driver.connect(host="localhost", username="******", password=_FAKE_STRING)  # nosec
    check.is_true(sp_driver.connected)

    check.is_instance(sp_driver._saved_searches, pd.DataFrame)
    for _, search in sp_driver._saved_searches.iterrows():
        check.is_true(search["name"].startswith("query"))
        check.equal(search["query"], "get stuff from somewhere")

    queries, name = sp_driver.service_queries
    check.equal(name, "SavedSearches")
    check.is_instance(queries, dict)
    for name, query in queries.items():
        check.is_true(name.startswith("query"))
        check.equal(query, "search get stuff from somewhere")
def test_splunk_connect_errors(splunk_client):
    """Check connect failure errors."""
    splunk_client.connect = cli_connect
    sp_driver = SplunkDriver()
    check.is_true(sp_driver.loaded)

    print("connected", sp_driver.connected)
    with pytest.raises(MsticpyConnectionError) as mp_ex:
        # [SuppressMessage("Microsoft.Security", "CS002:SecretInNextLine", Justification="Test code")]
        sp_driver.connect(host="AuthError", username="******", password=_FAKE_STRING)  # nosec
        print("connected", sp_driver.connected)
    check.is_false(sp_driver.connected)
    check.is_in("Splunk connection", mp_ex.value.args)

    sp_driver = SplunkDriver()
    print("connected", sp_driver.connected)
    with pytest.raises(MsticpyConnectionError) as mp_ex:
        # [SuppressMessage("Microsoft.Security", "CS002:SecretInNextLine", Justification="Test code")]
        sp_driver.connect(host="HTTPError", username="******", password=_FAKE_STRING)  # nosec
        print("connected", sp_driver.connected)
    check.is_false(sp_driver.connected)
    check.is_in("Splunk connection", mp_ex.value.args)
def test_splunk_saved_searches(splunk_client):
    """Check saved searches."""
    splunk_client.connect = cli_connect
    sp_driver = SplunkDriver()

    # trying to get these before connecting should throw
    with pytest.raises(MsticpyNotConnectedError) as mp_ex:
        sp_driver._get_saved_searches()
    check.is_false(sp_driver.connected)
    check.is_none(sp_driver._saved_searches)
    check.is_in("not connected to Splunk.", mp_ex.value.args)

    sp_driver.connect(host="localhost", username="******", password="******")  # nosec
    check.is_true(sp_driver.connected)

    check.is_instance(sp_driver._saved_searches, pd.DataFrame)
    for _, search in sp_driver._saved_searches.iterrows():
        check.is_true(search["name"].startswith("query"))
        check.equal(search["query"], "get stuff from somewhere")

    queries, name = sp_driver.service_queries
    check.equal(name, "SavedSearches")
    check.is_instance(queries, dict)
    for name, query in queries.items():
        check.is_true(name.startswith("query"))
        check.equal(query, "search get stuff from somewhere")
def test_ip_summary_notebooklet(monkeypatch):
    """Test basic run of notebooklet."""
    test_data = str(Path(TEST_DATA_PATH).absolute())
    monkeypatch.setattr(data_providers, "GeoLiteLookup", GeoIPLiteMock)
    monkeypatch.setattr(data_providers, "TILookup", TILookupMock)
    data_providers.init(
        query_provider="LocalData",
        LocalData_data_paths=[test_data],
        LocalData_query_paths=[test_data],
        providers=["tilookup", "geolitelookup"],
    )

    test_nb = nblts.azsent.network.IpAddressSummary()
    tspan = TimeSpan(period="1D")

    result = test_nb.run(value="11.1.2.3", timespan=tspan)
    check.is_not_none(result.ip_entity)
    check.equal(result.ip_type, "Public")
    check.equal(result.ip_origin, "External")
    check.is_in("CountryCode", result.geoip)
    check.is_not_none(result.location)
    check.is_not_none(result.notebooklet)
    check.is_not_none(result.whois)
    check.is_instance(result.related_alerts, pd.DataFrame)
    check.is_not_none(test_nb.browse_alerts())
    check.is_instance(result.passive_dns, pd.DataFrame)
    check.is_instance(result.ti_results, pd.DataFrame)
def test_kql_connect_no_cs(get_ipython):
    """Check loaded true."""
    get_ipython.return_value = _MockIPython()
    kql_driver = KqlDriver()
    check.is_true(kql_driver.loaded)

    with pytest.raises(MsticpyKqlConnectionError) as mp_ex:
        kql_driver.connect()
    check.is_in("no connection string", mp_ex.value.args)
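# Illustrative sketch (standalone, hypothetical names): the pytest.raises pattern
# used in the connection tests above. The caught exception is inspected through
# the ExceptionInfo object only after the with-block exits, which is why the
# check.is_in(..., mp_ex.value.args) calls sit outside the block.
import pytest


def _raise_value_error():
    raise ValueError("no connection string supplied")


def test_raises_pattern_sketch():
    with pytest.raises(ValueError) as exc_info:
        _raise_value_error()
    # exc_info.value is the caught exception instance
    assert "no connection string supplied" in exc_info.value.args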
def for_failed(username='******', bank_type='WeiXinScan', amount='19'):
    response = init.login(username)
    failed_message = init.recharge(username, bank_type, amount, response['data']['key'])
    check.equal(False, failed_message['IsSuccess'], f'Real response: {failed_message}')
    # expected message: "No channel is currently available; please choose another deposit method"
    check.is_in(
        '目前没有渠道支援, 请选择其它充值方式',
        failed_message['ResponseMessage'],
        f'Real response: {failed_message}',
    )
    check.equal('', failed_message['Url'], f'Real response: {failed_message}')
    check.equal(True, failed_message['DoVerify'], f'Real response: {failed_message}')
def test_entity_attr_funcs(_create_pivot_ns, test_case):
    """Test calling function with entity attributes."""
    # Test entity
    ent_cls = getattr(entities, test_case.entity)
    entity = ent_cls(test_case.args)
    _fake_provider_connected(_create_pivot_ns.get_provider("AzureSentinel"))

    func = getattr(getattr(entity, test_case.provider), test_case.pivot_func)
    query = func(entity, print_query=True)
    check.is_in(test_case.expected, query)
def test_kql_connect_authn_exceptions(get_ipython):
    """Check loaded true."""
    get_ipython.return_value = _MockIPython()
    kql_driver = KqlDriver()

    with pytest.raises(MsticpyKqlConnectionError) as mp_ex:
        kql_driver.connect(connection_str="la://connection+AuthenticationError")
    check.is_in("authentication failed", mp_ex.value.args)
    check.is_false(kql_driver.connected)
def test_kql_query_not_connected(get_ipython):
    """Check loaded true."""
    get_ipython.return_value = _MockIPython()
    kql_driver = KqlDriver()

    with pytest.raises(MsticpyNotConnectedError) as mp_ex:
        kql_driver.query("test")
    check.is_in("not connected to a workspace.", mp_ex.value.args)
    check.is_false(kql_driver.connected)
def test_entity_attr_funcs_entity(_create_pivot, test_case):
    """Test calling function with entity attributes."""
    # Test entity
    ent_cls = getattr(entities, test_case.entity)
    entity = ent_cls(**(test_case.args))
    func = getattr(getattr(entity, test_case.provider), test_case.pivot_func)

    # Test entity input
    result_df = func(entity)
    check.is_in(test_case.exp_val, result_df.iloc[0][test_case.exp_col])
def test_kql_query_failed(get_ipython):
    """Check loaded true."""
    get_ipython.return_value = _MockIPython()
    kql_driver = KqlDriver()
    kql_driver.connect(connection_str="la://connection")

    output = io.StringIO()
    with redirect_stdout(output):
        kql_driver.query("test query_failed")
    check.is_in("Warning - query did", output.getvalue())
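# Illustrative sketch (standalone): the stdout-capture pattern used above.
# contextlib.redirect_stdout routes anything printed inside the block into the
# StringIO buffer, so a test can assert on warning text that a driver prints
# rather than raises. The function and message below are hypothetical.
import io
from contextlib import redirect_stdout


def test_redirect_stdout_sketch():
    output = io.StringIO()
    with redirect_stdout(output):
        print("Warning - query did not complete")
    assert "Warning - query did" in output.getvalue()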
def testLength(self):
    x = len('nameTEST')
    a = 1
    b = 100
    c = range(2, 101)
    d = 102
    check.greater(x, a)
    check.less_equal(x, b)
    check.is_in(x, c, "Length ok")
    check.is_not_in(d, c, "Length not ok")
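# Note on the check.* calls used throughout these tests: they come from the
# pytest-check plugin (typically imported as `import pytest_check as check`),
# whose soft assertions record a failure and keep executing instead of aborting
# at the first failed condition. A minimal, self-contained sketch:
import pytest_check as check


def test_soft_assertions_sketch():
    name = "nameTEST"
    # Both checks are evaluated even if the first one fails.
    check.greater(len(name), 1)
    check.is_in("TEST", name)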
def test_entity_attr_funcs_df(_create_pivot, test_case):
    """Test calling function with DF input attributes."""
    ent_cls = getattr(entities, test_case.entity)
    entity = ent_cls(**(test_case.args))
    func = getattr(getattr(entity, test_case.provider), test_case.pivot_func)

    # Test DF input
    val = getattr(entity, test_case.attrib)
    in_df = pd.DataFrame([val], columns=[test_case.src_col])
    result_df = func(data=in_df, src_column=test_case.src_col)
    check.is_in(test_case.exp_val, result_df.iloc[0][test_case.exp_col])
def test_entity_attr_funcs_itbl(_create_pivot, test_case):
    """Test calling function with iterable input."""
    ent_cls = getattr(entities, test_case.entity)
    entity = ent_cls(**(test_case.args))
    func = getattr(getattr(entity, test_case.provider), test_case.pivot_func)

    # Test iterable input
    val = [getattr(entity, test_case.attrib)]
    params = {test_case.func_param: val}
    result_df = func(**params)
    check.is_in(test_case.exp_val, result_df.iloc[0][test_case.exp_col])
def cargo_distritos(context, docente_test5):
    context["page"] = Distrito(context["driver"])
    context["page"].editar_area(docente_test5.inscripciones[0].area)
    context["page"].seleccionar_distrito(docente_test5.inscripciones[0].distrito)
    context["page"].guardar()
    estado_page = context["page"].se_guardo_correctamente()
    # expected page status: "The District Configuration selection was saved successfully"
    check.is_in(
        "La Selección de Configuración de Distritos se guardó correctamente",
        estado_page,
        f'got {estado_page}',
    )
def card_for_failed(username='******', bank_type='alipay_bank', amount='69'):
    response = init.login(username)
    recharge_response = init.recharge(username, bank_type, amount, response['data']['key'])
    # The three card-transfer bank types return different responses, so each is checked separately.
    if bank_type == 'bank':
        check.equal(False, recharge_response['success'])
        # expected message: "No channel is currently available; please choose another deposit method (3)"
        check.equal('目前没有渠道支援, 请选择其它充值方式(3)', recharge_response['message'])
    elif bank_type in ('alipay_bank', 'wechat_bank'):
        check.equal(False, recharge_response['success'])
        # expected message fragment: "transaction amount: ¥70.00"
        check.is_in('交易金额: ¥70.00', recharge_response['message'])
def test_kql_query_partial(get_ipython):
    """Check loaded true."""
    get_ipython.return_value = _MockIPython()
    kql_driver = KqlDriver()
    kql_driver.connect(connection_str="la://connection")

    output = io.StringIO()
    with redirect_stdout(output):
        result_df = kql_driver.query("test query_partial")
    check.is_instance(result_df, pd.DataFrame)
    check.is_in("Warning - query returned partial", output.getvalue())
def test_kql_query_no_table(get_ipython):
    """Check loaded true."""
    get_ipython.return_value = _MockIPython()
    kql_driver = KqlDriver()
    kql_driver.connect(connection_str="la://connection")

    with pytest.raises(MsticpyNoDataSourceError) as mp_ex:
        query_source = {"args.table": "table3"}
        kql_driver.query("test query", query_source=query_source)
    check.is_in("table3 not found.", mp_ex.value.args)
def test_splunk_connect_no_params(splunk_client):
    """Check failure with no args."""
    splunk_client.connect = cli_connect
    sp_driver = SplunkDriver()
    check.is_true(sp_driver.loaded)

    with pytest.raises(MsticpyUserConfigError) as mp_ex:
        sp_driver.connect()
    check.is_false(sp_driver.connected)
    check.is_in("no Splunk connection parameters", mp_ex.value.args)
def test_hardware_module_get_projects(client):
    rv_login = login(client, 'test', '123456')
    data_login = rv_login.get_json(force=True)
    # successful login
    check.equal(data_login['message'], 'success')

    # get project info on the hardware page
    rv_proj = get_projects(client)
    data_proj = rv_proj.get_json(force=True)
    check.equal(data_proj['message'], 'success')
    # every returned project name should contain 'testProj'
    for proj in data_proj['projects']:
        check.is_in('testProj', proj['name'])
def test_add_fuel_card_wrong_request_format(api, data):
    card_number = data['FREE_CARD_NUMBERS'][0]
    payload = {
        "dataSourceId": "bHRz",
        "methodId": "00A",
        "CardInfo": {"cardNumber": card_number},
    }
    # posted via data= rather than json= so the API receives the wrong request format
    res = api.post(data=payload).json()
    check.equal(301, res['code'])
    # expected error message: "parameter type error"
    check.is_in('参数类型错误', res['msg'])
def test_extract_header_nosip():
    header = fits.Header.fromstring(_base_header + _wcs_no_sip, sep='\n')
    h, wcs = extract_header_wcs(header)
    check.is_instance(wcs, WCS)
    check.equal(wcs.wcs.ctype[0], 'RA---TAN')
    check.equal(wcs.wcs.ctype[1], 'DEC--TAN')
    check.is_instance(h, fits.Header)
    for i in _comon_wcs_keys:
        check.is_not_in(f'{i}1', h.keys())
        check.is_not_in(f'{i}2', h.keys())
    check.is_in('DATE-OBS', h.keys())
    check.is_false(h is header)
    check.not_equal(h, header)