def test_print_methods():
    """Verify nb_print/nb_data_wait/nb_debug output honours the verbose/debug options."""
    # verbose on: both the status message and the data-wait message appear
    set_opt("verbose", True)
    buf = io.StringIO()
    with redirect_stdout(buf):
        nb_print("status")
        nb_data_wait("table1")
    output = buf.getvalue()
    check.is_in("status", output)
    check.is_in("Getting data from table1", output)

    # verbose off: nothing is printed
    set_opt("verbose", False)
    buf = io.StringIO()
    with redirect_stdout(buf):
        nb_print("status")
    output = buf.getvalue()
    check.is_not_in("status", output)
    check.is_not_in("Getting data from table1", output)

    # debug on: every positional argument is rendered in the output
    set_opt("debug", True)
    buf = io.StringIO()
    with redirect_stdout(buf):
        nb_debug("debug", "debugmssg", "val", 1, "result", True)
    output = buf.getvalue()
    for expected in ("debug", "debugmssg", "val", "1", "result", "True"):
        check.is_in(expected, output)
def test_simple_aperture(r):
    """Photometry of a flat unit image: flux equals the aperture area, error its sqrt."""
    frame = np.ones((10, 10))
    phot = aperture_photometry(frame, [5], [5], r=r, r_ann=None)
    # for data == 1 everywhere, flux is simply the aperture area
    area = np.pi * r**2
    npt.assert_array_equal(phot['x'], [5])
    npt.assert_array_equal(phot['y'], [5])
    npt.assert_array_equal(phot['aperture'], [r])
    npt.assert_almost_equal(phot['flux'], [area])
    # no annulus requested, so no sky column should be present
    check.is_not_in('sky', phot.colnames)
    npt.assert_array_almost_equal(phot['flux_error'], [np.sqrt(area)])
    npt.assert_array_equal(phot['flags'], [0])
def testLenght(self):
    """Check that len('nameTEST') lies within the expected bounds.

    Note: the method name keeps its historical spelling so test
    collection/ordering is unaffected, but the assertion messages
    had the same typo ("Lenght") fixed to "Length".
    """
    length = len('nameTEST')
    lower_bound = 1
    upper_bound = 100
    valid_range = range(2, 101)
    out_of_range = 102
    check.greater(length, lower_bound)
    check.less_equal(length, upper_bound)
    # Fixed typo in the user-visible check messages: "Lenght" -> "Length"
    check.is_in(length, valid_range, "Length ok")
    check.is_not_in(out_of_range, valid_range, "Length not ok")
def test_extract_header_nosip():
    """extract_header_wcs splits a non-SIP TAN WCS from the rest of the header."""
    header = fits.Header.fromstring(_base_header + _wcs_no_sip, sep='\n')
    remaining, wcs = extract_header_wcs(header)
    check.is_instance(wcs, WCS)
    check.equal(wcs.wcs.ctype[0], 'RA---TAN')
    check.equal(wcs.wcs.ctype[1], 'DEC--TAN')
    check.is_instance(remaining, fits.Header)
    # every common WCS keyword must be stripped for both axes
    for key in _comon_wcs_keys:
        check.is_not_in(f'{key}1', remaining.keys())
        check.is_not_in(f'{key}2', remaining.keys())
    # non-WCS cards survive the extraction
    check.is_in('DATE-OBS', remaining.keys())
    # the returned header is a modified copy, not the input object
    check.is_false(remaining is header)
    check.not_equal(remaining, header)
def test_new_init_data_providers(monkeypatch):
    """Test creating new provider with new provider list."""
    monkeypatch.setattr(data_providers, "GeoLiteLookup", GeoIPLiteMock)

    # re-initializing with identical arguments reuses the same provider set
    data_providers.init(query_provider="LocalData", providers=[])
    first_providers = data_providers.DataProviders.current()
    data_providers.init(query_provider="LocalData", providers=[])
    second_providers = data_providers.DataProviders.current()
    check.equal(second_providers, first_providers)

    # specify provider
    first_providers = data_providers.DataProviders(query_provider="LocalData")
    data_providers.init(query_provider="LocalData", providers=["tilookup"])
    msticnb = sys.modules["msticnb"]
    second_providers = data_providers.DataProviders.current()
    pkg_providers = getattr(msticnb, "data_providers")
    check.not_equal(second_providers, first_providers)
    # only the requested providers are loaded, both on the current
    # DataProviders instance and on the package-level attribute
    check.is_in("LocalData", second_providers.providers)
    check.is_in("tilookup", second_providers.providers)
    check.is_not_in("geolitelookup", second_providers.providers)
    check.is_not_in("ipstacklookup", second_providers.providers)
    check.is_in("LocalData", pkg_providers)
    check.is_in("tilookup", pkg_providers)
    check.is_not_in("geolitelookup", pkg_providers)
    check.is_not_in("ipstacklookup", pkg_providers)
    check.is_instance(second_providers.providers["tilookup"], TILookup)
def test_classify_img():
    """Classify sample images; the expected label must appear and a wrong one must not.

    Fix: the images opened with ``Image.open`` were never closed, leaking a
    file handle per image — they are now opened via a context manager.
    """
    image_dir = '/sampleImage/'
    # Changes to tests:
    # we changed the images classified due to the limitations of our ML model
    # since the model is only trained on 1000 object categories
    # we will create a helper function in the next iteration
    # to test for semantic simularity and get better search results
    image_names = [
        'banana', 'basketball', 'carton', 'cucumber', 'fountain',
        'golden_retriever', 'goldfish', 'passenger_car', 'pop_bottle',
        'seashore', 'space_shuttle', 'sports_car', 'suit', 'tabby', 'volcano'
    ]
    # Changes to tests:
    # wrong_names is a rotation of original image_names
    # as it is unlikely that basketball
    # will be in the classification dict for banana and so on
    wrong_names = image_names[1:] + image_names[:1]
    img_ext = '.jpg'
    # instead of simply using os.getcwd(), we use full_path so that pytest will
    # find the images regardless of where you run pytest
    # (like in test/ as opposed to main dir)
    full_path = os.path.realpath(__file__)
    test_folder = os.path.dirname(full_path)
    w = Worker()
    check.is_none(w.classify_img(None))
    for idx, name in enumerate(image_names):
        # Image.open lazily keeps the file handle open; use a context
        # manager so it is closed deterministically after both checks.
        with Image.open(test_folder + image_dir + name + img_ext) as img:
            # should all be true
            # (that 'banana' is in classification dict for 'banana.jpg' and so on)
            check.is_in(name, w.classify_img(img))
            # now let's try assertions that should definitely be wrong
            # (that 'volcano' is in the classification dict for 'banana.jpg')
            check.is_not_in(wrong_names[idx], w.classify_img(img))
def test_logger_remove_handler():
    """Adding then removing a ListHandler detaches it but keeps captured records."""
    child_log = logger.getChild('testing')
    message = 'Some error happend here.'
    captured = []
    handler = log_to_list(child_log, captured)
    child_log.setLevel('DEBUG')
    child_log.error(message)
    check.is_instance(handler, ListHandler)
    check.is_in(handler, child_log.handlers)
    child_log.removeHandler(handler)
    check.is_not_in(handler, child_log.handlers)
    # records emitted before removal remain in both lists,
    # and the handler's list is the very list we passed in
    check.equal(captured[0], message)
    check.equal(handler.log_list[0], message)
    check.equal(handler.log_list, captured)
def test_get_related_words():
    """Related-words lookup contains the expected term and not a rotated wrong one."""
    worker = Worker()
    originals = [name.replace(' ', '_') for name in image_names]
    # rotate by one so each index pairs a name with an unrelated one
    rotated = originals[1:] + originals[:1]
    # empty input yields an empty mapping
    check.equal({}, worker.get_related_words(''))
    for idx, term in enumerate(originals):
        related_set = worker.get_related_words(term)
        # should all be true
        # (e.g. 'plantain' is in the related words set for 'banana')
        check.is_in(related[idx], related_set)
        # and the rotated name should definitely NOT be related
        # (e.g. 'lava' is not in the related words set for 'banana')
        check.is_not_in(rotated[idx], related_set)
def test_pd_display(test_df, capsys):
    """Test mp_pivot.display accessor."""
    display_cols = ["Computer", "SubjectUserName", "TargetUserName"]
    display_query = "TargetUserName != 'MSTICAdmin'"

    # full display: filtered rows, selected columns only
    test_df.mp_pivot.display(_nop_df, cols=display_cols, query=display_query)
    output = capsys.readouterr().out
    check.is_in("Computer SubjectUserName TargetUserName", output)
    check.is_not_in("MSTICAdmin", output)
    check.equal(Counter(output.split())["MSTICAlertsWin1"], 12)
    # one header line plus one line per row
    check.equal(len(output.split("\n")), len(test_df) + 1)

    # head=5 limits the number of displayed rows
    test_df.mp_pivot.display(
        _nop_df, cols=display_cols, query=display_query, head=5
    )
    output = capsys.readouterr().out
    check.is_in("Computer SubjectUserName TargetUserName", output)
    check.equal(Counter(output.split())["MSTICAlertsWin1"], 5)
def test_add_sub_data_providers(monkeypatch):
    """Test intializing adding and subtracting providers."""
    monkeypatch.setattr(data_providers, "GeoLiteLookup", GeoIPLiteMock)
    baseline = data_providers.DataProviders(query_provider="LocalData")
    data_providers.init(query_provider="LocalData", providers=["tilookup"])
    msticnb = sys.modules["msticnb"]
    with_tilookup = data_providers.DataProviders.current()

    # Add and remove a provider from defaults via +/- prefixes
    data_providers.init(
        query_provider="LocalData", providers=["+ipstacklookup", "-geolitelookup"]
    )
    adjusted = data_providers.DataProviders.current()
    pkg_providers = getattr(msticnb, "data_providers")
    check.not_equal(adjusted, baseline)
    check.not_equal(adjusted, with_tilookup)
    # '+ipstacklookup' added, '-geolitelookup' removed, defaults kept
    check.is_in("ipstacklookup", adjusted.providers)
    check.is_not_in("geolitelookup", adjusted.providers)
    check.is_in("tilookup", adjusted.providers)
    # the package-level provider attribute reflects the same set
    check.is_in("ipstacklookup", pkg_providers)
    check.is_not_in("geolitelookup", pkg_providers)
    check.is_in("tilookup", pkg_providers)
def test_is_not_in():
    """A value absent from a container passes check.is_not_in."""
    haystack = [1, 2, 3]
    missing_value = 4
    check.is_not_in(missing_value, haystack)
def test_pivot_funcs_df_merge(_create_pivot, join_type, test_case):
    """Test calling function with DF input attributes."""
    # Resolve the pivot function from the entity/provider pair in the test case.
    func = getattr(getattr(test_case.entity, test_case.provider), test_case.pivot_func)
    # Test DF input
    val = test_case.value
    in_df = pd.DataFrame(val, columns=[test_case.src_df_col])
    # params maps the pivot function's parameter name to the source column.
    params = {test_case.func_param: test_case.src_df_col}
    # Extra columns to verify they survive (or are counted in) the merge.
    in_df["extra_col1"] = "test1"
    in_df["extra_col2"] = "test2"
    # Baseline result without any join, for column/row-count comparisons below.
    result_no_merge_df = func(data=in_df, **params)
    if test_case.entity not in (entities.Account, entities.Host):
        # The IP test uses a list param so we cannot do index joins
        # with it
        with pytest.warns(UserWarning):
            result_df = func(data=in_df, **params, join=join_type)
        return
    # should work ok with Account and Host
    result_df = func(data=in_df, **params, join=join_type)
    in_cols = in_df.shape[1]
    no_merge_cols = result_no_merge_df.shape[1]
    merge_cols = result_df.shape[1]
    # merged DF should have result + input cols - join key col
    check.greater_equal(no_merge_cols + in_cols, merge_cols)
    if join_type in ("left", "inner"):
        # inner and left joins should have same or greater length as input
        check.greater_equal(result_df.shape[0], in_df.shape[0])
        # all the keys from the input should be in the merged output
        for row_val in in_df[test_case.src_df_col]:
            check.is_in(row_val, result_df[test_case.src_df_col].values)
    if join_type == "right":
        # We don't know how many results we get back from right join
        # (although should not be zero)
        check.greater(len(result_df), 0)
        # but all of its key values should be present in input
        for row_val in result_df[test_case.src_df_col].values:
            check.is_in(row_val, in_df[test_case.src_df_col].values)
    # Second phase: explicit key join on TargetLogonId.
    # 'no_match' (key 23) is deliberately absent from the result data so we
    # can tell inner/right joins (drop it) apart from left joins (keep it).
    join_in_data = {
        0: "0x3e7",
        1: "0xc90e957",
        2: "0xc90ea44",
        3: "0xc912d62",
        4: "0xc913737",
        10: "0x3e3",
        14: "0x3e4",
        15: "0xaddd",
        16: "0xafff",
        17: "0x3e5",
        23: "no_match",
    }
    in_df = pd.DataFrame(
        pd.Series(join_in_data), columns=["TargetLogonId"]
    ).reset_index()
    result_no_merge_df = func(data=in_df, **params)
    result_df = func(
        data=in_df,
        **params,
        join=join_type,
        left_on="TargetLogonId",
        right_on="TargetLogonId",
    )
    check.is_not_none(result_df)
    if join_type in ("inner", "right"):
        # inner/right: only matching keys survive; same length as no-join result
        check.equal(len(result_df), len(result_no_merge_df))
        for val in join_in_data.values():
            if val != "no_match":
                check.is_in(val, result_df["TargetLogonId"].values)
            else:
                check.is_not_in(val, result_df["TargetLogonId"].values)
    if join_type == "left":
        # left join keeps the unmatched 'no_match' row, hence +1 length
        check.equal(len(result_df), len(result_no_merge_df) + 1)
        for val in join_in_data.values():
            check.is_in(val, result_df["TargetLogonId"].values)
def test_external_links():
    """External links exist and do not include the page's own URL."""
    page = BasePage(TEST_HTML_PAGE, TEST_HTML_URL)
    external_urls = [link.absolute_url for link in page.external_links]
    check.is_not_in(TEST_HTML_PAGE, external_urls)
    check.is_true(page.external_links)