def test_dashboard_3(self, temp_dir, browser):
    data = DataBuilder().get_default_data()
    data.date = "2017-03-31"
    data.transits = ["95307"]
    dashboard = DashBoardPage(browser).open()
    take_screenshot(browser, "dashboard")
    dashboard.clear()
    dashboard.set_date(data.date)
    for transit in data.transits:
        dashboard.select_transit(transit)
    dashboard.search()
    dashboard.download_csv_1()
    actual_file = path.realpath(path.join(temp_dir, "export.csv"))
    wait_until_file_exists(actual_file)
    # Placeholder content standing in for the downloaded CSV; renamed so it no
    # longer shadows the DataBuilder `data` above.
    csv_content = "sfsdfsdf\ndfsdsdf\nsdfsdfsdfs\n"
    self.reporter = GenericDiffReporterFactory().get_first_working()
    verify(csv_content, reporter=self.reporter)

def test_reducer_on_map_output_with_single_input_line(self):
    with open('test/BuildTrainingSetTest.test_map_with_single_input_line.approved.txt') as f:
        map_output = f.read().split('\n')
    actual_output = self.run_reduce_job_on_robust(map_output)
    verify(actual_output)

def test_reducer_on_map_output_with_multiple_input_lines_for_ms_marco(self):
    with open('test/BuildTrainingSetTest.test_map_with_multiple_input_lines_for_ms_marco.approved.txt') as f:
        map_output = f.read().split('\n')
    actual_output = self.run_reduce_job_on_ms_marco(map_output)
    verify(actual_output)

def test_valtonenornhag_arxiv_2020b_frHfr(self):
    p1 = np.array([
        [-1.146281331283839, 1.050109134098990, 1.065996624259908],
        [-0.879951300627967, 0.620743795713172, 0.541580087112080],
    ])
    p2 = np.array([
        [0.663628650450811, 1.333268512835822, 1.318951998842419],
        [1.241359691717976, 0.068745345721370, 0.016786262835316],
    ])
    R1 = np.array([
        [-0.320761154096478, 0.935110133718446, 0.150603186685294],
        [-0.808554515336552, -0.353152142517100, 0.470662469254195],
        [0.493307082608358, 0.029199350209406, 0.869365009760445],
    ])
    R2 = np.array([
        [0.420952545706761, -0.744893719609945, -0.517621773835547],
        [0.652124812245433, -0.148125936460580, 0.743499788972085],
        [-0.630501533318403, -0.650532130976893, 0.423409687005158],
    ])
    sols = homlib.get_valtonenornhag_arxiv_2020b_frHfr(
        np.asfortranarray(p1), np.asfortranarray(p2),
        np.asfortranarray(R1), np.asfortranarray(R2))
    np.testing.assert_almost_equal(sols['lam'], -0.45054159850239145, self.tol)
    np.testing.assert_almost_equal(sols['f'], 5.756798219531202, self.tol)
    verify(verify_numpy_array(sols['H']))

def test_fitzgibbon_cvpr_2001_results(self):
    """Check the expected result."""
    sols = homlib.get_fitzgibbon_cvpr_2001(np.asfortranarray(self.p1),
                                           np.asfortranarray(self.p2))
    np.testing.assert_almost_equal(sols['lam'], -0.6554778901775142, self.tol)
    verify(verify_numpy_array(sols['H']))

def verify_all_combinations_with_namer(
    function_under_test: Callable,
    input_arguments: Sequence[Sequence[Any]],
    formatter: Optional[Callable] = None,
    reporter: Optional[Reporter] = None,
    *,  # enforce keyword arguments - https://www.python.org/dev/peps/pep-3102/
    options: Optional[Options] = None,
) -> None:
    """Run function_under_test with all possible combinations of input_arguments
    and verify the outputs against the recorded approval file.

    Args:
        function_under_test (function): function under test.
        input_arguments: list of values to test for each input argument. For
            example, a function f(product, quantity) could be tested with the
            input_arguments [['water', 'cola'], [1, 4]], which would result in
            outputs for the following calls being recorded and verified:
            f('water', 1), f('water', 4), f('cola', 1), f('cola', 4).
        formatter (function): function for formatting the function
            inputs/outputs before they are recorded to an approval file for
            comparison.
        reporter (approvaltests.reporter.Reporter): an approval reporter.

    Raises:
        ApprovalException: if the results do not match the approved results.
    """
    text = print_combinations(formatter, function_under_test, input_arguments)
    options = initialize_options(options, reporter)
    verify(text, options=options)

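# A minimal usage sketch of verify_all_combinations_with_namer, following the
# product/quantity example from the docstring above. The function
# describe_order and its output format are hypothetical; only the call shape
# comes from the docstring.
def test_describe_order_combinations():
    def describe_order(product, quantity):
        return f"{quantity} x {product}"

    # Records and verifies f('water', 1), f('water', 4), f('cola', 1), f('cola', 4).
    verify_all_combinations_with_namer(
        describe_order,
        [["water", "cola"], [1, 4]],
    )
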
def test_slash_red_win(log):
    game = GameState()
    player_moves = [
        ColumnWasClicked(c) for c in [0, 1, 1, 2, 2, 3, 2, 3, 3, 5, 3]
    ]
    result = simulate_main_event_loop(game, player_moves, log)
    verify(print_scenario(player_moves, result, log))

def test_valid_inputs(self):
    duration = 3
    weekly_interest_rate = Decimal('0.05')
    principal = Decimal('1000.00')
    repayment = get_weekly_repayment_amount(principal, weekly_interest_rate, duration)
    reporter = GenericDiffReporterFactory().get('meld')
    verify(str(repayment), reporter=reporter)

def test_simple(self):
    verify(
        "Hello",
        options=Options().with_reporter(
            GenericDiffReporter.create(r"C:\my\favorite\diff\utility.exe")
        ),
    )

def test_format_table_with_sparse_columns():
    """Test table where rows have different heading names"""

    class TestItem1:
        def __init__(self, name, age, num_pets):
            self.name = name
            self.age = age
            self.num_pets = num_pets

    class TestItem2:
        def __init__(self, name, age, num_computers):
            self.name = name
            self.age = age
            self.num_computers = num_computers

    # Arrange
    items = [
        TestItem1("Ann", 21, 1),
        TestItem2("Bob", 45, 2),
        TestItem1("Cath", 52, 4),
    ]
    output = Output()

    # Act
    output.format_table(items)

    # Assert
    verify(output.report)

def test_supermarket_failure(tmpdir):
    test_name = "a"
    write_received_file(
        tmpdir, test_name, """\
apples 11.94
1.99 * 6.000
5 for 5.99(apples) -3.96
----------------------------------------
Total: 7.98
""")
    write_approved_file(
        tmpdir, test_name, """\
apples 11.94
1.99 * 6.000
5 for 5.99(apples) -3.96
Total: 7.98""")
    test_name = "b"
    write_received_file(
        tmpdir, test_name, """\
toothbrush 0.99
----------------------------------------
Total: 0.99
""")
    write_approved_file(
        tmpdir, test_name, """\
toothbrush 0.99
Total: 0.99""")
    analysis = analyze(tmpdir)
    verify(analysis)

def test_groups_two_tests_same_diff():
    diff1 = """\
- bar
+ foo
"""
    diffs = {"a": diff1, "b": diff1}
    verify(str(analyze_groups(diffs, identical)))

def test_report_diffs():
    diff1 = '+ foo\n'
    diff2 = '- bar\n'
    diffs = {
        diff1: DiffGroup("All share this diff", diff1, ['a', 'b']),
        diff2: DiffGroup("All share this diff", diff2, ['a', 'c']),
    }
    verify(report_diffs(diffs))

def test_with_imaginary_data(self):
    arguments = sys.argv + [
        '-i', 'test/resources/input_single_topic/',
        '-o', '.test-output',
        '-q', 'test/resources/test.qrel',
    ]
    with patch.object(sys, 'argv', arguments):
        imp.load_source('__main__', './run2fv.py')
    verify("".join(sorted(open('.test-output', 'r'))))
    os.remove('.test-output')

def test_backslash_yellow_win(log):
    game = GameState()
    player_moves = [
        ColumnWasClicked(c) for c in [0, 6, 5, 5, 4, 4, 3, 4, 5, 3, 0, 3, 0, 3]
    ]
    result = simulate_main_event_loop(game, player_moves, log)
    verify(print_scenario(player_moves, result, log))

def test_approve_one_file(tmpdir):
    received_file = os.path.join(tmpdir, "a.received.txt")
    with open(received_file, "w") as f:
        f.write("foo")
    approve_all(tmpdir)
    all_files = os.listdir(tmpdir)
    verify(str(all_files))

def test_example_statement():
    with open(get_adjacent_file("invoice.json")) as f:
        invoice = json.load(f)
    with open(get_adjacent_file("plays.json")) as f:
        plays = json.load(f)
    statement = Statement.from_json(invoice=invoice, plays=plays)
    presentation = Text(statement)
    verify(str(presentation))

def test_get_kukelova_cvpr_2015_sol4(self):
    sols = homlib.get_kukelova_cvpr_2015(np.asfortranarray(self.p1),
                                         np.asfortranarray(self.p2))
    np.testing.assert_almost_equal(sols[4]['lam1'], -0.6554778901775545, self.tol)
    np.testing.assert_almost_equal(sols[4]['lam2'], -0.6554778901775485, self.tol)
    verify(verify_numpy_array(sols[4]['H']))

def test_get_kukelova_cvpr_2015_sol2(self):
    sols = homlib.get_kukelova_cvpr_2015(np.asfortranarray(self.p1),
                                         np.asfortranarray(self.p2))
    np.testing.assert_almost_equal(sols[2]['lam1'], -1.7814746086320201, self.tol)
    np.testing.assert_almost_equal(sols[2]['lam2'], -2.079301697963529, self.tol)
    verify(verify_numpy_array(sols[2]['H']))

def test_get_kukelova_cvpr_2015_sol3(self):
    sols = homlib.get_kukelova_cvpr_2015(np.asfortranarray(self.p1),
                                         np.asfortranarray(self.p2))
    np.testing.assert_almost_equal(sols[3]['lam1'], -2.136402668559706, self.tol)
    np.testing.assert_almost_equal(sols[3]['lam2'], 0.6928831549898077, self.tol)
    verify(verify_numpy_array(sols[3]['H']))

def test_get_kukelova_cvpr_2015_sol0(self):
    sols = homlib.get_kukelova_cvpr_2015(np.asfortranarray(self.p1),
                                         np.asfortranarray(self.p2))
    np.testing.assert_almost_equal(sols[0]['lam1'], 0.2537509800067458, self.tol)
    np.testing.assert_almost_equal(sols[0]['lam2'], -1.9785613160596929, self.tol)
    verify(verify_numpy_array(sols[0]['H']))

def test_get_kukelova_cvpr_2015_sol1(self):
    sols = homlib.get_kukelova_cvpr_2015(np.asfortranarray(self.p1),
                                         np.asfortranarray(self.p2))
    np.testing.assert_almost_equal(sols[1]['lam1'], -1.1832137508476386, self.tol)
    np.testing.assert_almost_equal(sols[1]['lam2'], -1.8809663034629707, self.tol)
    verify(verify_numpy_array(sols[1]['H']))

def test_documents_to_remove_for_leave_one_out_with_single_topic_and_multiple_runs_reverse(cls):
    run_01 = TrecRun('test/resources/sample-run-file-01')
    run_02 = TrecRun('test/resources/sample-run-file-02')
    actual = identify_judgments_to_remove_for_leave_one_out([run_02, run_01])
    verify(actual.to_csv(header=False))

def test_pycharm_diff_command():
    reporter = GenericDiffReporter([
        "PyCharm",
        '/Applications/PyCharm CE.app/Contents/MacOS/pycharm',
        ["diff"],
    ])
    received_path = "received.txt"
    approved_path = "approved.txt"
    verify(str(reporter.get_command(received_path, approved_path)))

def test_with_reporter():
    testr = ReporterForTesting()
    options = Options().with_reporter(testr)
    try:
        verify("Data2", options=options)
    except ApprovalException:
        # The mismatch is expected; we only care that the reporter fired.
        pass
    assert testr.called

def test_sample_lift_system():
    liftA = Lift("A", floor=3, requested_floors=[0])
    liftB = Lift("B", floor=2)
    liftC = Lift("C", floor=2, doors_open=True)
    liftD = Lift("D", floor=0, requested_floors=[0])
    lift_system = LiftSystem(
        floors=[0, 1, 2, 3],
        lifts=[liftA, liftB, liftC, liftD],
        calls=[Call(1, Direction.DOWN)],
    )
    verify(print_lifts(lift_system))

def test_read_pieces():
    position = """\
0 1 2.
0 x .
1 o .
2 x .
"""
    grid = read_grid(position)
    verify(print_grid(grid))

def test_files_identical(tmpdir):
    file1 = os.path.join(str(tmpdir), "a.received.txt")
    file2 = os.path.join(str(tmpdir), "b.approved.txt")
    identical_contents = "abc"
    with open(file1, "w") as f1:
        f1.write(identical_contents)
    with open(file2, "w") as f2:
        f2.write(identical_contents)
    verify(calculate_diff(file1, file2))

def test_mix_of_files(tmpdir):
    received_file = os.path.join(tmpdir, "a.received.txt")
    with open(received_file, "w") as f:
        f.write("foo")
    approved_file = os.path.join(tmpdir, "a.approved.txt")
    with open(approved_file, "w") as f:
        f.write("bar")
    clean_received(tmpdir)
    verify(str(os.listdir(tmpdir)))

def test_two_similar_failures(tmpdir):
    test_name = "a"
    write_received_file(tmpdir, test_name, "foo\n")
    write_approved_file(tmpdir, test_name, "bar\nbaz\n")
    test_name = "b"
    write_received_file(tmpdir, test_name, "foo\n")
    write_approved_file(tmpdir, test_name, "bar\n")
    analysis = analyze(tmpdir)
    verify(analysis)