def test_cross_val_two_model_classifier_testing_binary():
    # Cross-validated, per-class comparison of two binary classifiers; the
    # test passes as long as the comparison runs without raising.
    df, column_names, target_name, clf1, clf2 = generate_binary_classification_data_and_models()
    test_suite = classification_tests.ClassifierComparison(
        clf1, clf2, df, target_name, column_names)
    try:
        test_suite.cross_val_per_class_two_model_classifier_testing()
        assert True
    except Exception:
        assert False
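# The tests in this file rely on generate_binary_classification_data_and_models,
# which is defined elsewhere in the suite. A minimal sketch of what such a
# helper could look like follows, assuming pandas and scikit-learn; the sample
# size, feature names, and choice of LogisticRegression / DecisionTreeClassifier
# are illustrative assumptions, not the project's actual fixture.
def generate_binary_classification_data_and_models():
    import numpy as np
    import pandas as pd
    from sklearn.linear_model import LogisticRegression
    from sklearn.tree import DecisionTreeClassifier

    # Two random features and a random binary target.
    df = pd.DataFrame()
    df["feature_one"] = np.random.normal(size=1000)
    df["feature_two"] = np.random.normal(size=1000)
    df["target"] = np.random.binomial(1, 0.5, size=1000)
    column_names = ["feature_one", "feature_two"]
    target_name = "target"
    # Train two different classifiers on the same data so the comparison
    # suite has a pair of models to contrast.
    clf1 = LogisticRegression().fit(df[column_names], df[target_name])
    clf2 = DecisionTreeClassifier().fit(df[column_names], df[target_name])
    return df, column_names, target_name, clf1, clf2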
def test_two_model_classifier_testing_multiclass():
    # Compare two multiclass classifiers using micro-averaged metrics; the
    # test passes as long as the comparison runs without raising.
    df, column_names, target_name, clf1, clf2 = generate_multiclass_classification_data_and_models()
    test_suite = classification_tests.ClassifierComparison(
        clf1, clf2, df, target_name, column_names)
    try:
        test_suite.two_model_classifier_testing(average="micro")
        assert True
    except Exception:
        assert False
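# A corresponding sketch for the multiclass fixture used above, under the same
# assumptions as the binary sketch; the essential difference is a target with
# three classes, so that per-class and micro-averaged metrics are meaningful.
def generate_multiclass_classification_data_and_models():
    import numpy as np
    import pandas as pd
    from sklearn.linear_model import LogisticRegression
    from sklearn.tree import DecisionTreeClassifier

    df = pd.DataFrame()
    df["feature_one"] = np.random.normal(size=1000)
    df["feature_two"] = np.random.normal(size=1000)
    df["target"] = np.random.choice([0, 1, 2], size=1000)
    column_names = ["feature_one", "feature_two"]
    target_name = "target"
    clf1 = LogisticRegression().fit(df[column_names], df[target_name])
    clf2 = DecisionTreeClassifier().fit(df[column_names], df[target_name])
    return df, column_names, target_name, clf1, clf2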
def test_two_model_prediction_run_time_stress_test():
    # Stress-test prediction latency for both models: each entry in the
    # performance boundary pairs a sample size with the maximum acceptable
    # prediction run time at that size.
    df, column_names, target_name, clf1, clf2 = generate_binary_classification_data_and_models()
    test_suite = classification_tests.ClassifierComparison(
        clf1, clf2, df, target_name, column_names)
    performance_boundary = [
        {"sample_size": size, "max_run_time": 100}
        for size in range(100, 1000, 100)
    ]
    try:
        test_suite.two_model_prediction_run_time_stress_test(performance_boundary)
        assert True
    except Exception:
        assert False