# DummyClassifier comes from scikit-learn; `eu` is the project's local
# evaluation-utilities module (assumed to be imported elsewhere as `eu`).
from sklearn.dummy import DummyClassifier

# Seed the result store with a worst-case placeholder entry
# (best_model_name == 'Worst' at this point).
average_scores_and_best_scores = dict()
average_scores_and_best_scores[best_model_name] \
    = (best_score, best_score_dev, best_exec_time, best_model, {})
scores_of_best_model = (best_score, best_score_dev, best_cv_results,
                        best_exec_time, best_model)

# Start evaluation process
print()
print("=== [task] Evaluation of DummyClassifier")
print()

wtr = eu.calculate_sample_weight(y_train)
strategy = 'stratified'  # alternative: 'most_frequent'
evaluation_result = eu.single_classic_cv_evaluation(
    X_train_transformed, y_train, 'DummyClf_2nd',
    DummyClassifier(strategy=strategy), wtr, scoring, outer_cv,
    dict(), scores_of_best_model, results, names, seed)
average_scores_and_best_scores = evaluation_result[0]
scores_of_best_model = evaluation_result[1]

Dummy_scores.append(scores_of_best_model[0])  # Dummy score -- ROC_AUC
Dummy_scores.append(scores_of_best_model[1])  # Dummy score std
Dummy_scores.append(scores_of_best_model[2])  # Dummy cv results
Dummy_scores.append(scores_of_best_model[3])  # Dummy execution time
# Dummy model's name and estimator
Dummy_scores.append(scores_of_best_model[4])

# Reset the accumulators for the next evaluation round
names = []
results = []
print()
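# --------------------------------------------------------------------------
# The two helpers used above, eu.calculate_sample_weight and
# eu.single_classic_cv_evaluation, are project-local and not shown here.
# The sketch below is a minimal, hypothetical stand-in built only from
# public scikit-learn APIs, assuming a binary target scored with ROC AUC
# and the (score, std, cv_results, exec_time, model) tuple shape unpacked
# into Dummy_scores above. It is an illustration, not the project's actual
# implementation.
# --------------------------------------------------------------------------
import time

import numpy as np
from sklearn.dummy import DummyClassifier
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import StratifiedKFold
from sklearn.utils.class_weight import compute_sample_weight


def single_classic_cv_evaluation_sketch(X, y, model, sample_weight,
                                        n_splits=10, seed=42):
    """Weighted stratified CV of one binary classifier, scored by ROC AUC."""
    cv = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=seed)
    fold_scores = []
    start = time.time()
    for train_idx, test_idx in cv.split(X, y):
        # Fit on the training fold, weighting samples to offset class imbalance.
        model.fit(X[train_idx], y[train_idx],
                  sample_weight=sample_weight[train_idx])
        proba = model.predict_proba(X[test_idx])[:, 1]
        fold_scores.append(roc_auc_score(y[test_idx], proba))
    exec_time = time.time() - start
    fold_scores = np.asarray(fold_scores)
    # Mirror the tuple layout unpacked into Dummy_scores above.
    return (fold_scores.mean(), fold_scores.std(), fold_scores,
            exec_time, model)


# Example usage (variable names follow the snippet above; X and y are
# assumed to be NumPy arrays):
# wtr = compute_sample_weight('balanced', y_train)  # ~ eu.calculate_sample_weight
# avg, std, folds, t, fitted = single_classic_cv_evaluation_sketch(
#     X_train_transformed, y_train,
#     DummyClassifier(strategy='stratified'), wtr)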