# ===== Example 1: LinearSVC =====
# Predicting: classify the held-out test split and time the call.
print('Predicting...')
start_time = time.time()
y_pred = model.predict(dataset.X_test.values)
elapsed_time_testing = time.time() - start_time

# Analytics: persist run metadata, advanced metrics and a normalized
# confusion matrix through the project's Evaluator helper.
# (Dead commented-out eval_results/metric code removed.)
title = "LinearSVC (longrun minmax)"
save_path = "C:/Users/thoma/source/repos/PythonMachineLearning/PythonMachineLearning/Library/Results"
print('Analyzing...')
evaluator = Evaluator(title, save_path)
# NOTE(review): elapsed_time_training is expected to be set by the
# training section that precedes this chunk — confirm it is in scope.
evaluator.append_to_file(f'Training time (seconds): {elapsed_time_training}',
                         "info.txt")
evaluator.append_to_file(f'Testing time (seconds): {elapsed_time_testing}',
                         "info.txt")
evaluator.append_to_file(dataset_parameters, "dataset_parameters.txt")
evaluator.append_to_file(model_parameters, "model_parameters.txt")
evaluator.save_advanced_metrics(dataset.y_test, y_pred, dataset.class_labels,
                                dataset.class_descriptions)
evaluator.create_confusion_matrix(dataset.y_test,
                                  y_pred,
                                  dataset.class_labels,
                                  normalize=True)
plt.show()
# ===== Example 2: TensorFlow =====
# Analytics: metric histories recorded by the first callback plus the
# validation-loss curve from the Keras History object.
# (Dead commented-out alternative eval_results dict removed.)
eval_results = {
    'accuracy': callback[0].val_accuracy,
    'gmean': callback[0].val_gmean,
    'loss': model.history.history["val_loss"]
}

title = "TensorFlow (4 layers weights smote)"
save_path = "C:/Users/thoma/source/repos/PythonMachineLearning/PythonMachineLearning/Library/Results"
print('Analyzing...')
evaluator = Evaluator(title, save_path)
evaluator.append_to_file(f'Best iteration: {callback[0].best_epoch}',
                         "info.txt")
# NOTE(review): elapsed_time_training / elapsed_time_testing and y_pred
# are produced by sections preceding this chunk — confirm they are in scope.
evaluator.append_to_file(f'Training time (seconds): {elapsed_time_training}',
                         "info.txt")
evaluator.append_to_file(f'Testing time (seconds): {elapsed_time_testing}',
                         "info.txt")
evaluator.save_dict_to_file(dataset_parameters, "dataset_parameters.csv")
evaluator.save_dict_to_file(model_parameters, "model_parameters.csv")
evaluator.save_advanced_metrics(dataset.y_test, y_pred, dataset.class_labels,
                                dataset.class_descriptions)
evaluator.save_eval_scores_to_file(eval_results, "metric_results.csv")
evaluator.create_evaluation_metric_results(eval_results,
                                           xlabel='epochs',
                                           ylabel='metric score')
# NOTE(review): this call is truncated — its remaining arguments
# (class labels, normalize flag) and the closing parenthesis were lost
# when the file was assembled; restore them as in the other examples
# in this file before running.
evaluator.create_confusion_matrix(dataset.y_test,
                                  y_pred,
# Predict on the test split, timing the call.
print('Predicting...')
start_time = time.time()
y_pred = model.predict(dataset.X_test)
elapsed_time_testing = time.time() - start_time

# Analytics: pull the per-iteration validation metrics once, then
# write run metadata and metric curves via the Evaluator helper.
validation_history = model.evals_result()['validation_0']
eval_results = {
    'mlogloss': validation_history['mlogloss'],
    'gmean': np.absolute(validation_history['gmean']),
}

title = "XGBoost (HYPER)"
save_path = "C:/Users/thoma/source/repos/PythonMachineLearning/PythonMachineLearning/Library/Results"
print('Analyzing...')
evaluator = Evaluator(title, save_path)

info_file = "info.txt"
evaluator.append_to_file(f'Best iteration: {model.best_iteration}', info_file)
evaluator.append_to_file(
    f'Training time (seconds): {elapsed_time_training}', info_file)
evaluator.append_to_file(
    f'Testing time (seconds): {elapsed_time_testing}', info_file)
evaluator.append_to_file(dataset_parameters, "dataset_parameters.txt")
evaluator.append_to_file(model_parameters, "model_parameters.txt")
evaluator.save_advanced_metrics(dataset.y_test, y_pred,
                                dataset.class_labels,
                                dataset.class_descriptions)
evaluator.append_to_file(eval_results, "metric_results.txt")
evaluator.create_evaluation_metric_results(
    eval_results, xlabel='number of trees', ylabel='metric score')
# NOTE(review): truncated call — the trailing arguments and closing
# parenthesis are missing from this chunk; restore them as in the
# other examples in this file.
evaluator.create_confusion_matrix(dataset.y_test,
                                  y_pred,
                                  dataset.class_labels,
    # NOTE(review): orphaned keyword arguments — the opening of this call
    # (apparently an early-stopping wrapper constructor assigned to `early`,
    # used below) was lost when the file was assembled. The visible kwargs
    # score on the validation split, monitor "gmean" (higher is better),
    # and stop after 25 rounds without improvement.
    scorer=partial(scorer, dataset.X_validate, dataset.y_validate),
    monitor_score = "gmean",
    patience = 25,
    higher_is_better = True)

# Train the early-stopping wrapper, timing the fit.
start_time = time.time()
early.fit(dataset.X_train, dataset.y_train)
elapsed_time_training = time.time() - start_time

# Predict with the estimator kept by the early-stopping wrapper.
print('Predicting...')
start_time = time.time()
y_pred = early.estimator.predict(dataset.X_test)
elapsed_time_testing = time.time() - start_time

# Analytics: record run metadata, metric curves and a normalized
# confusion matrix through the Evaluator helper.
title = "AdaBoost (hyper weight minmax)"
save_path = "C:/Users/thoma/source/repos/PythonMachineLearning/PythonMachineLearning/Library/Results"
print('Analyzing...')
evaluator = Evaluator(title, save_path)
for info_line in (f'Best iteration: {early.best_iteration_}',
                  f'Training time (seconds): {elapsed_time_training}',
                  f'Testing time (seconds): {elapsed_time_testing}'):
    evaluator.append_to_file(info_line, "info.txt")
evaluator.append_to_file(classifier_parameters, "classifier_parameters.txt")
evaluator.append_to_file(dataset_parameters, "dataset_parameters.txt")
evaluator.append_to_file(model_parameters, "model_parameters.txt")
evaluator.save_advanced_metrics(dataset.y_test, y_pred,
                                dataset.class_labels,
                                dataset.class_descriptions)
evaluator.append_to_file(early.scores_, "metric_results.txt")
evaluator.create_evaluation_metric_results(early.scores_,
                                           xlabel='number of trees',
                                           ylabel='geometric mean')
evaluator.create_confusion_matrix(dataset.y_test, y_pred,
                                  dataset.class_labels, normalize=True)
plt.show()
# NOTE(review): start_time is set by the training section preceding
# this chunk — confirm it is in scope.
elapsed_time_training = time.time() - start_time

# Predicting: request hard class labels from CatBoost and time the call.
print('Predicting...')
start_time = time.time()
y_pred = model.predict(dataset.X_test, prediction_type='Class')
elapsed_time_testing = time.time() - start_time

# Analytics
print('Analyzing...')
title = "CatBoost (weights smote)"

# Per-iteration validation metrics from CatBoost. np.absolute() is
# applied to both curves — presumably to keep the objective curve
# positive for plotting; TODO confirm.
# (Dead commented-out F1/gmean entries removed.)
eval_results = {
    'MultiClass': np.absolute(model.get_evals_result()['validation_0']['MultiClass']),
    'Accuracy': np.absolute(model.get_evals_result()['validation_0']['Accuracy']),
}

save_path = "C:/Users/thoma/source/repos/PythonMachineLearning/PythonMachineLearning/Library/Results"
evaluator = Evaluator(title, save_path)
evaluator.append_to_file(f'Best iteration: {model.get_best_iteration()}', "info.txt")
evaluator.append_to_file(f'Training time (seconds): {elapsed_time_training}', "info.txt")
evaluator.append_to_file(f'Testing time (seconds): {elapsed_time_testing}', "info.txt")
evaluator.save_dict_to_file(dataset_parameters, "dataset_parameters.csv")
evaluator.save_dict_to_file(model_parameters, "model_parameters.csv")
evaluator.save_advanced_metrics(dataset.y_test, y_pred, dataset.class_labels, dataset.class_descriptions)
evaluator.save_eval_scores_to_file(eval_results, "metric_results.csv")
evaluator.create_evaluation_metric_results(eval_results, xlabel='number of trees', ylabel='metric score')
evaluator.create_confusion_matrix(dataset.y_test, y_pred, dataset.class_labels, normalize=True)
plt.show()