Example #1

import numpy as np

# grid_search_tester, show_best_params and get_best_performance are project
# helper functions used below; their definitions are not part of this example.

# Assumption: the 'kernel' entry and the 'gamma' range below are reconstructed
# placeholders (following the pattern of the other grids in this file); only
# the 'C' row comes from the original grid.
tuned_parameters = [{'kernel': ['rbf'],
                     'gamma': np.around(np.linspace(.001, .1, 100).tolist(), 3),
                     'C': np.around(np.linspace(1, 101, 100).tolist(), 0)}]

tuned_parameters_Best = [{'kernel': ['rbf'], 'gamma': [.04], 'C': [4]}]

scores = ['precision_macro', 'recall_macro', 'balanced_accuracy']
scores2 = ['precision_macro']

scr = scores2
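
The score names above are standard scikit-learn scoring strings. As an aside (not part of the original run), the accepted names can be listed directly; get_scorer_names is available in scikit-learn >= 1.0:

# Optional check: list every scorer name scikit-learn accepts.
from sklearn.metrics import get_scorer_names
print(sorted(get_scorer_names()))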

# t_p = tuned_parameters         # *****
# t_p = tuned_parameters2b
t_p = tuned_parameters_Best

labels = ['g', 'b']

clf, b_params, c_rV, c_rT, clf_l = grid_search_tester(t_p, train_set, validation_set, test_set, cv=5, scores=scr,
                                                      verbose=True, show_param_test=False, t_names=labels)

show_best_params(b_params, c_rV, c_rT)

b_avg, b_score = get_best_performance(c_rT, scr, labels, metric='precision')

print('Should use score: {:s}'.format(b_score))
print()
print('param options: ')
print(t_p)
print('# -------------------------------   use parameters: ')
print('# ', b_params[b_score])
print('# ---------------------------------------------------')
print()
print('best score: ', b_score)
print('best avg: ', b_avg)
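
For reference, here is a minimal sketch (an assumption, not the project's actual helper) of roughly what grid_search_tester is expected to do internally, using plain scikit-learn. X_train and y_train stand in for the arrays inside train_set:

# Minimal sketch (assumption): the equivalent search with plain scikit-learn.
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC

search = GridSearchCV(SVC(), param_grid=t_p, scoring='precision_macro', cv=5)
search.fit(X_train, y_train)   # X_train / y_train: assumed training arrays
print(search.best_params_)     # with tuned_parameters_Best this is
                               # {'C': 4, 'gamma': 0.04, 'kernel': 'rbf'}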

Example #2

tuned_parameters2 = [{   # name recovered from the grid_search_tester call below
    'kernel': ['rbf'],
    'gamma': np.around(np.linspace(.0930, .0931, 10).tolist(), 5),
    'C': np.around(np.linspace(8.0, 9.0, 10).tolist(), 1)
}]

scores = ['precision_macro', 'recall_macro', 'accuracy']

scores1 = ['precision_macro', 'accuracy', 'balanced_accuracy']
scores2 = ['balanced_accuracy']

# Assumption: grid_search_tester returns five values, as in Example #1;
# only the classifier and the best-parameter dict are used here.
clf, b_params, *_ = grid_search_tester(tuned_parameters2,
                                       train_set,
                                       validation_set,
                                       test_set,
                                       cv=5,
                                       scores=scores,
                                       verbose=True)

for score in b_params:
    b_param = b_params[score]

    b_C = b_param['C']
    b_gamma = b_param['gamma']
    b_kernel = b_param['kernel']

    print('Score: {:s}'.format(score))
    print('  kernel: {}, gamma: {}, C: {}'.format(b_kernel, b_gamma, b_C))
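
For completeness, a hedged sketch of refitting one selected parameter set and checking it on the test data. X_train, y_train, X_test and y_test are assumed to be the arrays inside train_set and test_set, and 'precision_macro' is assumed to be one of the keys of b_params:

# Minimal sketch (assumption): refit the best parameters for one score and
# evaluate on the held-out test data.
from sklearn.metrics import classification_report
from sklearn.svm import SVC

best = b_params['precision_macro']
model = SVC(kernel=best['kernel'], gamma=best['gamma'], C=best['C'])
model.fit(X_train, y_train)
print(classification_report(y_test, model.predict(X_test)))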