def main():
    from sklearn.linear_model import LogisticRegression

    # Load generated data
    X_train, X_test, y_train, y_test = bench.load_data(params)

    params.n_classes = len(np.unique(y_train))

    if params.multiclass == 'auto':
        params.multiclass = 'ovr' if params.n_classes == 2 else 'multinomial'

    if not params.tol:
        params.tol = 1e-3 if params.solver == 'newton-cg' else 1e-10

    # Create our classifier object
    clf = LogisticRegression(penalty='l2', C=params.C, n_jobs=params.n_jobs,
                             fit_intercept=params.fit_intercept,
                             verbose=params.verbose, tol=params.tol,
                             max_iter=params.maxiter, solver=params.solver,
                             multi_class=params.multiclass)

    # Time fit and predict
    fit_time, _ = bench.measure_function_time(clf.fit, X_train, y_train,
                                              params=params)

    y_pred = clf.predict(X_train)
    y_proba = clf.predict_proba(X_train)
    train_acc = bench.accuracy_score(y_train, y_pred)
    train_log_loss = bench.log_loss(y_train, y_proba)
    train_roc_auc = bench.roc_auc_score(y_train, y_proba)

    predict_time, y_pred = bench.measure_function_time(clf.predict, X_test,
                                                       params=params)
    y_proba = clf.predict_proba(X_test)
    test_acc = bench.accuracy_score(y_test, y_pred)
    test_log_loss = bench.log_loss(y_test, y_proba)
    test_roc_auc = bench.roc_auc_score(y_test, y_proba)

    bench.print_output(
        library='sklearn',
        algorithm='logistic_regression',
        stages=['training', 'prediction'],
        params=params,
        functions=['LogReg.fit', 'LogReg.predict'],
        times=[fit_time, predict_time],
        metric_type=['accuracy', 'log_loss', 'roc_auc'],
        metrics=[
            [train_acc, test_acc],
            [train_log_loss, test_log_loss],
            [train_roc_auc, test_roc_auc],
        ],
        data=[X_train, X_test],
        alg_instance=clf,
    )
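# The `bench` helpers used throughout this section (load_data, measure_function_time,
# print_output and the metric wrappers) are defined outside it, as are the module-level
# `np` and `params` objects. As a rough, hypothetical sketch of what the timing helper
# is assumed to do -- call the function, time it with a monotonic clock, and return both
# the elapsed time and the function's result -- something like the following would do;
# the real helper may repeat the call and aggregate timings, which this sketch does not.
def _measure_function_time_sketch(func, *args, params=None, **kwargs):
    from timeit import default_timer as timer

    # `params` is accepted only to mirror the call signature used in the benchmarks
    start = timer()
    result = func(*args, **kwargs)
    elapsed = timer() - start
    return elapsed, result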
def main():
    from sklearn.ensemble import RandomForestClassifier

    # Load and convert data
    X_train, X_test, y_train, y_test = bench.load_data(params)

    # Create our random forest classifier
    clf = RandomForestClassifier(
        criterion=params.criterion,
        n_estimators=params.num_trees,
        max_depth=params.max_depth,
        max_features=params.max_features,
        min_samples_split=params.min_samples_split,
        max_leaf_nodes=params.max_leaf_nodes,
        min_impurity_decrease=params.min_impurity_decrease,
        bootstrap=params.bootstrap,
        random_state=params.seed,
        n_jobs=params.n_jobs)

    params.n_classes = len(np.unique(y_train))

    fit_time, _ = bench.measure_function_time(clf.fit, X_train, y_train,
                                              params=params)
    y_pred = clf.predict(X_train)
    y_proba = clf.predict_proba(X_train)
    train_acc = bench.accuracy_score(y_train, y_pred)
    train_log_loss = bench.log_loss(y_train, y_proba)
    train_roc_auc = bench.roc_auc_score(y_train, y_proba)

    predict_time, y_pred = bench.measure_function_time(clf.predict, X_test,
                                                       params=params)
    y_proba = clf.predict_proba(X_test)
    test_acc = bench.accuracy_score(y_test, y_pred)
    test_log_loss = bench.log_loss(y_test, y_proba)
    test_roc_auc = bench.roc_auc_score(y_test, y_proba)

    bench.print_output(
        library='sklearn',
        algorithm='df_clsf',
        stages=['training', 'prediction'],
        params=params,
        functions=['df_clsf.fit', 'df_clsf.predict'],
        times=[fit_time, predict_time],
        metric_type=['accuracy', 'log_loss', 'roc_auc'],
        metrics=[
            [train_acc, test_acc],
            [train_log_loss, test_log_loss],
            [train_roc_auc, test_roc_auc],
        ],
        data=[X_train, X_test],
        alg_instance=clf,
    )
def main():
    from sklearn.neighbors import KNeighborsClassifier

    # Load generated data
    X_train, X_test, y_train, y_test = bench.load_data(params)
    params.n_classes = len(np.unique(y_train))

    # Create classification object
    knn_clsf = KNeighborsClassifier(n_neighbors=params.n_neighbors,
                                    weights=params.weights,
                                    algorithm=params.method,
                                    metric=params.metric,
                                    n_jobs=params.n_jobs)

    # Measure time and accuracy on fitting
    train_time, _ = bench.measure_function_time(knn_clsf.fit, X_train, y_train,
                                                params=params)
    if params.task == 'classification':
        y_pred = knn_clsf.predict(X_train)
        y_proba = knn_clsf.predict_proba(X_train)
        train_acc = bench.accuracy_score(y_train, y_pred)
        train_log_loss = bench.log_loss(y_train, y_proba)
        train_roc_auc = bench.roc_auc_score(y_train, y_proba)

    # Measure time and accuracy on prediction
    if params.task == 'classification':
        predict_time, yp = bench.measure_function_time(knn_clsf.predict, X_test,
                                                       params=params)
        y_proba = knn_clsf.predict_proba(X_test)
        test_acc = bench.accuracy_score(y_test, yp)
        test_log_loss = bench.log_loss(y_test, y_proba)
        test_roc_auc = bench.roc_auc_score(y_test, y_proba)
    else:
        predict_time, _ = bench.measure_function_time(knn_clsf.kneighbors, X_test,
                                                      params=params)

    # _fit_method is a private sklearn attribute holding the neighbors algorithm
    # actually chosen after fitting (e.g. 'brute', 'kd_tree' or 'ball_tree')
    if params.task == 'classification':
        bench.print_output(
            library='sklearn',
            algorithm=knn_clsf._fit_method + '_knn_classification',
            stages=['training', 'prediction'],
            params=params,
            functions=['knn_clsf.fit', 'knn_clsf.predict'],
            times=[train_time, predict_time],
            metric_type=['accuracy', 'log_loss', 'roc_auc'],
            metrics=[
                [train_acc, test_acc],
                [train_log_loss, test_log_loss],
                [train_roc_auc, test_roc_auc],
            ],
            data=[X_train, X_test],
            alg_instance=knn_clsf,
        )
    else:
        bench.print_output(
            library='sklearn',
            algorithm=knn_clsf._fit_method + '_knn_search',
            stages=['training', 'search'],
            params=params,
            functions=['knn_clsf.fit', 'knn_clsf.kneighbors'],
            times=[train_time, predict_time],
            metric_type=None,
            metrics=[],
            data=[X_train, X_test],
            alg_instance=knn_clsf,
        )
def metric_call(x, y):
    return bench.log_loss(x, y)
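# metric_call simply forwards to bench.log_loss. The bench metric wrappers used in this
# section (accuracy_score, log_loss, roc_auc_score) are assumed to be thin shims over
# sklearn.metrics; a minimal, hypothetical sketch under that assumption:
def _log_loss_sketch(y_true, y_proba):
    from sklearn.metrics import log_loss
    return log_loss(y_true, y_proba)


def _roc_auc_sketch(y_true, y_proba):
    from sklearn.metrics import roc_auc_score

    # sklearn expects the positive-class probability for binary problems;
    # multi_class='ovr' covers the multiclass case used by these benchmarks
    if y_proba.ndim == 2 and y_proba.shape[1] == 2:
        y_proba = y_proba[:, 1]
    return roc_auc_score(y_true, y_proba, multi_class='ovr')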
def main():
    from sklearn.svm import SVC

    X_train, X_test, y_train, y_test = bench.load_data(params)
    y_train = np.asfortranarray(y_train).ravel()

    if params.gamma is None:
        params.gamma = 1.0 / X_train.shape[1]

    cache_size_bytes = bench.get_optimal_cache_size(X_train.shape[0],
                                                    max_cache=params.max_cache_size)
    params.cache_size_mb = cache_size_bytes / 1024**2

    params.n_classes = len(np.unique(y_train))

    clf = SVC(C=params.C, kernel=params.kernel, cache_size=params.cache_size_mb,
              tol=params.tol, gamma=params.gamma, probability=params.probability,
              random_state=43, degree=params.degree)

    fit_time, _ = bench.measure_function_time(clf.fit, X_train, y_train,
                                              params=params)
    params.sv_len = clf.support_.shape[0]

    if params.probability:
        state_predict = 'predict_proba'
        clf_predict = clf.predict_proba
        train_acc = None
        test_acc = None

        predict_train_time, y_pred = bench.measure_function_time(
            clf_predict, X_train, params=params)
        train_log_loss = bench.log_loss(y_train, y_pred)
        train_roc_auc = bench.roc_auc_score(y_train, y_pred)

        _, y_pred = bench.measure_function_time(
            clf_predict, X_test, params=params)
        test_log_loss = bench.log_loss(y_test, y_pred)
        test_roc_auc = bench.roc_auc_score(y_test, y_pred)
    else:
        state_predict = 'prediction'
        clf_predict = clf.predict
        train_log_loss = None
        test_log_loss = None
        train_roc_auc = None
        test_roc_auc = None

        predict_train_time, y_pred = bench.measure_function_time(
            clf_predict, X_train, params=params)
        train_acc = bench.accuracy_score(y_train, y_pred)

        _, y_pred = bench.measure_function_time(
            clf_predict, X_test, params=params)
        test_acc = bench.accuracy_score(y_test, y_pred)

    bench.print_output(
        library='sklearn',
        algorithm='SVC',
        stages=['training', state_predict],
        params=params,
        functions=['SVM.fit', f'SVM.{state_predict}'],
        times=[fit_time, predict_train_time],
        metric_type=['accuracy', 'log_loss', 'roc_auc', 'n_sv'],
        metrics=[
            [train_acc, test_acc],
            [train_log_loss, test_log_loss],
            [train_roc_auc, test_roc_auc],
            [int(clf.n_support_.sum()), int(clf.n_support_.sum())],
        ],
        data=[X_train, X_train],
        alg_instance=clf,
    )
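# bench.get_optimal_cache_size is not shown in this section. The SVC benchmark above
# only needs it to return a kernel-cache size in bytes that is large enough for the
# training set but capped by params.max_cache_size. A hypothetical sketch under that
# assumption (room for the full n_rows x n_rows kernel matrix of doubles, capped at
# max_cache gigabytes), reusing the module-level `np` that the rest of this section
# relies on:
def _optimal_cache_size_sketch(n_rows, dtype=np.double, max_cache=64):
    byte_size = np.empty(0, dtype=dtype).itemsize
    optimal_cache_size_bytes = byte_size * (n_rows ** 2)
    max_cache_bytes = max_cache * 2 ** 30
    return min(optimal_cache_size_bytes, max_cache_bytes)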