def main():
    # Separate the data and their targets
    g_donnees = gd.GestionDonnees(d_base)
    [types, X, t] = g_donnees.lecture_donnees(d_base)

    # Split the data into train and test sets
    x_tr, x_ts, t_tr, t_ts = g_donnees.sep_donnees(X, t)

    # Training
    debut_e = time.time()  # start time, to measure training duration
    classif.entrainement(x_tr, t_tr, cherche_hyp)
    fin_e = time.time()    # end time, to measure training duration
    print('Training finished. Completed in %.2f seconds.' % (fin_e - debut_e), '\n')

    # Predictions on the training and test sets
    predict_tr = classif.prediction(x_tr)
    predict_ts = classif.prediction(x_ts)

    # Metrics to evaluate training and test performance
    prs_tr, rec_tr, fbeta_tr, _ = metriques(t_tr, predict_tr, average='macro')
    prs_ts, rec_ts, fbeta_ts, _ = metriques(t_ts, predict_ts, average='macro')
    acc_tr = accu(t_tr, predict_tr)
    acc_ts = accu(t_ts, predict_ts)

    tab_perform = [['Accuracy', acc_tr, acc_ts],
                   ['Precision', prs_tr, prs_ts],
                   ['Recall', rec_tr, rec_ts],
                   ['F-Beta', fbeta_tr, fbeta_ts]]
    print(tabulate(tab_perform, headers=['Metric', 'Train', 'Test'], floatfmt='.4f'))
    return tab_perform
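# The function above relies on aliases that are not defined in this snippet.
# A minimal sketch of the imports it appears to assume (gd, classif, d_base and
# cherche_hyp are project-specific objects and are left as-is):
import time
from tabulate import tabulate
from sklearn.metrics import accuracy_score as accu
from sklearn.metrics import precision_recall_fscore_support as metriques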
def predict_nb(df):
    df = __init__(df)
    # Train/test split (non-deterministic seed; 70% of the rows go to the test set)
    X_train, X_test = train_test_split(df, test_size=0.7, random_state=int(time.time()))
    # Fit a Gaussian Naive Bayes classifier; the target column 'gname_num' is
    # dropped from the features to avoid leaking the label into the model.
    gnb = GaussianNB()
    gnb.fit(X_train.drop(columns=['gname_num']).values, X_train['gname_num'])
    y_pred = gnb.predict(X_test.drop(columns=['gname_num']).values)
    accuracy = accu(X_test['gname_num'], y_pred)
    print(accuracy)
        # Assumption: the snippet was truncated here; the call is presumed to be
        # train_on_batch on a single sample expanded to a batch dimension.
        output = model.train_on_batch(np.expand_dims(X_train_sample, axis=0),
                                      np.expand_dims(y_train_sample, axis=0))
        ls += output[0]
        ac += output[1]
    ls = ls / X_train.shape[0]
    ac = ac / X_train.shape[0]
    print('epoch', ep, '/', epoch, ', sample', id, '/', len(train_idx), ',',
          model.metrics_names[0], ls, ',', model.metrics_names[1], ac)
    model.reset_states()
    id = id + 1
model.save('../results/model.h5')

#%% validation on training sequence
X_validation = np.reshape(X[sample_idx][0], (1, len(X[sample_idx][0]), 1))
pdt = model.predict_classes(X_validation)
accuracy = accu(pdt[:, :, 0].T, X[sample_idx][1])
print('training accuracy is', accuracy)

#%% validation result plot
plt.figure(figsize=(12, 3))
plt.subplot(121)
plt.plot(pdt[:, :, 0].T)
plt.title('prediction')
plt.subplot(122)
plt.plot(X[sample_idx][1])
plt.title('truth')

plt.figure(figsize=(12, 3))
plt.subplot(121)
plt.plot(pdt[:, :, 0].T)
plt.title('prediction')
plt.xlim([8000, len(pdt[:, :, 0].T)])
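# Note: Sequential.predict_classes() was removed in recent Keras/TensorFlow
# releases. A minimal compatibility sketch (an assumption about the model's
# per-timestep probability output, not part of the original script):
import numpy as np

def predict_classes_compat(model, x):
    # model.predict returns class probabilities of shape (batch, timesteps, n_classes);
    # the argmax over the last axis recovers integer class labels per timestep.
    proba = model.predict(x)
    return np.argmax(proba, axis=-1)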
    y_train = label[:X_train_size * recurr]
    y_train = keras.utils.to_categorical(y_train, 2)
    y_train = np.reshape(y_train, (X_train_size, recurr, 2), order='C')
    # X_train = np.expand_dims(np.expand_dims(seq, axis=0), axis=2)
    # y_train = keras.utils.to_categorical(label, 2)
    # y_train = np.expand_dims(y_train, axis=0)
    loss = model.train_on_batch(X_train, y_train)
    print('epoch #', ep, 'processing #', id,
          model.metrics_names[0], loss[0],
          model.metrics_names[1], loss[1])
    if 0:
        X_validation = np.reshape(seq, (1, len(seq), 1))
        pdt_validation = model.predict_classes(X_validation)
        ac_validation = accu(pdt_validation.T, label)
        print('epoch #', ep, 'processing #', id,
              'validation accuracy =', ac_validation)
    id = id + 1
model.save('../results/model.h5')

#%% validation on training set
ac = 0
id = 0
percent = 0.5
validation_idx = train_idx[:int(len(train_idx) * percent)]
truth = []
result = []
for ind in validation_idx:
    seq_validation = X[ind][0]
              metrics=['accuracy'])  # tail of a truncated model.compile(...) call

#%% prepare training data
train_data = []
train_label = []
recurr = 200
cat = 3
for i in range(3, 4):  # adjust training images here
    XX, yy = utils.reshapeData(X[i], recurr, None)  # adjust image size here
    X_train, y_train = utils.label_to_cat(XX, yy, cat)
    train_data.append(X_train)
    train_label.append(y_train)

#%%
epoch = 50
for ep in range(epoch):
    for i in range(len(train_data)):
        for j in range(train_data[i].shape[0]):
            loss = model.train_on_batch(train_data[i][None, j, :, :],
                                        train_label[i][None, j, :, :])
            print('epoch #', ep, 'processing #', i,
                  model.metrics_names[0], loss[0],
                  model.metrics_names[1], loss[1])
        model.reset_states()
model.save(result_path)

#%% validation on training sequence
seq = X[2][0]
label = X[2][1]
X_validation = np.reshape(seq, (1, len(seq), 1))
pdt = model.predict_classes(X_validation)
plt.plot(pdt.T)
plt.plot(label)
plt.show()
accuracy = accu(pdt.T, label)
print('training accuracy is', accuracy)
def accuracy(x_test, y_test):  # compute the accuracy
    y_predict = predict(x_test)
    return accu(y_test, y_predict)  # call accuracy_score to compute the accuracy
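# A self-contained usage sketch for the helper above; the data, model and the
# `predict`/`accu` globals below are hypothetical stand-ins, not from the original code.
from sklearn.datasets import load_iris
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score as accu

X_demo, y_demo = load_iris(return_X_y=True)
clf_demo = GaussianNB().fit(X_demo, y_demo)
predict = clf_demo.predict  # stands in for the classifier's prediction function
print('demo accuracy =', accuracy(X_demo, y_demo))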
#########################################################
### your code goes here ###
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score as acc

# sweep the SVM penalty parameter C over several orders of magnitude
for i in range(1, 20):
    C = pow(10, i)
    clf = SVC(kernel="rbf", C=C)
    clf.fit(features_train, labels_train)
    pred = clf.predict(features_test)
    print("C:", C, "Accuracy:", acc(pred, labels_test))

# retrain with a fixed C
C = 10000
clf = SVC(kernel="rbf", C=C)
clf.fit(features_train, labels_train)
pred = clf.predict(features_test)
print("C:", C, "Accuracy:", acc(pred, labels_test))

from sklearn.tree import DecisionTreeClassifier as DTC
from sklearn.metrics import accuracy_score as accu

clf = DTC(min_samples_split=2)
clf.fit(features_train, labels_train)
pred = clf.predict(features_test)
acc = accu(pred, labels_test)
#########################################################
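# The sweep above evaluates C directly on the test set; a minimal alternative
# sketch using sklearn's GridSearchCV to pick C by cross-validation on the
# training set (the same features_train/labels_train variables are assumed):
from sklearn.model_selection import GridSearchCV

param_grid = {'C': [pow(10, i) for i in range(1, 20)]}
search = GridSearchCV(SVC(kernel="rbf"), param_grid, cv=3)
search.fit(features_train, labels_train)
print("best C:", search.best_params_['C'], "CV accuracy:", search.best_score_)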
    # X_train = np.expand_dims(np.expand_dims(seq, axis=0), axis=2)
    # y_train = keras.utils.to_categorical(label, 2)
    # y_train = np.expand_dims(y_train, axis=0)
    # model.train_on_batch(X_train, y_train)
    id = id + 1
model.save('../results/model.h5')

#%% validation on training set
ac = 0
id = 0
percent = 0.1
validation_idx = train_idx[:int(len(train_idx) * percent)]
truth = []
result = []
for ind in validation_idx:
    seq_validation = X[ind][0]
    label_validation = X[ind][1]
    truth.append(label_validation)
    X_validation = np.reshape(seq_validation, (1, len(seq_validation), 1))
    pdt = model.predict_classes(X_validation)
    result.append(pdt)
    accuracy = accu(pdt.T, label_validation)
    print('accuracy for #', id, 'sample =', accuracy,
          'sample length =', len(seq_validation))
    ac = ac + accuracy
    id = id + 1

ac_avg = ac / len(validation_idx)
print('****************************************************************')
print('average validation accuracy =', ac_avg)
f = open('../../Data/data', 'r')
X = js.load(f)
f.close()
data_size = len(X)

#%%
data_idx = np.load('../../Data/idx.npy')
test_idx = data_idx[1]
train_idx = data_idx[0]

#%%
model = ld('../../results/model.h5')

#%%
ac = 0
id = 0
percent = 0.1
test_idx = test_idx[:int(len(test_idx) * percent)]
truth = []
result = []
for ind in test_idx:
    seq_test = X[ind][0]
    label_test = X[ind][1]
    truth.append(label_test)
    X_test = np.reshape(seq_test, (1, len(seq_test), 1))
    pdt = model.predict_classes(X_test)
    result.append(pdt)
    accuracy = accu(pdt.T, label_test)
    print('accuracy for #', id, 'sample =', accuracy)
    ac = ac + accuracy
    id = id + 1

ac_avg = ac / len(test_idx)
print('average test accuracy =', ac_avg)
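# The average above weights every sequence equally regardless of length; a minimal
# sketch of a length-weighted overall accuracy, assuming `result` holds the
# per-sequence predictions and `truth` the matching labels collected in the loop:
total_correct = sum(int(np.sum(np.asarray(p).ravel() == np.asarray(t).ravel()))
                    for p, t in zip(result, truth))
total_steps = sum(len(np.asarray(t).ravel()) for t in truth)
print('length-weighted test accuracy =', total_correct / total_steps)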