def test():
    # TODO: Test later
    print('==> Testing network..')

    # Make predictions on full X_test mels
    y_predicted = accuracy.predict_class_all(create_segmented_mels(X_test), a_net)

    # Print statistics
    print(np.sum(accuracy.confusion_matrix(y_predicted, y_test), axis=1))
    print(accuracy.confusion_matrix(y_predicted, y_test))
    print(accuracy.get_accuracy(y_predicted, y_test))
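# NOT part of the original code: a minimal sketch of how a segment-level majority
# vote (roughly what accuracy.predict_class_all is used for above) could be written,
# assuming a Keras-style model whose predict() returns per-segment class
# probabilities. The name predict_class_by_majority_vote and the input layout
# (one array of segments per test sample) are assumptions.
import numpy as np

def predict_class_by_majority_vote(segmented_samples, model):
    """Predict one label per sample by voting over its segments (sketch)."""
    predictions = []
    for segments in segmented_samples:                   # one array of segments per sample
        probs = model.predict(np.array(segments))        # (n_segments, n_classes)
        votes = np.argmax(probs, axis=1)                 # per-segment class ids
        predictions.append(np.bincount(votes).argmax())  # most frequent class wins
    return predictions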
X_test = pool.map(get_wav, X_test)

# Convert to MFCC
if DEBUG:
    print('converting to mfcc')
X_train = pool.map(to_mfcc, X_train)
X_test = pool.map(to_mfcc, X_test)

# Create segments from MFCCs
X_train, y_train = make_segments(X_train, y_train)
X_validation, y_validation = make_segments(X_test, y_test)

# Randomize training segments
X_train, _, y_train, _ = train_test_split(X_train, y_train, test_size=0)

# Train model
model = train_model(np.array(X_train), np.array(y_train),
                    np.array(X_validation), np.array(y_validation))

# Make predictions on full X_test MFCCs
y_predicted = accuracy.predict_class_all(create_segmented_mfccs(X_test), model)

# Print statistics
print(train_count)
print(test_count)
print(acc_to_beat)
print(np.sum(accuracy.confusion_matrix(y_predicted, y_test), axis=1))
print(accuracy.confusion_matrix(y_predicted, y_test))
print(accuracy.get_accuracy(y_predicted, y_test))

# Save model
save_model(model, model_filename)
if DEBUG:
    print('Converting to MFCC....')
X_train = pool.map(to_mfcc, X_train)
X_test = pool.map(to_mfcc, X_test)

# Create segments from MFCCs
X_train, y_train = make_segments(X_train, y_train)
X_validation, y_validation = make_segments(X_test, y_test)

# Randomize training segments
X_train, _, y_train, _ = train_test_split(X_train, y_train, test_size=0.2)

# Train model
model = train_model(np.array(X_train), np.array(y_train),
                    np.array(X_validation), np.array(y_validation))

# Make predictions on full X_test MFCCs
y_predicted = accuracy.predict_class_all(create_segmented_mfccs(X_test), model)

# Save model
save_model(model, model_filename)

# Print statistics
print('Training samples:', train_count)
print('Testing samples:', test_count)
print('Accuracy to beat:', acc_to_beat)
print('Confusion matrix of total samples:\n',
      np.sum(accuracy.confusion_matrix(y_predicted, y_test), axis=1))
print('Confusion matrix:\n', accuracy.confusion_matrix(y_predicted, y_test))
print('Accuracy:', accuracy.get_accuracy(y_predicted, y_test))
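# NOT from the original sources: a minimal sketch of what a make_segments-style
# helper could look like, assuming each MFCC is a 2-D (n_coeffs, n_frames) array
# that is chopped into fixed-width, non-overlapping windows. The window width
# (COL_SIZE) and the exact segmentation strategy are assumptions.
import numpy as np

COL_SIZE = 13  # hypothetical number of frames per segment

def make_segments_sketch(mfccs, labels, col_size=COL_SIZE):
    """Split each MFCC into fixed-width segments, repeating its label per segment."""
    segments, segment_labels = [], []
    for mfcc, label in zip(mfccs, labels):
        for start in range(0, mfcc.shape[1] - col_size + 1, col_size):
            segments.append(mfcc[:, start:start + col_size])
            segment_labels.append(label)
    return segments, segment_labels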
zs = var_auto.encode(data)

model = KMeans(n_clusters=n_clusters, random_state=42, n_init=20)
# model = SpectralClustering(n_clusters=n_clusters, eigen_solver='arpack', affinity="nearest_neighbors")
# model = DBSCAN(eps=.2)
results = model.fit_predict(zs)
# joblib.dump(model, "F:/Fer-novo/Diplomski rad/modeli/saves/kmeans.pkl")  # optional: store the model for future use

# t-SNE on the train-data clusters
tsne = TSNE(n_components=2, random_state=42)
tsne_results = tsne.fit_transform(zs[-1300:])
P.scatterTsne(tsne_results, results[-1300:])

# t-SNE for the test data
test_data_all = np.array([]).reshape(0, num)
for test_data in test_set_list:
    test_data_all = np.append(test_data_all, test_data, axis=0)
padding = np.zeros(1000 * num).reshape(1000, num)
test_data_all = np.append(test_data_all, padding.reshape(-1, num), axis=0)
zs_test = var_auto.encode(test_data_all)[:(test_set_size * len(test_set_list))]
tsne_results_test = tsne.fit_transform(zs_test)
P.scatterTsne(tsne_results_test, model.predict(zs_test))

# Save results
# D.saveResults(save_folder, results, original_data)
print(A.confusion_matrix(test_set_list, var_auto, model, n_clusters, num=num, test_set_size=test_set_size))
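# NOT part of the original code: a minimal sketch of one common way to score a
# clustering (such as the KMeans model above) against known class labels, by
# matching cluster ids to classes with the Hungarian algorithm. The names
# `true_labels` and `cluster_ids` are illustrative; the project's
# A.confusion_matrix may compute its result differently.
from scipy.optimize import linear_sum_assignment
from sklearn.metrics import confusion_matrix

def clustering_accuracy_sketch(true_labels, cluster_ids):
    """Best-case accuracy after optimally mapping cluster ids onto class labels."""
    cm = confusion_matrix(true_labels, cluster_ids)
    row_ind, col_ind = linear_sum_assignment(-cm)   # maximize the matched counts
    return cm[row_ind, col_ind].sum() / cm.sum()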
# X_validation, y_validation = make_segments(X_test, y_test)
X_train, X_validation, y_train, y_validation = train_test_split(
    X_train, y_train, test_size=0.15)
# print("Validation shape: {}".format(X_validation))

# Randomize training segments
X_train, _, y_train, _ = train_test_split(X_train, y_train, test_size=0)

if network == 'cnn':
    # Train model
    model = train_model(np.array(X_train), np.array(y_train),
                        np.array(X_validation), np.array(y_validation), EPOCHS)

    # Make predictions on full X_test MFCCs
    y_predicted = accuracy.predict_class_all(
        create_segmented_mfccs(X_test), model, 'cnn')

    class_sum = np.sum(accuracy.confusion_matrix(y_predicted, y_test), axis=1)
    confusion_matrix = accuracy.confusion_matrix(y_predicted, y_test)
    print(confusion_matrix)
    print(accuracy.get_accuracy(y_predicted, y_test))
    show_confusion_matrix(confusion_matrix, plt,
                          ['mandarin', 'arabic', 'english'], 'cnn')

if network == 'lstm':
    # Train LSTM model
    lstm = train_lstm_model(np.array(X_train), np.array(y_train),
                            np.array(X_validation), np.array(y_validation), EPOCHS)
    y_predicted_lstm = accuracy.predict_class_all(
        create_segmented_mfccs(X_test), lstm, 'lstm')
    print np.sum(accuracy.confusion_matrix(y_predicted_lstm, y_test),
# print(trainer)

# Train model
model = train_model(np.array(X_train), np.array(y_train),
                    np.array(X_validation), np.array(y_validation))
# model = load_model("model.h5")

# predicted = model.predict(k.create_segmented_mfccs(X_test))
# for i in predicted:
#     print(i)

# Make predictions on full X_test MFCCs
y_predicted = accuracy.predict_class_all(
    k.create_segmented_mfccs(X_test), model)
# print(y_predicted)
# for i in y_predicted:
#     print(i)

# Print statistics
print('Training samples:', train_count)
print('Testing samples:', test_count)
print('Accuracy to beat:', acc_to_beat)
print('Confusion matrix of total samples:\n',
      np.sum(accuracy.confusion_matrix(y_predicted, y_test), axis=1))
print('Confusion matrix:\n', accuracy.confusion_matrix(y_predicted, y_test))
print('Accuracy:', accuracy.get_accuracy(y_predicted, y_test))

results.append(accuracy.confusion_matrix(y_predicted, y_test))
acc.append(accuracy.get_accuracy(y_predicted, y_test))

# Save model
print(results)
print(acc)
save_model(model, model_filename)
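# NOT the project's save_model: a minimal sketch of how such a helper can be
# written for a Keras model, assuming `model_filename` is a path without an
# extension. The actual implementation may store architecture and weights
# differently.
def save_model_sketch(model, model_filename):
    """Persist a Keras model to a single HDF5 file (architecture + weights)."""
    model.save('{}.h5'.format(model_filename))

# A model saved this way can later be restored with keras.models.load_model,
# as the commented-out load_model("model.h5") call above suggests.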