def deep_belief_network_prediction(
        self,
        learning_rate,
        training_iterations,
        testing_iterations=10,
        hidden_layer_sizes_array=None,
):
    """Repeatedly train a supervised DBN and return the best test accuracy.

    For each of ``testing_iterations`` repetitions the training/testing split is
    regenerated from ``self.csv_data``, a fresh ``SupervisedDBNClassification``
    is trained, and its accuracy on the held-out data is recorded.

    Args:
        learning_rate: Backprop learning rate; the RBM pre-training rate is
            set to half of it.
        training_iterations: Number of backprop iterations; RBM epochs are
            one tenth of this (integer division).
        testing_iterations: How many independent train/evaluate runs to do.
        hidden_layer_sizes_array: Hidden layer sizes for the DBN; defaults
            to ``[10, 10]`` when omitted.

    Returns:
        The maximum accuracy observed across all repetitions.
    """
    # None-sentinel instead of a mutable default: a literal list default
    # would be shared across every call of this method.
    if hidden_layer_sizes_array is None:
        hidden_layer_sizes_array = [10, 10]

    accuracy_list = []
    for _ in range(testing_iterations):
        # Regenerate the train/test split so each repetition is independent.
        self.prepare_training_data_from_csv_data(self.csv_data)
        classifier = SupervisedDBNClassification(
            hidden_layers_structure=hidden_layer_sizes_array,
            learning_rate_rbm=learning_rate / 2,
            learning_rate=learning_rate,
            n_epochs_rbm=int(training_iterations / 10),
            n_iter_backprop=training_iterations,
            batch_size=256,
            activation_function="relu",
            dropout_p=0.2,
        )
        classifier.fit(self.x_data_training, self.y_data_training)
        y_data_prediction = classifier.predict(self.x_data_testing)
        accuracy_list.append(
            accuracy_score(self.y_data_testing, y_data_prediction)
        )
    # Best run wins; raises ValueError if testing_iterations is 0, matching
    # the original behavior.
    return max(accuracy_list)
# Load both resolutions of the Caltech-101 silhouettes data set
# (project-local helper; returns class names plus train/test arrays).
classnames_16, train_data_16, train_labels_16, test_data_16, test_labels_16 = load_dataset(
    'data/caltech101_silhouettes_16_split1.mat')
classnames_28, train_data_28, train_labels_28, test_data_28, test_labels_28 = load_dataset(
    'data/caltech101_silhouettes_28_split1.mat')
# Narrow both data sets with the project-local filter (semantics defined
# elsewhere in the project — presumably keeps a subset of classes).
classnames_16, train_data_16, train_labels_16, test_data_16, test_labels_16 = filter_dataset(
    classnames_16, train_data_16, train_labels_16, test_data_16, test_labels_16)
classnames_28, train_data_28, train_labels_28, test_data_28, test_labels_28 = filter_dataset(
    classnames_28, train_data_28, train_labels_28, test_data_28, test_labels_28)
# 16x16 variant kept for reference but disabled; only the 28x28 data is used.
# classifier.fit(np.array(train_data_16).astype('int64'), train_labels_16)
# Y_pred = classifier.predict(np.array(test_data_16).astype('int64'))
# print('Done.\nAccuracy: %f' % accuracy_score(test_labels_16, Y_pred))
# Train on the 28x28 silhouettes and report accuracy.  `classifier` is
# constructed earlier in the file, outside this chunk.
classifier.fit(np.array(train_data_28).astype('int64'), train_labels_28)
Y_pred = classifier.predict(np.array(test_data_28).astype('int64'))
print('Done.\nAccuracy: %f' % accuracy_score(test_labels_28, Y_pred))
# Confusion matrix: rows = true class, columns = predicted class.
data = np.zeros([len(classnames_28), len(classnames_28)])
# Shift predicted labels down by 34 — apparently the filtered label range
# starts at 35; TODO confirm against filter_dataset.
# NOTE(review): range(0, len(Y_pred) - 1) skips the LAST prediction — this
# looks like an off-by-one; verify whether the final sample should count.
for i in range(0, len(Y_pred) - 1):
    Y_pred[i] = Y_pred[i] - 34
# Earlier in-place shift of the true labels, now disabled; the shift is
# applied inline in the indexing below instead.
# for i in range(0,len(test_labels_28)-1):
#     print(test_labels_28[i]-34)
#     test_labels_28[i] = test_labels_28[i]-34
# Tally (true, predicted) pairs; both axes use the same -34 offset plus the
# usual 1-based-to-0-based -1.  Same off-by-one note as above applies.
for i in range(0, len(Y_pred) - 1):
    data[test_labels_28[i] - 34 - 1, Y_pred[i] - 1] = data[test_labels_28[i] - 34 - 1, Y_pred[i] - 1] + 1
print(data)
# Dump the confusion matrix to CSV ("wyniki" = "results").  The writing
# loop appears to continue past this chunk; only the header row is visible.
with open('wyniki.csv', mode='w') as wyniki:
    wyniki_writer = csv.writer(wyniki, dialect='excel')
    csvRow = ['']
# Hold out 30% of the encoded features for testing.
# BUG FIX: the original passed random_state=seed(2017); random.seed()
# returns None, so the split was effectively unseeded despite the clear
# intent to make it reproducible.  Pass the integer seed directly.
X_train, X_test, Y_train, Y_test = train_test_split(
    encoded_out, Y, train_size=0.7, random_state=2017)

# BUG FIX: astype() returns a new array — the original discarded the
# result, leaving Y_train's dtype unchanged.  Rebind it.
Y_train = Y_train.astype(np.float32)

# `classifier` is constructed earlier in the file, outside this chunk.
classifier.fit(X_train, Y_train)
Y_pred = classifier.predict(X_test)
print('Accuracy for Deep Belief Network: %f' % accuracy_score(Y_test, Y_pred))
print(classification_report(Y_test, classifier.predict(X_test)))
'''
Step 5: Save the output of the Deep Belief Network to a file
'''
# np.savetxt('dbn_output.csv', Y_pred, delimiter=',')
# Data scaling: pixel intensities are 0-16, map to [0, 1] floats.
X = (X / 16).astype(np.float32)

# Splitting data: 80% train / 20% test, fixed seed for reproducibility.
X_train, X_test, Y_train, Y_test = train_test_split(
    X, Y, test_size=0.2, random_state=0)

# Training: three wide ReLU layers with dropout; RBM pre-training then
# backprop fine-tuning.
classifier = SupervisedDBNClassification(
    hidden_layers_structure=[1000, 1000, 1000],
    learning_rate_rbm=0.05,
    learning_rate=0.1,
    n_epochs_rbm=15,
    n_iter_backprop=50,
    batch_size=32,
    activation_function='relu',
    dropout_p=0.2)
classifier.fit(X_train, Y_train)

# Save the model, then restore it to demonstrate round-tripping.
classifier.save('model.pkl')
classifier = SupervisedDBNClassification.load('model.pkl')

# Test the restored model on the held-out split.
Y_pred = classifier.predict(X_test)
# BUG FIX: the original used a Python 2 print statement
# (print 'Done...'), a SyntaxError under Python 3 and inconsistent with
# the print() calls used everywhere else in this file.
print('Done.\nAccuracy: %f' % accuracy_score(Y_test, Y_pred))
    # Tail of a regressor/classifier constructor whose opening line sits
    # above this chunk; these keyword arguments configure RBM pre-training
    # and backprop fine-tuning.
    learning_rate_rbm=0.05,
    learning_rate=0.1,
    n_epochs_rbm=1000,
    n_iter_backprop=100,
    batch_size=32,
    activation_function='relu',
    dropout_p=0.2)
# Build a toy regression set: degree-5 polynomial features of the integers
# 0..99, target sin(x).
X_train = []
Y_train = []
import math
for _ in range(100):
    X_train.append([1, _, _ * _, _ * _ * _, _ * _ * _ * _, _ * _ * _ * _ * _])
    Y_train.append(math.sin(_))
X_train = np.array(X_train)
Y_train = np.array(Y_train)
# Scale each feature column into [0, 1] before training.
from sklearn.preprocessing import MinMaxScaler
sc_X = MinMaxScaler(copy=True, feature_range=(0, 1))
sc_X.fit(X_train)
X_train = sc_X.transform(X_train)
# Fit and evaluate on the SAME data (training accuracy only — there is no
# held-out set here).
classifier.fit(X_train, Y_train)
Y_pred = np.array(classifier.predict(X_train))
# Mean absolute relative error, accumulated sample by sample.
error = 0
for i in range(len(Y_train)):
    try:
        error += abs(Y_train[i] - Y_pred[i]) / abs(Y_train[i])
    except Exception:
        # NOTE(review): this silently skips any failing term — in practice
        # the division by zero at the first sample (sin(0) == 0).  With
        # numpy operands that case yields inf/nan rather than raising, so
        # the guard may not fire at all — verify the intended behavior.
        error += 0
error = error / len(Y_train)
# Report 1 - mean relative error as an accuracy-like score.
print(1 - error)