# Prepend a bias column of -1s to every sample before splitting.
sample_count = dataset.shape[0]
dataset = np.c_[-1 * np.ones(sample_count), dataset]

# Accumulate the regression errors of each independent realization.
errors = {'mse': [], 'rmse': []}
for realization in range(20):
    print("realization %d" % realization)
    train_X, train_y, test_X, test_y = Classifier.train_test_split(dataset)
    train_X = np.array(train_X, dtype=float)
    test_X = np.array(test_X, dtype=float)

    model = ELM(no_of_classes, no_of_attributes)
    model.train(train_X, train_y)
    predictions = model.predict(test_X)
    mse, rmse = model.evaluate(test_y, predictions)
    errors['mse'].append(mse)
    errors['rmse'].append(rmse)

# Report per-realization errors plus their mean and spread.
print('mean square error: {}'.format(errors['mse']))
print('root mean square error: {}'.format(errors['rmse']))
print('mean mse: {}'.format(np.mean(errors['mse'])))
print('mean rmse: {}'.format(np.mean(errors['rmse'])))
print('std mse: {}'.format(np.std(errors['mse'])))
print('std rmse: {}'.format(np.std(errors['rmse'])))
# The label column is the last one; everything before it is an attribute.
no_of_attributes = dataset.shape[1] - 1
no_of_classes = len(dataset[0, no_of_attributes])

# Prepend a bias column of -1s to every sample before splitting.
dataset = np.c_[-1 * np.ones(dataset.shape[0]), dataset]

hit_rates = []
for realization in range(20):
    print("realization %d" % realization)
    train_X, train_y, test_X, test_y = Classifier.train_test_split(dataset)
    train_X = np.array(train_X, dtype=float)
    test_X = np.array(test_X, dtype=float)

    # Model selection chooses the hidden-layer width before the final fit.
    hidden_units = ELM.model_training(no_of_classes, no_of_attributes, train_X, train_y)
    classifier = ELM(no_of_classes, no_of_attributes, hidden_units)
    classifier.train(train_X, train_y)
    predictions = classifier.predict(test_X)
    hit_rates.append(classifier.evaluate(test_y, predictions))
    print(classifier.confusion_matrix(test_y, predictions))

# Report per-realization accuracy plus its mean and spread.
print('hit rates: {}'.format(hit_rates))
print('accuracy: {}'.format(np.mean(hit_rates)))
print('std: {}'.format(np.std(hit_rates)))
def main(args):
    """Train an ELM on MNIST, report metrics, then save and reload it."""
    n_classes = 10

    # ---- Load and preprocess MNIST ----
    (x_train, t_train), (x_test, t_test) = mnist.load_data()

    # Scale pixels to [0, 1] and flatten 28x28 images to 784-vectors.
    x_train = (x_train.astype(np.float32) / 255.).reshape(-1, 28**2)
    x_test = (x_test.astype(np.float32) / 255.).reshape(-1, 28**2)
    t_train = to_categorical(t_train, n_classes).astype(np.float32)
    t_test = to_categorical(t_test, n_classes).astype(np.float32)

    # ---- Build and train the ELM ----
    model = ELM(
        n_input_nodes=28**2,
        n_hidden_nodes=args.n_hidden_nodes,
        n_output_nodes=n_classes,
        loss=args.loss,
        activation=args.activation,
        name='elm',
    )
    model.fit(x_train, t_train)

    # ---- Evaluate on the training and validation splits ----
    for tag, xs, ts in (('train', x_train, t_train), ('val', x_test, t_test)):
        loss, acc = model.evaluate(xs, ts, metrics=['loss', 'accuracy'])
        print('%s_loss: %f' % (tag, loss))
        print('%s_acc: %f' % (tag, acc))

    # ---- Show predictions for the first ten test samples ----
    probs = softmax(model.predict(x_test[:10]))
    targets = t_test[:10]
    for i, (p, t_row) in enumerate(zip(probs, targets)):
        print('---------- prediction %d ----------' % (i + 1))
        pred_class = np.argmax(p)
        print('prediction:')
        print('\tclass: %d, probability: %f' % (pred_class, p[pred_class]))
        print('\tclass (true): %d' % np.argmax(t_row))

    # ---- Persist the model and reload it from disk ----
    print('saving model...')
    model.save('model.h5')
    del model

    print('loading model...')
    model = load_model('model.h5')