def elm_load_predict(model, X_test, multiTasks, unweighted, stl,
                     dictForLabelsTest, hidden_num=50, main_task_id=-1,
                     elm_load_path='./model/elm.ckpt', dataset='test'):
    sess = tf.Session()

    print('elm high level feature generating')
    pred_test = model.predict([X_test])
    feat_test = high_level_feature_mtl(pred_test, stl=stl,
                                       main_task_id=main_task_id)
    print('high level feature dim: ', feat_test.shape[1])

    scores = []
    for task, classes, idx in multiTasks:
        elm = ELM(sess, feat_test.shape[0], feat_test.shape[1], hidden_num,
                  dictForLabelsTest[task].shape[1])
        print('elm loading')
        elm.load(elm_load_path)

        print('elm testing')
        # bind labels before the branch: the weighted path below uses them too
        labels = dictForLabelsTest[task]
        if unweighted:
            preds = elm.test(feat_test)
            scores.append(unweighted_recall(preds, labels, task, dataset))
        else:
            acc = elm.test(feat_test, labels)
            scores.append(acc)
    return scores
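# --- Hedged sketch (not part of this file): unweighted_recall is referenced
# above but defined elsewhere in the project. Unweighted average recall (UAR)
# is the mean of the per-class recalls, which sklearn's macro-averaged recall
# computes directly; the print format here is illustrative only. ---
from sklearn.metrics import recall_score

def unweighted_recall(preds, labels, task, dataset):
    y_true = np.argmax(labels, axis=1)  # one-hot labels -> class ids
    y_pred = np.argmax(preds, axis=1)   # raw ELM outputs -> class ids
    uar = recall_score(y_true, y_pred, average='macro')
    print('uar for task %s on %s: %f' % (task, dataset, uar))
    return uar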
        i, axis=0)
    return train_input, train_target, test_input, test_target


# data preparation
my_data = genfromtxt('iris.csv', delimiter=',')
x_inp = my_data[:, 0:-1]
t_inp = my_data[:, -1]
train_input, train_target, test_input, test_target = split_data(
    x_inp, t_inp, 0.6, 0.4)

e = ELM(50)
e.train(train_input, train_target)
e.test(test_input, test_target)
print(e.train_acc)
print(e.test_acc)

"""
# start for article on https://fennialesmana.com/extreme-learning-machine/
# 1. Prepare the input data (x) and target data (t)
x = np.array([[-1, -5, 5, 5],
              [2, -4, 2, 3]])
t = np.array([[1, 0, 0, 0],
              [0, 1, 0, 0],
              [0, 0, 1, 1]])

# 2. Prepare the number of hidden nodes, input weight (w), and bias (b) randomly
w = np.array([[0.5, 0.2],
              [0.7, -0.4],
              [-0.6, 0.3]])
b = np.array([[0.6],
              [0.7],
              [0.4]])

# 3. Calculate the output of hidden layer (H)
H = np.dot(w, x) + b
H = (1 / (1 + np.exp(H * -1))).transpose()  # sigmoid, then (samples x hidden)

# 4. Calculate the weight of hidden to output layer using the zero-error equation
H_inv = np.linalg.pinv(H)
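# Hedged continuation (the original snippet stops at the pseudo-inverse): the
# standard ELM zero-error equation H . beta = T yields beta = pinv(H) . T.
beta = np.dot(H_inv, t.transpose())  # hidden-to-output weights, shape (3, 3)

# 5. Calculate the network output; with zero training error it reproduces t
y = np.dot(H, beta).transpose()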
model12.train(out1, out1, alpha, batch_size, max_iter)

# stacking the pretrained autoencoders into a mirrored encoder-decoder;
# four weight matrices are initialized below, so the stack needs the decoder
# layers as well: n -> 42 -> 24 -> 42 -> n
model = MLP([n, 42, 24, 42, n], ['sigmoid', 'sigmoid', 'sigmoid', 'sigmoid'])

# initializing pretrained weights (decoder weights tied to encoder transposes)
model.W_list[0] = model11.W_list[0]
model.W_list[-1] = model11.W_list[0].T
model.W_list[1] = model12.W_list[0]
model.W_list[-2] = model12.W_list[0].T

# finetuning the stacked autoencoder
# print("training stacked autoencoder")
# model.train(X_train, X_train, alpha, batch_size, 50)

print("\nELM part of the neural network\n")
# collect the 24-dim bottleneck activations (model.A[2]) as ELM inputs
elm_X_train = np.ndarray((X_train.shape[0], model.A[2].shape[0]))
elm_X_test = np.ndarray((X_test.shape[0], model.A[2].shape[0]))
for i in range(X_train.shape[0]):
    model.forward_prop(X_train[i])
    elm_X_train[i] = model.A[2].reshape(-1, )
for i in range(X_test.shape[0]):
    model.forward_prop(X_test[i])
    elm_X_test[i] = model.A[2].reshape(-1, )

elm_model = ELM(128, elm_X_train, y_train, 'tanh')
elm_model.test(elm_X_test, y_test)
elm_model.test(elm_X_train, y_train)
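# --- Hedged sketch (not the project's ELM class, whose internals are not
# shown here): the closed-form fit an ELM such as ELM(128, elm_X_train,
# y_train, 'tanh') is assumed to perform. Input weights are random and stay
# fixed; only the output weights are solved for via a pseudo-inverse. ---
def elm_fit_predict(X_tr, Y_tr, X_te, hidden_num=128, seed=0):
    rng = np.random.RandomState(seed)
    W = rng.randn(X_tr.shape[1], hidden_num)  # fixed random input weights
    b = rng.randn(hidden_num)                 # fixed random biases
    H = np.tanh(X_tr.dot(W) + b)              # hidden-layer activations
    beta = np.linalg.pinv(H).dot(Y_tr)        # zero-error output weights
    return np.tanh(X_te.dot(W) + b).dot(beta)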
def elm_predict(model, X_train, X_test, X_valid, multiTasks, unweighted, stl,
                dictForLabelsTrain, dictForLabelsTest, dictForLabelsValid,
                hidden_num=50, main_task_id=-1,
                elm_save_path='./model/elm.ckpt', dataset='test'):
    sess = tf.Session()

    print('elm high level feature generating')
    pred_train = model.predict([X_train])
    feat_train = high_level_feature_mtl(pred_train, stl=stl,
                                        main_task_id=main_task_id)
    print('high level feature dim for train: ', feat_train.shape[1])

    # add total features
    add_high_feature(feat_train, multiTasks, dictForLabelsTrain,
                     total_high_pred_train)

    pred_test = model.predict([X_test])
    feat_test = high_level_feature_mtl(pred_test, stl=stl,
                                       main_task_id=main_task_id)
    # add total features
    add_high_feature(feat_test, multiTasks, dictForLabelsTest,
                     total_high_pred_test)
    print('high level feature dim for test: ', feat_test.shape[1])

    if len(X_valid) != 0:
        pred_valid = model.predict([X_valid])
        feat_valid = high_level_feature_mtl(pred_valid, stl=stl,
                                            main_task_id=main_task_id)

    scores = []
    for task, classes, idx in multiTasks:
        elm = ELM(sess, feat_train.shape[0], feat_train.shape[1], hidden_num,
                  dictForLabelsTrain[task].shape[1], task=str(task))

        print('elm training')
        elm.feed(feat_train, dictForLabelsTrain[task])
        elm.save(elm_save_path + "." + str(task) + ".elm.ckpt")

        print('elm testing')
        labels = dictForLabelsTest[task]
        if unweighted:
            preds = elm.test(feat_test)
            scores.append(unweighted_recall(preds, labels, task, dataset))
        else:
            acc = elm.test(feat_test, labels)
            scores.append(acc)

        if len(X_valid) != 0:
            print('elm validating')
            labels = dictForLabelsValid[task]
            if unweighted:
                preds = elm.test(feat_valid)
                scores.append(unweighted_recall(preds, labels, task, dataset))
            else:
                acc = elm.test(feat_valid, labels)
                scores.append(acc)
    return scores
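# --- Hedged usage sketch (illustrative only; the task tuple, data splits and
# label dicts below are assumptions, not part of the original file): ---
#
#   multiTasks = [('emotion', 4, 0)]  # (task name, #classes, output idx)
#   scores = elm_predict(model, X_train, X_test, X_valid, multiTasks,
#                        unweighted=True, stl=False,
#                        dictForLabelsTrain=labels_train,
#                        dictForLabelsTest=labels_test,
#                        dictForLabelsValid=labels_valid,
#                        hidden_num=50,
#                        elm_save_path='./model/elm.ckpt')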
        portion_over_threshold = portion_over_threshold / max(
            training_pred_emotion_sequence.shape[1], 1)
        high_lvl_features[index] = np.concatenate(
            (max_, min_, mean, portion_over_threshold), axis=1)
        high_lvl_labels[index] = labels[2]
    return high_lvl_features, high_lvl_labels


training_high_lvl_features, training_labels = extract_high_level_features(
    training_sequence)
test_high_lvl_features, testing_labels = extract_high_level_features(
    test_sequence)

sess = tf.Session()
elm = ELM(sess, *training_high_lvl_features.shape, hidden_num=50,
          output_len=4)
elm.feed(training_high_lvl_features, training_labels)
predictions = elm.test(test_high_lvl_features)

predictions = np.argmax(predictions, axis=1)
testing_labels = np.argmax(testing_labels, axis=1)
cm = confusion_matrix(testing_labels, predictions,
                      labels=np.array([0, 1, 2, 3]))

with open(args.save_dir + f'/{args.prefix}_elm_confusion_matrix.txt',
          mode='w') as f:
    f.write(str(cm))
with open(args.save_dir + f'/{args.prefix}_elm_confusion_matrix.pkl',
          mode='wb') as f:
    pickle.dump(cm, f, protocol=4)
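# --- Hedged follow-up (not in the original): per-class recall and overall
# accuracy can be read directly off the saved confusion matrix. ---
per_class_recall = cm.diagonal() / np.maximum(cm.sum(axis=1), 1)
accuracy = cm.diagonal().sum() / max(cm.sum(), 1)
print('per-class recall:', per_class_recall)
print('accuracy:', accuracy)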