# Example 1 (0)
def elm_predict(model,
                X_train,
                X_test,
                X_valid,
                multiTasks,
                unweighted,
                stl,
                dictForLabelsTrain,
                dictForLabelsTest,
                dictForLabelsValid,
                hidden_num=50,
                main_task_id=-1,
                elm_save_path='./model/elm.ckpt',
                dataset='test'):
    """Train and evaluate one ELM per task on high-level features.

    High-level features are produced by running `model.predict` on each
    split and post-processing the predictions with
    `high_level_feature_mtl`.  For every `(task, classes, idx)` entry in
    `multiTasks` an ELM is trained on the train features, saved to disk,
    and scored on the test split (and on the validation split when
    `X_valid` is non-empty).

    Args:
        model: trained base model exposing `predict`.
        X_train, X_test, X_valid: input splits; `X_valid` may be empty.
        multiTasks: iterable of `(task, classes, idx)` tuples.
        unweighted: if truthy, score with `unweighted_recall` on raw
            predictions instead of the ELM's built-in accuracy.
        stl: forwarded to `high_level_feature_mtl`.
        dictForLabelsTrain/Test/Valid: per-task label arrays.
        hidden_num: number of hidden units in each ELM.
        main_task_id: forwarded to `high_level_feature_mtl`.
        elm_save_path: prefix for per-task checkpoint files.
        dataset: tag forwarded to `unweighted_recall`.

    Returns:
        List of scores, one per task (two per task when a validation
        split is provided: test score followed by validation score).
    """
    sess = tf.Session()
    try:
        print('elm high level feature generating')
        pred_train = model.predict([X_train])
        feat_train = high_level_feature_mtl(pred_train,
                                            stl=stl,
                                            main_task_id=main_task_id)
        print('high level feature dim for train: ', feat_train.shape[1])

        # Accumulate features into the module-level containers.
        # NOTE(review): `total_high_pred_train`/`total_high_pred_test` are
        # not defined in this function — presumably module-level globals;
        # confirm they exist before calling.
        add_high_feature(feat_train, multiTasks, dictForLabelsTrain,
                         total_high_pred_train)

        pred_test = model.predict([X_test])
        feat_test = high_level_feature_mtl(pred_test,
                                           stl=stl,
                                           main_task_id=main_task_id)

        add_high_feature(feat_test, multiTasks, dictForLabelsTest,
                         total_high_pred_test)

        print('high level feature dim for test: ', feat_test.shape[1])

        # Validation features are only computed when a validation split
        # is supplied; the same guard protects their use below.
        if len(X_valid) != 0:
            pred_valid = model.predict([X_valid])
            feat_valid = high_level_feature_mtl(pred_valid,
                                                stl=stl,
                                                main_task_id=main_task_id)

        scores = []
        for task, classes, idx in multiTasks:
            elm = ELM(sess,
                      feat_train.shape[0],
                      feat_train.shape[1],
                      hidden_num,
                      dictForLabelsTrain[task].shape[1],
                      task=str(task))

            print('elm training')
            elm.feed(feat_train, dictForLabelsTrain[task])
            elm.save(elm_save_path + "." + str(task) + ".elm.ckpt")

            print('elm testing')
            labels = dictForLabelsTest[task]
            if unweighted:
                preds = elm.test(feat_test)
                scores.append(unweighted_recall(preds, labels, task, dataset))
            else:
                acc = elm.test(feat_test, labels)
                scores.append(acc)

            if len(X_valid) != 0:
                print('elm validating')
                labels = dictForLabelsValid[task]
                if unweighted:
                    preds = elm.test(feat_valid)
                    scores.append(unweighted_recall(preds, labels, task,
                                                    dataset))
                else:
                    acc = elm.test(feat_valid, labels)
                    scores.append(acc)
        return scores
    finally:
        # Fix: the original leaked the TF session; release its resources
        # even when an exception is raised mid-way.
        sess.close()
# Example 2 (0)
def main(args):
    """Train, evaluate, and round-trip an ELM classifier on MNIST.

    Loads MNIST, flattens/normalizes the images, fits an ELM with the
    hyper-parameters from `args` (`n_hidden_nodes`, `loss`,
    `activation`), reports train/validation loss and accuracy, prints
    per-sample predictions for the first 10 test images, then saves the
    model to 'model.h5' and reloads it.

    Args:
        args: parsed CLI namespace with `n_hidden_nodes`, `loss`, and
            `activation` attributes.
    """
    # ===============================
    # Load dataset
    # ===============================
    n_classes = 10
    (x_train, t_train), (x_test, t_test) = mnist.load_data()

    # ===============================
    # Preprocess: scale pixels to [0, 1] and flatten 28x28 -> 784;
    # one-hot encode the integer labels.
    # ===============================
    x_train = x_train.astype(np.float32) / 255.
    x_train = x_train.reshape(-1, 28**2)
    x_test = x_test.astype(np.float32) / 255.
    x_test = x_test.reshape(-1, 28**2)
    t_train = to_categorical(t_train, n_classes).astype(np.float32)
    t_test = to_categorical(t_test, n_classes).astype(np.float32)

    # ===============================
    # Instantiate ELM
    # ===============================
    model = ELM(
        n_input_nodes=28**2,
        n_hidden_nodes=args.n_hidden_nodes,
        n_output_nodes=n_classes,
        loss=args.loss,
        activation=args.activation,
        name='elm',
    )

    # ===============================
    # Training
    # ===============================
    model.fit(x_train, t_train)
    train_loss, train_acc = model.evaluate(x_train,
                                           t_train,
                                           metrics=['loss', 'accuracy'])
    print('train_loss: %f' % train_loss)
    print('train_acc: %f' % train_acc)

    # ===============================
    # Validation (here: the held-out test split)
    # ===============================
    val_loss, val_acc = model.evaluate(x_test,
                                       t_test,
                                       metrics=['loss', 'accuracy'])
    print('val_loss: %f' % val_loss)
    print('val_acc: %f' % val_acc)

    # ===============================
    # Prediction: softmax over raw outputs for the first 10 samples
    # ===============================
    x = x_test[:10]
    t = t_test[:10]
    y = softmax(model.predict(x))

    # Idiomatic enumerate instead of range(len(y)) with repeated indexing.
    for i, y_i in enumerate(y):
        print('---------- prediction %d ----------' % (i + 1))
        class_pred = np.argmax(y_i)
        prob_pred = y_i[class_pred]
        class_true = np.argmax(t[i])
        print('prediction:')
        print('\tclass: %d, probability: %f' % (class_pred, prob_pred))
        print('\tclass (true): %d' % class_true)

    # ===============================
    # Save model
    # ===============================
    print('saving model...')
    model.save('model.h5')
    del model

    # ===============================
    # Load model (round-trip check of the serialized file)
    # ===============================
    print('loading model...')
    model = load_model('model.h5')