Example #1
def grid_search():
    mode_lst = ['padding', 'sum']
    max_iter_lst = [100, 500, 1000, 5000, 10000]
    learning_rate_lst = [0.1, 0.01, 0.001, 0.0001, 0.00001]

    verbose = 0

    hyper_tag_lst = []
    acc_lst = []
    for mode in mode_lst:
        for max_iter in max_iter_lst:
            for learning_rate in learning_rate_lst:
                print('\nmode_%s_iter_%s_lr_%s' %
                      (mode, max_iter, learning_rate))
                data_pth = '../data/mode_%s.npz' % mode
                x_train, y_train, x_test, y_test, x_val, y_val = _data(
                    data_pth, split_val=True)

                LR = LogisticRegression(max_iter=max_iter, lr=learning_rate)
                thetas = LR.fit(x=x_train,
                                y=y_train,
                                reduce_lr=True,
                                verbose=verbose)
                y_pred_val = LR.predict(thetas, x_val, y_val, Onsave=False)
                acc, f1, mcc, recalls, precisions, f1s, mccs = test_score(
                    y_real=y_val, y_pred=y_pred_val, classes=10)

                hyper_tag_lst.append('%s_%s_%s' %
                                     (mode, max_iter, learning_rate))
                acc_lst.append(acc)
                print('val_acc:', acc)
                # sys.stdout.flush()  # or run with "python -u"
    print('\nThe Best Hypers are: %s, Best val_acc is: %s' %
          (hyper_tag_lst[acc_lst.index(max(acc_lst))], max(acc_lst)))
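Every example on this page unpacks the same seven return values from test_score: micro accuracy, micro F1 and micro MCC, followed by per-class recalls, precisions, F1s and MCCs. The project's own helper is not shown here; a minimal sketch of a compatible implementation, assuming scikit-learn is available (the per-class MCC handling in particular is an assumption), could look like this:

import numpy as np
from sklearn.metrics import (accuracy_score, f1_score, matthews_corrcoef,
                             precision_score, recall_score)

def test_score(y_real, y_pred, classes=10):
    # Hypothetical stand-in for the project's test_score helper.
    y_real, y_pred = np.asarray(y_real), np.asarray(y_pred)
    labels = list(range(classes))
    acc = accuracy_score(y_real, y_pred)
    f1 = f1_score(y_real, y_pred, average='micro')
    mcc = matthews_corrcoef(y_real, y_pred)
    recalls = recall_score(y_real, y_pred, labels=labels, average=None)
    precisions = precision_score(y_real, y_pred, labels=labels, average=None)
    f1s = f1_score(y_real, y_pred, labels=labels, average=None)
    # Per-class MCC via one-vs-rest binarisation of each label.
    mccs = np.array([matthews_corrcoef(y_real == c, y_pred == c) for c in labels])
    return acc, f1, mcc, recalls, precisions, f1s, mccs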
Example #2
def main():
    if len(sys.argv) < 4:
        print('Usage: %s %s %s %s' %
              (sys.argv[0], ['padding', 'sum'], 'max_iter', 'learning_rate'))
        exit(0)

    MODE = sys.argv[1]
    MAX_ITER = int(sys.argv[2])
    LEARNING_RATE = float(sys.argv[3])

    verbose = 1
    data_pth = '../data/mode_%s.npz' % MODE
    outdir = '../model/LR/mode_%s_maxiter_%s_lr_%s' % (MODE, MAX_ITER,
                                                       LEARNING_RATE)
    os.makedirs(outdir, exist_ok=True)
    x_train, y_train, x_test, y_test = _data(data_pth,
                                             split_val=False,
                                             verbose=1)

    LR = LogisticRegression(max_iter=MAX_ITER, lr=LEARNING_RATE)
    thetas = LR.fit(x=x_train, y=y_train, reduce_lr=True, verbose=verbose)
    y_pred = LR.predict(thetas, x_test, y_test, modeldir=outdir, Onsave=True)
    acc, f1, mcc, recalls, precisions, f1s, mccs = test_score(y_real=y_test,
                                                              y_pred=y_pred,
                                                              classes=10)
    print('\nacc: %s'
          '\nf1: %s'
          '\nmcc: %s'
          '\nrecalls: %s'
          '\nprecisions: %s'
          '\nf1s: %s'
          '\nmccs: %s' % (acc, f1, mcc, recalls, precisions, f1s, mccs))
    print('\nThe Hypers are: mode_%s_iter_%s_lr_%s' %
          (MODE, MAX_ITER, LEARNING_RATE))
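The manual sys.argv handling above could equivalently be expressed with argparse, which validates the mode and generates the usage text automatically. A sketch, assuming the three positional arguments keep the same meaning:

import argparse

def parse_args():
    # Hypothetical argparse equivalent of the sys.argv parsing in main().
    parser = argparse.ArgumentParser(description='Train the logistic regression model.')
    parser.add_argument('mode', choices=['padding', 'sum'])
    parser.add_argument('max_iter', type=int)
    parser.add_argument('learning_rate', type=float)
    return parser.parse_args()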
Example #3
def main():
    if len(sys.argv) < 4:
        print('Usage: %s %s %s %s' %
              (sys.argv[0], ['padding', 'sum'], 'C', 'kernel'))
        exit(0)

    MODE = sys.argv[1]
    C = float(sys.argv[2])
    KERNEL = sys.argv[3]

    verbose = 1
    data_pth = '../data/mode_%s.npz' % MODE
    outdir = '../model/svm/mode_%s_C_%s_kernel_%s' % (MODE, C, KERNEL)
    os.makedirs(outdir, exist_ok=True)
    x_train, y_train, x_test, y_test = _data(data_pth,
                                             split_val=False,
                                             verbose=verbose)

    svm_clf = SupportVectorMachine(C=C, kernel=KERNEL, verbose=verbose)
    svm_clf.fit(x=x_train, y=y_train)
    y_pred = svm_clf.predict(x_test, y_test, modeldir=outdir, Onsave=True)
    acc, f1, mcc, recalls, precisions, f1s, mccs = test_score(y_real=y_test,
                                                              y_pred=y_pred,
                                                              classes=10)
    print('\nacc: %s'
          '\nf1: %s'
          '\nmcc: %s'
          '\nrecalls: %s'
          '\nprecisions: %s'
          '\nf1s: %s'
          '\nmccs: %s' % (acc, f1, mcc, recalls, precisions, f1s, mccs))
    print('\nThe Hypers are: mode_%s_C_%s_kernel_%s' % (MODE, C, KERNEL))
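SupportVectorMachine is the project's own wrapper and its implementation is not included on this page. As a rough, clearly hypothetical sketch of an interface-compatible wrapper around scikit-learn's SVC (the .npz layout is an assumption based on Example #6):

import os
import numpy as np
from sklearn.svm import SVC

class SupportVectorMachine:
    # Hypothetical stand-in with the same call signatures the examples use.
    def __init__(self, C=1.0, kernel='rbf', verbose=0):
        self.clf = SVC(C=C, kernel=kernel, verbose=bool(verbose))

    def fit(self, x, y):
        self.clf.fit(x, y)

    def predict(self, x_test, y_test, modeldir=None, Onsave=False):
        y_pred = self.clf.predict(x_test)
        if Onsave and modeldir is not None:
            # The real helper also appears to store probability arrays
            # (p_real/p_pred, see Example #6); this sketch keeps only labels.
            np.savez(os.path.join(modeldir, 'test_rst.npz'),
                     y_real=y_test, y_pred=y_pred)
        return y_pred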
Example #4
def grid_search():
    mode_lst = ['padding', 'sum']
    C_lst = [1., 10., 100., 1000.]  # Regularization parameter
    kernel_lst = ['linear', 'poly', 'rbf', 'sigmoid']

    verbose = 0

    hyper_tag_lst = []
    acc_lst = []
    for mode in mode_lst:
        for C in C_lst:
            for kernel in kernel_lst:
                print('\nmode_%s_C_%s_kernel_%s' % (mode, C, kernel))
                data_pth = '../data/mode_%s.npz' % mode
                x_train, y_train, x_test, y_test, x_val, y_val = _data(
                    data_pth, split_val=True, verbose=verbose)

                svm_clf = SupportVectorMachine(C=C,
                                               kernel=kernel,
                                               verbose=verbose)
                svm_clf.fit(x=x_train, y=y_train)
                y_pred_val = svm_clf.predict(x_val, y_val, Onsave=False)
                acc, f1, mcc, recalls, precisions, f1s, mccs = test_score(
                    y_real=y_val, y_pred=y_pred_val, classes=10)

                hyper_tag_lst.append('%s_%s_%s' % (mode, C, kernel))
                acc_lst.append(acc)
                print('val_acc:', acc)
                # sys.stdout.flush()  # or run with "python -u"
    print('\nThe Best Hypers are: %s, Best val_acc is: %s' %
          (hyper_tag_lst[acc_lst.index(max(acc_lst))], max(acc_lst)))
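Both grid_search functions (Examples #1 and #4) walk the full Cartesian product of their hyperparameter lists with three nested loops; itertools.product expresses the same traversal more compactly. A sketch of the loop skeleton only, with the body unchanged:

from itertools import product

mode_lst = ['padding', 'sum']
C_lst = [1., 10., 100., 1000.]
kernel_lst = ['linear', 'poly', 'rbf', 'sigmoid']

for mode, C, kernel in product(mode_lst, C_lst, kernel_lst):
    print('\nmode_%s_C_%s_kernel_%s' % (mode, C, kernel))
    # ... load data, fit, predict and score exactly as in the body above ...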
Example #5
def main():
    if len(sys.argv) < 4:
        print('Usage: %s %s %s %s' %
              (sys.argv[0], ['padding', 'sum'], 'epochs', 'learning_rate'))
        exit(0)
    MODE = sys.argv[1]
    EPOCHS = int(sys.argv[2])
    LEARNING_RATE = float(sys.argv[3])

    data_pth = '../data/mode_%s.npz' % MODE
    model_name = '%s_mode_%s_epochs_%s_lr_%s_%s' % (
        sys.argv[0].split('/')[-1][:-3], MODE, EPOCHS, LEARNING_RATE,
        time.strftime("%Y.%m.%d.%H.%M.%S", time.localtime()))
    modeldir = '../model/Conv1D/%s' % model_name
    os.makedirs(modeldir, exist_ok=True)

    #
    # load data
    #
    x_train, y_train, x_test, y_test, class_weights_dict = _data(
        data_pth, split_val=False, verbose=1)

    #
    # train and save
    #
    if USEGPU:
        config_tf(user_mem=2500, cuda_rate=0.2)
    model, history_dict = TrainConv1D(x_train,
                                      y_train,
                                      class_weights_dict=class_weights_dict,
                                      epochs=EPOCHS,
                                      lr=LEARNING_RATE)
    net_saver(model, modeldir, history_dict)

    #
    # test
    #
    y_test, p_pred, y_real, y_pred = net_predictor(modeldir,
                                                   x_test,
                                                   y_test,
                                                   Onsave=True)
    acc, f1, mcc, recalls, precisions, f1s, mccs = test_score(y_real=y_real,
                                                              y_pred=y_pred,
                                                              classes=10)
    print('\nacc: %s'
          '\nf1: %s'
          '\nmcc: %s'
          '\nrecalls: %s'
          '\nprecisions: %s'
          '\nf1s: %s'
          '\nmccs: %s' % (acc, f1, mcc, recalls, precisions, f1s, mccs))
    print('\nThe Hypers are: mode_%s_epochs_%s_lr_%s' %
          (MODE, EPOCHS, LEARNING_RATE))
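config_tf is another project helper, used here to cap GPU memory before training; its code is not shown on this page. One common way to implement such a cap with the TensorFlow 1.x-compatible API, assuming cuda_rate means a per-process memory fraction (an assumption about the helper's semantics):

import tensorflow as tf

def config_tf(user_mem=2500, cuda_rate=0.2):
    # Hypothetical sketch; user_mem is accepted only for signature
    # compatibility and is not used here.
    gpu_options = tf.compat.v1.GPUOptions(per_process_gpu_memory_fraction=cuda_rate)
    config = tf.compat.v1.ConfigProto(gpu_options=gpu_options)
    sess = tf.compat.v1.Session(config=config)
    tf.compat.v1.keras.backend.set_session(sess)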
Example #6
def compare_metrics():
    p_reals = []
    p_preds = []
    classifiers = {
        'LR':
        '../model/LR/mode_sum_maxiter_1000_lr_0.1/test_rst.npz',
        'SVM':
        '../model/svm/mode_sum_C_1000.0_kernel_rbf/test_rst.npz',
        'ConvNet':
        '../model/Conv1D/convnet_mode_padding_epochs_111_lr_0.01_2020.06.12.08.49.55/test_rst.npz',
        'ResNet':
        '../model/Conv1D/resnet_mode_padding_epochs_81_lr_0.01_2020.06.12.08.50.57/test_rst.npz'
    }
    for clf, rst_pth in classifiers.items():
        rst_data = np.load(rst_pth)
        y_real = rst_data['y_real']
        y_pred = rst_data['y_pred']
        p_reals.append(rst_data['p_real'])
        p_preds.append(rst_data['p_pred'])
        acc_micro, f1_micro, mcc_micro, recalls, precisions, f1s, mccs = test_score(
            y_real, y_pred)
        classifiers[clf] = (acc_micro, f1_micro, mcc_micro, recalls,
                            precisions, f1s, mccs)

    hash_line = '########################################################################################'
    header = '          |labels    |recall    |precision |F1        |mcc       |F1_micro  |mcc_micro |'
    ruler = '------------------------------------------------------------------------------'
    lines = [hash_line, header]
    for clf in ('LR', 'SVM', 'ConvNet', 'ResNet'):
        (acc_micro, f1_micro, mcc_micro, recalls, precisions, f1s,
         mccs) = classifiers[clf]
        lines.append('%-10s%s' % (clf, ruler))
        for i in range(10):
            row = '          |%-10s|%-10.4f|%-10.4f|%-10.4f|%-10.4f|' % (
                'c%d' % (i + 1), recalls[i], precisions[i], f1s[i], mccs[i])
            # Micro-averaged F1 and MCC are shown once per classifier, on the
            # c1 row; the remaining rows leave those two columns blank.
            row += ('%-10.4f|%-10.4f|' % (f1_micro, mcc_micro)
                    if i == 0 else '          |          |')
            lines.append(row)
    lines.append(hash_line)
    print('\n'.join(lines))

    return p_reals, p_preds, list(classifiers.keys())
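compare_metrics returns the stacked probability arrays together with the classifier names, which suggests a downstream curve comparison. A hedged sketch of one possible consumer, a micro-averaged ROC plot per classifier, assuming p_real holds one-hot true labels and p_pred the predicted class probabilities:

import matplotlib.pyplot as plt
from sklearn.metrics import auc, roc_curve

def plot_micro_roc(p_reals, p_preds, names):
    # Hypothetical consumer of compare_metrics(): one micro-averaged ROC
    # curve per classifier, computed from the flattened probability arrays.
    for p_real, p_pred, name in zip(p_reals, p_preds, names):
        fpr, tpr, _ = roc_curve(p_real.ravel(), p_pred.ravel())
        plt.plot(fpr, tpr, label='%s (AUC=%.3f)' % (name, auc(fpr, tpr)))
    plt.plot([0, 1], [0, 1], 'k--', linewidth=0.8)
    plt.xlabel('False positive rate')
    plt.ylabel('True positive rate')
    plt.legend()
    plt.show()

The three values returned by compare_metrics() would then be passed straight into plot_micro_roc.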
Example #7
def train(train_batch, train_label, hidden_size, test_length=5):
    '''
    ##############################
    # modified from the original code by Nurakhmetov (2019)
    references:
    Nurakhmetov, D. (2019). Reinforcement Learning Applied to Adaptive Classification Testing. 
    In Theoretical and Practical Advances in Computer-based Educational Measurement (pp. 325-336). Springer, Cham.    
    ###############################
    '''
    batch_size = 10

    tests = []
    tests_train = []
    policy = Policy(n_tests=18, n_scores=1401, hidden_size=hidden_size)
    optimizer = optim.Adam(policy.parameters())
    criterion = nn.CrossEntropyLoss(reduce=False)

    # No item has been administered yet; the hidden state is sized by hidden_size.
    score, test = None, None
    tests, scores = [], []
    rewards = []
    hidden = Variable(torch.zeros(batch_size, hidden_size), volatile=True)

    for t in range(test_length):
        logits, value, hidden, _ = policy(test, score, hidden, batch_size)
        probs = nn.functional.softmax(logits, dim=-1)  # sample the next item
        next_test = torch.multinomial(probs, 1)

        test = next_test.data.squeeze(1)
        score, test = utils.test_score(train_batch, test)

        masks = []
        for prev_test in tests:
            mask = prev_test.squeeze(1).eq(test).unsqueeze(1)
            masks.append(mask)
        if len(masks) > 0:
            masks = torch.cat(masks, 1)
            masks = masks.sum(1).gt(0)
            masks = -1 * masks.float()
            rewards.append(masks.unsqueeze(1))

        tests.append(test.unsqueeze(1))
        scores.append(score.unsqueeze(1))

        score = Variable(score.unsqueeze(1), volatile=True)
        test = Variable(test.unsqueeze(1), volatile=True)

        tests_train.append(tests)

    saved_log_probs = []
    saved_values = []

    hidden = Variable(torch.zeros(batch_size, hidden_size))
    logits, value, hidden, _ = policy(None, None, hidden, batch_size)
    log_probs = nn.functional.log_softmax(logits)

    for test, score in zip(tests, scores):

        log_prob = log_probs.gather(1, Variable(test))
        saved_log_probs.append(log_prob)
        saved_values.append(value)

        logits, value, hidden, clf_logits = policy(Variable(test),
                                                   Variable(score), hidden,
                                                   batch_size)
        log_probs = nn.functional.log_softmax(logits)

    loss = nn.functional.cross_entropy(clf_logits, Variable(train_label))

    clf_rewards = []
    for clf_logit, targ in zip(clf_logits.data, train_label):
        reward = -criterion(Variable(clf_logit.unsqueeze(0)),
                            Variable(torch.LongTensor([targ]))).data
        clf_rewards.append(reward.unsqueeze(0))
    clf_rewards = torch.cat(clf_rewards, 0).unsqueeze(-1)

    rewards.append(clf_rewards)
    returns = utils.get_reward(rewards)

    saved_log_probs = torch.cat(saved_log_probs, 1)
    saved_values = torch.cat(saved_values, 1)

    advantages = Variable(returns) - saved_values

    critic_loss = advantages.pow(2).mean()
    actor_loss = -(saved_log_probs * Variable(advantages.data)).mean()

    optimizer.zero_grad()
    (critic_loss + actor_loss + loss).backward()
    optimizer.step()

    return tests_train