Example #1
    dl_train = torch.utils.data.DataLoader(ds_train,
                                           batch_size=batchSize,
                                           shuffle=True,
                                           drop_last=False)
    dl_test = torch.utils.data.DataLoader(ds_test,
                                          batch_size=x_validate.shape[0],
                                          shuffle=False)
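    # evaluation loader: the whole validation set is served as a single batch, so no shuffling is needed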
    model = Net4(inputs, hiddenUnits)
    # model = MLP4(inputs, hiddenUnits)
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=learningRate,
                                 weight_decay=l2Penalty)
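    # Adam's weight_decay adds an L2 penalty on the parameters with strength l2Penalty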
    accuracy, loss, accuracy_validate, loss_validate = pre_train(model,
                                                                 optimizer,
                                                                 dl_train,
                                                                 dl_test,
                                                                 x_train,
                                                                 y_train,
                                                                 x_validate,
                                                                 y_validate,
                                                                 run,
                                                                 outputDir,
                                                                 maxEpoch=200)
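    # pre_train returns the per-epoch accuracy/loss histories for the training and validation splits of this run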
    accs.append(accuracy)
    losses.append(loss)
    accs_validate.append(accuracy_validate)
    losses_validate.append(loss_validate)
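    # one history is collected per run; mean and std across runs are computed when saving below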
dir = outputDir + 'pre_train_acc_loss_net4.npz'
np.savez(dir,
         a=np.mean(accs, axis=0),
         b=np.std(accs, axis=0),
         c=np.mean(losses, axis=0),
         d=np.std(losses, axis=0),
         e=np.mean(accs_validate, axis=0),
         f=np.std(accs_validate, axis=0),
         g=np.mean(losses_validate, axis=0),
         h=np.std(losses_validate, axis=0))
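# For later analysis, the arrays saved above can be read back with np.load;
# a minimal sketch (key names follow the np.savez call above):
curves = np.load(outputDir + 'pre_train_acc_loss_net4.npz')
mean_train_acc, std_train_acc = curves['a'], curves['b']  # training accuracy: mean / std over runs
mean_val_acc, std_val_acc = curves['e'], curves['f']      # validation accuracy: mean / std over runs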
Example #2
    dl_test = torch.utils.data.DataLoader(ds_test,
                                          batch_size=x_test.shape[0],
                                          shuffle=False)
    model = csnn_learnable_r(2, features, bias=BIAS)
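    # extra hyper-parameters of the learnable-r variant, applied only when learnable_r is enabled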
    if learnable_r:
        model.set_lambda(LAMBDA)
        model.set_miu(MIU)
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=learningRate,
                                 weight_decay=l2Penalty)
    accuracy, loss, accuracy_test, loss_test = pre_train(model,
                                                         optimizer,
                                                         dl_train,
                                                         dl_test,
                                                         x_train,
                                                         y_train,
                                                         x_test,
                                                         y_test,
                                                         run,
                                                         outputDir,
                                                         maxEpoch=50)
#     accs.append(accuracy)
#     losses.append(loss)
#     accs_test.append(accuracy_test)
#     losses_test.append(loss_test)
# dir = outputDir + 'pre_train_acc_loss_csnn.npz'
# np.savez(dir, a=np.mean(accs, axis=0), b=np.std(accs, axis=0),
#          c=np.mean(losses, axis=0), d=np.std(losses, axis=0),
#          e=np.mean(accs_test, axis=0), f=np.std(accs_test, axis=0),
#          g=np.mean(losses_test, axis=0), h=np.std(losses_test, axis=0))
Example #3
    dl_train = torch.utils.data.DataLoader(ds_train,
                                           batch_size=batchSize,
                                           shuffle=True,
                                           drop_last=False)
    dl_test = torch.utils.data.DataLoader(ds_test,
                                          batch_size=x_validate.shape[0],
                                          shuffle=False)
    model = Net3(inputs, hiddenUnits)
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=learningRate,
                                 weight_decay=l2Penalty)
    pre_train(model,
              optimizer,
              dl_train,
              dl_test,
              x_train,
              y_train,
              x_validate,
              y_validate,
              run,
              outputDir,
              maxEpoch=10)

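# per-run result containers and settings for the main training stage that follows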
bestValidationAccs = []
AUCs = []
ACCs = []
epochs = 200
ALPHAs = None
for run in range(runs):
    np.random.seed(seeds[run])
    torch.manual_seed(seeds[run])
    dl_train = torch.utils.data.DataLoader(ds_train,
                                           batch_size=batchSize,
                                           shuffle=True,
                                           drop_last=False)
np.random.seed(seed)
torch.manual_seed(seed)
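# fix the random seeds once, then sweep over the LAMBDA values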
for LAMBDA in LAMBDAS:
    print('lambda {:.2f}'.format(LAMBDA))
    for run in range(runs):
        dl_train = torch.utils.data.DataLoader(ds_train, batch_size=batchSize, shuffle=True, drop_last=True)
        # when the model uses batch norm, every batch must contain more than one sample, so drop the last (possibly smaller) batch just in case
        dl_test = torch.utils.data.DataLoader(ds_test, batch_size=x_test.shape[0], shuffle=False)
        model = Net3_learnable_r(inputs, hiddenUnits, bias=BIAS)
        if learnable_r:
            model.set_lambda(LAMBDA)
            model.set_miu(MIU)
        optimizer = torch.optim.Adam(model.parameters(), lr=learningRate,
                                     weight_decay=l2Penalty)
        pre_train(model, optimizer, dl_train, dl_test, x_train, y_train, x_test, y_test, run, outputDir, maxEpoch=1)

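    # second stage for the current LAMBDA: per-run containers, then the pre-trained checkpoints are reloaded below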
    bestValidationAccs = []
    AUCs = []
    ACCs = []
    epochs = 1000
    ALPHAs = None
    for run in range(runs):
        dl_train = torch.utils.data.DataLoader(ds_train, batch_size=batchSize, shuffle=True, drop_last=True)
        dl_test = torch.utils.data.DataLoader(ds_test, batch_size=x_test.shape[0], shuffle=False)
        dl_combined = torch.utils.data.DataLoader(ds_combined, batch_size=x_combined.shape[0], shuffle=False)
        PATH = outputDir + '/csnn_run{}_epoch{}.pth'.format(run, 0)
        l = torch.load(PATH)
        model = l['net']
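        # the checkpoint stores the whole network under the 'net' key; a fresh Adam optimizer is then built for the loaded model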
        optimizer = torch.optim.Adam(model.parameters(), lr=learningRate,
                                     weight_decay=l2Penalty)