Esempio n. 1
0
                               float_keys,
                               shuffle=True)
    valid_batches = batch_iter(valid_data,
                               len(valid_data),
                               embed_keys,
                               float_keys,
                               shuffle=False)
    test_batches = batch_iter(test_data,
                              len(test_data),
                              embed_keys,
                              float_keys,
                              shuffle=False)

    model = MultiNet(len(float_keys), embed_dims, embed_sizes, hidden_dim,
                     embed_keys, num_class)
    model = set_cuda(model)
    opt = optim.Adam(model.parameters(),
                     lr=learning_rate,
                     weight_decay=weight_decay)

    start = time.time()
    best_epoch, best_tst_ret, best_med_ret, best_std_ret = train(
        model, num_batch, train_batches, valid_batches, test_batches, opt,
        num_epochs, hidden_dim)

    print(
        'Best test return: %.3f, median: %.3f, std: %.3f in epoch %d. Finished in %.3f seconds'
        % (best_tst_ret, best_med_ret, best_std_ret, best_epoch,
           time.time() - start))

# Final status line, emitted once the whole training driver above has returned.
print('Training finished')
Esempio n. 2
0
            max_acc = acc
            outfile = os.path.join(params.checkpoint_dir, 'best_model.tar')
            torch.save({'epoch': epoch, 'state': model.state_dict()}, outfile)

        if (epoch % params.save_freq == 0) or (epoch == stop_epoch - 1):
            outfile = os.path.join(params.checkpoint_dir,
                                   '{:d}.tar'.format(epoch))
            torch.save({'epoch': epoch, 'state': model.state_dict()}, outfile)

    return model


if __name__ == '__main__':
    # Fix the NumPy RNG so episode sampling / shuffling is reproducible.
    np.random.seed(10)
    params = parse_args('train')
    set_cuda(params.cuda)

    # Resolve the base (training) and validation split file paths.
    # The two 'cross*' settings deliberately pair a base file from one
    # dataset with a validation file from a different dataset; every other
    # dataset uses its own base/val splits.
    if params.dataset == 'cross':
        base_file = configs.data_dir['miniImagenet'] + 'all.json'
        val_file = configs.data_dir['CUB'] + 'val.json'
    elif params.dataset == 'cross_char':
        base_file = configs.data_dir['omniglot'] + 'noLatin.json'
        val_file = configs.data_dir['emnist'] + 'val.json'
    else:
        base_file = configs.data_dir[params.dataset] + 'base.json'
        val_file = configs.data_dir[params.dataset] + 'val.json'

    if 'Conv' in params.model:
        if params.dataset in ['omniglot', 'cross_char']:
            image_size = 28
        else:
                torch.save(model.state_dict(), 'em_model.pt')
    return best_auc, best_acc


# ---- Training hyper-parameters ---------------------------------------------
num_samples = X_train.shape[0]
batch_size = 1000
num_batch = int(num_samples / batch_size)  # full batches per epoch
hidden_dim = 4
num_epochs = 50
learning_rate = 1e-2
weight_decay = 5e-5
rpt_step = 10000   # progress-report interval (steps)
test_step = 50000  # evaluation interval (steps)

# Inverse-frequency class weights for the 3-class loss.
# NOTE(review): the class counts are taken from y_test but divided by
# y_train.shape[0] — this is only consistent if both splits have the same
# length; confirm the intended denominator.
ratio_0 = sum(1 for t in y_test if t == 0) / y_train.shape[0]
ratio_1 = sum(1 for t in y_test if t == 1) / y_train.shape[0]
class_weights = [1 / ratio_0, 1 / ratio_1, 1 / (1 - ratio_0 - ratio_1)]
weight = set_cuda(torch.FloatTensor(class_weights))
num_class = 3

# Batch iterators: shuffle only the training stream; the test split is fed
# through in one full-size batch.
train_batches = batch_iter(X_train, y_train, batch_size,
                           c1=GBC, c2=rf_en, shuffle=True)
test_batches = batch_iter(X_test, y_test, X_test.shape[0],
                          c1=GBC, c2=rf_en, shuffle=False)