Example #1
def main():
    args = process_args()
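    # restrict visible GPUs; this must be set before the first CUDA call to take effect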
    os.environ["CUDA_VISIBLE_DEVICES"] = args.cuda
    if args.seed == -1:
        RANDOMSEED = None
    else:
        RANDOMSEED = args.seed
    if RANDOMSEED is not None:  # torch.manual_seed(None) raises a TypeError
        torch.manual_seed(RANDOMSEED)
        torch.cuda.manual_seed(RANDOMSEED)
    IMP_WEIGHT = args.imp_weight
    if not IMP_WEIGHT:
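        # uniform importance over the class_num - 1 ordinal sub-tasks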
        imp = torch.ones(args.class_num - 1, dtype=torch.float)
    elif IMP_WEIGHT == 1:
        # no imp is assigned on this branch, so imp.cuda() below would raise
        # a NameError; the weight computation presumably lives elsewhere
        raise NotImplementedError('importance weighting not shown in this snippet')
    else:
        raise ValueError('Incorrect importance weight parameter.')
    imp = imp.cuda()
    embedding = 'random'
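    # force deterministic cuDNN kernels for reproducibility (can slow training)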
    torch.backends.cudnn.deterministic = True

    config = Config(args, embedding, 'OR')
    start_time = time.time()
    print("Loading data...")
    vocab, train_data, test_data = build_dataset(config, args.word)
    train_iter = build_iterator(train_data, config, doubly_flag=False)
    # train_iter = buil_random_iterator(train_data,config)
    test_iter = build_test_iterator(test_data, config)

    print("Time used: ", get_time_dif(start_time))

    config.n_vocab = len(vocab)
    model = OR(config).cuda()
    init_model(model)
    print("start training...")
    train_or(config, model, train_iter, test_iter, imp)
Example #2
def main():
    args = process_args()
    os.environ["CUDA_VISIBLE_DEVICES"] = args.cuda
    if args.seed == -1:
        RANDOMSEED = None
    else:
        RANDOMSEED = args.seed
    if RANDOMSEED is not None:  # torch.manual_seed(None) raises a TypeError
        torch.manual_seed(RANDOMSEED)
        torch.cuda.manual_seed(RANDOMSEED)
    embedding = 'random'
    torch.backends.cudnn.deterministic = True

    config = Config(args, embedding, 'POR')
    start_time = time.time()
    print("Loading data...")
    vocab, train_data, test_data = build_adaptive_dataset(config, args.word)

    # train_por consumes two training iterators, but only train_iter2 was
    # built here; doubly_flag=True for train_iter is an assumption
    train_iter = build_iterator(train_data, config, doubly_flag=True)
    train_iter2 = build_iterator(train_data, config, doubly_flag=False)
    test_iter = build_test_iterator(test_data, config)
    print("Time used: ", get_time_dif(start_time))

    config.n_vocab = len(vocab)
    model = POR(config).cuda()
    init_model(model)
    print("start training...")
    train_por(config, model, train_iter, train_iter2, test_iter)
Example #3
def main():
    args = process_args()

    os.environ['CUDA_VISIBLE_DEVICES'] = args.cuda

    dataloader = get_dataloader(num_training=args.train_num,
                                num_labeled=args.n_labeled,
                                batch_size=args.batch_size,
                                y_dim=args.y_dim,
                                file_name=args.input_file)
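    # the loader exposes 'labeled', 'unlabeled', and 'test' splits, indexed below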

    model = DSGM(args)
    print(model)
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=args.lr,
                                 betas=(0.9, 0.999))
    cuda = torch.cuda.is_available()
    if cuda:
        model = model.cuda()
    epoch_bar = tqdm(range(args.n_epochs))


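    # weight on the classification term: args.alpha rescaled by the ratio of
    # total to unlabeled training examples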
    beta = args.alpha * (args.train_num / (args.train_num - args.n_labeled))

    for epoch in epoch_bar:
        model.train()
        total_loss, accuracy = (0., 0.)
        for batch_labeled, batch_unlabeled in zip(dataloader['labeled'],
                                                  dataloader['unlabeled']):
            batch_x, batch_y = batch_labeled
            batch_u, batch_uy = batch_unlabeled
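            # Variable wrapping is a no-op since PyTorch 0.4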
            batch_x, batch_y, batch_u = Variable(batch_x), Variable(
                batch_y), Variable(batch_u)

            ln, d = batch_y.size()
            un, _ = batch_uy.size()
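            # replicate the gold labels to simulate identical annotators; only
            # the first two rows are filled, so this assumes args.workers == 2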
            worker_y = torch.zeros(args.workers, ln, d)
            #batch_x = batch_x.reshape(1, batch_x.size(0))
            #batch_u = batch_u.reshape(1, batch_u.size(0))
            worker_y[0, :, :] = batch_y
            worker_y[1, :, :] = batch_y
            if cuda:
                batch_x, worker_y = batch_x.cuda(), worker_y.cuda()
                batch_u = batch_u.cuda()

            label_t, label_loss = model(batch_x, worker_y)
            unlabel_t, unlabel_loss = model(batch_u)
            workers_bar = worker_y.mean(dim=0)
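            # per-label BCE against the averaged worker labels, summed over
            # labels and averaged over the batch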
            clas_loss = torch.mean(
                torch.sum(F.binary_cross_entropy(label_t,
                                                 workers_bar,
                                                 reduction='none'),
                          dim=1))
            # J_alpha = label_loss + unlabel_loss + clas_loss
            J_alpha = label_loss + unlabel_loss + beta * clas_loss

            J_alpha.backward()
            optimizer.step()
            optimizer.zero_grad()
            #accuracy = f1_score(batch_uy.cpu(), (unlabel_t>0.5).float().cpu(), average='micro')

            total_loss += J_alpha.item()
        if epoch % 1 == 0:  # evaluate every epoch
            model.eval()
            with torch.no_grad():
                for test_x, test_y in dataloader['test']:
                    if cuda:
                        test_x, test_y = test_x.cuda(), test_y.cuda()
                    unlabel_t, unlabel_loss = model(test_x)
                    unlabel_t = (unlabel_t > 0.5).float()
                    # note: f1 reflects only the last test batch
                    accuracy = f1_score(test_y.cpu(),
                                        unlabel_t.cpu(),
                                        average='micro')
        epoch_bar.set_description("f1: {:.2f}, loss : {:.2f}\n".format(
            accuracy, total_loss))
Example #4
File: train.py Project: AlfheimLe/ss2
from get_args import process_args
from model import DSGM
import os
import pprint
import torch
if __name__ == '__main__':
    args = process_args()

    args.device = torch.device('cuda:{}'.format(args.cuda)
                               if torch.cuda.is_available() and args.cuda is not None
                               else 'cpu')
    torch.manual_seed(args.seed)
    if args.device.type == 'cuda':
        torch.cuda.manual_seed(args.seed)

    model = DSGM(args).to(args.device)

    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)

    if args.restore_file:
        # load the saved model state (the optimizer state is not restored here)
        state = torch.load(args.restore_file, map_location=args.device)
        model.load_state_dict(state)
        # set up paths
        args.output_dir = os.path.dirname(args.restore_file)
    args.results_file = os.path.join(args.output_dir, args.results_file)

    print('Loaded settings and model:')
    print(pprint.pformat(args.__dict__))
    print(model)
    # append the run configuration and model summary to the results file
    with open(args.results_file, 'a') as f:
        print(pprint.pformat(args.__dict__), file=f)
        print(model, file=f)
Example #5
File: test_dgm.py Project: AlfheimLe/ss2
def main():
    args = process_args()
    file_name = '../DataSource/yeast_train.svm'
    x, y = get_data(file_name)
    # numpy -> tensor
    n, x_dim = x.shape
    y_dim = 14
    z_dim = 128
    h_dim = [128] * y_dim  # a 128-unit hidden size for each of the y_dim labels

    x = x.toarray().astype(np.float32)
    min_max = MinMaxScaler()
    x = min_max.fit_transform(x)
    x = torch.from_numpy(x)
    # expand y into an n x y_dim multi-hot label matrix, e.g. [1, 0, 0, 1]
    y = torch.from_numpy(generate(y, y_dim, n).astype(np.float32))

    labeled_x = x[0:200, :]
    labeled_y = y[0:200, :]

    unlabeled_x = x[200:400, :]
    unlabeled_y = y[200:400, :]

    labeled_size = labeled_x.shape[0]
    unlabeled_size = unlabeled_x.shape[0]

    model = DSGM(args)
    print(model)
    #
    # trade-off parameter
    alpha = 0.1 * unlabeled_size / labeled_size
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=0.001,
                                 betas=(0.9, 0.999))
    m = unlabeled_size
    cuda = torch.cuda.is_available()
    if cuda:
        model = model.cuda()
    epoch_bar = tqdm(range(500))
    for epoch in epoch_bar:
        model.train()
        total_loss, accuracy = (0., 0.)
        preds = torch.zeros_like(unlabeled_y)
        if cuda:
            preds = preds.cuda()
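        # walk the unlabeled rows one at a time, cycling the smaller labeled
        # set alongside (cycle is itertools.cycle)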
        for i, batch_x, batch_y, batch_u in zip(range(m), cycle(labeled_x),
                                                cycle(labeled_y), unlabeled_x):
            batch_x, batch_y, batch_u = Variable(batch_x), Variable(
                batch_y), Variable(batch_u)
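            # duplicate the gold label of this single example for two simulated annotators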
            worker_y = torch.zeros(args.workers, 1, batch_y.size(0))
            batch_x = batch_x.reshape(1, batch_x.size(0))
            batch_u = batch_u.reshape(1, batch_u.size(0))
            worker_y[0, :, :] = batch_y
            worker_y[1, :, :] = batch_y
            if cuda:
                batch_x, worker_y = batch_x.cuda(), worker_y.cuda()
                batch_u = batch_u.cuda()

            label_t, label_loss = model(batch_x, worker_y)
            unlabel_t, unlabel_loss = model(batch_u)
            workers_bar = worker_y.mean(dim=0)
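            # log-likelihood of the averaged worker labels under the model's
            # prediction; subtracting it below penalizes misclassification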
            clas_loss = torch.sum(workers_bar * torch.log(label_t + 1e-6),
                                  dim=1).mean()
            J_alpha = label_loss + unlabel_loss - clas_loss

            J_alpha.backward()
            optimizer.step()
            optimizer.zero_grad()

            total_loss += J_alpha.item()
            preds[i, :] = unlabel_t
            # batch_bar.set_description('[Loss={:.4f}], [L_Loss={:.4f}], [U_Loss={:.4f}], [S_Loss={:.4f}]'.format(J_alpha.item(),
            #                                                                                                     -label_loss.item(),
            #                                                                                                     -unlabel_loss.item(),
            #                                                                                                     -clas_loss.item()))
            # preds[i, :] = logits
            # accuracy += torch.mean((torch.max(logits, 1)[1].data == torch.max(batch_y, 1)[1].data).float())
        preds = (preds > 0.5).float()  # f1_score expects binary labels
        accuracy = f1_score(unlabeled_y.cpu(), preds.cpu(), average='micro')

        epoch_bar.set_description("accuracy: {:.2f}, loss : {:.2f}\n".format(
            accuracy, total_loss / m))