Example #1
def train():
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    with open("parameters.json") as json_file:
        parameters = json.load(json_file)
    net = CNNModel(1, 10)
    optimizer = torch.optim.Adam(net.parameters(), lr=parameters["lr"])
    criterion = nn.BCELoss()

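    # Wrap the model in DataParallel so each batch is split across all visible GPUs.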
    if torch.cuda.is_available():
        net = torch.nn.DataParallel(net,
                                    device_ids=range(
                                        torch.cuda.device_count())).cuda()
        cudnn.benchmark = True
    ecg_dataset = EcgDataset(is_train=True)
    train_loader = torch.utils.data.DataLoader(dataset=ecg_dataset,
                                               batch_size=10)
    for epoch in range(parameters["num_epochs"]):
        net.train()
        for i, (data, label) in enumerate(train_loader):
            data, label = data.to(device), label.to(device)
            output = net(data)
            optimizer.zero_grad()
            loss = criterion(output, label)
            loss.backward()
            optimizer.step()
        print('Epoch [{}/{}], Loss: {:.4f}'.format(epoch + 1,
                                                   parameters["num_epochs"],
                                                   loss.item()))
        evaluation(net)
Example #2
def run(args):
    train_loader = torch.utils.data.DataLoader(
        datasets.ImageFolder('../../ssl_data_96/supervised/train',
                             transform=data_transforms),
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=4)  # four dataloader worker processes

    val_loader = torch.utils.data.DataLoader(
        datasets.ImageFolder('../../ssl_data_96/supervised/val',
                             transform=validation_data_transforms),
        batch_size=args.batch_size,
        shuffle=False,
        num_workers=4)  # four dataloader worker processes

    model = CNNModel()
    model.cuda()

    optimizer = optim.RMSprop(model.parameters(),
                              lr=args.lr,
                              momentum=args.momentum,
                              weight_decay=1e-3)
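    # StepLR multiplies the learning rate by gamma (0.1 by default) every step_size=5 epochs.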
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=5)

    for epoch in range(1, args.epochs + 1):
        train(epoch, model, optimizer, train_loader, args.log_interval)
        validation(epoch, model, val_loader)
        scheduler.step()  # step the scheduler once per epoch, after the optimizer updates
        model_file = 'model_' + str(epoch) + '.pth'
        torch.save(model.state_dict(), model_file)
    writer.close()
Example #3
def run(args):
    train_loader = torch.utils.data.DataLoader(datasets.ImageFolder(
        args.data + '/train', transform=data_transforms),
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=16)

    val_loader = torch.utils.data.DataLoader(datasets.ImageFolder(
        args.data + '/val', transform=validation_data_transforms),
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             num_workers=16)

    model = CNNModel()
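    # Replicate the model across GPUs before moving it to the configured device.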
    model = nn.DataParallel(model)
    model = model.to(args.device)

    if args.checkpoint is not None:
        model.load_state_dict(torch.load(args.checkpoint))

    optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=1e-3)
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=args.step_size)

    for epoch in range(1, args.epochs + 1):
        train(epoch, model, optimizer, train_loader, args.log_interval)
        validation(epoch, model, val_loader)
        scheduler.step()
        model_file = 'model_' + str(epoch) + '.pth'
        torch.save(model.state_dict(), model_file)
    writer.close()
Example #4
def main_simple_cnn():
    TEXT = data.Field(sequential=True, include_lengths=True)
    LABEL = data.Field(sequential=False)
    train, val, test = datasets.SNLI.splits(TEXT, LABEL)
    TEXT.build_vocab(train, vectors="glove.840B.300d")
    LABEL.build_vocab(train)
    vocab = TEXT.vocab
    train_iter, val_iter, test_iter = data.Iterator.splits(
        (train, val, test),
        batch_size=50,
        repeat=False,
        shuffle=False)
    config = Config()
    
    criterion = nn.CrossEntropyLoss()

    model = CNNModel(vocab, config)
    # model = Model(vocab, config)

    if args.cuda:
        model.cuda()

    optimizer = optim.Adam([param for param in model.parameters() if param.requires_grad], lr=1e-3)

    
    for epoch in range(args.max_epoch):
        train_acc = 0.0
        train_cnt = 0
        for batch in train_iter:
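            # torchtext's label vocab reserves index 0 for <unk>, so shift labels to 0-based for CrossEntropyLoss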
            x, y = batch, batch.label - 1
            f_x = model(x)
            acc = (f_x.max(1)[1] == y).type(torch.FloatTensor).mean().float()
            loss = criterion(f_x, y)
            model.zero_grad()
            loss.backward()
            optimizer.step()

            if train_cnt % 100 == 0:
                print('cnt = {}, acc = {}, loss = {}'.format(train_cnt, acc, loss.item()))
            train_cnt += 1
            train_acc += acc

        test_acc = 0.0
        test_cnt = 0
        for batch in test_iter:
            x, y = batch, batch.label - 1
            f_x = model(x)
            test_acc += (f_x.max(1)[1] == y).type(torch.FloatTensor).mean().float()
            test_cnt += 1
        print('epoch = {}, train_acc = {}, test_acc = {}'.format(epoch, train_acc / train_cnt, test_acc / test_cnt))
Example #5
def train(train_config):
    use_cuda = train_config.num_gpus > 0

    if use_cuda:
        torch.cuda.manual_seed(train_config.num_gpus)
        logger.info("Number of GPUs available: {}".format(train_config.num_gpus))

    device = torch.device('cuda' if use_cuda else 'cpu')
    model = CNNModel().to(device)
    optimizer = optim.Adam(model.parameters(), lr=train_config.lr)
    best_train_loss = float('inf')

    for epoch in range(1, train_config.num_epochs + 1):
        model.train()
        train_loss = 0

        for batch_idx, sample_batch in enumerate(train_loader):
            pdb = sample_batch['pdb']
            x = sample_batch['pocket']
            y_true = sample_batch['label']

            x, y_true = x.to(device), y_true.to(device)

            optimizer.zero_grad()
            output = model(x)

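            # mean-squared-error regression loss between the network output and the true label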
            loss = F.mse_loss(output, y_true)
            train_loss += loss.item()
            loss.backward()

            optimizer.step()

            if batch_idx % train_config.log_train_freq == 0:
                logger.info("Train epoch: {}, Loss: {:.06f}"
                             .format(epoch, loss.data[0]))

        if train_loss < best_train_loss:
            best_train_loss = train_loss
            utils.save_model(model, train_config.model_dir, logger)
Example #6
                                                  'mnist_m_train'),
                           data_list=train_list,
                           transform=img_transform_target)

dataloader_target = torch.utils.data.DataLoader(dataset=dataset_target,
                                                batch_size=batch_size,
                                                shuffle=True,
                                                num_workers=8)

# load model

my_net = CNNModel()

# setup optimizer

optimizer = optim.Adam(my_net.parameters(), lr=lr)

loss_class = torch.nn.NLLLoss()
loss_domain = torch.nn.NLLLoss()

if cuda:
    my_net = my_net.cuda()
    loss_class = loss_class.cuda()
    loss_domain = loss_domain.cuda()

for p in my_net.parameters():
    p.requires_grad = True

# training
best_accu_s = 0.0
for epoch in range(n_epoch):
Example #7
# model_ft = model_ft.to(device)
model_conv = models.resnet18(pretrained=True)
model_conv.avgpool = nn.AdaptiveAvgPool2d(output_size=(1, 1))
for p in model_conv.parameters():
    p.requires_grad = True
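# Drop resnet18's final fully connected layer so it acts as a feature extractor.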
model_conv = nn.Sequential(*list(model_conv.children())[:-1])
model_conv = model_conv.cuda()

criterion_1 = nn.CrossEntropyLoss()
criterion_2 = nn.CrossEntropyLoss()

my_model = CNNModel()
my_model = my_model.cuda()
# Observe that all parameters are being optimized
optimizer_ft = optim.Adam(my_model.parameters(), lr=0.1, weight_decay=1e-1)

# Decay LR by a factor of 0.1 every 7 epochs
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)

model_ft = train_model(my_model,
                       criterion_1,
                       criterion_2,
                       optimizer_ft,
                       exp_lr_scheduler,
                       num_epochs=50,
                       alpha=-1)

# visualize_model(model_ft)

Example #8
    # Set dataloader
    train_loader = DataLoader(dataset=TensorDataset(torch.FloatTensor(x_train),
                                                    torch.LongTensor(y_train)),
                              batch_size=batch_size,
                              shuffle=True,
                              num_workers=4)

    valid_loader = DataLoader(dataset=TensorDataset(torch.FloatTensor(x_valid),
                                                    torch.LongTensor(y_valid)),
                              batch_size=batch_size,
                              shuffle=False,
                              num_workers=4)

    print('Initial CNN model.')
    model = CNNModel(input_dim, output_dim).cuda()
    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
    loss_func = torch.nn.CrossEntropyLoss()

    print('Start training.')

    best_ed = 999
    early_stop_cnt = 0

    for epoch in range(1, epochs + 1):

        print('Epoch: {}/{}'.format(epoch, epochs))

        total_loss, total_acc, nonzeros = 0, 0, 0

        widgets = [
            FormatLabel(''), ' ',
Example #9
def main(args):

    cuda = True
    cudnn.benchmark = True
    # data_root = '/home/weiyuhua/Challenge2020/Data/DG'
    data_root = '/home/yin/code/weiyuhua/Challenge2020/Data/DG'

    model_root = args.model_root
    logs = args.logs
    lr = args.lr
    batch_size = args.batch_size
    n_epoch = args.n_epoch
    unseen_index = args.unseen_index
    val_split = args.val_split

    manual_seed = random.randint(1, 10000)
    random.seed(manual_seed)
    torch.manual_seed(manual_seed)

    tb_dir = os.path.join(logs, 'tb_dir')
    if not os.path.exists(logs):
        os.makedirs(logs)
    if not os.path.exists(model_root):
        os.makedirs(model_root)
    if not os.path.exists(tb_dir):
        os.makedirs(tb_dir)

    # Tensorboard
    train_writer = SummaryWriter(tb_dir + '/train')
    val_writer = SummaryWriter(tb_dir + '/valid')
    test_writer = SummaryWriter(tb_dir + '/test')

    # get train, val and test datasets
    D = GetDataset(data_root, unseen_index, val_split)
    train_datasets, val_datasets, test_dataset = D.get_datasets()

    # get dataloaders
    train_dataloaders = []
    for train_dataset in train_datasets:
        train_dataloader = DataLoader(dataset=train_dataset,
                                      batch_size=batch_size,
                                      shuffle=True,
                                      num_workers=8)
        train_dataloaders.append(train_dataloader)

    val_dataloaders = []
    for val_dataset in val_datasets:
        val_dataloader = DataLoader(dataset=val_dataset,
                                    batch_size=batch_size,
                                    shuffle=False,
                                    num_workers=8)
        val_dataloaders.append(val_dataloader)

    test_dataloader = DataLoader(dataset=test_dataset,
                                 batch_size=batch_size,
                                 shuffle=False,
                                 num_workers=8)

    # load model
    my_net = CNNModel()

    # setup optimizer

    optimizer = optim.Adam(my_net.parameters(), lr=lr)

    loss_class = torch.nn.NLLLoss()
    loss_domain = torch.nn.NLLLoss()

    if cuda:
        my_net = my_net.cuda()
        loss_class = loss_class.cuda()
        loss_domain = loss_domain.cuda()

    for p in my_net.parameters():
        p.requires_grad = True

    # training
    best_accu_val = 0.0
    for epoch in range(n_epoch):

        len_dataloader = np.min(
            np.array([
                len(train_dataloaders[i])
                for i in range(len(train_dataloaders))
            ]))

        data_train_iters = []
        for train_dataloader in train_dataloaders:
            data_train_iter = iter(train_dataloader)
            data_train_iters.append(data_train_iter)

        for i in range(len_dataloader):

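            # DANN schedule: the gradient-reversal coefficient alpha ramps from 0 toward 1 over training.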
            p = float(i + epoch * len_dataloader) / n_epoch / len_dataloader
            alpha = 2. / (1. + np.exp(-10 * p)) - 1

            err_label_s = []
            err_domain_s = []

            # err_label_all = torch.tensor(0.0)
            # err_domain_all = torch.tensor(0.0)
            err_label_all = 0
            err_domain_all = 0

            # training model using multi-source data
            for j, data_train_iter in enumerate(data_train_iters):
                data_train = next(data_train_iter)
                s_ecg, s_label = data_train

                my_net.zero_grad()
                batch_size = len(s_label)

                domain_label = (torch.ones(batch_size) * j).long()

                if cuda:
                    s_ecg = s_ecg.cuda()
                    s_label = s_label.cuda()
                    domain_label = domain_label.cuda()

                class_output, domain_output = my_net(input_data=s_ecg,
                                                     alpha=alpha)
                err_label = loss_class(class_output, s_label)
                err_domain = loss_domain(domain_output, domain_label)

                err_label_s.append(err_label.data.cpu().numpy())
                err_domain_s.append(err_domain.data.cpu().numpy())
                err_label_all += err_label
                err_domain_all += err_domain

            # err = err_domain_all + err_label_all
            err = err_label_all
            err.backward()
            optimizer.step()

            print('\n')

            for j in range(len(train_dataloaders)):
                print('\r epoch: %d, [iter: %d / all %d], domain: %d, err_label: %f, err_domain: %f' \
                      % (epoch, i + 1, len_dataloader, j + 1, err_label_s[j], err_domain_s[j]))
                # tb training
                train_writer.add_scalar('err_label_%d' % (j), err_label_s[j])
                train_writer.add_scalar('err_domain_%d' % (j), err_domain_s[j])

            torch.save(my_net,
                       '{0}/model_epoch_current.pth'.format(model_root))

        print('\n')

        ## validation
        val_accus, best_accu_val, val_err_label_s, val_err_domain_s = valid(
            val_dataloaders, model_root, best_accu_val)

        for i in range(len(val_dataloaders)):
            print('\r epoch: %d, Validation, domain: %d, accu: %f' %
                  (epoch, i + 1, val_accus[i]))
            # tb validation
            val_writer.add_scalar('err_label_%d' % (i), val_err_label_s[i])
            val_writer.add_scalar('err_domain_%d' % (i), val_err_domain_s[i])
            val_writer.add_scalar('accu_%d' % (i), val_accus[i])

        ## test
        test_accu, test_err_label = test(test_dataloader,
                                         model_root,
                                         model_best=False)
        test_writer.add_scalar('accu', test_accu)
        test_writer.add_scalar('err_label', test_err_label)

    result_path = os.path.join(logs, 'results.txt')
    print('============ Summary ============= \n')
    for i, train_dataloader in enumerate(train_dataloaders):
        train_accu, train_err_label = test(train_dataloader, model_root)
        write_log(
            'Accuracy of the train dataset %d : %f err_label : %f' %
            (i + 1, train_accu, train_err_label), result_path)

    for i, val_dataloader in enumerate(val_dataloaders):
        val_accu, val_err_label = test(val_dataloader, model_root)
        write_log(
            'Accuracy of the val dataset %d : %f err_label : %f' %
            (i + 1, val_accu, val_err_label), result_path)

    test_accu, test_err_label = test(test_dataloader, model_root)
    write_log(
        'Accuracy of the test dataset : %f err_label : %f' %
        (test_accu, test_err_label), result_path)
Example #10
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader

from local_config import *
from model import CNNModel
from evaluate import evaluate
from data_helper import trainset, devset, testset, word_vectors, word2id, rel2id


if __name__ == "__main__":

    train_loader = DataLoader(trainset, shuffle=True, batch_size=BATCH_SIZE)
    dev_loader = DataLoader(devset, shuffle=False, batch_size=BATCH_SIZE)
    test_loader = DataLoader(testset, shuffle=False, batch_size=BATCH_SIZE)

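    # Initialize the model with the word-embedding matrix and the relation-label mapping.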
    model = CNNModel(torch.tensor(word_vectors, dtype=torch.float32), rel2id)
    optimizer = optim.Adam(model.parameters(), lr=LEARNING_RATE)
    criterion = nn.CrossEntropyLoss()

    model.to(DEVICE)
    criterion.to(DEVICE)

    best_f1_micro = 0.0
    waste_epoch = 0
    for epoch in range(EPOCH):
        running_loss = 0.0
        for i, data in enumerate(train_loader):
            model.train()
            tokens, pos1, pos2, label = data

            tokens = tokens.to(DEVICE)
            pos1 = pos1.to(DEVICE)
Example #11
        shuffle=True,
        num_workers = 3)

valid_dataset = torch.utils.data.TensorDataset(
    valid_data,
    valid_label)
valid_loader = torch.utils.data.DataLoader(
        dataset=valid_dataset,
        batch_size=512,
        shuffle=True,
        num_workers=3
)

model = CNNModel().cuda()
#model.load_state_dict(torch.load('models/model.pth'))
optimizer = optim.Adam(model.parameters(), lr=1e-5)

best = []
W = []
for epoch in range(10):
    model.train()
    for step, (batch_x, batch_y) in enumerate(loader):
        data = batch_x.unsqueeze(1)
        # data = batch_x.unsqueeze(2)#bs, seq, 1      LSTM MODEL UNCOMMENT THIS
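        # Normalize each sample to zero mean and unit variance along the sequence dimension.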
        data = (data-torch.mean(data, dim=2, keepdim=True))/torch.std(data, dim=2, keepdim=True)
        # target = batch_y.unsqueeze(2).cuda()        LSTM MODEL UNCOMMENT THIS
        target = batch_y.cuda()      # LSTM MODEL COMMENT THIS
        data = data.cuda()
        target = target.cuda()
        pred = model(data)
        lossB = torch.abs(pred - target).mean()
Example #12
def train_dann(dataset_source, dataset_target, n_epoch, batch_size, in_dim,
               h_dims, out_dim, ckpt_save_path):
    lr = 1e-3
    l_d = 0.1

    dataloader_source = torch.utils.data.DataLoader(
        dataset=dataset_source,
        batch_size=batch_size,
        shuffle=True,
    )

    dataloader_target = torch.utils.data.DataLoader(
        dataset=dataset_target,
        batch_size=batch_size,
        shuffle=True,
    )

    model = CNNModel(in_dim, h_dims, out_dim)
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)

    loss_class = torch.nn.CrossEntropyLoss()
    loss_domain = torch.nn.CrossEntropyLoss()

    if cuda:
        model = model.cuda()
        loss_class = loss_class.cuda()
        loss_domain = loss_domain.cuda()

    for p in model.parameters():
        p.requires_grad = True

    # training
    best_acc = 0.0
    best_ep = 0
    tr_acc_ls = []
    te_acc_ls = []
    loss_ls = []
    for epoch in range(n_epoch):
        model.train()
        len_dataloader = min(len(dataloader_source), len(dataloader_target))
        data_source_iter = iter(dataloader_source)
        data_target_iter = iter(dataloader_target)
        loss_sum = 0.0
        n_s = 0
        for i in range(len_dataloader):
            # Compute reverse layer parameter alpha
            p = float(i + epoch * len_dataloader) / n_epoch / len_dataloader
            alpha = 2. / (1. + np.exp(-10 * p)) - 1

            # training model using source data
            data_s, label_s = next(data_source_iter)
            batch_size_s = len(label_s)
            n_s += batch_size_s
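            # Domain label 0 marks source-domain samples; target samples get label 1 below.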
            domain_label = torch.zeros(batch_size_s).long()

            if cuda:
                data_s = data_s.cuda()
                label_s = label_s.cuda()
                domain_label = domain_label.cuda()

            class_output, domain_output = model(input_data=data_s, alpha=alpha)
            loss_c = loss_class(class_output, label_s)
            loss_ds = loss_domain(domain_output, domain_label)

            # training model using target data
            data_t, _ = next(data_target_iter)
            batch_size_t = len(data_t)
            domain_label = torch.ones(batch_size_t).long()

            if cuda:
                data_t = data_t.cuda()
                domain_label = domain_label.cuda()
            _, domain_output = model(input_data=data_t, alpha=alpha)
            loss_dt = loss_domain(domain_output, domain_label)

            # Compute overall loss and backprop
            loss = loss_c + l_d * (loss_dt + loss_ds)
            loss_sum += loss.item() * batch_size_s

            model.zero_grad()
            loss.backward()
            optimizer.step()

            # logger.info('epoch: {:>4}, [iter: {:>4} / all {:>4}], loss {:8.4f}, '
            #             'loss_c: {:8.4f}, loss_ds: {:8.4f}, loss_dt: {:8.4f}\n'
            #             .format(epoch, i+1, len_dataloader, loss.item(), loss_c.item(), loss_ds.item(), loss_dt.item()))

        tr_acc, tr_f1 = evaluate_dann(model, dataset_source, batch_size)
        te_acc, te_f1 = evaluate_dann(model, dataset_target, batch_size)
        tr_acc_ls.append(tr_acc)
        te_acc_ls.append(te_acc)
        loss_ls.append(loss_sum)
        # If find a better result, save the model
        if te_acc > best_acc:
            best_acc = te_acc
            best_ep = epoch
            checkpoint = {"epoch": epoch, "state_dict": model.state_dict()}
            torch.save(checkpoint, ckpt_save_path + '.ckpt')

        logger.info(
            'epoch: {:>4}, loss: {:8.4f}, train acc: {:8.4f}, train f1: {:8.4f},'
            ' eval acc: {:8.4f}, eval f1: {:8.4f}'.format(
                epoch, loss_sum, tr_acc, tr_f1, te_acc, te_f1))

    logger.info('=' * 10)
    logger.info('best epoch: {:>4}, best acc: {:8.4f}'.format(
        best_ep, best_acc))
    pickle.dump(tr_acc_ls, open(ckpt_save_path + '.tracc', 'wb'))
    pickle.dump(te_acc_ls, open(ckpt_save_path + '.teacc', 'wb'))
    pickle.dump(loss_ls, open(ckpt_save_path + '.loss', 'wb'))
Example #13
def train(source, target):
    source_dataset_name = 'source'
    target_dataset_name = 'target'
    model_root = 'models'
    cuda = True
    cudnn.benchmark = True

    manual_seed = random.randint(1, 10000)
    random.seed(manual_seed)
    torch.manual_seed(manual_seed)

    # load model

    my_net = CNNModel()

    # setup optimizer

    optimizer = optim.Adam(my_net.parameters(), lr=lr)

    loss_class = torch.nn.CrossEntropyLoss()
    # torch.nn.AdaptiveLogSoftmaxWithLoss

    loss_domain = torch.nn.CrossEntropyLoss()

    if cuda:
        my_net = my_net.cuda()
        loss_class = loss_class.cuda()
        loss_domain = loss_domain.cuda()

    for p in my_net.parameters():
        p.requires_grad = True

    # training
    best_accu_t = 0.0
    for epoch in range(n_epoch):

        len_dataloader = min(len(source), len(target))
        data_source_iter = iter(source)
        data_target_iter = iter(target)

        for i in range(len_dataloader):

            p = float(i + epoch * len_dataloader) / n_epoch / len_dataloader
            alpha = 2. / (1. + np.exp(-10 * p)) - 1

            # training model using source data
            data_source = next(data_source_iter)
            s_img, s_label = data_source
            s_label = s_label.long()

            my_net.zero_grad()
            batch_size = len(s_label)

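            # Domain label 0 for source samples; target samples get label 1 below.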
            domain_label = torch.zeros(batch_size).long()

            if cuda:
                s_img = s_img.cuda()
                s_label = s_label.cuda()
                domain_label = domain_label.cuda()

            class_output, domain_output = my_net(input_data=s_img, alpha=alpha)
            err_s_label = loss_class(class_output, s_label)
            err_s_domain = loss_domain(domain_output, domain_label)

            # training model using target data
            data_target = next(data_target_iter)
            t_img, _ = data_target

            batch_size = len(t_img)

            domain_label = torch.ones(batch_size).long()

            if cuda:
                t_img = t_img.cuda()
                domain_label = domain_label.cuda()

            _, domain_output = my_net(input_data=t_img, alpha=alpha)
            err_t_domain = loss_domain(domain_output, domain_label)
            # print(err_t_domain, err_s_domain)
            err = mu * (err_t_domain + err_s_domain) + err_s_label
            err.backward()
            optimizer.step()

            # sys.stdout.write('\r epoch: %d, [iter: %d / all %d], err_s_label: %f, err_s_domain: %f, err_t_domain: %f' \
            #                  % (epoch, i + 1, len_dataloader, err_s_label.data.cpu().numpy(),
            #                     err_s_domain.data.cpu().numpy(), err_t_domain.data.cpu().item()))
            # sys.stdout.flush()
            torch.save(my_net, '{0}/SEED_current.pth'.format(model_root))

        # print('\n')
        accu_s = mytest(source_dataset_name)
        # print('Accuracy of the %s dataset: %f' % ('source', accu_s))
        accu_t = mytest(target_dataset_name)
        # print('Accuracy of the %s dataset: %f\n' % ('target', accu_t))
        if accu_t > best_accu_t:
            best_accu_s = accu_s
            best_accu_t = accu_t
            torch.save(my_net, '{0}/SEED_best.pth'.format(model_root))

    print('============ Summary ============= \n')
    print('Accuracy of the %s dataset: %f' % ('source', best_accu_s))
    print('Accuracy of the %s dataset: %f' % ('target', best_accu_t))
    print('Corresponding model was saved in ' + model_root + '/SEED_best.pth')
    accu_test = mytest("target")
    print('============ Test ============= \n')
    print('Accuracy of the %s dataset: %f\n' % ('test', accu_test))
    return accu_test
Example #14
                                 batch_size=batch_size,
                                 use_cache=False)
dataloader_target_train = DataLoader(dataset=dataset_target_train,
                                     batch_size=batch_size,
                                     shuffle=True,
                                     num_workers=2)
dataloader_target_valid = DataLoader(dataset=dataset_target_valid,
                                     batch_size=batch_size,
                                     shuffle=True,
                                     num_workers=2)

# load model
model = CNNModel()

# setup optimizer
optimizer = optim.Adam(model.parameters(), lr=lr)

loss_class = torch.nn.CrossEntropyLoss()
loss_domain = torch.nn.CrossEntropyLoss()

if cuda:
    model = model.cuda()
    loss_class = loss_class.cuda()
    loss_domain = loss_domain.cuda()

for p in model.parameters():
    p.requires_grad = True

# training
best_accu_s = 0.
best_accu_t = 0.