Example #1
import os

import torch
import torch.optim as optim

# Project-local helpers this snippet relies on: make_args_parser,
# print_args, datasets, models, train_test, constants, and the
# MODEL_CHECKPOINTS path constant.


def main():
    # Select the available device
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print("Running on: {}".format(device))
    # Parse and print arguments
    args = make_args_parser()
    print_args(args)
    # Load both source and target domain datasets
    source_dataloader = datasets.get_source_domain(args.source)
    target_dataloader = datasets.get_target_domain(args.target)
    # Create directory to save the model's checkpoints
    model_root = MODEL_CHECKPOINTS + args.source + '-' + args.target
    os.makedirs(model_root, exist_ok=True)
    # Init model and move it to the selected device
    net = models.DANN()
    net.to(device)
    # Init losses for label prediction and domain classification
    class_loss = torch.nn.NLLLoss().to(device)
    domain_loss = torch.nn.NLLLoss().to(device)
    # Init optimizer
    optimizer = optim.Adam(net.parameters(), lr=constants.LR)
    # Ensure all parameters are optimized via backpropagation
    # (requires_grad is already True by default for module parameters)
    for param in net.parameters():
        param.requires_grad = True
    # Train and evaluate the model each epoch
    for epoch in range(constants.N_EPOCHS):
        train_test.train(net, class_loss, domain_loss, source_dataloader,
                         target_dataloader, optimizer, epoch,
                         model_root, device)
        train_test.test(net, source_dataloader, target_dataloader, device)
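
The train_test.train call above hides the per-batch logic. A minimal sketch of what one DANN training step typically looks like, assuming net returns a (class_output, domain_output) pair and accepts a gradient-reversal coefficient alpha (the function name and signature are illustrative assumptions, not the repository's actual code):

import torch


def dann_step(net, class_loss, domain_loss, src_batch, tgt_batch,
              optimizer, alpha, device):
    # Hypothetical DANN update: label loss on source data plus domain
    # loss on both domains, combined through gradient reversal.
    src_images, src_labels = src_batch
    tgt_images, _ = tgt_batch
    src_images = src_images.to(device)
    src_labels = src_labels.to(device)
    tgt_images = tgt_images.to(device)

    optimizer.zero_grad()

    # Source pass: learn the class labels and the "source" domain label
    class_out, domain_out_src = net(src_images, alpha)
    src_domain = torch.zeros(len(src_images), dtype=torch.long, device=device)
    loss = class_loss(class_out, src_labels) \
        + domain_loss(domain_out_src, src_domain)

    # Target pass: only the domain classifier sees these (no class labels)
    _, domain_out_tgt = net(tgt_images, alpha)
    tgt_domain = torch.ones(len(tgt_images), dtype=torch.long, device=device)
    loss = loss + domain_loss(domain_out_tgt, tgt_domain)

    loss.backward()
    optimizer.step()
    return loss.item()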
Example #2
        writer.writerow(['image_name', 'label'])
        for image_name, label in result_dict.items():
            writer.writerow([image_name, label])


''' setup GPU '''
torch.cuda.set_device(args.gpu)

''' prepare data_loader '''
data_loader = torch.utils.data.DataLoader(data.TESTDATA(args),
                                          batch_size=args.train_batch,
                                          num_workers=args.workers,
                                          shuffle=False)

''' prepare model '''
feature_extractor, label_predictor, domain_classifier = models.DANN(args)
feature_extractor.cuda()
label_predictor.cuda()

''' resume saved model '''
if args.target_domain == 'mnistm':
    feature_extractor.load_state_dict(torch.load('s2m_dann_feature_extractor.pth.tar', map_location='cuda:0'))
    label_predictor.load_state_dict(torch.load('s2m_dann_label_predictor.pth.tar', map_location='cuda:0'))
elif args.target_domain == 'svhn':
    feature_extractor.load_state_dict(torch.load('m2s_dann_feature_extractor.pth.tar', map_location='cuda:0'))
    label_predictor.load_state_dict(torch.load('m2s_dann_label_predictor.pth.tar', map_location='cuda:0'))

saveResult((feature_extractor, label_predictor), data_loader, args.save_folder)
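
This example opens mid-function: the writerow lines at the top are the tail of the saveResult helper invoked on the last line above. A minimal sketch of what the complete helper might look like, assuming the test loader yields (image, image_name) pairs and predictions go to a CSV in save_folder; everything outside the writerow lines shown above is an assumption:

import csv
import os

import torch


def saveResult(model_pair, data_loader, save_folder):
    # Hypothetical reconstruction: predict a label for every test image
    # and write image_name/label pairs to a CSV file.
    feature_extractor, label_predictor = model_pair
    feature_extractor.eval()
    label_predictor.eval()

    result_dict = {}
    with torch.no_grad():
        for images, image_names in data_loader:
            preds = label_predictor(feature_extractor(images.cuda()))
            labels = preds.argmax(dim=1).cpu().tolist()
            result_dict.update(zip(image_names, labels))

    # 'test_pred.csv' is an assumed output name
    with open(os.path.join(save_folder, 'test_pred.csv'), 'w', newline='') as f:
        writer = csv.writer(f)
        writer.writerow(['image_name', 'label'])
        for image_name, label in result_dict.items():
            writer.writerow([image_name, label])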



Example #3
import argparse
import datetime
import os

import torch
import torch.backends.cudnn as cudnn
import torchvision.datasets as datasets
import torchvision.transforms as transforms

# Project-local module assumed by this snippet: models.


def eval():
    parser = argparse.ArgumentParser()
    parser.add_argument('--method', type=str, default='emm')
    parser.add_argument('--dataset', type=str, default='mnist_svhn')
    parser.add_argument(
        '--data_root',
        type=str,
        default='/media/b3-542/196AE2835A1F87B0/HeHai/Dataset/Mnist')
    parser.add_argument('--batch_size', type=int, default=32)
    parser.add_argument('--image_size', type=int, default=28)
    parser.add_argument('--nf', type=int, default=64)
    parser.add_argument('--nepochs', type=int, default=100)
    parser.add_argument('--num_classes', type=int, default=10)
    parser.add_argument('--lr', type=float, default=0.002)
    parser.add_argument('--beta', type=float, default=0.8)
    parser.add_argument('--lr_patience', type=float, default=8)

    opt = parser.parse_args()

    # target_dataset_name = 'mnist_m'
    # target_image_root = os.path.join(opt.data_root, target_dataset_name)
    target_dataset_name = 'svhn'
    opt.data_root = '/media/b3-542/196AE2835A1F87B0/HeHai/Dataset/Mnist/digits'
    target_train_root = os.path.join(opt.data_root, target_dataset_name,
                                     'trainset')

    cudnn.benchmark = True

    # load data
    # source data
    img_transform = transforms.Compose([
        transforms.Resize(opt.image_size),
        transforms.ToTensor(),
        transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
    ])
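    # (Normalize with mean/std 0.5 rescales each channel from [0, 1] to [-1, 1])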

    # target data
    # train_list = os.path.join(target_image_root, 'mnist_m_train_labels.txt')
    # dataset_target = utils.GetLoader(
    #     data_root=os.path.join(target_image_root, 'mnist_m_train'),
    #     data_list=train_list,
    #     transform=img_transform
    # )

    # target data
    dataset_target = datasets.ImageFolder(
        root=target_train_root,
        transform=img_transform,
    )

    loader = torch.utils.data.DataLoader(dataset=dataset_target,
                                         batch_size=opt.batch_size,
                                         shuffle=True,
                                         num_workers=8)

    if opt.method == 'dann':
        model = models.DANN(opt).cuda()
        model_path = './models/' + opt.dataset + '/best_dann.pth'
        model.load_state_dict(torch.load(model_path))
    elif opt.method == 'sourceonly':
        model = models.Classifier().cuda()
        model_path = './models/' + opt.dataset + '/best_cls.pth'
        model.load_state_dict(torch.load(model_path))
    elif opt.method == 'emm':
        # the 'emm' checkpoint stores the whole pickled model, not a state_dict
        model_path = './models/' + opt.dataset + '/dann.pth'
        model = torch.load(model_path)
    else:
        raise ValueError('Unknown method: {}'.format(opt.method))
    print(model_path)

    model.eval()
    total = 0
    correct = 0

    with torch.no_grad():
        for src_image, src_label in loader:
            src_image = src_image.cuda()
            src_label = src_label.cuda()
            if opt.method == 'dann':
                class_out, _ = model(src_image, -1)
            else:  # 'sourceonly' / 'emm'
                class_out, _ = model(src_image, 0)
            _, predicted = torch.max(class_out, 1)
            total += src_label.size(0)
            correct += (predicted == src_label).sum().item()

    val_acc = 100 * float(correct) / total
    print('%s | Test Accuracy: %f%%' % (datetime.datetime.now(), val_acc))

    return val_acc
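
The second argument in model(src_image, -1) is the coefficient passed to DANN's gradient-reversal layer; at evaluation time its value is irrelevant because no gradients flow. For reference, a minimal sketch of how such a layer is typically implemented (the standard Ganin and Lempitsky construction; the class and function names are illustrative, not this repository's):

import torch


class GradReverse(torch.autograd.Function):
    # Identity on the forward pass; multiplies the gradient by -alpha on
    # the backward pass, so the feature extractor learns to confuse the
    # domain classifier.

    @staticmethod
    def forward(ctx, x, alpha):
        ctx.alpha = alpha
        return x.view_as(x)

    @staticmethod
    def backward(ctx, grad_output):
        # Reversed, scaled gradient for x; None for the alpha argument
        return grad_output.neg() * ctx.alpha, None


def grad_reverse(x, alpha=1.0):
    return GradReverse.apply(x, alpha)

Inside a DANN forward pass, the features feed the label predictor directly but pass through grad_reverse(features, alpha) before the domain classifier, which is why the models above take alpha alongside the input batch.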