Example #1
0
File: function.py  Project: pjirayu/BTDA
def test_model_equal_weight(args):
    """Average test_model accuracy over every target domain except the source.

    The source domain named by ``args.source_name`` is removed from the
    fixed digits benchmark list; each remaining domain contributes equally
    to the returned mean accuracy.
    """
    targets = ["mnist", "mnist_m", "usps", "synth", "svhn"]
    targets.remove(args.source_name)  # raises ValueError for an unknown source

    # Identical preprocessing for every target test set.
    preprocess = transforms.Compose([
        transforms.Resize(args.image_size),
        transforms.ToTensor(),
        transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)),
    ])

    scores = []
    for name in targets:
        # Held-out test split of this target domain.
        list_path = os.path.join("dataset/image_list/", name + '_test.txt')
        test_set = GetLoader(
            data_root=os.path.join(args.data_root, 'imgs'),
            data_list=list_path,
            transform=preprocess,
        )
        loader = torch.utils.data.DataLoader(
            dataset=test_set,
            batch_size=args.batch_size,
            shuffle=False,
            num_workers=8)
        scores.append(test_model(loader, 0, args))

    return sum(scores) * 1.0 / len(scores)
Example #2
0
def load_test_data(dataset_name):
    """Build a non-shuffling test DataLoader for ``dataset_name``.

    'mnist_m' is read from the on-disk image list via GetLoader; any other
    name falls back to the torchvision MNIST test split.
    """
    preprocess = transforms.Compose([
        transforms.Resize(IMAGE_SIZE),
        transforms.ToTensor(),
        transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)),
    ])

    if dataset_name == 'mnist_m':
        test_set = GetLoader(
            data_root='../dataset/mnist_m/mnist_m_test',
            data_list='../dataset/mnist_m/mnist_m_test_labels.txt',
            transform=preprocess,
        )
    else:
        test_set = datasets.MNIST(root=IMG_DIR_SRC,
                                  train=False,
                                  transform=preprocess,
                                  download=True)

    # Evaluation loader: fixed order, parallel workers.
    return torch.utils.data.DataLoader(dataset=test_set,
                                       batch_size=BATCH_SIZE,
                                       shuffle=False,
                                       num_workers=8)
def load_data():
    """Return (source, target) training DataLoaders: MNIST and MNIST-M."""
    preprocess = transforms.Compose([
        transforms.Resize(IMAGE_SIZE),
        transforms.ToTensor(),
        transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)),
    ])

    def _make_loader(ds):
        # Both domains share the same batching configuration.
        return torch.utils.data.DataLoader(dataset=ds,
                                           batch_size=BATCH_SIZE,
                                           shuffle=True,
                                           drop_last=True,
                                           num_workers=8)

    source_set = datasets.MNIST(root=IMG_DIR_SRC,
                                train=True,
                                transform=preprocess,
                                download=True)
    target_set = GetLoader(data_root=IMG_DIR_TAR + '/mnist_m_train',
                           data_list=IMG_DIR_TAR + '/mnist_m_train_labels.txt',
                           transform=preprocess)

    return _make_loader(source_set), _make_loader(target_set)
Example #4
0
def test(dataset_name, epoch):
    """Evaluate the checkpoint saved at *epoch* on the test split of
    *dataset_name* and print / return the classification accuracy.

    Args:
        dataset_name: 'mnist' or 'mnist_m' -- selects the test set.
        epoch: which saved checkpoint to load from ../models.

    Returns:
        float: accuracy on the selected test set (newly returned; the
        original printed only, so existing callers are unaffected).
    """
    assert dataset_name in ['mnist', 'mnist_m']

    model_root = os.path.join('..', 'models')
    image_root = os.path.join('..', 'dataset', dataset_name)

    cuda = True
    cudnn.benchmark = True
    batch_size = 128
    image_size = 28
    alpha = 0  # alpha = 0: gradient-reversal / domain branch disabled at test time

    # --- load data ---
    # NOTE(review): RandomResizedCrop makes evaluation stochastic; a plain
    # Resize is the usual choice at test time -- kept to preserve behavior.
    img_transform = transforms.Compose([
        transforms.RandomResizedCrop(image_size),
        transforms.ToTensor(),
        transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
    ])

    if dataset_name == 'mnist_m':
        test_list = os.path.join(image_root, 'mnist_m_test_labels.txt')

        dataset = GetLoader(
            data_root=os.path.join(image_root, 'mnist_m_test'),
            data_list=test_list,
            transform=img_transform
        )
    else:
        dataset = datasets.MNIST(
            root=image_root,
            train=False,
            transform=img_transform,
        )

    dataloader = torch.utils.data.DataLoader(
        dataset=dataset,
        batch_size=batch_size,
        shuffle=False,
        num_workers=8
    )

    # --- load trained model ---
    my_net = torch.load(os.path.join(
        model_root, 'mnist_mnistm_model_epoch_' + str(epoch) + '.pth'
    ))
    my_net = my_net.eval()

    if cuda:
        my_net = my_net.cuda()

    len_dataloader = len(dataloader)
    data_target_iter = iter(dataloader)

    i = 0
    n_total = 0
    n_correct = 0

    while i < len_dataloader:
        # FIX: the Py2-only iterator method `.next()` was replaced with the
        # builtin next(), which works on both Python 2 and 3.
        t_img, t_label = next(data_target_iter)

        batch_size = len(t_label)  # last batch may be smaller

        input_img = torch.FloatTensor(batch_size, 3, image_size, image_size)
        class_label = torch.LongTensor(batch_size)

        if cuda:
            t_img = t_img.cuda()
            t_label = t_label.cuda()
            input_img = input_img.cuda()
            class_label = class_label.cuda()

        # Copy the batch into pre-allocated buffers (legacy Variable idiom,
        # kept for behavioral parity with the original).
        input_img.resize_as_(t_img).copy_(t_img)
        class_label.resize_as_(t_label).copy_(t_label)
        inputv_img = Variable(input_img)
        classv_label = Variable(class_label)

        class_output, _ = my_net(input_data=inputv_img, alpha=alpha)
        pred = class_output.data.max(1, keepdim=True)[1]
        n_correct += pred.eq(classv_label.data.view_as(pred)).cpu().sum()
        n_total += batch_size

        i += 1

    # FIX: accuracy is computed once after the loop (it was needlessly
    # recomputed every iteration) and printed with the print() *function*;
    # the original used a Python 2 print statement, a SyntaxError on Py3.
    accu = float(n_correct) * 1.0 / n_total

    print('epoch: %d, accuracy of the %s dataset: %f' % (epoch, dataset_name, accu))
    return accu
Example #5
0
File: main.py  Project: nkmeng/DANN
# --- data loading ---
# Source domain: torchvision MNIST training split, preprocessed with the
# module-level img_transform (defined earlier in this script).
dataset_source = datasets.MNIST(
    root=source_image_root,
    train=True,
    transform=img_transform,
)

dataloader_source = torch.utils.data.DataLoader(dataset=dataset_source,
                                                batch_size=batch_size,
                                                shuffle=True,
                                                num_workers=8)

# Target domain: MNIST-M, read through the project's list-file loader.
train_list = os.path.join(target_image_root, 'mnist_m_train_labels.txt')

dataset_target = GetLoader(data_root=os.path.join(target_image_root,
                                                  'mnist_m_train'),
                           data_list=train_list,
                           transform=img_transform)

dataloader_target = torch.utils.data.DataLoader(dataset=dataset_target,
                                                batch_size=batch_size,
                                                shuffle=True,
                                                num_workers=8)

# load model

my_net = CNNModel()

# setup optimizer

optimizer = optim.Adam(my_net.parameters(), lr=lr)
Example #6
0
# Path to a pre-trained embedding file; empty means none is supplied here
# -- presumably consumed elsewhere in the script (TODO confirm).
embedding_file = ""
model_root = os.path.join('models')
cuda = True
cudnn.benchmark = True  # let cuDNN auto-tune conv algorithms for fixed shapes
lr = 1e-3
batch_size = 64
n_epoch = 100

# Seed Python and torch RNGs with a randomly drawn (but reproducible) seed.
manual_seed = random.randint(1, 10000)
random.seed(manual_seed)
torch.manual_seed(manual_seed)

####################################################################################
#  Load data
####################################################################################
# NOTE(review): this GetLoader variant takes a `data_fn` argument rather
# than data_root/data_list as in the other examples -- verify against its
# definition before reuse.
dataset_source_train = GetLoader(data_fn=source_train,)
dataset_source_eval = GetLoader(data_fn=source_eval,)
dataset_target_train = GetLoader(data_fn=target_train,)

# Shuffle only the training loaders; evaluation keeps a fixed order.
dataloader_source_train = torch.utils.data.DataLoader(dataset=dataset_source_train, batch_size=batch_size, shuffle=True)
dataloader_source_eval = torch.utils.data.DataLoader(dataset=dataset_source_eval, batch_size=batch_size, shuffle=False)
dataloader_target_train = torch.utils.data.DataLoader(dataset=dataset_target_train, batch_size=batch_size, shuffle=True)

####################################################################################
#  Load model
####################################################################################
my_net = CNNModel()


####################################################################################
#  Setup optimizer
Example #7
0
alpha = 0  # gradient-reversal weight: 0 disables the domain branch at test time
# --- target domain ---
img_transform_target = transforms.Compose([
    transforms.Resize(image_size),
    transforms.ToTensor(),
    transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
])

# Hard-coded local dataset roots; swap the commented lines to switch targets.
target_image_root = "/home/xiec/PycharmProjects/DLCV-HW3/digits/mnistm/"
# target_image_root = "/home/xiec/PycharmProjects/DLCV-HW3/digits/svhn/"
# target_image_root = "/home/xiec/PycharmProjects/DLCV-HW3/digits/usps/"

# NOTE(review): target_train_list is built but never passed to GetLoader
# below -- presumably GetLoader(path=..., train=False) locates test.csv
# itself; confirm against its definition.
target_train_list = os.path.join(target_image_root, 'test.csv')

dataset_target = GetLoader(path=os.path.join(target_image_root),
                           transform=img_transform_target,
                           train=False)

dataloader_target = torch.utils.data.DataLoader(
    dataset=dataset_target,
    batch_size=batch_size,
    shuffle=False,
)  #num_workers=6

# Running evaluation accumulators for the loop that follows.
test_loss = 0
correct = 0
i = 0
for data, target in dataloader_target:
    # if i == 0:
    #     target_add = target
Example #8
0
def test(net, dataset_name, epoch):
    """Evaluate *net* on the test split of *dataset_name* and print the
    classification accuracy.

    Args:
        net: model whose forward is ``net(input_data=..., alpha=...)`` and
            returns ``(class_output, domain_output)``.
        dataset_name: 'mnist' (source, grayscale stats) or 'mnist_m' (target).
        epoch: epoch number, used only in the printed report.

    Returns:
        float: accuracy on the selected test set (newly returned; the
        original only printed, so existing callers are unaffected).
    """
    assert dataset_name in ['mnist', 'mnist_m']

    image_root = os.path.join('dataset', dataset_name)

    cuda = True
    cudnn.benchmark = True
    batch_size = 128
    image_size = 28
    alpha = 0  # alpha = 0: domain branch effectively disabled at test time

    # --- load data ---
    # MNIST is grayscale, hence the single-channel normalization stats.
    img_transform_source = transforms.Compose([
        transforms.Resize(image_size),
        transforms.ToTensor(),
        transforms.Normalize(mean=(0.1307, ), std=(0.3081, ))
    ])

    img_transform_target = transforms.Compose([
        transforms.Resize(image_size),
        transforms.ToTensor(),
        transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
    ])

    if dataset_name == 'mnist_m':
        test_list = os.path.join(image_root, 'mnist_m_test_labels.txt')

        dataset = GetLoader(data_root=os.path.join(image_root, 'mnist_m_test'),
                            data_list=test_list,
                            transform=img_transform_target)
    else:
        dataset = datasets.MNIST(
            root=image_root,
            train=False,
            transform=img_transform_source,
        )

    dataloader = torch.utils.data.DataLoader(dataset=dataset,
                                             batch_size=batch_size,
                                             shuffle=False,
                                             num_workers=0)

    # NOTE(review): the model is left in eval mode on return; the caller is
    # responsible for switching back to train mode.
    net.eval()

    len_dataloader = len(dataloader)
    data_target_iter = iter(dataloader)

    i = 0
    n_total = 0
    n_correct = 0

    while i < len_dataloader:
        # FIX: Py2-only `.next()` replaced with the builtin next().
        t_img, t_label = next(data_target_iter)
        batch_size = len(t_label)  # last batch may be smaller

        if cuda:
            t_img = t_img.cuda()
            t_label = t_label.cuda()

        class_output, _ = net(input_data=t_img, alpha=alpha)
        pred = class_output.data.max(1, keepdim=True)[1]

        n_correct += pred.eq(t_label.data.view_as(pred)).cpu().sum()
        n_total += batch_size

        i += 1

    # float() extracts the scalar from the 0-dim count tensor.
    accu = float(n_correct) / n_total

    print('epoch: %d, accuracy of the %s dataset: %f' %
          (epoch, dataset_name, accu))
    return accu
Example #9
0
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
    'val':
    transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
}

# Training label list (SUN), used for both domain loaders below.
train_list = os.path.join('/1116', 'SUN', 'train_label.txt')
dataset_source = GetLoader(data_root=os.path.join(source_image_root, 'train'),
                           data_list=train_list,
                           transform=data_transforms['train'])
dataloader_source = torch.utils.data.DataLoader(dataset=dataset_source,
                                                batch_size=batch_size,
                                                shuffle=True,
                                                num_workers=8)

# NOTE(review): the target dataset reuses the *source* train_list above --
# verify this is intentional and not a copy-paste slip.
dataset_target = GetLoader(data_root=os.path.join(target_image_root, 'train'),
                           data_list=train_list,
                           transform=data_transforms['train'])
dataloader_target = torch.utils.data.DataLoader(dataset=dataset_target,
                                                batch_size=batch_size,
                                                shuffle=True,
                                                num_workers=8)

# for k in dataloader_target:
Example #10
0
File: madan.py  Project: rs-dl/MADAN
    transforms.Resize(image_size),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])

# Target-domain preprocessing (same ImageNet normalization stats as the
# source transform defined just above).
img_transform_target = transforms.Compose([
    transforms.Resize(image_size),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])

# Label list, dataset and loader for the source domain.
source_list = os.path.join(source_image_root,
                           '{}_train_labels.txt'.format(source_dataset_name))

dataset_source = GetLoader(data_root=os.path.join(source_image_root, 'train'),
                           data_list=source_list,
                           transform=img_transform_source)

dataloader_source = torch.utils.data.DataLoader(dataset=dataset_source,
                                                batch_size=batch_size,
                                                shuffle=True)

# Label list and dataset for the target domain.
target_list = os.path.join(target_image_root,
                           '{}_train_labels.txt'.format(target_dataset_name))

dataset_target = GetLoader(data_root=os.path.join(target_image_root, 'train'),
                           data_list=target_list,
                           transform=img_transform_target)

dataloader_target = torch.utils.data.DataLoader(dataset=dataset_target,
                                                batch_size=batch_size,
Example #11
0
def test(dataset_name, epoch):
    """Load the checkpoint for *epoch* and report test accuracy on
    *dataset_name*.

    Args:
        dataset_name: 'MNIST' (source) or 'mnist_m' (target); any other
            value fails the assert.
        epoch: which saved checkpoint ('models/mnist_mnistm_model_epoch_N.pth')
            to evaluate.

    Returns:
        float: accuracy on the selected test set.
    """
    assert dataset_name in ['MNIST', 'mnist_m']

    model_root = 'models'
    image_root = os.path.join('dataset', dataset_name)

    # Hyperparameter setting

    cuda = True
    cudnn.benchmark = True
    batch_size = 128
    image_size = 28
    alpha = 0  # alpha = 0: domain branch disabled at test time

    # Load data -- grayscale stats for MNIST, RGB stats for MNIST-M.
    img_transform_source = transforms.Compose([
        transforms.Resize(image_size),
        transforms.ToTensor(),
        transforms.Normalize(mean=(0.1307, ), std=(0.3081, ))
    ])

    img_transform_target = transforms.Compose([
        transforms.Resize(image_size),
        transforms.ToTensor(),
        transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
    ])

    if dataset_name == 'mnist_m':
        test_list = os.path.join(image_root, 'mnist_m_test_labels.txt')

        dataset = GetLoader(data_root=os.path.join(image_root, 'mnist_m_test'),
                            data_list=test_list,
                            transform=img_transform_target)
    else:
        dataset = datasets.MNIST(
            root='dataset',
            train=False,
            transform=img_transform_source,
        )

    dataloader = torch.utils.data.DataLoader(dataset=dataset,
                                             batch_size=batch_size,
                                             shuffle=False,
                                             num_workers=8)

    # Load the trained model and put it in evaluation mode.
    my_net = torch.load(
        os.path.join(model_root,
                     'mnist_mnistm_model_epoch_' + str(epoch) + '.pth'))
    my_net = my_net.eval()

    if cuda:
        my_net = my_net.cuda()

    len_dataloader = len(dataloader)
    data_target_iter = iter(dataloader)

    i = 0
    n_total = 0
    n_correct = 0

    while i < len_dataloader:
        # FIX: Py2-only `.next()` replaced with the builtin next().
        t_img, t_label = next(data_target_iter)

        batch_size = len(t_label)  # last batch may be smaller

        input_img = torch.FloatTensor(batch_size, 3, image_size, image_size)
        class_label = torch.LongTensor(batch_size)

        if cuda:
            t_img = t_img.cuda()
            t_label = t_label.cuda()
            input_img = input_img.cuda()
            class_label = class_label.cuda()

        # Copy the batch into pre-allocated buffers (legacy idiom, kept).
        input_img.resize_as_(t_img).copy_(t_img)
        class_label.resize_as_(t_label).copy_(t_label)

        #alpha = 0, no domain classifier
        class_output, _ = my_net(input_data=input_img, alpha=alpha)
        pred = class_output.data.max(1, keepdim=True)[1]
        n_correct = n_correct + pred.eq(class_label.view_as(pred)).cpu().sum()
        n_total = n_total + batch_size

        i = i + 1

    # FIX: float() extracts the scalar directly; the old
    # `n_correct.data.numpy()` round-tripped through NumPy for no benefit.
    acc = float(n_correct) * 1.0 / n_total

    print('epoch: %d, accuracy of the %s dataset: %f' %
          (epoch, dataset_name, acc))
    return acc
Example #12
0
#     download=True
# )
#
# dataloader_source = torch.utils.data.DataLoader(
#     dataset=dataset_source,
#     batch_size=batch_size,
#     shuffle=True,
#     num_workers=6)

# --- source domain ---
source_image_root = "/home/xiec/PycharmProjects/DLCV-HW3/digits/mnistm/"

# NOTE(review): source_train_list is built but never passed to GetLoader --
# presumably GetLoader(path=..., train=True) locates train.csv itself;
# confirm against its definition.
source_train_list = os.path.join(source_image_root, 'train.csv')

dataset_source = GetLoader(path=os.path.join(source_image_root),
                           transform=img_transform_source,
                           train=True)

dataloader_source = torch.utils.data.DataLoader(
    dataset=dataset_source,
    batch_size=batch_size,
    shuffle=True,
)  #num_workers=6

# --- target domain ---
target_image_root = "/home/xiec/PycharmProjects/DLCV-HW3/digits/svhn/"

target_train_list = os.path.join(target_image_root, 'train.csv')

dataset_target = GetLoader(path=os.path.join(target_image_root),
                           transform=img_transform_target,
Example #13
0
File: test_local.py  Project: rs-dl/MADAN
def test(dataset_name, epoch, domain, my_net_path, cla_net_path):
    """Evaluate a (feature extractor, classifier) pair on one oil-palm
    sub-dataset and return its classification accuracy.

    Args:
        dataset_name: one of '0', '1', '3', '4' -- selects the sub-dataset
            under ./dataset/<name>.
        epoch: epoch number, used only in the printed report.
        domain: domain tag (unused here beyond the original model_root path,
            which was dead code and has been removed).
        my_net_path: path of the saved feature-extractor module.
        cla_net_path: path of the saved classifier head.

    Returns:
        float: accuracy on the test split.
    """
    assert dataset_name in ['0', '1', '3', '4']

    image_root = os.path.join('.', 'dataset', dataset_name)

    cuda = True
    cudnn.benchmark = True
    batch_size = 128
    image_size = 17
    alpha = 0  # alpha = 0: gradient-reversal branch disabled at test time

    # --- load data ---
    # ImageNet normalization stats; the unused duplicate source transform
    # from the original was removed (it was never referenced).
    img_transform_target = transforms.Compose([
        transforms.Resize(image_size),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])

    test_list = os.path.join(image_root,
                             '{}_test_labels.txt'.format(dataset_name))

    dataset = GetLoader(data_root=os.path.join(image_root, 'test'),
                        data_list=test_list,
                        transform=img_transform_target)

    dataloader = torch.utils.data.DataLoader(dataset=dataset,
                                             batch_size=batch_size,
                                             shuffle=False,
                                             num_workers=8)

    # --- load trained modules ---
    my_net = torch.load(my_net_path)
    my_net = my_net.eval()
    cla_net = torch.load(cla_net_path)
    cla_net = cla_net.eval()

    if cuda:
        my_net = my_net.cuda()
        cla_net = cla_net.cuda()

    len_dataloader = len(dataloader)
    data_target_iter = iter(dataloader)

    i = 0
    n_total = 0
    n_correct = 0

    while i < len_dataloader:
        # FIX: Py2-only `.next()` replaced with the builtin next().
        t_img, t_label = next(data_target_iter)

        batch_size = len(t_label)  # last batch may be smaller

        input_img = torch.FloatTensor(batch_size, 3, image_size, image_size)
        class_label = torch.LongTensor(batch_size)

        if cuda:
            t_img = t_img.cuda()
            t_label = t_label.cuda()
            input_img = input_img.cuda()
            class_label = class_label.cuda()

        # Copy the batch into pre-allocated buffers (legacy idiom, kept).
        input_img.resize_as_(t_img).copy_(t_img)
        class_label.resize_as_(t_label).copy_(t_label)

        # Features -> class logits; argmax is taken on raw logits (the dead
        # softmax of the original was removed -- it did not change argmax
        # and its result was discarded).
        feature = my_net(input_data=input_img, alpha=alpha)
        class_output = cla_net(feature)
        pred = class_output.data.max(1, keepdim=True)[1]
        n_correct += pred.eq(class_label.data.view_as(pred)).cpu().sum()
        n_total += batch_size

        i += 1

    # FIX: float() extracts the scalar directly instead of the old
    # `n_correct.data.numpy()` NumPy round-trip.
    accu = float(n_correct) * 1.0 / n_total

    print('epoch: %d, accuracy of the %s dataset: %f' %
          (epoch, dataset_name, accu))
    return accu