Example 1
def test(dataset_name, epoch):
    image_root = os.path.join('../../hw3_data/digits/', dataset_name)
    cuda = True if torch.cuda.is_available() else False

    cudnn.benchmark = True
    alpha = 0
    """load data"""
    dataset = GetLoader(img_root=os.path.join(image_root, 'test'),
                        label_path=os.path.join(image_root, 'test.csv'),
                        transform=transforms.Compose([
                            transforms.ToTensor(),
                            transforms.Normalize(mean=(0.5, 0.5, 0.5),
                                                 std=(0.5, 0.5, 0.5))
                        ]))
    dataloader = torch.utils.data.DataLoader(dataset=dataset,
                                             batch_size=consts.batch_size,
                                             shuffle=False,
                                             num_workers=8)
    """ training """
    my_net = torch.load(os.path.join('models', 'epoch_' + str(epoch) + '.pth'))
    my_net = my_net.eval()
    if cuda:
        my_net = my_net.cuda()

    n_total = 0
    n_correct = 0

    for i, (t_img, t_label) in enumerate(dataloader):
        batch_size = len(t_label)
        input_img = torch.FloatTensor(batch_size, 3, consts.image_size,
                                      consts.image_size)
        class_label = torch.LongTensor(batch_size)

        if cuda:
            t_img = t_img.cuda()
            t_label = t_label.cuda()
            input_img = input_img.cuda()
            class_label = class_label.cuda()

        input_img.resize_as_(t_img).copy_(t_img)
        class_label.resize_as_(t_label).copy_(t_label)

        class_output, _ = my_net(input_data=input_img, alpha=alpha)
        pred = class_output.data.max(1, keepdim=True)[1]
        n_correct += pred.eq(class_label.data.view_as(pred)).cpu().sum()
        n_total += batch_size
    accu = n_correct.item() / n_total
    print('epoch: %d, accuracy of the %s dataset: %f' %
          (epoch, dataset_name, accu))
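A minimal sketch of how this evaluation routine might be driven; the epoch range and dataset names below are illustrative assumptions, not taken from the original:

if __name__ == '__main__':
    for epoch in range(1, 101):          # evaluate every saved checkpoint
        for name in ('mnistm', 'svhn'):  # hypothetical dataset names
            test(name, epoch)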
Example 2
                        help='tensorboard log path')
    parser.add_argument('--checkpoint_path',
                        type=str,
                        default=None,
                        help='restore from a checkpoint')
    opt = parser.parse_args()

    if not os.path.exists('./weights/'):
        os.makedirs('./weights')

    if not os.path.exists('./log/'):
        os.makedirs('./log/')

    train_data_root = '../data/'
    batch_size = opt.batch_size
    trainset = GetLoader(train_data_root, 'trainset')
    trainloader = torch.utils.data.DataLoader(dataset=trainset,
                                              batch_size=batch_size,
                                              shuffle=True,
                                              num_workers=4,
                                              collate_fn=collate_fn)

    test_data_root = '../data/'
    testset = GetLoader(test_data_root, 'testset')
    testloader = torch.utils.data.DataLoader(dataset=testset,
                                             batch_size=2,
                                             shuffle=False,
                                             num_workers=4,
                                             collate_fn=collate_fn)

    G = Generator(32, 256, 512, 32).to(device)
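The collate_fn passed to both loaders is defined elsewhere in the original script. A minimal sketch of what such a function might look like, purely illustrative and assuming each sample is an (image, target) pair whose sizes may differ across samples:

def collate_fn(batch):
    # keep variable-sized samples as lists instead of stacking them into one tensor
    images = [sample[0] for sample in batch]
    targets = [sample[1] for sample in batch]
    return images, targets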
Example 3
])

transform2 = transforms.Compose([
    transforms.Resize(32),
    transforms.Grayscale(),
    transforms.ToTensor(),
    transforms.Lambda(lambda x: x.repeat(3, 1, 1)),  # replicate the grayscale channel to 3 channels
    transforms.Normalize(mean, std)
])

#########################
# If training mnistm -> svhn, please use transform2 for target_val & target_train
# (an illustrative sketch follows the target_train definition below).
#########################

source_train = GetLoader(img_root=os.path.join(source_image_root, "train"),
                         label_path=os.path.join(source_image_root,
                                                 'train.csv'),
                         transform=transform)

target_val = GetLoader(img_root=os.path.join(target_image_root, "test"),
                       label_path=os.path.join(target_image_root, 'test.csv'),
                       transform=transform)

target_train = GetLoader(img_root=os.path.join(target_image_root, "train"),
                         label_path=os.path.join(target_image_root,
                                                 'train.csv'),
                         transform=transform)
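# Illustrative only (not in the original excerpt): for the mnistm -> svhn
# direction mentioned above, the target loaders would instead be built with
# transform2, e.g.
# target_val = GetLoader(img_root=os.path.join(target_image_root, "test"),
#                        label_path=os.path.join(target_image_root, 'test.csv'),
#                        transform=transform2)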

source_trainloader = torch.utils.data.DataLoader(source_train,
                                                 batch_size=consts.batch_size,
                                                 shuffle=True,
                                                 num_workers=consts.workers,
Example 4
    model_path, image_dir_s, label_path_s, image_dir_t, label_path_t = (
        sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5])
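    # Expected invocation, inferred from the argv unpacking above (the script
    # name is a placeholder):
    #   python test_dann.py <model_path> <image_dir_s> <label_path_s> <image_dir_t> <label_path_t>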
    cudnn.benchmark = True
    cuda = True if torch.cuda.is_available() else False

    ######################################################################
    # load data
    ######################################################################
    mean = np.array([0.44, 0.44, 0.44])
    std = np.array([0.19, 0.19, 0.19])

    transform = transforms.Compose([
        transforms.Resize(32),
        transforms.ToTensor(),
        transforms.Normalize(mean, std)
    ])

    dataset_s = GetLoader(
        img_root=image_dir_s,
        label_path=label_path_s,
        transform=transform
    )
    print('# images in dataset:', len(dataset_s))
    dataloader_s = torch.utils.data.DataLoader(
        dataset=dataset_s,
        batch_size=consts.batch_size,
        shuffle=True,
        num_workers=consts.workers
    )
    sample_batch_s = next(iter(dataloader_s))
    print('Image tensor in each batch:', sample_batch_s[0].shape, sample_batch_s[0].dtype)
    print('Label tensor in each batch:', sample_batch_s[1].shape, sample_batch_s[1].dtype)

    dataset_t = GetLoader(
        img_root=image_dir_t,
Example 5
lrD = 0.0002
beta1 = 0.5
beta2 = 0.999
lambda_l = 100 

train_list = os.path.join('.', 'data', 'cityscapes', 'train.txt')
val_list = os.path.join('.', 'data', 'cityscapes', 'val.txt')

transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
])

trainset = GetLoader(
    data_root=os.path.join('.', 'data', 'cityscapes', 'train'),
    data_list=train_list,
    transform1=transform,
    transform2=transform
)

valset = GetLoader(
    data_root=os.path.join('.', 'data', 'cityscapes', 'val'),
    data_list=val_list,
    transform1=transforms.ToTensor(),
    transform2=transforms.ToTensor()
)

trainloader = torch.utils.data.DataLoader(
    dataset=trainset,
    batch_size=batch_size,
    shuffle=True,
    num_workers=4)
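The matching validation loader is not shown in this excerpt; it would presumably mirror the training loader but without shuffling (a sketch, not from the original):

valloader = torch.utils.data.DataLoader(
    dataset=valset,
    batch_size=batch_size,
    shuffle=False,
    num_workers=4)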
Example 6
os.makedirs("result", exist_ok=True)

model_path, image_dir_s, label_path_s, image_dir_t, label_path_t = (
    sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5])
cuda = True if torch.cuda.is_available() else False
cudnn.benchmark = True
alpha = 0

######################################################################
# load data
######################################################################
dataset_s = GetLoader(img_root=image_dir_s,
                      label_path=label_path_s,
                      transform=transforms.Compose([
                          transforms.ToTensor(),
                          transforms.Normalize(mean=(0.5, 0.5, 0.5),
                                               std=(0.5, 0.5, 0.5))
                      ]))
print('# images in dataset:', len(dataset_s))
dataloader_s = torch.utils.data.DataLoader(dataset=dataset_s,
                                           batch_size=consts.batch_size,
                                           shuffle=True,
                                           num_workers=consts.workers)
sample_batch_s = next(iter(dataloader_s))
print('Image tensor in each batch:', sample_batch_s[0].shape,
      sample_batch_s[0].dtype)
print('Label tensor in each batch:', sample_batch_s[1].shape,
      sample_batch_s[1].dtype)

dataset_t = GetLoader(img_root=image_dir_t,
Example 7
                           transform=transform)
testloader_m = torch.utils.data.DataLoader(testset_m,
                                           batch_size=batch_size,
                                           shuffle=True)

#MNIST_M
transform_mm = transforms.Compose([
    transforms.Resize(image_size),
    transforms.ToTensor(),
    transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
])

train_list = os.path.join('.', 'mnist_m', 'mnist_m_train_labels.txt')
test_list = os.path.join('.', 'mnist_m', 'mnist_m_test_labels.txt')
trainset_mm = GetLoader(data_root=os.path.join('.', 'mnist_m',
                                               'mnist_m_train'),
                        data_list=train_list,
                        transform=transform_mm)
trainloader_mm = torch.utils.data.DataLoader(dataset=trainset_mm,
                                             batch_size=batch_size,
                                             shuffle=True,
                                             num_workers=4)
testset_mm = GetLoader(data_root=os.path.join('.', 'mnist_m', 'mnist_m_test'),
                       data_list=test_list,
                       transform=transform_mm)
testloader_mm = torch.utils.data.DataLoader(dataset=testset_mm,
                                            batch_size=batch_size,
                                            shuffle=True,
                                            num_workers=4)

#target
#dataiter_mm = iter(trainloader_mm)
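The commented-out dataiter_mm line hints at how the two domains are consumed together during training. A minimal sketch of pairing labeled source (MNIST) batches with unlabeled target (MNIST-M) batches, assuming a trainloader_m for MNIST exists in the omitted part of the script:

for (s_img, s_label), (t_img, _) in zip(trainloader_m, trainloader_mm):
    # s_img / s_label: labeled source batch; t_img: unlabeled target batch
    pass  # forward both batches through the model here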