Example #1
0
def test_load_celebA():
    """Smoke-test the CELEBA training DataLoader.

    Builds a training dataset/loader with batch size 5, pulls one batch,
    and asserts the expected tensor shapes: images (5, 3, 64, 64) and
    labels (5,). Relies on module-level ``ROOT``, ``CELEBA`` and
    ``transforms`` being in scope.
    """
    # print(...) with a single argument is valid on both Python 2 and 3,
    # unlike the bare Python-2 print statement used elsewhere in this file.
    print('testing data loader')
    BATCH_SIZE = 5
    trainDataset = CELEBA(root=ROOT,
                          train=True,
                          transform=transforms.ToTensor())
    trainLoader = torch.utils.data.DataLoader(trainDataset,
                                              batch_size=BATCH_SIZE,
                                              shuffle=True)

    # next(iter(...)) works on Python 2 and 3; iterator.next() is
    # Python-2-only and raises AttributeError on Python 3.
    (x, y) = next(iter(trainLoader))

    # torch.Size compares equal to a plain tuple, so these asserts are valid.
    assert x.size() == (5, 3, 64, 64)
    assert y.size() == (5, )
Example #2
0
        f.close()

    return svm


if __name__ == '__main__':

    opts = get_args()

    #Load data
    print 'Prepare data loaders...'
    transform = transforms.Compose(
        [transforms.ToTensor(),
         transforms.RandomHorizontalFlip()])
    trainDataset = CELEBA(root=opts.root,
                          train=True,
                          transform=transforms.ToTensor())
    trainLoader = torch.utils.data.DataLoader(trainDataset,
                                              batch_size=opts.batchSize,
                                              shuffle=True)

    testDataset = CELEBA(root=opts.root,
                         train=False,
                         transform=transforms.ToTensor())
    testLoader = torch.utils.data.DataLoader(testDataset,
                                             batch_size=opts.batchSize,
                                             shuffle=False)
    print 'Data loaders ready.'

    #Create model
    dae = DAE(nz=opts.nz,
Example #3
0
    print 'Outputs will be saved to:', exDir
    save_input_args(exDir, opts)

    # Load data (glasses and male labels)
    IM_SIZE = opts.imSize
    print 'Prepare data loader...'
    transform = transforms.Compose([
        transforms.ToPILImage(),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])

    testDataset = CELEBA(
        root=opts.root,
        train=False,
        labels=opts.labels,
        transform=transform,
        Ntest=1000
    )  #most models trained with Ntest=1000, but using 100 to prevent memory errors
    testLoader = torch.utils.data.DataLoader(testDataset,
                                             batch_size=opts.batchSize,
                                             shuffle=False)
    print 'Data loader ready.'

    #Load model
    gen = GEN(imSize=IM_SIZE, nz=opts.nz, fSize=opts.fSize)
    if gen.useCUDA:
        torch.cuda.set_device(opts.gpuNo)
        gen.cuda()
    gen.load_params(opts.exDir, gpuNo=opts.gpuNo)
    gen.eval()
        ####### Save params #######
        gen.save_params(exDir)
        dis.save_params(exDir)

    return gen, dis


if __name__ == '__main__':
    opts = get_args()

    ####### Data set #######
    print 'Prepare data loaders...'
    transform = transforms.Compose([transforms.ToPILImage(), transforms.RandomHorizontalFlip(),\
     transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
    trainDataset = CELEBA(root=opts.root, train=True, transform=transform)
    trainLoader = torch.utils.data.DataLoader(trainDataset,
                                              batch_size=opts.batchSize,
                                              shuffle=True)

    transform = transforms.Compose([ transforms.ToPILImage(), \
     transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
    testDataset = CELEBA(root=opts.root, train=False, transform=transform)
    testLoader = torch.utils.data.DataLoader(testDataset,
                                             batch_size=opts.batchSize,
                                             shuffle=False)
    print 'Data loaders ready.'

    ###### Create model #####
    IM_SIZE = 64