Example #1
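# Assumed context (not shown in this excerpt):
#   import os, torch
#   import torchvision.datasets as dset
#   import torchvision.transforms as transforms
#   import mnistm  # local module providing the MNISTM dataset class
# `opt` holds the parsed command-line options (dataset, dataroot, imageSize, batchSize).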
if opt.dataset == 'mnist':
    os.makedirs('data/mnist', exist_ok=True)
    dataloader_x = torch.utils.data.DataLoader(
        dset.MNIST('data/mnist', train=True, download=True,
                       transform=transforms.Compose([
                           transforms.Resize(opt.imageSize),
                           transforms.ToTensor(),
                           transforms.Normalize((0.5,), (0.5,))  # MNIST is single-channel
                       ])),
        batch_size=opt.batchSize, shuffle=True)
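    # MNIST-M images are RGB (MNIST digits blended onto colour photo patches),
    # hence the 3-channel normalization used for the loader below.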
    
    os.makedirs('data/mnistm', exist_ok=True)
    dataloader_y = torch.utils.data.DataLoader(
        mnistm.MNISTM('data/mnistm', train=True, download=True,
                       transform=transforms.Compose([
                           transforms.Resize(opt.imageSize),
                           transforms.ToTensor(),
                           transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
                       ])),
        batch_size=opt.batchSize, shuffle=True)
#
elif opt.dataset == 'photo-monet':
    dataset1 = dset.ImageFolder(root=opt.dataroot+'/train_monet',
                        transform=transforms.Compose([
                            transforms.Resize(opt.imageSize),
                            transforms.CenterCrop(opt.imageSize),
                            transforms.ToTensor(),
                            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
                        ]))
    dataset2 = dset.ImageFolder(root=opt.dataroot+'/train_photo',
                        transform=transforms.Compose([
                            transforms.Resize(opt.imageSize),
                            transforms.CenterCrop(opt.imageSize),
                            transforms.ToTensor(),
                            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
                        ]))
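
A minimal sketch of how two such loaders are typically consumed together (mirroring the `zip` pattern used in Example #4 below; `dataloader_x` and `dataloader_y` are the MNIST/MNIST-M loaders defined above):

for (imgs_x, _), (imgs_y, _) in zip(dataloader_x, dataloader_y):
    # one MNIST batch and one MNIST-M batch per step; the shorter loader ends the epoch
    pass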
Example #2
    train=True,
    download=True,
    transform=transforms.Compose([
        transforms.Resize(opt.img_size),
        transforms.ToTensor(),
        transforms.Normalize((0.5,), (0.5,))  # single-channel normalization for the MNIST loader
    ])),
                                          batch_size=opt.batch_size,
                                          shuffle=True)

os.makedirs('../../data/mnistm', exist_ok=True)
dataloader2 = torch.utils.data.DataLoader(mnistm.MNISTM(
    '../../data/mnistm',
    train=True,
    download=True,
    transform=transforms.Compose([
        transforms.Resize(opt.img_size),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])),
                                          batch_size=opt.batch_size,
                                          shuffle=True)

# Optimizers
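# (coupled_generators / coupled_discriminators are the CoGAN generator and discriminator
#  pairs instantiated earlier in the script; see Example #4 for how they are constructed.)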
optimizer_G = torch.optim.Adam(coupled_generators.parameters(),
                               lr=opt.lr,
                               betas=(opt.b1, opt.b2))
optimizer_D = torch.optim.Adam(coupled_discriminators.parameters(),
                               lr=opt.lr,
                               betas=(opt.b1, opt.b2))
Example #3
    transforms.ToTensor(),
    transforms.Normalize([0.5], [0.5])
    # transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])

# Loading dataset

train_data = datasets.MNIST("~/Desktop/Datasets/", download=True, transform=transform)
# `kwargs` (batch_size, shuffle, num_workers, ...) is defined elsewhere in the script
train_loader = torch.utils.data.DataLoader(train_data, **kwargs)

train_loader2 = torch.utils.data.DataLoader(
    mnistm.MNISTM(
        "~/Desktop/Datasets/",
        train=True,
        download=True,
        transform=transforms.Compose([
            transforms.Resize(ndf),
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
        ]),
    ),
    batch_size=args.batch_size,
    shuffle=True,
)


# Initialize weights
def weight_init(m):
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        torch.nn.init.normal_(m.weight, 0.0, 0.02)
    elif classname.find('BatchNorm') != -1:
        torch.nn.init.normal_(m.weight, 1.0, 0.02)
        torch.nn.init.constant_(m.bias, 0.0)
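
A brief usage sketch (the small `nn.Sequential` model is illustrative, not from the original code): PyTorch applies such initializers recursively with `Module.apply`:

import torch.nn as nn

net = nn.Sequential(nn.Conv2d(3, 64, 4, 2, 1), nn.BatchNorm2d(64), nn.ReLU())
net.apply(weight_init)  # visits every sub-module and initialises its Conv and BatchNorm layers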
Example #4
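# Note: this example uses the Jittor framework (jt, jittor.nn, jittor.transform) rather than PyTorch.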
adversarial_loss = nn.MSELoss()

# Initialize models
coupled_generators = CoupledGenerators()
coupled_discriminators = CoupledDiscriminators()

print(coupled_generators)
print(coupled_discriminators)

transform = transform.Compose([
    transform.Resize(opt.img_size),
    transform.ImageNormalize(mean=[0.5], std=[0.5]),
])
dataloader1 = MNIST(train=True, transform=transform).set_attrs(batch_size=opt.batch_size, shuffle=True)

dataloader2 = mnistm.MNISTM(mnist_root="../../data/mnistm", train=True, transform=transform).set_attrs(batch_size=opt.batch_size, shuffle=True)
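# A Jittor Dataset configured with set_attrs(batch_size=..., shuffle=...) is iterated
# directly; no separate DataLoader object is needed.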

# Optimizers
optimizer_G = nn.Adam(coupled_generators.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))
optimizer_D = nn.Adam(coupled_discriminators.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))

# ----------
#  Training
# ----------
for epoch in range(opt.n_epochs):
    for i, ((imgs1, _), (imgs2, _)) in enumerate(zip(dataloader1, dataloader2)):
        jt.sync_all(True)
        batch_size = imgs1.shape[0]

        # Adversarial ground truths
        valid = jt.ones([batch_size, 1]).float32().stop_grad()
Example #5
                transforms.Resize(args.img_size),
                transforms.ToTensor(),
                transforms.Normalize([0.5], [0.5])
            ]),
        ),
        batch_size=args.batch_size,
        shuffle=True,
    )

    os.makedirs("../../data/mnistm", exist_ok=True)
    dataloader2 = torch.utils.data.DataLoader(
        mnistm.MNISTM(
            "../../data/mnistm",
            train=True,
            download=True,
            transform=transforms.Compose([
                transforms.Resize(args.img_size),
                transforms.ToTensor(),
                transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
            ]),
        ),
        batch_size=args.batch_size,
        shuffle=True,
    )

    lr = args.lr
    b1, b2 = args.b1, args.b2

    # Optimizers
    optimizer_G = torch.optim.Adam(coupled_generators.parameters(),
                                   lr=lr,
                                   betas=(b1, b2))