Example #1
# initialize discriminator
netD = GAN.Discriminator(args.nz, args.n_hidden)
print("Discriminator loaded")

# initialize generator
netG = GAN.Generator(args.nz, args.n_hidden)
print("Generator loaded")

if torch.cuda.is_available():
    netD.cuda()
    netG.cuda()
    print("Using GPU")

# load data
loader = utils.setup_data_loaders(args.batch_size, args.source_data_file,
                                  args.target_data_file)
print('Data loaded')
sys.stdout.flush()

# setup optimizers
G_opt = optim.Adam(list(netG.parameters()), lr=args.lrG)
D_opt = optim.Adam(list(netD.parameters()), lr=args.lrD)

# loss criteria
logsigmoid = nn.LogSigmoid()
mse = nn.MSELoss(reduction='none')  # per-element losses; the old reduce=False flag is deprecated
LOG2 = torch.tensor([np.log(2)], dtype=torch.float32)  # constant log(2); no Variable wrapper needed
print(LOG2)
if torch.cuda.is_available():
    LOG2 = LOG2.cuda()
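
A minimal sketch of how the pieces above could be wired into one training iteration, assuming a standard non-saturating GAN objective, that netG maps nz-dimensional noise to samples, that netD returns one unnormalized logit per sample, and that loader yields (source, target) batches; the project's actual objective and loop are not shown in this snippet.

for x_source, x_target in loader:  # assumed pairing; x_target is unused in this sketch
    if torch.cuda.is_available():
        x_source = x_source.cuda()

    # discriminator step: push D(real) up, D(fake) down
    D_opt.zero_grad()
    z = torch.randn(x_source.size(0), args.nz, device=x_source.device)
    fake = netG(z).detach()
    d_loss = -(logsigmoid(netD(x_source)).mean() + logsigmoid(-netD(fake)).mean())
    d_loss.backward()
    D_opt.step()

    # generator step: push D(fake) up (non-saturating loss)
    G_opt.zero_grad()
    fake = netG(torch.randn(x_source.size(0), args.nz, device=x_source.device))
    g_loss = -logsigmoid(netD(fake)).mean()
    g_loss.backward()
    G_opt.step()
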
Example #2
# initialize discriminator
netD = GAN.Discriminator(args.nz, args.n_hidden)
print("Discriminator loaded")

# initialize generator
netG = GAN.Generator(args.nz, args.n_hidden)
netGS = GAN.Generator_Scale(args.nz, args.n_hidden)
print("Generator loaded")

if torch.cuda.is_available():
    netD.cuda()
    netG.cuda()
    netGS.cuda()

# load data
loader = utils.setup_data_loaders(args.batch_size)
print('Data loaded')
sys.stdout.flush()

# setup optimizers
G_opt = optim.Adam(list(netG.parameters()), lr=args.lrG)
D_opt = optim.Adam(list(netD.parameters()), lr=args.lrD)
GS_opt = optim.Adam(list(netGS.parameters()), lr=args.lrG)

# loss criteria
logsigmoid = nn.LogSigmoid()
mse = nn.MSELoss(reduction='none')  # per-element losses; the old reduce=False flag is deprecated
LOG2 = torch.tensor([np.log(2)], dtype=torch.float32)  # constant log(2); no Variable wrapper needed
print(LOG2)
if torch.cuda.is_available():
    LOG2 = LOG2.cuda()
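
A minimal sketch of what reduction='none' gives you: the criterion returns one loss value per element, which is then reduced by hand. The tensors below are hypothetical stand-ins for a reconstruction and its target.

x_hat = torch.zeros(4, 8)         # hypothetical batch of 4 reconstructions
x = torch.ones(4, 8)              # hypothetical targets
per_elem = mse(x_hat, x)          # shape (4, 8): no reduction applied
per_sample = per_elem.sum(dim=1)  # shape (4,): one loss per sample
loss = per_sample.mean()          # scalar, ready for a backward pass
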
Example #3
    options.add_argument('-bs', action="store", dest="batch_size", default=128, type=int)
    options.add_argument('-env', action="store", dest="env", default="VAE_MNIST_USPS")

    options.add_argument('-iter', action="store", dest="max_iter", default=200, type=int)
    options.add_argument('-lr', action="store", dest="lr", default=1e-3, type=float)
    options.add_argument('-nz', action="store", dest="nz", default=20, type=int)
    options.add_argument('-lamb', action="store", dest="lamb", default=0.001, type=float)

    return options.parse_args()

args = setup_args()
print(args)
sys.stdout.flush()

# retrieve dataloaders
train_loader, test_loader = utils.setup_data_loaders(args.batch_size)
print('Data loaded')

model = AENet.VAE(nc=1, latent_size=args.nz)
if args.pretrained_file is not None:
    model.load_state_dict(torch.load(args.pretrained_file))
    print("Pre-trained model loaded")
    sys.stdout.flush()

if torch.cuda.is_available():
    print('Using GPU')
    model.cuda()

optimizer = optim.Adam([{'params': model.parameters()}], lr=args.lr)
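
A minimal sketch of a training epoch for this setup, assuming model(x) returns (x_hat, mu, logvar) in the usual VAE convention, that train_loader yields (image, label) batches, and that args.lamb weights the KL term; the actual AENet.VAE interface is not shown in this snippet.

model.train()
for x, _ in train_loader:
    if torch.cuda.is_available():
        x = x.cuda()
    optimizer.zero_grad()
    x_hat, mu, logvar = model(x)
    recon = torch.nn.functional.mse_loss(x_hat, x, reduction='sum') / x.size(0)
    kl = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp()) / x.size(0)
    loss = recon + args.lamb * kl  # reconstruction plus weighted KL term
    loss.backward()
    optimizer.step()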