def main():
    # init random seed
    init_random_seed(params.manual_seed)

    # Load dataset
    mnist_data_loader = get_mnist(train=True, download=True)
    mnist_data_loader_eval = get_mnist(train=False, download=True)
    usps_data_loader = get_usps(train=True, download=True)
    usps_data_loader_eval = get_usps(train=False, download=True)

    # Model init RevGrad
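    # (RevGrad: domain-adversarial training in which the encoder is trained against the domain critic via gradient reversal)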
    tgt_encoder = model_init(Encoder(), params.tgt_encoder_revgrad_path)
    critic = model_init(Discriminator(), params.disc_revgard_path)
    clf = model_init(Classifier(), params.clf_revgrad_path)

    # Train models
    print("====== Training source encoder and classifier in MNIST and USPS domains ======")
    if not (tgt_encoder.pretrained and clf.pretrained and critic.pretrained and params.model_trained):
        tgt_encoder, clf, critic = train_revgrad(tgt_encoder, clf, critic,
                                                 mnist_data_loader, usps_data_loader, robust=False)

    # Eval target encoder on test set of target dataset
    print("====== Evaluating classifier for encoded MNIST and USPS domain ======")
    print("-------- MNIST domain --------")
    eval_tgt(tgt_encoder, clf, mnist_data_loader_eval)
    print("-------- USPS adaption --------")
    eval_tgt(tgt_encoder, clf, usps_data_loader_eval)
Example #2
def main():
    # init random seed
    init_random_seed(params.manual_seed)

    # Load dataset
    svhn_data_loader = get_svhn(split='train', download=True)
    svhn_data_loader_eval = get_svhn(split='test', download=True)
    mnist_data_loader = get_mnist(train=True, download=True)
    mnist_data_loader_eval = get_mnist(train=False, download=True)

    # Model init WDGRL
    tgt_encoder = model_init(Encoder(), params.encoder_wdgrl_path)
    critic = model_init(Discriminator(in_dims=params.d_in_dims,
                                      h_dims=params.d_h_dims,
                                      out_dims=params.d_out_dims),
                        params.disc_wdgrl_path)
    clf = model_init(Classifier(), params.clf_wdgrl_path)

    # Train critic to optimality
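    # (in WDGRL the critic is trained to approximate the Wasserstein distance between source and target feature distributions)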
    print("====== Training critic ======")
    if not (critic.pretrained and params.model_trained):
        critic = train_critic_wdgrl(tgt_encoder, critic, svhn_data_loader, mnist_data_loader)

    # Train target encoder
    print("====== Training encoder for both SVHN and MNIST domains ======")
    if not (tgt_encoder.pretrained and clf.pretrained and params.model_trained):
        tgt_encoder, clf = train_tgt_wdgrl(tgt_encoder, clf, critic,
                                     svhn_data_loader, mnist_data_loader, robust=False)

    # Eval target encoder on test set of target dataset
    print("====== Evaluating classifier for encoded SVHN and MNIST domains ======")
    print("-------- SVHN domain --------")
    eval_tgt(tgt_encoder, clf, svhn_data_loader_eval)
    print("-------- MNIST adaption --------")
    eval_tgt(tgt_encoder, clf, mnist_data_loader_eval)
def main():
    # init random seed
    init_random_seed(params.manual_seed)

    # Load dataset
    mnist_data_loader = get_mnist(train=True, download=True)
    mnist_data_loader_eval = get_mnist(train=False, download=True)
    usps_data_loader = get_usps(train=True, download=True)
    usps_data_loader_eval = get_usps(train=False, download=True)

    # Model init WDGRL
    tgt_encoder = model_init(Encoder(), params.encoder_wdgrl_rb_path)
    critic = model_init(Discriminator(), params.disc_wdgrl_rb_path)
    clf = model_init(Classifier(), params.clf_wdgrl_rb_path)

    # Train target encoder
    print("====== Robust Training encoder for both MNIST and USPS domains ======")
    if not (tgt_encoder.pretrained and clf.pretrained and params.model_trained):
        tgt_encoder, clf = train_tgt_wdgrl(tgt_encoder, clf, critic,
                                           mnist_data_loader, usps_data_loader, usps_data_loader_eval, robust=True)

    # Eval target encoder on test set of target dataset
    print("====== Evaluating classifier for encoded MNIST and USPS domains ======")
    print("-------- MNIST domain --------")
    eval_tgt_robust(tgt_encoder, clf, mnist_data_loader_eval)
    print("-------- USPS adaption --------")
    eval_tgt_robust(tgt_encoder, clf, usps_data_loader_eval)
def test(model_config):
	mode = 'test'
	batch_size = 1
	dataset = ShakespeareModern(train_shakespeare_path, test_shakespeare_path, train_modern_path, test_modern_path, mode=mode)
	dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=False)

	shakespeare_disc = Discriminator(model_config['embedding_size'], model_config['hidden_dim'], len(dataset.vocab)).cuda()
	shakespeare_disc.load_state_dict(torch.load('./shakespeare_disc.pth'))

	shakespeare_disc.eval()

	num_correct = 0
	total_samples = 0
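	# a prediction counts as correct when the Shakespeare sentence is scored 1 and the modern sentence 0, matching the labels used during training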

	for idx, (s, s_addn_feats, m, m_addn_feats) in tqdm(enumerate(dataloader)):
		s = s.transpose(0, 1)
		m = m.transpose(0, 1)

		total_samples += 2

		s = Variable(s).cuda()
		s_output = shakespeare_disc(s, s_addn_feats)

		if round(s_output.item()) == 1.0:
			num_correct += 1

		m = Variable(m).cuda()
		m_output = shakespeare_disc(m, m_addn_feats)

		if round(m_output.item()) == 0.0:
			num_correct += 1

	print('Accuracy: {}'.format(num_correct / total_samples))
def main():
    # init random seed
    init_random_seed(params.manual_seed)

    # Load dataset
    mnist_data_loader = get_mnist(train=True, download=True)
    mnist_data_loader_eval = get_mnist(train=False, download=True)
    usps_data_loader = get_usps(train=True, download=True)
    usps_data_loader_eval = get_usps(train=False, download=True)

    # Model init ADDA
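    # (ADDA keeps separate source and target encoders; the target encoder is initialized from the trained source encoder below)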
    src_encoder = model_init(Encoder(), params.src_encoder_adda_rb_path)
    tgt_encoder = model_init(Encoder(), params.tgt_encoder_adda_rb_path)
    critic = model_init(Discriminator(), params.disc_adda_rb_path)
    clf = model_init(Classifier(), params.clf_adda_rb_path)

    # Train source model for adda
    print(
        "====== Robust training source encoder and classifier in MNIST domain ======"
    )
    if not (src_encoder.pretrained and clf.pretrained
            and params.model_trained):
        src_encoder, clf = train_src_robust(src_encoder, clf,
                                            mnist_data_loader)

    # Eval source model
    print("====== Evaluating classifier for MNIST domain ======")
    eval_tgt(src_encoder, clf, mnist_data_loader_eval)

    # Train target encoder
    print("====== Robust training encoder for USPS domain ======")
    # Initialize target encoder's weights with those of the source encoder
    if not tgt_encoder.pretrained:
        tgt_encoder.load_state_dict(src_encoder.state_dict())

    if not (tgt_encoder.pretrained and critic.pretrained
            and params.model_trained):
        tgt_encoder = train_tgt_adda(src_encoder,
                                     tgt_encoder,
                                     clf,
                                     critic,
                                     mnist_data_loader,
                                     usps_data_loader,
                                     usps_data_loader_eval,
                                     robust=True)

    # Eval target encoder on test set of target dataset
    print("====== Ealuating classifier for encoded USPS domain ======")
    print("-------- Source only --------")
    eval_tgt(src_encoder, clf, usps_data_loader_eval)
    print("-------- Domain adaption --------")
    eval_tgt(tgt_encoder, clf, usps_data_loader_eval)
Example #6
def main():
    opt = get_opt()
    print(opt)
    print("Start to train stage: %s" % (opt.stage))

    # create dataset
    if opt.stage == "Shape":
        dataset = PolyDatasetShape(128)
        train_loader = DataLoader(dataset,
                                  batch_size=opt.b,
                                  shuffle=False,
                                  num_workers=opt.j,
                                  drop_last=True,
                                  pin_memory=True)

    elif opt.stage == "Stitch":
        dataset = PolyDatasetStitch(128)
        train_loader = DataLoader(dataset,
                                  batch_size=opt.b,
                                  shuffle=False,
                                  num_workers=opt.j,
                                  drop_last=True,
                                  pin_memory=True)

    elif opt.stage == "Refine":
        dataset = PolyDatasetRefine(128)
        train_loader = DataLoader(dataset,
                                  batch_size=opt.b,
                                  shuffle=False,
                                  num_workers=opt.j,
                                  drop_last=True,
                                  pin_memory=True)
    else:
        sys.exit("Please mention the Stage from [Shape, Stitch, Refine]")

    if not os.path.exists(opt.results):
        os.makedirs(opt.results)
    netG = GeneratorCoarse(opt.input_channel, 3)
    netD = Discriminator()
    # create model & train & save the final checkpoint
    netG.cuda()

    netD.cuda()

    netG.apply(weights_init_normal)
    netD.apply(weights_init_normal)

    train(opt, train_loader, netG, netD)

    print('Finished training %s!' % (opt.stage))
Example #7
def main():
    # init random seed
    init_random_seed(params.manual_seed)

    # Load dataset
    svhn_data_loader = get_svhn(split='train', download=True)
    svhn_data_loader_eval = get_svhn(split='test', download=True)
    mnist_data_loader = get_mnist(train=True, download=True)
    mnist_data_loader_eval = get_mnist(train=False, download=True)

    # Model init DANN
    tgt_encoder = model_init(Encoder(), params.tgt_encoder_dann_rb_path)
    critic = model_init(
        Discriminator(in_dims=params.d_in_dims,
                      h_dims=params.d_h_dims,
                      out_dims=params.d_out_dims), params.disc_dann_rb_path)
    clf = model_init(Classifier(), params.clf_dann_rb_path)

    # Train models
    print(
        "====== Training source encoder and classifier in SVHN and MNIST domains ======"
    )
    if not (tgt_encoder.pretrained and clf.pretrained and critic.pretrained
            and params.model_trained):
        tgt_encoder, clf, critic = train_dann(tgt_encoder,
                                              clf,
                                              critic,
                                              svhn_data_loader,
                                              mnist_data_loader,
                                              mnist_data_loader_eval,
                                              robust=True)

    # Eval target encoder on test set of target dataset
    print(
        "====== Evaluating classifier for encoded SVHN and MNIST domains ======"
    )
    print("-------- SVHN domain --------")
    eval_tgt_robust(tgt_encoder, clf, svhn_data_loader_eval)
    print("-------- MNIST adaption --------")
    eval_tgt_robust(tgt_encoder, clf, mnist_data_loader_eval)
Example #8
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True

quant = 'GS8' in args.base_model_str
subnet_model_path = os.path.join('subnet_structures', args.dataset, args.task,
                                 args.base_model_str, 'pth')

## Networks
# G:
dim_lst_path = os.path.join(subnet_model_path, 'epoch%d_netG.npy' % 199)
netG = Generator(args.input_nc,
                 args.output_nc,
                 dim_lst=np.load(dim_lst_path),
                 quant=quant).cuda()
# D:
netD = Discriminator(args.input_nc).cuda()

## results_dir:
optimizer_str = 'adam_lr%s_wd%s' % (args.lr, args.wd)
loss_str = 'beta%s_%s' % (args.beta, args.lc)
results_dir = os.path.join('finetune_results', args.dataset, args.task,
                           args.base_model_str,
                           '%s_%s' % (optimizer_str, loss_str))
img_dir = os.path.join(results_dir, 'img')
pth_dir = os.path.join(results_dir, 'pth')
create_dir(img_dir), create_dir(pth_dir)

# Optimizers
optimizer_G = torch.optim.Adam(netG.parameters(),
                               lr=args.lr,
                               weight_decay=args.wd,
Example #9
parser.add_argument("--lambda_gp",
                    type=int,
                    default=10,
                    help="lambda for gradient penalty")
opt = parser.parse_args()

exp_folder = "{}_{}".format(opt.exp_folder, opt.target_set)
os.makedirs("./exps/" + exp_folder, exist_ok=True)

# Loss function
adversarial_loss = torch.nn.BCEWithLogitsLoss()
distance_loss = torch.nn.L1Loss()

# Initialize generator and discriminator
generator = Generator()
discriminator = Discriminator()
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
generator.to(device)
discriminator.to(device)
adversarial_loss.to(device)


# Visualize a single batch
def visualizeSingleBatch(fp_loader_test,
                         opt,
                         exp_folder,
                         batches_done,
                         batch_size=8):
    print('Loading saved model ... \n{}'.format(
        './checkpoints/{}_{}.pth'.format(exp_folder, batches_done)))
Example #10
    try:
        model.trans.load_state_dict(torch.load(sys.argv[2]))
        model.atmos.load_state_dict(torch.load(sys.argv[3]))
        '''
        for param in model.trans.parameters():
            param.requires_grad = False
        for param in model.atmos.parameters():
            param.requires_grad = False
        '''
    except Exception as e:
        try:
            model.load_state_dict(torch.load(sys.argv[2]))
        except Exception as e:
            print("No weights. Training from scratch.")
    if MODE == 'GAN':
        model_d = Discriminator().to(device)
        optimizer_d = torch.optim.Adam(model_d.parameters(), lr=learning_rate)
        try:
            model_d.load_state_dict(torch.load(sys.argv[3]))
            if opt['parallel']:
                model_d = nn.DataParallel(model_d)
        except Exception as e:
            print("No weights. Training from scratch discrim.")
else:
    print('MODE INCORRECT : TRANS or ATMOS or FAST or DUAL or GAN')
    exit()

# Wrap in Data Parallel for multi-GPU use
if opt['parallel']:
    model = nn.DataParallel(model)
Example #11
iter_number = int(INPUT_SIZE / BATCH_SIZE) + 1
image_root = utils.download_images()
train_ds = utils.load_data(image_root)
train_ds = utils.prepare_train_ds(train_ds,
                                  BATCH_SIZE,
                                  BUFFER_SIZE,
                                  image_size=128)

# Build generator and discriminator models.
generator_net = Generator(dtype=flags.FLAGS.dtype)
generator_optimizer = tf.train.AdamOptimizer(
    learning_rate=flags.FLAGS.learning_rate_generator,
    beta1=flags.FLAGS.beta1,
    beta2=flags.FLAGS.beta2)

discriminator_net = Discriminator(alpha=flags.FLAGS.alpha,
                                  dtype=flags.FLAGS.dtype)
discriminator_optimizer = tf.train.AdamOptimizer(
    learning_rate=flags.FLAGS.learning_rate_discriminator,
    beta1=flags.FLAGS.beta1,
    beta2=flags.FLAGS.beta2)

# Print the network structure to show that the model is well built.
generator_net.build(input_shape=(None, 128))
discriminator_net.build(input_shape=(None, 128, 128, 3))

generator_net.summary()
discriminator_net.summary()

# Save model parameters and tensorboard information for the adversarial network
basepath = "./mnist/" + str(flags.FLAGS.model_id)
logdir = os.path.join(basepath, "logs")
opt = parser.parse_args()
print(opt)

random.seed(opt.seed)
torch.manual_seed(opt.seed)
if torch.cuda.is_available():
    torch.cuda.manual_seed(opt.seed)

# Networks
if opt.upsample == 'ori':
    netG_A2B = Generator_ori(opt.input_nc, opt.output_nc)
    netG_B2A = Generator_ori(opt.output_nc, opt.input_nc)
else:
    netG_A2B = Generator(opt.input_nc, opt.output_nc)
    netG_B2A = Generator(opt.output_nc, opt.input_nc)
netD_A = Discriminator(opt.input_nc)
netD_B = Discriminator(opt.output_nc)

netG_A2B.cuda()
netG_B2A.cuda()
netD_A.cuda()
netD_B.cuda()

netG_A2B.apply(weights_init_normal)
netG_B2A.apply(weights_init_normal)
netD_A.apply(weights_init_normal)
netD_B.apply(weights_init_normal)

torch.save(netG_A2B.state_dict(),
           "initial_weights/netG_A2B_seed_{}.pth.tar".format(opt.seed))
torch.save(netG_B2A.state_dict(),
Example #13
print(args)


os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True

quant = 'GS8' in args.base_model_str
subnet_model_path = os.path.join('subnet_structures', args.dataset, args.task, args.base_model_str, 'pth')

## Networks
# G:
dim_lst_path = os.path.join(subnet_model_path, 'epoch%d_netG.npy' % 199)
netG = Generator(args.input_nc, args.output_nc, dim_lst=np.load(dim_lst_path), quant=quant).cuda()
# D:
netD = Discriminator(args.input_nc).cuda()

## results_dir:
optimizer_str = 'adam_lr%s_wd%s' % (args.lr, args.wd)
loss_str = 'beta%s_%s' % (args.beta, args.lc)
results_dir = os.path.join('finetune_results', args.dataset, args.task, args.base_model_str, 
    '%s_%s' % (optimizer_str, loss_str))
img_dir = os.path.join(results_dir, 'img')
pth_dir = os.path.join(results_dir, 'pth')
create_dir(img_dir), create_dir(pth_dir)

# Optimizers 
optimizer_G = torch.optim.Adam(netG.parameters(), lr=args.lr, weight_decay=args.wd, betas=(0.5, 0.999)) # lr=1e-3
optimizer_D = torch.optim.Adam(netD.parameters(), lr=args.lr, weight_decay=args.wd, betas=(0.5, 0.999)) # lr=1e-3

# LR schedulers:
Example #14
    return syn_feature, syn_label, syn_att

""""pre-train a classifier on seen classes"""
trc = utils.train_cla(data.train_feature, data.train_label, CLA, device=opts.device, )
trc.run(50, data.test_seen_feature, data.test_seen_label, save_path='./cla_model')
# load best classifier
pre_cla = torch.load("./cla_model/model.pt")


for p in pre_cla.parameters():  # set requires_grad to False
    p.requires_grad = False
    
if opts.GD == 1:
    netG = Generator(opts).to(opts.device)
    netD = Discriminator(opts).to(opts.device)
else:
    netG = Generator1(opts).to(opts.device)
    netD = Discriminator1(opts).to(opts.device)

# seen reconstructor
netRS = Reconstructor(opts).to(opts.device)
# unseen reconstructor
netRU = Reconstructor(opts).to(opts.device)

if opts.optimizer == "ADAM":
    optimzerF = optim.Adam
else:
    optimzerF = optim.RMSprop

#train setup
Example #15
loss_str = 'alpha%s_beta%s_%s' % (args.alpha, args.beta, args.lc)
results_dir = os.path.join(
    'distill_results', args.dataset, args.task,
    '%s_%s_%s_%s' % (method_str, loss_str, opt_str, W_optimizer_str))
img_dir = os.path.join(results_dir, 'img')
pth_dir = os.path.join(results_dir, 'pth')
create_dir(img_dir), create_dir(pth_dir)

## Networks
# G:
netG = Generator(args.input_nc,
                 args.output_nc,
                 quant=args.quant,
                 alpha=args.alpha).cuda()
# D:
netD = Discriminator(args.input_nc).cuda()

# FLOPs for G:
netG.cpu()
count_ops = measure_model(netG, 256, 256)
print("#parameters: %s" % model_param_num(netG))
f = open(os.path.join(results_dir, 'model_size.txt'), 'a+')
f.write('count_ops: {:.6f}M'.format(count_ops / 1024. / 1024.))
f.write("#parameters: %s" % model_param_num(netG))
f.close()
netG.cuda()

# Optimizers:
optimizer_G = torch.optim.Adam(netG.parameters(),
                               lr=args.lrw,
                               weight_decay=args.wd,
Example #16
opt = parser.parse_args()

cuda = True if torch.cuda.is_available() else False
lambda_gp = 10
multi_gpu = False

exp_folder = "{}_{}".format(opt.exp_folder, opt.target_set)
os.makedirs("./exps/" + exp_folder, exist_ok=True)

# Loss function
adversarial_loss = torch.nn.BCEWithLogitsLoss()
distance_loss = torch.nn.L1Loss()

# Initialize generator and discriminator
generator = Generator()
discriminator = Discriminator()
if cuda:
    generator.cuda()
    discriminator.cuda()
    adversarial_loss.cuda()


# Visualize a single batch
def visualizeSingleBatch(fp_loader_test,
                         opt,
                         exp_folder,
                         batches_done,
                         batch_size=8):
    print('Loading saved model ... \n{}'.format(
        './checkpoints/{}_{}.pth'.format(exp_folder, batches_done)))
    generatorTest = Generator()
Example #17
gamma_optimizer_str = 'sgd_mom%s_lrgamma%s' % (args.momentum, args.lrgamma)
W_optimizer_str = 'adam_lrw%s_wd%s' % (args.lrw, args.wd)
opt_str = 'e%d-b%d' % (args.epochs, args.batch_size)
loss_str = 'rho%s_beta%s_%s' % (args.rho, args.beta, args.lc)
results_dir = os.path.join(
    'results', args.dataset, args.task, '%s_%s_%s_%s_%s' %
    (method_str, loss_str, opt_str, gamma_optimizer_str, W_optimizer_str))
img_dir = os.path.join(results_dir, 'img')
pth_dir = os.path.join(results_dir, 'pth')
create_dir(img_dir), create_dir(pth_dir)

## Networks
# G:
netG = Generator(args.input_nc, args.output_nc, quant=args.quant).cuda()
# D:
netD = Discriminator(args.input_nc).cuda()

# param list:
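# 1-D 'weight' parameters (likely the normalization scale / gamma factors, given the naming) are collected separately so they get their own optimizer below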
parameters_G, parameters_D, parameters_gamma = [], [], []
for name, para in netG.named_parameters():
    if 'weight' in name and para.ndimension() == 1:
        parameters_gamma.append(para)
    else:
        parameters_G.append(para)
for name, para in netD.named_parameters():
    # print(name, para.size(), para.ndimension())
    parameters_D.append(para)
print('parameters_gamma:', len(parameters_gamma))

# Optimizers:
optimizer_gamma = torch.optim.SGD(parameters_gamma,
def train(model_config, train_config):
	mode = 'train'

	dataset = ShakespeareModern(train_shakespeare_path, test_shakespeare_path, train_modern_path, test_modern_path, mode=mode)	
	dataloader = DataLoader(dataset, batch_size=train_config['batch_size'], shuffle=False)
	print(dataset.domain_A_max_len)
	shakespeare_disc = Discriminator(model_config['embedding_size'], model_config['hidden_dim'], len(dataset.vocab), batch_size=train_config['batch_size']).cuda()
	shakespeare_disc.train()

	if train_config['continue_train']:
		shakespeare_disc.load_state_dict(torch.load(train_config['model_path']))

	criterion = nn.BCELoss().cuda()
	optimizer = torch.optim.Adam(shakespeare_disc.parameters(), lr=train_config['base_lr'],
								 weight_decay=1e-5)

	real_label = torch.ones((train_config['batch_size'], 1)).cuda()
	fake_label = torch.zeros((train_config['batch_size'], 1)).cuda()

	for epoch in range(train_config['num_epochs']):
		for idx, (s, s_addn_feats, m, m_addn_feats) in tqdm(enumerate(dataloader)):
			s = s.transpose(0, 1)
			m = m.transpose(0, 1)

			s = Variable(s).cuda()
			s_output = shakespeare_disc(s, s_addn_feats)
			s_loss = criterion(s_output, real_label)
			s_loss = 100 * s_loss
			optimizer.zero_grad()
			s_loss.backward()
			optimizer.step()
			shakespeare_disc.hidden = shakespeare_disc.init_hidden()

			m = Variable(m).cuda()
			m_output = shakespeare_disc(m, m_addn_feats)
			m_loss = criterion(m_output, fake_label)
			m_loss = 100 * m_loss
			optimizer.zero_grad()
			m_loss.backward()
			optimizer.step()
			shakespeare_disc.hidden = shakespeare_disc.init_hidden()

			if idx % 100 == 0:
				print('\tepoch [{}/{}], iter: {}, s_loss: {:.4f}, m_loss: {:.4f}, preds: s: {}, {}, m: {}, {}'
					.format(epoch+1, train_config['num_epochs'], idx, s_loss.item(), m_loss.item(), s_output.item(), round(s_output.item()), m_output.item(), round(m_output.item())))

		print('\tepoch [{}/{}]'.format(epoch+1, train_config['num_epochs']))

		torch.save(shakespeare_disc.state_dict(), './shakespeare_disc.pth')
Example #19
def main():

    parser = argparse.ArgumentParser(
        description='Train Cartoon avatar Gan models')
    parser.add_argument('--crop_size',
                        default=64,
                        type=int,
                        help='Training images crop size')
    parser.add_argument('--num_epochs',
                        default=50,
                        type=int,
                        help='Train epoch number')
    parser.add_argument('--data_root',
                        default='data/cartoon',
                        help='Root directory for dataset')
    parser.add_argument('--worker',
                        default=2,
                        type=int,
                        help='Number of workers for dataloader')
    parser.add_argument('--batch_size',
                        default=16,
                        type=int,
                        help='Batch size during training')
    parser.add_argument('--channels',
                        default=3,
                        type=int,
                        help='Number of channels in the training images')
    parser.add_argument('--nz',
                        default=100,
                        type=int,
                        help='Size of generator input')
    parser.add_argument('--ngf',
                        default=64,
                        type=int,
                        help='Size of feature maps in generator')
    parser.add_argument('--ndf',
                        default=64,
                        type=int,
                        help='Size of feature maps in discriminator')
    parser.add_argument('--lr',
                        default=0.0002,
                        type=float,
                        help='Learning rate for optimizer')
    parser.add_argument('--beta1',
                        default=0.5,
                        type=float,
                        help='Beta1 hyperparam for Adam optimizers')
    parser.add_argument('--beta2',
                        default=0.999,
                        type=float,
                        help='Beta2 hyperparam for Adam optimizers')
    parser.add_argument('--ngpu',
                        default=1,
                        type=int,
                        help='Number of GPUs , use 0 for CPU mode')
    parser.add_argument(
        '--latent_vector_num',
        default=8,
        type=int,
        help='number of latent vectors to visualize; e.g. 8 visualizes 8 sample images during training'
    )
    opt = parser.parse_args()

    dataroot = opt.data_root
    workers = opt.worker
    batch_size = opt.batch_size
    image_size = opt.crop_size
    nc = opt.channels
    nz = opt.nz
    ngf = opt.ngf
    ndf = opt.ndf
    num_epochs = opt.num_epochs
    lr = opt.lr
    beta1 = opt.beta1
    beta2 = opt.beta2
    ngpu = opt.ngpu
    latent_vector_num = opt.latent_vector_num

    # Create the dataset
    dataset = dset.ImageFolder(root=dataroot,
                               transform=transforms.Compose([
                                   transforms.Resize(image_size),
                                   transforms.CenterCrop(image_size),
                                   transforms.ToTensor(),
                                   transforms.Normalize((0.5, 0.5, 0.5),
                                                        (0.5, 0.5, 0.5)),
                               ]))
    # Create the dataloader
    dataloader = torch.utils.data.DataLoader(dataset,
                                             batch_size=batch_size,
                                             shuffle=True,
                                             num_workers=workers)

    # Decide which device we want to run on
    device = torch.device("cuda:0" if (
        torch.cuda.is_available() and ngpu > 0) else "cpu")

    # Create the generator
    netG = Generator(ngpu, nz, ngf, nc).to(device)
    # Create the Discriminator
    netD = Discriminator(ngpu, nc, ndf).to(device)

    # Handle multi-gpu if desired
    if (device.type == 'cuda') and (ngpu > 1):
        netG = nn.DataParallel(netG, list(range(ngpu)))
        netD = nn.DataParallel(netD, list(range(ngpu)))

    # Apply the weights_init function to randomly initialize all weights
    #  to mean=0, stdev=0.2.
    netG.apply(weights_init)
    netD.apply(weights_init)

    # Setup Adam optimizers for both G and D
    optimizerD = optim.Adam(netD.parameters(), lr=lr, betas=(beta1, beta2))
    optimizerG = optim.Adam(netG.parameters(), lr=lr, betas=(beta1, beta2))

    # Print models
    print(netG)
    print(netD)

    # Initialize BCELoss function
    criterion = nn.BCELoss()

    # Create batch of latent vectors that we will use to visualize
    fixed_noise = torch.randn(latent_vector_num, nz, 1, 1, device=device)

    # real and fake labels during training
    real_label = 1
    fake_label = 0

    # Lists to keep track of progress
    img_list = []
    G_losses = []
    D_losses = []
    iters = 0

    print("Starting Training ...")

    for epoch in range(num_epochs):
        for i, data in enumerate(dataloader, 0):

            ############################
            # (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
            ###########################
            ## Train with all-real batch
            netD.zero_grad()
            # Format batch
            real_cpu = data[0].to(device)
            b_size = real_cpu.size(0)
            label = torch.full((b_size, ), real_label, dtype=torch.float, device=device)
            # Forward pass real batch through D
            output = netD(real_cpu).view(-1)
            # Calculate loss on all-real batch
            errD_real = criterion(output, label)
            # Calculate gradients for D in backward pass
            errD_real.backward()
            D_x = output.mean().item()

            ## Train with all-fake batch
            # Generate batch of latent vectors
            noise = torch.randn(b_size, nz, 1, 1, device=device)
            # Generate fake image batch with G
            fake = netG(noise)
            label.fill_(fake_label)
            # Classify all fake batch with D
            output = netD(fake.detach()).view(-1)
            # Calculate D's loss on the all-fake batch
            errD_fake = criterion(output, label)
            # Calculate the gradients for this batch
            errD_fake.backward()
            D_G_z1 = output.mean().item()
            # Add the gradients from the all-real and all-fake batches
            errD = errD_real + errD_fake
            # Update D
            optimizerD.step()

            ############################
            # (2) Update G network: maximize log(D(G(z)))
            ###########################
            netG.zero_grad()
            label.fill_(real_label)
            # fake labels are real for generator cost
            # Since we just updated D, perform another forward pass of all-fake batch through D
            output = netD(fake).view(-1)
            # Calculate G's loss based on this output
            errG = criterion(output, label)
            # Calculate gradients for G
            errG.backward()
            D_G_z2 = output.mean().item()
            # Update G
            optimizerG.step()

            # Output training stats
            if i % 50 == 0:
                # Save model data
                torch.save(netG.state_dict(),
                           'pretrained_model/netG_epoch_%d.pth' % (iters))
                torch.save(netD.state_dict(),
                           'pretrained_model/netD_epoch_%d.pth' % (iters))
                # Print training stats
                print(
                    '[%d/%d][%d/%d]\tLoss_D: %.4f\tLoss_G: %.4f\tD(x): %.4f\tD(G(z)): %.4f / %.4f'
                    % (epoch, num_epochs, i, len(dataloader), errD.item(),
                       errG.item(), D_x, D_G_z1, D_G_z2))

            # Save Losses for plotting later
            G_losses.append(errG.item())
            D_losses.append(errD.item())

            # Check how the generator is doing by saving G's output on fixed_noise
            if (iters % 650 == 0) or ((epoch == num_epochs - 1) and
                                      (i == len(dataloader) - 1)):
                with torch.no_grad():
                    fake = netG(fixed_noise).detach().cpu()
                img_list.append(
                    vutils.make_grid(fake, padding=2, normalize=True))

            iters += 1

    # Display and Save samples GIF
    fig = plt.figure(figsize=(8, 8))
    plt.axis("off")
    ims = [[plt.imshow(np.transpose(i, (1, 2, 0)), animated=True)]
           for i in img_list]
    ani = animation.ArtistAnimation(fig,
                                    ims,
                                    interval=1000,
                                    repeat_delay=1000,
                                    blit=True)
    ani.save('output/samples.gif', writer='imagemagick', fps=100)
Example #20
gamma_optimizer_str = 'sgd_mom%s_lrgamma%s' % (args.momentum, args.lrgamma)
W_optimizer_str = 'adam_lrw%s_wd%s' % (args.lrw, args.wd)
opt_str = 'e%d-b%d' % (args.epochs, args.batch_size)
loss_str = 'rho%s_beta%s_%s' % (args.rho, args.beta, args.lc)
results_dir = os.path.join(
    'results', args.dataset, args.task, '%s_%s_%s_%s_%s' %
    (method_str, loss_str, opt_str, gamma_optimizer_str, W_optimizer_str))
img_dir = os.path.join(results_dir, 'img')
pth_dir = os.path.join(results_dir, 'pth')
create_dir(img_dir), create_dir(pth_dir)

## Networks
# G:
netG = Generator(args.input_nc, args.output_nc, quant=args.quant).cuda()
# D:
netD = Discriminator(args.input_nc).cuda()

# param list:
parameters_G, parameters_D, parameters_gamma = [], [], []
for name, para in netG.named_parameters():
    if 'weight' in name and para.ndimension() == 1:
        parameters_gamma.append(para)
    else:
        parameters_G.append(para)
for name, para in netD.named_parameters():
    # print(name, para.size(), para.ndimension())
    parameters_D.append(para)
print('parameters_gamma:', len(parameters_gamma))

# Optimizers:
optimizer_gamma = torch.optim.SGD(parameters_gamma,
def main():
    # init random seed
    init_random_seed(params.manual_seed)

    # Load dataset
    mnist_data_loader = get_mnist(train=True, download=True)
    mnist_data_loader_eval = get_mnist(train=False, download=True)
    usps_data_loader = get_usps(train=True, download=True)
    usps_data_loader_eval = get_usps(train=False, download=True)

    # Model init DANN
    tgt_encoder = model_init(Encoder(), params.tgt_encoder_dann_rb_path)
    critic = model_init(Discriminator(), params.disc_dann_rb_path)
    clf = model_init(Classifier(), params.clf_dann_rb_path)

    # Train models
    print(
        "====== Robust Training source encoder and classifier in MNIST and USPS domains ======"
    )
    if not (tgt_encoder.pretrained and clf.pretrained and critic.pretrained
            and params.model_trained):
        tgt_encoder, clf, critic = train_dann(tgt_encoder,
                                              clf,
                                              critic,
                                              mnist_data_loader,
                                              usps_data_loader,
                                              usps_data_loader_eval,
                                              robust=False)

    # Eval target encoder on test set of target dataset
    print(
        "====== Evaluating classifier for encoded MNIST and USPS domains ======"
    )
    print("-------- MNIST domain --------")
    eval_tgt_robust(tgt_encoder, clf, critic, mnist_data_loader_eval)
    print("-------- USPS adaption --------")
    eval_tgt_robust(tgt_encoder, clf, critic, usps_data_loader_eval)

    print("====== Pseudo labeling on USPS domain ======")
    pseudo_label(tgt_encoder, clf, "usps_train_pseudo", usps_data_loader)

    # Init a new model
    tgt_encoder = model_init(Encoder(), params.tgt_encoder_path)
    clf = model_init(Classifier(), params.clf_path)

    # Load pseudo labeled dataset
    usps_pseudo_loader = get_usps(train=True, download=True, get_pseudo=True)

    print("====== Standard training on USPS domain with pseudo labels ======")
    if not (tgt_encoder.pretrained and clf.pretrained):
        train_src_adda(tgt_encoder, clf, usps_pseudo_loader, mode='ADV')
    print("====== Evaluating on USPS domain with real labels ======")
    eval_tgt(tgt_encoder, clf, usps_data_loader_eval)

    tgt_encoder = model_init(Encoder(), params.tgt_encoder_rb_path)
    clf = model_init(Classifier(), params.clf_rb_path)
    print("====== Robust training on USPS domain with pseudo labels ======")
    if not (tgt_encoder.pretrained and clf.pretrained):
        train_src_robust(tgt_encoder, clf, usps_pseudo_loader, mode='ADV')
    print("====== Evaluating on USPS domain with real labels ======")
    eval_tgt(tgt_encoder, clf, usps_data_loader_eval)
Example #22
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True

quant = 'GS8' in args.base_model_str
subnet_model_path = os.path.join('subnet_structures_ticket', args.dataset,
                                 args.task, args.base_model_str, 'pth')

## Networks
# G:
dim_lst_path = os.path.join(subnet_model_path, 'epoch%d_netG.npy' % 199)
netG = Generator(args.input_nc,
                 args.output_nc,
                 dim_lst=np.load(dim_lst_path),
                 quant=quant).cuda()
# D:
netD = Discriminator(args.input_nc).cuda()

## results_dir:
optimizer_str = 'adam_lr%s_wd%s' % (args.lr, args.wd)
loss_str = ''
results_dir = os.path.join('cp_finetune_results', args.dataset, args.task,
                           args.base_model_str,
                           '%s_%s' % (optimizer_str, loss_str))
img_dir = os.path.join(results_dir, 'img')
pth_dir = os.path.join(results_dir, 'pth')
create_dir(img_dir), create_dir(pth_dir)

# Optimizers
optimizer_G = torch.optim.Adam(netG.parameters(),
                               lr=args.lr,
                               weight_decay=args.wd,
Example #23
else:
    source_str, target_str = 'B', 'A'
foreign_dir = '/home/haotao/PyTorch-CycleGAN/'
print(args)


os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True

## Networks
# G:
dim_lst_path = os.path.join(subnet_model_path, 'epoch%d_netG.npy' % 199)
netG = Generator(args.input_nc, args.output_nc, dim_lst=np.load(dim_lst_path), quant=quant).cuda()
# D:
netD = Discriminator(args.input_nc).cuda()


# load sub G extracted from latest.pth
g_path = os.path.join(args.base_model_str, 'epoch%d_netG.pth' % 199) 
netG.load_state_dict(torch.load(g_path))
print('load G from %s' % g_path)
# load full D directly from latest.pth
d_path = os.path.join('cp_results', args.dataset, args.task, args.base_model_str, 'pth', 'latest.pth')
netD.load_state_dict(torch.load(d_path)['netD'])
print('load D from %s' % d_path)
start_epoch = 0
best_FID = 1e9
loss_G_lst, loss_G_perceptual_lst, loss_G_GAN_lst, loss_D_lst = [], [], [], []

# Dataset loader: img shape=(256,256)