Example #1
0
        # (tail of the SWA branch's register_modules call — its opening lines
        # are above this view; it additionally registers the SWA classifier)
        netC_swa=netC_swa,
        optim_G=optim_G,
        optim_D=optim_D,
        optim_c=optim_c,
    )
else:
    # No SWA: register only generator, discriminator, classifier and its
    # teacher (netC_T), plus their optimizers, so checkpoints can restore
    # the full training state.
    checkpoint_io.register_modules(
        netG=netG,
        netD=netD,
        netC=netC,
        netC_T=netC_T,
        optim_G=optim_G,
        optim_D=optim_D,
        optim_c=optim_c,
    )
# Summary/metric logger rooted at this run's summaries directory.
logger = Logger(log_dir=SUMMARIES_FOLDER)

# train
print_interval = 50              # iterations between console/metric prints
image_interval = 500             # iterations between image-sample dumps
max_iter = FLAGS.n_iter          # total Triple-GAN training iterations
pretrain_inter = FLAGS.n_iter_pretrain  # classifier pretraining iterations
# Look up the loss/step callables selected by the config flags.
loss_func_g = loss_triplegan.g_loss_dict[FLAGS.gan_type]        # generator loss
loss_func_d = loss_triplegan.d_loss_dict[FLAGS.gan_type]        # discriminator loss
loss_func_c_adv = loss_triplegan.c_loss_dict[FLAGS.gan_type]    # classifier adversarial loss
loss_func_c = loss_classifier.c_loss_dict[FLAGS.c_loss]         # classifier (semi-supervised) loss
step_func = loss_classifier.c_step_func[FLAGS.c_step]           # classifier optimizer-step strategy

# Template for progress log lines: iteration, total, percent complete.
logger_prefix = "Itera {}/{} ({:.0f}%)"

for i in range(pretrain_inter):  # classifier pretraining loop; original note "1w" — presumably Chinese shorthand for 10,000 iterations, TODO confirm
Example #2
0
    __file__, FLAGS, CONFIG)
# Snapshot the active config next to the run's summaries for reproducibility.
shutil.copy(FLAGS.config_file, os.path.join(SUMMARIES_FOLDER, "config.yaml"))
# Fixed seeds + deterministic cuDNN (benchmark off) for reproducible runs.
torch.manual_seed(1234)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
np.random.seed(12345)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Quadratic ResNet-18 energy model: 3 input channels, FLAGS.n_chan base
# channels, 32x32 inputs.
model = Res18_Quadratic(3, FLAGS.n_chan, 32).to(device)
optimizer = torch.optim.Adam(model.parameters(), 5e-5)  # fixed lr 5e-5

# Checkpointing for the model only (optimizer state is not registered here).
checkpoint_io = Torture.utils.checkpoint.CheckpointIO(
    checkpoint_dir=MODELS_FOLDER)
checkpoint_io.register_modules(model=model)

logger = Logger(log_dir=SUMMARIES_FOLDER)

# NOTE(review): CUDA RNG is seeded from FLAGS while CPU/numpy seeds above are
# hard-coded — confirm this mix is intentional.
torch.cuda.manual_seed(FLAGS.rand_seed)
if FLAGS.dataset == 'cifar':
    # Infinite CIFAR batch iterator (no horizontal flips) and the energy
    # network (unnormalized, ELU activations).
    itr = inf_train_gen_cifar(FLAGS.batch_size, flip=False)
    netE = Res18_Quadratic(3, FLAGS.n_chan, 32, normalize=False, AF=nn.ELU())
else:
    # BUG FIX: the exception was constructed but never raised, so an unknown
    # dataset silently fell through and later crashed with `netE` undefined.
    raise NotImplementedError('{} unknown dataset'.format(FLAGS.dataset))

# NOTE(review): `device` was already assigned identically above — this
# reassignment is redundant but harmless.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
netE = netE.to(device)
# Wrap in DataParallel only when multiple GPUs are requested.
if FLAGS.n_gpus > 1:
    netE = nn.DataParallel(netE)

# setup optimizer and lr scheduler
# Adam hyperparameters; `lr` here is the peak rate fed to the scheduler.
params = {'lr': FLAGS.max_lr, 'betas': (0.9, 0.95), 'weight_decay': 1e-5}
Example #3
0
import Torture

FLAGS = flags.FLAGS
# Load the YAML config; returns the key arguments used to tag this run.
KEY_ARGUMENTS = config.load_config(FLAGS.config_file)
# Source directories snapshotted alongside the run for reproducibility.
FILES_TO_BE_SAVED = ["./", "./configs", "./ESM"]
CONFIG = {"FILES_TO_BE_SAVED": FILES_TO_BE_SAVED, "KEY_ARGUMENTS": KEY_ARGUMENTS}
# save_context copies the listed files and creates per-run output folders.
text_logger, MODELS_FOLDER, SUMMARIES_FOLDER = save_context(__file__, CONFIG)

# Fixed seeds for CPU, numpy, and CUDA RNGs.
torch.manual_seed(1234)
torch.backends.cudnn.deterministic = True
# NOTE(review): benchmark=True lets cuDNN autotune kernels, which can pick
# nondeterministic algorithms — this conflicts with deterministic=True above;
# confirm which property is actually wanted.
torch.backends.cudnn.benchmark = True
np.random.seed(1235)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
torch.cuda.manual_seed(1236)

logger = Logger(log_dir=SUMMARIES_FOLDER)

# Data iterator and energy model come from the project-level `inputs` module.
itr = inputs.get_data_iter()
netE = inputs.get_model()
netE = netE.to(device)
netE = nn.DataParallel(netE)

optimizerE, scheduler = inputs.get_optimizer_scheduler(netE)
loss_func = inputs.get_train_loss()  # training objective
loss_eval = inputs.get_eval_loss()   # evaluation objective

# Only the (DataParallel-wrapped) network is checkpointed; optimizer and
# scheduler state are not registered here.
checkpoint_io = Torture.utils.checkpoint.CheckpointIO(checkpoint_dir=MODELS_FOLDER)
checkpoint_io.register_modules(netE=netE)

# train
print_interval = 50  # iterations between log prints
Example #4
0
    # Build a second classifier instance to hold the SWA (stochastic weight
    # averaging) copy; its paired optimizer is discarded.
    netC_swa, _ = inputs.get_classifier_optimizer()
    netC_swa = netC_swa.to(device)
    netC_swa = nn.DataParallel(netC_swa)
    netC_swa.train()
    # WeightSWA maintains the running average of netC_swa's weights.
    swa_optim = optim_weight_swa.WeightSWA(netC_swa)
    # The SWA copy is updated by averaging, never by backprop — freeze grads.
    for p in netC_swa.parameters():
        p.requires_grad_(False)
    # Initialize the SWA copy from netC (momentum 0 — presumably a direct
    # copy; confirm against Torture.update_average's semantics).
    Torture.update_average(netC_swa, netC, 0)

checkpoint_io = Torture.utils.checkpoint.CheckpointIO(
    checkpoint_dir=MODELS_FOLDER)
# Register the SWA copy only when the ramp_swa stepping strategy is active.
if FLAGS.c_step == "ramp_swa":
    checkpoint_io.register_modules(netC=netC, netC_T=netC_T, netC_swa=netC_swa)
else:
    checkpoint_io.register_modules(netC=netC, netC_T=netC_T)
logger = Logger(log_dir=SUMMARIES_FOLDER)
# train
print_interval = 50      # iterations between log prints
test_interval = 500      # iterations between evaluations
max_iter = FLAGS.n_iter  # total classifier training iterations
# Loss and optimizer-step strategy selected by config flags.
loss_func = loss_classifier.c_loss_dict[FLAGS.c_loss]
step_func = loss_classifier.c_step_func[FLAGS.c_step]

# Template for progress log lines: iteration, total, percent complete.
logger_prefix = "Itera {}/{} ({:.0f}%)"
for i in range(max_iter):
    # One classifier step: returns total, labeled, and unlabeled loss terms
    # computed from the labeled (itr) and unlabeled (itr_u) iterators.
    tloss, l_loss, u_loss = loss_func(netC, netC_T, i, itr, itr_u, device)
    if FLAGS.c_step == "ramp_swa":
        # ramp_swa stepping additionally updates the SWA-averaged classifier.
        step_func(optim_c, swa_optim, netC, netC_T, i, tloss)
    else:
        step_func(optim_c, netC, netC_T, i, tloss)
Example #5
0
# Data iterator restricted to FLAGS.n_labels labeled examples.
itr = inputs.get_data_iter(subset=FLAGS.n_labels)
netG, optim_G = inputs.get_generator_optimizer()
# netG_T, _ = inputs.get_generator_optimizer()
netD, optim_D = inputs.get_discriminator_optimizer()

netG, netD = netG.to(device), netD.to(device)
netG = nn.DataParallel(netG)
netD = nn.DataParallel(netD)

# Register both networks and their optimizers for checkpoint save/restore.
checkpoint_io = Torture.utils.checkpoint.CheckpointIO(
    checkpoint_dir=MODELS_FOLDER)
checkpoint_io.register_modules(netG=netG,
                               netD=netD,
                               optim_G=optim_G,
                               optim_D=optim_D)
logger = Logger(log_dir=SUMMARIES_FOLDER)

# train
print_interval = 250      # iterations between log prints
image_interval = 2500     # iterations between image-sample dumps
max_iter = FLAGS.n_iter   # total GAN training iterations
batch_size = FLAGS.batch_size
# GAN loss pair selected by the configured GAN type.
loss_func_g = loss_gan.g_loss_dict[FLAGS.gan_type]
loss_func_d = loss_gan.d_loss_dict[FLAGS.gan_type]

# Template for progress log lines: iteration, total, percent complete.
logger_prefix = "Itera {}/{} ({:.0f}%)"
for i in range(max_iter):
    # Fetch one labeled real batch and move it to the compute device.
    # IDIOM FIX: use the built-in next() rather than calling the dunder
    # itr.__next__() directly — identical behavior, standard form.
    x_real, label = next(itr)
    x_real, label = x_real.to(device), label.to(device)

    # Latent noise batch for the generator (loop body continues below).
    sample_z = torch.randn(batch_size, FLAGS.g_z_dim).to(device)