'loss': 'l1_plus',
    'loss_params': loss_params,
    'g_optimizer': 'adam',
    'd_optimizer': 'adam',
    'g_optim_opts': paper_opts,
    'd_optim_opts': paper_opts,
    'sample_interval': 205,
    'batch_size': 2,
    'epochs': 201,
    'save_model_interval': None,
    'save_img_interval': None,
    'save_dir': os.getenv('SDIR') + '/patchgan_variations/patch_70_l1_1_res1/',
    'save_summary': {
        'epochs': np.arange(10, 210, 10).tolist(),
        'box_size': (100,100),
        'transform': FCS(k=4, inverse=True),
        'n': 4,
        'grid_size': (2,2)
    }
}



# Training split loader over the paired datasets addressed by the D32/G32
# environment variables.
# NOTE(review): BahamasLoaderPaired is defined outside this file — assumed to
# accept (paths, batch_size, ntest, transform, train_set); confirm signature.
_train_paths = [os.getenv('D32'), os.getenv('G32')]
train_loader = BahamasLoaderPaired(
    _train_paths,
    batch_size=schedule['batch_size'],
    ntest=10,                    # presumably samples held out for testing — verify
    transform=transform_fcs,
    train_set=True,              # request the training split
)


test_loader = BahamasLoaderPaired([os.getenv('D32'), os.getenv('G32')],
# ---- snippet boundary: "예제 #2" (Example #2) — scraper artifact; a different
# ---- example configuration begins below ----
# Optimiser options derived from the base Adam options.
# BUG FIX: the original did `paper_opts = adam_opts`, which aliases the shared
# base dict, so the overrides below mutated `adam_opts` for every other user.
# Copy it instead, then apply the paper-specific overrides.
paper_opts = dict(adam_opts)
paper_opts['betas'] = (0.5, 0.999)  # beta1 lowered from the Adam default 0.9
paper_opts['lr'] = 1e-3             # learning rate used for this run

from src.configs.resnet.b6relu import g_structure
from src.configs.patchgan.nobn_nosig_bfalse import d_structure

# Run constants.
epoch_end = 45   # last epoch to train to
ntest = 36       # presumably number of held-out test samples — confirm against loader

# Paired dataset paths (environment-configured) and their index grouping:
# set 0 forms the first group, set 1 the second.
sets = [os.getenv('D32'), os.getenv('G32')]
grouping = [[0], [1]]

# One identical FCS transform per dataset. The original loop never used its
# `i`/`val` variables, so a comprehension states the intent directly.
transforms = [FCS(k=4, inverse=False, totorch=True) for _ in sets]

schedule = {
    'type': 'translator',
    'subtype': 'wgp',
    'warm_start': True,
    'loss': 'l1_plus',
    'loss_params': loss_params,
    'g_optimizer': 'adam',
    'd_optimizer': 'adam',
    'g_optim_opts': paper_opts,
    'd_optim_opts': paper_opts,
    'g_decay': torch.optim.lr_scheduler.StepLR,
    'd_decay': torch.optim.lr_scheduler.StepLR,
    'lrdecay_opts': {
        'step_size': 10
# ---- snippet boundary: "예제 #3" (Example #3) — scraper artifact; a different
# ---- example configuration begins below ----
    os.getenv('D32Z00'),
    os.getenv('D32Z05'),
    os.getenv('D32Z10'),
    os.getenv('D32Z20'),
    os.getenv('D32V2Z00'),
    os.getenv('G32Z00'),
    os.getenv('G32Z05'),
    os.getenv('G32Z10'),
    os.getenv('G32Z20'),
    os.getenv('G32V2Z00')
]
# Index groups into `sets`: indices 0-4 form the first group, 5-9 the second.
grouping = [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]

# Same FCS transform for every dataset. The original loop never used its
# `i`/`val` variables, so a comprehension replaces it.
transforms = [FCS(k=4, inverse=False, totorch=True, scale=1.75) for _ in sets]


def Schedule(name):
    schedule = {
        'type': 'translator',
        'subtype': 'wgp',
        'warm_start': True,
        'loss': 'l1_plus',
        'loss_params': loss_params,
        'g_optimizer': 'adam',
        'd_optimizer': 'adam',
        'g_optim_opts': paper_opts,
        'd_optim_opts': paper_opts,
        'g_decay': torch.optim.lr_scheduler.StepLR,
        'd_decay': torch.optim.lr_scheduler.StepLR,
# ---- snippet boundary: "예제 #4" (Example #4) — scraper artifact; a different
# ---- example configuration begins below ----
    os.getenv('D32Z10'),
    os.getenv('D32Z20'),
    os.getenv('D32V2Z00'),
    os.getenv('G32Z00'),
    os.getenv('G32Z05'),
    os.getenv('G32Z10'),
    os.getenv('G32Z20'),
    os.getenv('G32V2Z00')
]
# Index groups into `sets`: indices 0-4 form the first group, 5-9 the second.
grouping = [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]

# Per-dataset sign for the FCS `scale` argument: +1 for the first five sets,
# -1 for the last five.
scales = [1, 1, 1, 1, 1, -1, -1, -1, -1, -1]

# Pair each dataset with its scale; zip replaces the index-based loop whose
# value variable was unused.
transforms = [
    FCS(k=4, inverse=False, totorch=True, scale=s, shift=0)
    for s, _ in zip(scales, sets)
]


def Schedule(name):
    schedule = {
        'type': 'translator',
        'subtype': 'wgp',
        'warm_start': True,
        'loss': 'l1_plus',
        'loss_params': loss_params,
        'g_optimizer': 'adam',
        'd_optimizer': 'adam',
        'g_optim_opts': paper_opts,
        'd_optim_opts': paper_opts,
        'g_decay': torch.optim.lr_scheduler.StepLR,
        'd_decay': torch.optim.lr_scheduler.StepLR,