Example 1
def train(args, get_dataloader_func=get_pix2pix_maps_dataloader):
    logger = Logger(save_path=args.save, json_name='img2map')
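    # Resume point: one 'G_loss' entry is logged per completed epoch.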
    epoch_now = len(logger.get_data('G_loss'))

    model_saver = ModelSaver(
        save_path=args.save,
        name_list=[
            'G', 'D', 'E', 'G_optimizer', 'D_optimizer', 'E_optimizer',
            'G_scheduler', 'D_scheduler', 'E_scheduler', 'DLV3P',
            "DLV3P_global_optimizer", "DLV3P_backbone_optimizer",
            "DLV3P_global_scheduler", "DLV3P_backbone_scheduler"
        ])
    visualizer = Visualizer(
        keys=['image', 'encode_feature', 'fake', 'label', 'instance'])
    sw = SummaryWriter(args.tensorboard_path)
    G = get_G(args)
    D = get_D(args)
    model_saver.load('G', G)
    model_saver.load('D', D)

    # Report model size, then exit: this snippet only measures parameter counts.
    params_G = sum(param.nelement() for param in G.parameters())
    params_D = sum(param.nelement() for param in D.parameters())
    print(f"G params: {params_G}, D params: {params_D}")
    sys.exit(0)  # exit once the counts are printed
Example 2
import os.path as osp

from torch.optim import Adam
from tqdm import tqdm

from src.perceptual_loss.utils import normalize_tensor_transform, load_image, gram, save_image
from src.perceptual_loss.network import ImageTransformNet
from src.perceptual_loss.image_dataset import get_image_dataset, get_dataloader_from_dir
from src.perceptual_loss.train_config import config
from src.perceptual_loss.perceptual_loss import PerceptualLoss
from src.utils.visualizer import Visualizer
from src.utils.train_utils import model_accelerate, get_device
from src.utils.logger import ModelSaver, Logger

if __name__ == '__main__':

    args = config()
    visualizer = Visualizer(keys=['img'])
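    # Use the style image's basename (minus extension) as the checkpoint/log name.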
    style_name = osp.split(args.style_image)[-1].split('.')[0]
    logger = Logger(save_path=args.save, json_name=f'{style_name}')
    model_saver = ModelSaver(save_path=args.save,
                             name_list=[
                                 f'{style_name}',
                                 f'{style_name}_{args.optimizer}',
                                 f'{style_name}_{args.scheduler}'
                             ])
    criterion = PerceptualLoss(args)
    model = ImageTransformNet()
    model = model_accelerate(args, model)
    model_saver.load(f'{style_name}', model=model)

    optimizer = Adam(model.parameters(), lr=args.lr)
Example 3
def train(args, get_dataloader_func=get_pix2pix_maps_dataloader):
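    """Train the image-to-map G/D pair, resuming from the last saved checkpoint;
    logs losses, writes TensorBoard summaries, and tracks FID every 10 epochs."""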
    logger = Logger(save_path=args.save, json_name='img2map')

    model_saver = ModelSaver(save_path=args.save,
                             name_list=[
                                 'G', 'D', 'E', 'G_optimizer', 'D_optimizer',
                                 'E_optimizer', 'G_scheduler', 'D_scheduler',
                                 'E_scheduler'
                             ])
    visualizer = Visualizer(
        keys=['image', 'encode_feature', 'fake', 'label', 'instance'])
    sw = SummaryWriter(args.tensorboard_path)
    G = get_G(args)
    D = get_D(args)
    model_saver.load('G', G)
    model_saver.load('D', D)

    # fid = get_fid(args)
    # logger.log(key='FID', data=fid)
    # logger.save_log()
    # logger.visualize()

    G_optimizer = Adam(G.parameters(), lr=args.G_lr, betas=(args.beta1, 0.999))
    D_optimizer = Adam(D.parameters(), lr=args.D_lr, betas=(args.beta1, 0.999))

    model_saver.load('G_optimizer', G_optimizer)
    model_saver.load('D_optimizer', D_optimizer)

    G_scheduler = get_hinge_scheduler(args, G_optimizer)
    D_scheduler = get_hinge_scheduler(args, D_optimizer)

    model_saver.load('G_scheduler', G_scheduler)
    model_saver.load('D_scheduler', D_scheduler)

    device = get_device(args)

    GANLoss = get_GANLoss(args)

    if args.use_ganFeat_loss:
        DFLoss = get_DFLoss(args)
    if args.use_vgg_loss:
        VGGLoss = get_VGGLoss(args)
    if args.use_low_level_loss:
        LLLoss = get_low_level_loss(args)

    epoch_now = len(logger.get_data('G_loss'))
    for epoch in range(epoch_now, args.epochs):
        G_loss_list = []
        D_loss_list = []

        data_loader = get_dataloader_func(args, train=True)
        data_loader = tqdm(data_loader)

        for step, sample in enumerate(data_loader):
            imgs = sample['image'].to(device)
            maps = sample['map'].to(device)

            # train the Discriminator
            D_optimizer.zero_grad()
            reals_maps = torch.cat([imgs.float(), maps.float()], dim=1)
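            # Detach so the discriminator update does not backpropagate into G.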
            fakes = G(imgs).detach()
            fakes_maps = torch.cat([imgs.float(), fakes.float()], dim=1)

            D_real_outs = D(reals_maps)
            D_real_loss = GANLoss(D_real_outs, True)

            D_fake_outs = D(fakes_maps)
            D_fake_loss = GANLoss(D_fake_outs, False)

            D_loss = 0.5 * (D_real_loss + D_fake_loss)
            D_loss = D_loss.mean()
            D_loss.backward()
            D_loss = D_loss.item()
            D_optimizer.step()

            # train generator and encoder
            G_optimizer.zero_grad()
            fakes = G(imgs)
            fakes_maps = torch.cat([imgs.float(), fakes.float()], dim=1)
            D_fake_outs = D(fakes_maps)

            gan_loss = GANLoss(D_fake_outs, True)

            G_loss = 0
            G_loss += gan_loss
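            # gan_loss is kept in the graph via G_loss; store only a scalar for logging.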
            gan_loss = gan_loss.mean().item()

            if args.use_vgg_loss:
                vgg_loss = VGGLoss(fakes, imgs)
                G_loss += args.lambda_feat * vgg_loss
                vgg_loss = vgg_loss.mean().item()
            else:
                vgg_loss = 0.

            if args.use_ganFeat_loss:
                df_loss = DFLoss(D_fake_outs, D_real_outs)
                G_loss += args.lambda_feat * df_loss
                df_loss = df_loss.mean().item()
            else:
                df_loss = 0.

            if args.use_low_level_loss:
                ll_loss = LLLoss(fakes, maps)
                G_loss += args.lambda_feat * ll_loss
                ll_loss = ll_loss.mean().item()
            else:
                ll_loss = 0.

            G_loss = G_loss.mean()
            G_loss.backward()
            G_loss = G_loss.item()

            G_optimizer.step()

            data_loader.write(
                f'Epochs:{epoch} | Dloss:{D_loss:.6f} | Gloss:{G_loss:.6f} '
                f'| GANloss:{gan_loss:.6f} | VGGloss:{vgg_loss:.6f} | DFloss:{df_loss:.6f} '
                f'| LLloss:{ll_loss:.6f} | lr:{get_lr(G_optimizer):.8f}')

            G_loss_list.append(G_loss)
            D_loss_list.append(D_loss)

            # display
            if args.display and step % args.display == 0:
                visualizer.display(transforms.ToPILImage()(imgs[0].cpu()),
                                   'image')
                visualizer.display(transforms.ToPILImage()(fakes[0].cpu()),
                                   'fake')
                visualizer.display(transforms.ToPILImage()(maps[0].cpu()),
                                   'label')

            # tensorboard log
            if args.tensorboard_log and step % args.tensorboard_log == 0:
                total_steps = epoch * len(data_loader) + step
                sw.add_scalar('Loss/G', G_loss, total_steps)
                sw.add_scalar('Loss/D', D_loss, total_steps)
                sw.add_scalar('Loss/gan', gan_loss, total_steps)
                sw.add_scalar('Loss/vgg', vgg_loss, total_steps)
                sw.add_scalar('Loss/df', df_loss, total_steps)
                sw.add_scalar('Loss/ll', ll_loss, total_steps)

                sw.add_scalar('LR/G', get_lr(G_optimizer), total_steps)
                sw.add_scalar('LR/D', get_lr(D_optimizer), total_steps)

                sw.add_image('img/real', imgs[0].cpu(), total_steps)
                sw.add_image('img/fake', fakes[0].cpu(), total_steps)
                sw.add_image('visual/label', maps[0].cpu(), total_steps)

        D_scheduler.step(epoch)
        G_scheduler.step(epoch)
        if epoch % 10 == 0 or epoch == args.epochs - 1:
            fid = eval(args,
                       model=G,
                       data_loader=get_dataloader_func(args, train=False))
            fid_history = logger.get_data('FID')
            logger.log(key='FID', data=fid)
            # Lower FID is better: checkpoint whenever this is the best score so far.
            if not fid_history or fid < min(fid_history):
                model_saver.save(f'G_{fid:.4f}', G)
                model_saver.save(f'D_{fid:.4f}', D)

        logger.log(key='D_loss',
                   data=sum(D_loss_list) / float(len(D_loss_list)))
        logger.log(key='G_loss',
                   data=sum(G_loss_list) / float(len(G_loss_list)))
        logger.save_log()
        logger.visualize()

        model_saver.save('G', G)
        model_saver.save('D', D)

        model_saver.save('G_optimizer', G_optimizer)
        model_saver.save('D_optimizer', D_optimizer)

        model_saver.save('G_scheduler', G_scheduler)
        model_saver.save('D_scheduler', D_scheduler)
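
# get_lr is used above but not defined in this snippet; a minimal sketch of a
# typical implementation (an assumption, not taken from the source):
def get_lr(optimizer):
    # Read the learning rate from the optimizer's first parameter group.
    return optimizer.param_groups[0]['lr']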
Example 4
def get_fid(args):
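    """Score saved fake images against reals with FID (lower is better)."""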
    fake_dir = osp.join(args.save, 'fake_result')
    real_dir = osp.join(args.save, 'real_result')
    fid = fid_score(real_path=real_dir, fake_path=fake_dir, gpu=str(args.gpu))
    print(f'===> fid score:{fid:.4f}')
    return fid


if __name__ == '__main__':
    args = config()
    try:
        get_fid(args)
    except Exception:
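        # fid_score failed, most likely because the result folders have not been
        # generated yet; this fallback only supports the no-encoder configuration.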
        assert args.feat_num == 0
        assert args.use_instance == 0
        model_saver = ModelSaver(save_path=args.save,
                                 name_list=[
                                     'G', 'D', 'E', 'G_optimizer',
                                     'D_optimizer', 'E_optimizer',
                                     'G_scheduler', 'D_scheduler',
                                     'E_scheduler'
                                 ])
        visualizer = Visualizer(
            keys=['image', 'encode_feature', 'fake', 'label', 'instance'])
        sw = SummaryWriter(args.tensorboard_path)
        G = get_G(args)
        model_saver.load('G', G)
        eval(args, G, get_pix2pix_maps_dataloader(args, train=False))
Example 5
def train(args, get_dataloader_func=get_cityscapes_dataloader):
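    """pix2pixHD-style training on Cityscapes: G is conditioned on one-hot labels,
    instance edges, and features from encoder E; resumes from saved checkpoints."""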
    logger = Logger(save_path=args.save, json_name='seg2img')

    model_saver = ModelSaver(save_path=args.save,
                             name_list=['G', 'D', 'E', 'G_optimizer', 'D_optimizer', 'E_optimizer',
                                        'G_scheduler', 'D_scheduler', 'E_scheduler'])
    visualizer = Visualizer(keys=['image', 'encode_feature', 'fake', 'label', 'instance'])
    sw = SummaryWriter(args.tensorboard_path)
    G = get_G(args)
    D = get_D(args)
    E = get_E(args)
    model_saver.load('G', G)
    model_saver.load('D', D)
    model_saver.load('E', E)

    G_optimizer = Adam(G.parameters(), lr=args.G_lr, betas=(args.beta1, 0.999))
    D_optimizer = Adam(D.parameters(), lr=args.D_lr, betas=(args.beta1, 0.999))
    E_optimizer = Adam(E.parameters(), lr=args.E_lr, betas=(args.beta1, 0.999))

    model_saver.load('G_optimizer', G_optimizer)
    model_saver.load('D_optimizer', D_optimizer)
    model_saver.load('E_optimizer', E_optimizer)

    G_scheduler = get_hinge_scheduler(args, G_optimizer)
    D_scheduler = get_hinge_scheduler(args, D_optimizer)
    E_scheduler = get_hinge_scheduler(args, E_optimizer)

    model_saver.load('G_scheduler', G_scheduler)
    model_saver.load('D_scheduler', D_scheduler)
    model_saver.load('E_scheduler', E_scheduler)

    device = get_device(args)

    GANLoss = get_GANLoss(args)
    if args.use_ganFeat_loss:
        DFLoss = get_DFLoss(args)
    if args.use_vgg_loss:
        VGGLoss = get_VGGLoss(args)

    epoch_now = len(logger.get_data('G_loss'))
    for epoch in range(epoch_now, args.epochs):
        G_loss_list = []
        D_loss_list = []

        data_loader = get_dataloader_func(args, train=True)
        data_loader = tqdm(data_loader)

        for step, sample in enumerate(data_loader):
            imgs = sample['image'].to(device)
            instances = sample['instance'].to(device)
            labels = sample['label'].to(device)
            smasks = sample['smask'].to(device)
            # print(smasks.shape)

            instances_edge = get_edges(instances)
            one_hot_labels = label_to_one_hot(smasks.long(), n_class=args.label_nc)

            # Encoder out
            encode_features = E(imgs, instances)

            # train the Discriminator
            D_optimizer.zero_grad()
            labels_instE_encodeF = torch.cat([one_hot_labels.float(), instances_edge.float(), encode_features.float()],
                                             dim=1)
            fakes = G(labels_instE_encodeF).detach()

            labels_instE_realimgs = torch.cat([one_hot_labels.float(), instances_edge.float(), imgs.float()], dim=1)
            D_real_outs = D(labels_instE_realimgs)
            D_real_loss = GANLoss(D_real_outs, True)

            labels_instE_fakeimgs = torch.cat([one_hot_labels.float(), instances_edge.float(), fakes.float()], dim=1)
            D_fake_outs = D(labels_instE_fakeimgs)
            D_fake_loss = GANLoss(D_fake_outs, False)

            D_loss = 0.5 * (D_real_loss + D_fake_loss)
            D_loss = D_loss.mean()
            D_loss.backward()
            D_loss = D_loss.item()
            D_optimizer.step()

            # train generator and encoder
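            # G and E update jointly: fakes depend on encode_features, so E receives gradients too.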
            G_optimizer.zero_grad()
            E_optimizer.zero_grad()
            fakes = G(labels_instE_encodeF)
            labels_instE_fakeimgs = torch.cat([one_hot_labels.float(), instances_edge.float(), fakes.float()], dim=1)
            D_fake_outs = D(labels_instE_fakeimgs)

            gan_loss = GANLoss(D_fake_outs, True)

            G_loss = 0
            G_loss += gan_loss
            gan_loss = gan_loss.mean().item()

            if args.use_vgg_loss:
                vgg_loss = VGGLoss(fakes, imgs)
                G_loss += args.lambda_feat * vgg_loss
                vgg_loss = vgg_loss.mean().item()
            else:
                vgg_loss = 0.

            if args.use_ganFeat_loss:
                df_loss = DFLoss(D_fake_outs, D_real_outs)
                G_loss += args.lambda_feat * df_loss
                df_loss = df_loss.mean().item()
            else:
                df_loss = 0.

            G_loss = G_loss.mean()
            G_loss.backward()
            G_loss = G_loss.item()

            G_optimizer.step()
            E_optimizer.step()

            data_loader.write(f'Epochs:{epoch} | Dloss:{D_loss:.6f} | Gloss:{G_loss:.6f} '
                              f'| GANloss:{gan_loss:.6f} | VGGloss:{vgg_loss:.6f} '
                              f'| DFloss:{df_loss:.6f} | lr:{get_lr(G_optimizer):.8f}')

            G_loss_list.append(G_loss)
            D_loss_list.append(D_loss)

            # display
            if args.display and step % args.display == 0:
                visualizer.display(transforms.ToPILImage()(encode_features[0].cpu()), 'encode_feature')
                visualizer.display(transforms.ToPILImage()(imgs[0].cpu()), 'image')
                visualizer.display(transforms.ToPILImage()(fakes[0].cpu()), 'fake')
                visualizer.display(transforms.ToPILImage()(labels[0].cpu() * 15), 'label')
                visualizer.display(transforms.ToPILImage()(instances[0].cpu() * 15), 'instance')

            # tensorboard log
            if args.tensorboard_log and step % args.tensorboard_log == 0:
                total_steps = epoch * len(data_loader) + step
                sw.add_scalar('Loss/G', G_loss, total_steps)
                sw.add_scalar('Loss/D', D_loss, total_steps)
                sw.add_scalar('Loss/gan', gan_loss, total_steps)
                sw.add_scalar('Loss/vgg', vgg_loss, total_steps)
                sw.add_scalar('Loss/df', df_loss, total_steps)

                sw.add_scalar('LR/G', get_lr(G_optimizer), total_steps)
                sw.add_scalar('LR/D', get_lr(D_optimizer), total_steps)
                sw.add_scalar('LR/E', get_lr(E_optimizer), total_steps)

                sw.add_image('img/real', imgs[0].cpu(), total_steps)
                sw.add_image('img/fake', fakes[0].cpu(), total_steps)
                sw.add_image('visual/encode_feature', encode_features[0].cpu(), total_steps)
                sw.add_image('visual/instance', instances[0].cpu(), total_steps)
                sw.add_image('visual/label', labels[0].cpu(), total_steps)

        D_scheduler.step(epoch)
        G_scheduler.step(epoch)
        E_scheduler.step(epoch)

        logger.log(key='D_loss', data=sum(D_loss_list) / float(len(D_loss_list)))
        logger.log(key='G_loss', data=sum(G_loss_list) / float(len(G_loss_list)))
        logger.save_log()
        logger.visualize()

        model_saver.save('G', G)
        model_saver.save('D', D)
        model_saver.save('E', E)

        model_saver.save('G_optimizer', G_optimizer)
        model_saver.save('D_optimizer', D_optimizer)
        model_saver.save('E_optimizer', E_optimizer)

        model_saver.save('G_scheduler', G_scheduler)
        model_saver.save('D_scheduler', D_scheduler)
        model_saver.save('E_scheduler', E_scheduler)
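
# label_to_one_hot and get_edges are used above but not defined in this snippet.
# Minimal sketches of typical implementations (assumptions, not from the source):
def label_to_one_hot(labels, n_class):
    # labels: (N, 1, H, W) LongTensor of class ids -> (N, n_class, H, W) one-hot.
    one_hot = torch.zeros(labels.size(0), n_class, labels.size(2), labels.size(3),
                          device=labels.device)
    return one_hot.scatter_(1, labels, 1.0)


def get_edges(t):
    # Mark pixels whose instance id differs from a horizontal/vertical neighbour.
    edge = torch.zeros_like(t, dtype=torch.bool)
    edge[:, :, :, 1:] |= t[:, :, :, 1:] != t[:, :, :, :-1]
    edge[:, :, :, :-1] |= t[:, :, :, 1:] != t[:, :, :, :-1]
    edge[:, :, 1:, :] |= t[:, :, 1:, :] != t[:, :, :-1, :]
    edge[:, :, :-1, :] |= t[:, :, 1:, :] != t[:, :, :-1, :]
    return edge.float()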
Example 6
import os.path as osp

from torch.optim import Adam
from tqdm import tqdm

from src.perceptual_loss.utils import normalize_tensor_transform, load_image, gram, save_image
from src.perceptual_loss.network import ImageTransformNet
from src.perceptual_loss.image_dataset import get_image_dataset
from src.perceptual_loss.train_config import config
from src.perceptual_loss.perceptual_loss import PerceptualLoss
from src.utils.visualizer import Visualizer
from src.utils.train_utils import model_accelerate, get_device
from src.utils.logger import ModelSaver, Logger

if __name__ == '__main__':

    args = config()
    visualizer = Visualizer(keys=['img'])
    style_name = osp.split(args.style_image)[-1].split('.')[0]
    logger = Logger(save_path=args.save, json_name=f'{style_name}')
    model_saver = ModelSaver(save_path=args.save,
                             name_list=[
                                 f'{style_name}',
                                 f'{style_name}_{args.optimizer}',
                                 f'{style_name}_{args.scheduler}'
                             ])
    criterion = PerceptualLoss(args)
    model = ImageTransformNet()
    model = model_accelerate(args, model)
    model_saver.load(f'{style_name}', model=model)

    optimizer = Adam(model.parameters(), lr=args.lr)
    model_saver.load(f'{style_name}_{args.optimizer}', model=optimizer)
Example 7
"""
cases = DataSets.Yeast()
lrate = 0.001
optimizer = tf.train.AdagradOptimizer(lrate)
cman = CustomCaseManager(cases, None, 1, .1, .1)
options = Scenarios.get_custom_options([80, 80], tf.nn.relu, tf.nn.softmax, tf.losses.softmax_cross_entropy,
                                       lrate, [-.5, .5], optimizer, 10, 1000, 10, session_tracker, cman, 10,
                                       cman.get_testing_cases)
"""


# Configure an MNIST scenario and run the full training sequence.
options = Scenarios.get_mnist_options(session_tracker)
gann = G(options)
GT(gann.generate_full_run_sequence())

# Visualization tool
v = Visualizer(session_tracker, options, False)
v.insert_error_frame()
v.insert_dendro_frame(layers='all')
v.insert_output_hinton_frame(layers='all')
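# Bias and weight frames are inserted twice: once as plain matrices
# (hinton=False), once as Hinton diagrams.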
v.insert_bias_frame(hinton=False)
v.insert_weight_frame(hinton=False)
v.insert_bias_frame()
v.insert_weight_frame()
v.start()
