Example #1
    def TrainDiscriminator(
        real_A: tp.Numpy.Placeholder((1, 3, args.crop_size, args.crop_size),
                                     dtype=flow.float32),
        fake_A: tp.Numpy.Placeholder((1, 3, args.crop_size, args.crop_size),
                                     dtype=flow.float32),
        real_B: tp.Numpy.Placeholder((1, 3, args.crop_size, args.crop_size),
                                     dtype=flow.float32),
        fake_B: tp.Numpy.Placeholder((1, 3, args.crop_size, args.crop_size),
                                     dtype=flow.float32),
    ):
        with flow.scope.placement("gpu", "0:0-0"):
            # Calculate GAN loss for discriminator D_A
            # Real
            pred_real_B = networks.define_D(real_B,
                                            "netD_A",
                                            ndf=args.ndf,
                                            n_layers_D=3,
                                            trainable=True,
                                            reuse=True)
            loss_D_A_real = networks.GANLoss(pred_real_B, True)
            # Fake
            pred_fake_B = networks.define_D(fake_B,
                                            "netD_A",
                                            ndf=args.ndf,
                                            n_layers_D=3,
                                            trainable=True,
                                            reuse=True)
            loss_D_A_fake = networks.GANLoss(pred_fake_B, False)
            # Combined loss and calculate gradients
            loss_D_A = (loss_D_A_real + loss_D_A_fake) * 0.5

            # Calculate GAN loss for discriminator D_B
            # Real
            pred_real_A = networks.define_D(real_A,
                                            "netD_B",
                                            ndf=args.ndf,
                                            n_layers_D=3,
                                            trainable=True,
                                            reuse=True)
            loss_D_B_real = networks.GANLoss(pred_real_A, True)
            # Fake
            pred_fake_A = networks.define_D(fake_A,
                                            "netD_B",
                                            ndf=args.ndf,
                                            n_layers_D=3,
                                            trainable=True,
                                            reuse=True)
            loss_D_B_fake = networks.GANLoss(pred_fake_A, False)
            # Combined loss and calculate gradients
            loss_D_B = (loss_D_B_real + loss_D_B_fake) * 0.5

            loss_D = loss_D_A + loss_D_B

            flow.optimizer.Adam(
                flow.optimizer.PiecewiseConstantScheduler([], [args.learning_rate]),
                beta1=0.5,
            ).minimize(loss_D)

        return loss_D
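
A hedged invocation sketch: assuming TrainDiscriminator is decorated with @flow.global_function(type="train") in OneFlow's legacy lazy API (the decorator and the args namespace are outside this snippet), one training step could be driven with numpy inputs shaped like the placeholders:

import numpy as np

shape = (1, 3, args.crop_size, args.crop_size)   # must match the placeholders
real_A = np.random.uniform(-1, 1, shape).astype(np.float32)
fake_A = np.random.uniform(-1, 1, shape).astype(np.float32)
real_B = np.random.uniform(-1, 1, shape).astype(np.float32)
fake_B = np.random.uniform(-1, 1, shape).astype(np.float32)

# Calling the compiled job runs one optimizer step; how the returned loss is
# materialized (direct ndarray vs. .get()) depends on the job's return annotation.
loss_D = TrainDiscriminator(real_A, fake_A, real_B, fake_B)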
Example #2
    def __init__(self, opt):
        super().__init__()
        self.is_train = opt.is_train
        self.gpu_ids = opt.gpu_ids
        self.save_dir = opt.checkpoints_dir
        self.device = torch.device('cuda:{}'.format(
            self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu')

        # load/define networks
        #self.netG = networks.define_X(1, 3, opt.init_type, opt.init_gain, self.gpu_ids)
        self.netG = networks.define_G(3, 2, opt.ngf_S12, opt.netG,
                                      opt.norm_G_D, not opt.no_dropout,
                                      opt.init_type, opt.init_gain,
                                      self.gpu_ids)

        if self.is_train:
            # define loss functions
            self.criterionGAN = networks.GANLoss(
                use_lsgan=not opt.no_lsgan).to(self.device)
            self.criterionL1 = torch.nn.L1Loss()

            # initialize optimizers
            self.optimizers = []
            self.optimizer_G = torch.optim.Adam(self.netG.parameters(),
                                                lr=opt.lr,
                                                betas=(opt.beta1, 0.999))
            self.optimizers.append(self.optimizer_G)
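
For context, a minimal sketch of how these two criteria are usually combined in a pix2pix-style generator update; netD and lambda_L1 are assumptions here, since the snippet only builds the generator and its optimizer:

    def backward_G(self, real_A, real_B, netD, lambda_L1=100.0):
        # Hypothetical helper: adversarial term on the conditional pair plus L1.
        fake_B = self.netG(real_A)
        pred_fake = netD(torch.cat((real_A, fake_B), 1))
        loss_G_GAN = self.criterionGAN(pred_fake, True)
        loss_G_L1 = self.criterionL1(fake_B, real_B) * lambda_L1
        loss_G = loss_G_GAN + loss_G_L1
        self.optimizer_G.zero_grad()
        loss_G.backward()
        self.optimizer_G.step()
        return loss_G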
Example #3
    def __init__(self, input_nc=3, output_nc=3, gpu_id=None):
        self.device = torch.device(
            f"cuda:{gpu_id}" if gpu_id is not None else 'cpu')
        print(f"Using device {self.device}")

        # Hyperparameters
        self.lambda_idt = 0.5
        self.lambda_A = 10.0
        self.lambda_B = 10.0

        # Define generator networks
        self.netG_A = networks.define_netG(input_nc,
                                           output_nc,
                                           ngf=64,
                                           n_blocks=9,
                                           device=self.device)
        self.netG_B = networks.define_netG(output_nc,
                                           input_nc,
                                           ngf=64,
                                           n_blocks=9,
                                           device=self.device)

        # Define discriminator networks
        self.netD_A = networks.define_netD(output_nc,
                                           ndf=64,
                                           n_layers=3,
                                           device=self.device)
        self.netD_B = networks.define_netD(input_nc,
                                           ndf=64,
                                           n_layers=3,
                                           device=self.device)

        # Define image pools
        self.fake_A_pool = utils.ImagePool(pool_size=50)
        self.fake_B_pool = utils.ImagePool(pool_size=50)

        # Define loss functions
        self.criterionGAN = networks.GANLoss().to(self.device)
        self.criterionCycle = torch.nn.L1Loss()
        self.criterionIdt = torch.nn.L1Loss()

        # Define optimizers
        netG_params = itertools.chain(self.netG_A.parameters(),
                                      self.netG_B.parameters())
        netD_params = itertools.chain(self.netD_A.parameters(),
                                      self.netD_B.parameters())
        self.optimizer_G = torch.optim.Adam(netG_params,
                                            lr=0.0002,
                                            betas=(0.5, 0.999))
        self.optimizer_D = torch.optim.Adam(netD_params,
                                            lr=0.0002,
                                            betas=(0.5, 0.999))

        # Learning rate schedulers
        self.scheduler_G = utils.get_lr_scheduler(self.optimizer_G)
        self.scheduler_D = utils.get_lr_scheduler(self.optimizer_D)
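
The utils.ImagePool buffer above implements the standard CycleGAN history trick: the discriminator sees a mix of current and previously generated images. A minimal sketch of such a pool (a common pattern, not necessarily the exact utils implementation):

import random
import torch

class ImagePool:
    def __init__(self, pool_size):
        self.pool_size = pool_size
        self.images = []

    def query(self, images):
        if self.pool_size == 0:      # pool disabled: pass images through
            return images
        out = []
        for image in images:
            image = image.unsqueeze(0)
            if len(self.images) < self.pool_size:
                self.images.append(image)             # fill the pool first
                out.append(image)
            elif random.random() > 0.5:
                idx = random.randrange(len(self.images))
                out.append(self.images[idx].clone())  # return an old image
                self.images[idx] = image              # and stash the new one
            else:
                out.append(image)
        return torch.cat(out, 0)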
Example #4
    def __init__(self, opt):
        """Initialize the CycleGAN class.

        Parameters:
            opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
        """
        BaseModel.__init__(self, opt)
        # specify the training losses you want to print out. The training/test scripts will call <BaseModel.get_current_losses>
        self.loss_names = ['D_A', 'G_A', 'cycle_A', 'idt_A', 'D_B', 'G_B', 'cycle_B', 'idt_B']
        # # specify the images you want to save/display. The training/test scripts will call <BaseModel.get_current_visuals>
        # visual_names_A = ['real_A', 'fake_B', 'rec_A']
        # visual_names_B = ['real_B', 'fake_A', 'rec_B']
        # if self.isTrain and self.opt.lambda_identity > 0.0:  # if identity loss is used, we also visualize idt_A=G_A(B) and idt_B=G_B(A)
        #     visual_names_A.append('idt_B')
        #     visual_names_B.append('idt_A')
        #
        # self.visual_names = visual_names_A + visual_names_B  # combine visualizations for A and B
        # specify the models you want to save to the disk. The training/test scripts will call <BaseModel.save_networks> and <BaseModel.load_networks>.
        if self.isTrain:
            self.model_names = ['G_A', 'G_B', 'D_A', 'D_B']
        else:  # during test time, only load Gs
            self.model_names = ['G_A', 'G_B']

        # define networks (both Generators and discriminators)
        # Code (vs. paper): G_A (G), G_B (F), D_A (D_Y), D_B (D_X)
        # input file shape (batch, length, dims-256)
        self.netG_A = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, opt.norm,
                                        not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)
        self.netG_B = networks.define_G(opt.output_nc, opt.input_nc, opt.ngf, opt.netG, opt.norm,
                                        not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)

        if self.isTrain:  # define discriminators
            self.netD_A = networks.define_D(opt.output_nc, opt.ndf, opt.netD,
                                            opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids)
            self.netD_B = networks.define_D(opt.input_nc, opt.ndf, opt.netD,
                                            opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids)

        if self.isTrain:
            if opt.lambda_identity > 0.0:  # only works when input and output images have the same number of channels
                assert(opt.input_nc == opt.output_nc)
            self.fake_A_pool = AudioPool(opt.pool_size)  # create image buffer to store previously generated images
            self.fake_B_pool = AudioPool(opt.pool_size)  # create image buffer to store previously generated images
            # define loss functions
            self.criterionGAN = networks.GANLoss(opt.gan_mode).to(self.device)  # define GAN loss.
            self.criterionCycle = nn.L1Loss()
            self.criterionIdt = nn.L1Loss()
            # initialize optimizers; schedulers will be automatically created by function <BaseModel.setup>.
            self.optimizer_G = torch.optim.Adam(itertools.chain(self.netG_A.parameters(), self.netG_B.parameters()), lr=opt.lr, betas=(opt.beta1, 0.999))
            self.optimizer_D = torch.optim.Adam(itertools.chain(self.netD_A.parameters(), self.netD_B.parameters()), lr=opt.lr, betas=(opt.beta1, 0.999))
            self.optimizers.append(self.optimizer_G)
            self.optimizers.append(self.optimizer_D)
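
Nearly every example on this page instantiates networks.GANLoss. A minimal sketch of the pix2pix/CycleGAN-style version that takes a gan_mode string, covering only the 'lsgan' and 'vanilla' modes (a sketch of the common pattern, not any one repo's exact class):

import torch
import torch.nn as nn

class GANLoss(nn.Module):
    def __init__(self, gan_mode='lsgan', target_real_label=1.0, target_fake_label=0.0):
        super().__init__()
        # buffers move with the module across devices, e.g. .to(self.device)
        self.register_buffer('real_label', torch.tensor(target_real_label))
        self.register_buffer('fake_label', torch.tensor(target_fake_label))
        if gan_mode == 'lsgan':
            self.loss = nn.MSELoss()            # least-squares GAN
        elif gan_mode == 'vanilla':
            self.loss = nn.BCEWithLogitsLoss()  # original cross-entropy GAN
        else:
            raise NotImplementedError('gan mode %s not implemented' % gan_mode)

    def forward(self, prediction, target_is_real):
        target = self.real_label if target_is_real else self.fake_label
        return self.loss(prediction, target.expand_as(prediction))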
Example #5
    def __init__(self, opt):
        """ 
        Parameters:
        """
        super(CycleGANModel, self).__init__()
        
        # Setup
        ensure_existance_paths(opt)  # create the directories needed for saving outputs
        
        self.opt = opt
        self.gpu_ids = opt.gpu_ids
        self.device = torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids and torch.cuda.is_available() else torch.device('cpu')
        self.lambda_idt = 0.5
        self.loss_lambda = 10.0
        self.visual_names = ['real_A', 'fake_B', 'rec_A', 'idt_B', 'real_B', 'fake_A', 'rec_B', 'idt_A']
        self.name = self.opt.name
        
        # Define models
        self.model_names = ['G_A', 'G_B', 'D_A', 'D_B']
        self.netG_A = networks.to_device(networks.ResNetGenerator(opt.n_input, opt.n_output, opt.forward_mask),self.gpu_ids)
        self.netG_B = networks.to_device(networks.ResNetGenerator(opt.n_input, opt.n_output, opt.forward_mask),self.gpu_ids)
        self.netD_A = networks.to_device(networks.NLayerDiscriminator(opt.n_output),self.gpu_ids)
        self.netD_B = networks.to_device(networks.NLayerDiscriminator(opt.n_output),self.gpu_ids)
        
        # Define losses
        self.criterionGAN = networks.GANLoss().to(self.device)
        self.criterionCycle = nn.L1Loss().to(self.device)
        self.criterionIdt = nn.L1Loss().to(self.device)
        self.loss_names = ['D_A', 'G_A', 'cycle_A', 'idt_A', 'D_B', 'G_B', 'cycle_B', 'idt_B']
        
        # Define optimizers
        self.optimizers = []
        self.optimizer_G = torch.optim.Adam(itertools.chain(self.netG_A.parameters(), self.netG_B.parameters()), lr=opt.lr, betas=(opt.beta1, 0.999))
        self.optimizers.append(self.optimizer_G)
        self.optimizer_D = torch.optim.Adam(itertools.chain(self.netD_A.parameters(), self.netD_B.parameters()), lr=opt.lr, betas=(opt.beta1, 0.999))
        self.optimizers.append(self.optimizer_D)
        
        # Define schedulers
        self.schedulers = [networks.get_optimizer_scheduler(optimizer,opt) for optimizer in self.optimizers]

        self.set_train()

        self.step = 0

        wandb.init(
            entity="star-witchers",
            project="cycleGAN",
            config=self.opt,
            name=self.name,
            ) 
Example #6
    def initialize(self, opt, writer=None):
        BaseModel.initialize(self, opt)
        self.writer = writer
        self.num_step = 0
        self.opt = opt
        if self.opt.use_lbp_network:
            self.model_names = ['G', 'LBP', 'D', 'D2']
        else:
            self.model_names = ['G', 'D']

        self.netG = networks.define_G(self.opt)
        if self.opt.use_lbp_network:
            self.netLBP = networks.define_LBP(self.opt)
        self.netD = networks.define_D(
            opt.input_nc, opt.ndf, self.opt.device)  # Discriminator for netG
        if self.opt.use_lbp_network:
            self.netD2 = networks.define_D(
                opt.input_nc - 2, opt.ndf,
                self.opt.device)  # Discriminator for netLBP

        self.vgg16_extractor = util.VGG16FeatureExtractor().to(self.opt.device)

        self.criterionGAN = networks.GANLoss(gan_type=opt.gan_type).to(
            self.opt.device)
        self.criterionL1 = torch.nn.L1Loss()
        self.criterionL2 = torch.nn.MSELoss()
        self.criterionL1_mask = networks.Discounted_L1(opt).to(self.opt.device)

        self.criterionL2_style_loss = torch.nn.MSELoss()
        self.criterionL2_perceptual_loss = torch.nn.MSELoss()

        self.optimizer_G = torch.optim.Adam(self.netG.parameters(),
                                            lr=opt.lr,
                                            betas=(0.5, 0.999))
        if self.opt.use_lbp_network:
            self.optimizer_LBP = torch.optim.Adam(self.netLBP.parameters(),
                                                  lr=opt.lr,
                                                  betas=(0.5, 0.999))
        self.optimizer_D = torch.optim.Adam(self.netD.parameters(),
                                            lr=opt.lr,
                                            betas=(0.5, 0.999))

        if self.opt.use_lbp_network:
            self.optimizer_D2 = torch.optim.Adam(self.netD2.parameters(),
                                                 lr=opt.lr,
                                                 betas=(0.5, 0.999))

        _, self.rand_t, self.rand_l = util.create_rand_mask(self.opt)
Example #7
    def initialize(self, opt):
        self.opt = opt
        self.init_architecture(opt)

        if opt.use_gpu:
            self.Tensor = torch.cuda.FloatTensor
        else:
            self.Tensor = torch.FloatTensor

        self.critGAN = networks.GANLoss(mse_loss=True, tensor=self.Tensor)
        self.critL1 = torch.nn.L1Loss()

        if self.opt.isTrain:
            self.schedulers = []
            for optimizer in self.optimizers:
                self.schedulers.append(get_scheduler(optimizer, opt))
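
The get_scheduler helper used here mirrors the pix2pix/CycleGAN utility; a sketch of its common 'linear' policy, assuming opt carries epoch_count, niter and niter_decay (constant lr for niter epochs, then linear decay to zero over niter_decay epochs):

from torch.optim import lr_scheduler

def get_scheduler(optimizer, opt):
    def lambda_rule(epoch):
        # multiplicative lr factor: 1.0 during the warm phase, then linear decay
        return 1.0 - max(0, epoch + opt.epoch_count - opt.niter) / float(opt.niter_decay + 1)
    return lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)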
Example #8
    def initialize(self, opt, train_mode=True):
        # Model transforms from A --> B and uses Adv as the
        # adversarial example.
        BaseModel.initialize(self, opt)
        self.train_mode = train_mode
        # define tensors
        self.input_B = self.Tensor(opt['batchSize'], opt['input_nc'],
                                   opt['B_height'], opt['B_width'])

        self.input_A = self.Tensor(opt['batchSize'], opt['output_nc'],
                                   opt['A_height'], opt['A_width'])

        # load/define networks
        self.netG = networks.define_G(opt['input_nc'], opt['output_nc'],
                                      opt['ngf'], opt['norm'], self.gpu_ids)

        if self.train_mode:
            use_sigmoid = opt['no_lsgan']
            self.netD = networks.define_D(opt['input_nc'] + opt['output_nc'],
                                          opt['ndf'], opt['which_model_netD'],
                                          opt['n_layers_D'], use_sigmoid,
                                          self.gpu_ids)

        if self.train_mode:
            # self.fake_AB_pool = ImagePool(opt['pool_size'])
            self.old_lr = opt['lr']
            # define loss functions
            self.criterionGAN = networks.GANLoss(use_lsgan=not opt['no_lsgan'],
                                                 tensor=self.Tensor)
            self.content_loss = torch.nn.MSELoss()

            # initialize optimizers
            self.optimizer_G = torch.optim.Adam(self.netG.parameters(),
                                                lr=opt['lr'],
                                                betas=(opt['beta1'], 0.999))
            self.optimizer_D = torch.optim.Adam(self.netD.parameters(),
                                                lr=opt['lr'],
                                                betas=(opt['beta1'], 0.999))

            print('---------- Networks initialized -------------')
            networks.print_network(self.netG)
            networks.print_network(self.netD)
            print('-----------------------------------------------')
Example #9
    def __init__(self, opt):
        self.opt = opt
        self.device = "cuda"
        self.netG_A = networks.ResnetGenerator(n_blocks=opt.n_blocks).to(
            self.device)
        self.netG_B = networks.ResnetGenerator(n_blocks=opt.n_blocks).to(
            self.device)

        self.netD_A = networks.NLayerDiscriminator().to(self.device)
        self.netD_B = networks.NLayerDiscriminator().to(self.device)

        self.fake_A_pool = ImagePool(opt.pool_size)
        self.fake_B_pool = ImagePool(opt.pool_size)

        self.criterionGAN = networks.GANLoss("lsgan").to(self.device)
        self.criterionCycle = flow.nn.L1Loss()
        self.criterionIdt = flow.nn.L1Loss()

        self.optimizer_G = flow.optim.Adam(
            itertools.chain(self.netG_A.parameters(),
                            self.netG_B.parameters()),
            lr=opt.lr,
            betas=(opt.beta1, opt.beta2),
        )
        self.optimizer_D = flow.optim.Adam(
            itertools.chain(self.netD_A.parameters(),
                            self.netD_B.parameters()),
            lr=opt.lr,
            betas=(opt.beta1, opt.beta2),
        )
        self.optimizers = [self.optimizer_G, self.optimizer_D]
        self.schedulers = [
            flow.optim.lr_scheduler.CosineDecayLR(optimizer,
                                                  decay_steps=100,
                                                  alpha=0.0)
            for optimizer in self.optimizers
        ]
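
A sketch of the discriminator step these pools and criteria are usually paired with; the helper name backward_D_basic follows the CycleGAN reference code and is not part of the snippet:

    def backward_D_basic(self, netD, real, fake):
        # Standard CycleGAN discriminator objective: real -> 1, fake -> 0.
        pred_real = netD(real)
        loss_D_real = self.criterionGAN(pred_real, True)
        pred_fake = netD(fake.detach())   # detach so no gradient reaches G
        loss_D_fake = self.criterionGAN(pred_fake, False)
        loss_D = (loss_D_real + loss_D_fake) * 0.5
        loss_D.backward()
        return loss_D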
Example #10
    def initialize(self, opt):
        super(DesignerGAN, self).initialize(opt)
        ###################################
        # define data tensors
        ###################################
        # self.input['img'] = self.Tensor()
        # self.input['img_attr'] = self.Tensor()
        # self.input['lm_map'] = self.Tensor()
        # self.input['seg_mask'] = self.Tensor()
        # self.input['attr_label'] = self.Tensor()
        # self.input['id'] = []

        ###################################
        # load/define networks
        ###################################

        # Todo modify networks.define_G
        # 1. add specified generator networks

        self.netG = networks.define_G(opt)
        self.netAE, self.opt_AE = network_loader.load_attribute_encoder_net(
            id=opt.which_model_AE, gpu_ids=opt.gpu_ids)
        if opt.which_model_FeatST != 'none':
            self.netFeatST, self.opt_FeatST = network_loader.load_feature_spatial_transformer_net(
                id=opt.which_model_FeatST, gpu_ids=opt.gpu_ids)
            self.use_FeatST = True
            # assert self.opt_FeatST.shape_encode == self.opt.shape_encode, 'GAN model and FeatST model has different shape encode mode'
            # assert self.opt_FeatST.input_mask_mode == self.opt.input_mask_mode, 'GAN model and FeatST model has different segmentation input mode'
        else:
            self.use_FeatST = False

        if self.is_train:
            self.netD = networks.define_D(opt)
            if opt.which_model_init_netG != 'none' and not opt.continue_train:
                self.load_network(self.netG, 'G', 'latest',
                                  opt.which_model_init_netG)

        if not self.is_train or opt.continue_train:
            self.load_network(self.netG, 'G', opt.which_epoch)
            if self.is_train:
                self.load_network(self.netD, 'D', opt.which_epoch)

        if self.is_train:
            self.fake_pool = ImagePool(opt.pool_size)

            ###################################
            # define loss functions and loss buffers
            ###################################
            if opt.which_gan in {'dcgan', 'lsgan'}:
                self.crit_GAN = networks.GANLoss(
                    use_lsgan=opt.which_gan == 'lsgan', tensor=self.Tensor)
            else:
                # WGAN loss will be calculated in self.backward_D_wgangp and self.backward_G
                self.crit_GAN = None
            self.crit_L1 = nn.L1Loss()
            self.crit_attr = nn.BCELoss()

            self.loss_functions = []
            self.loss_functions.append(self.crit_GAN)
            self.loss_functions.append(self.crit_L1)
            self.loss_functions.append(self.crit_attr)

            if self.opt.loss_weight_vgg > 0:
                self.crit_vgg = networks.VGGLoss(self.gpu_ids)
                self.loss_functions.append(self.crit_vgg)

            ###################################
            # create optimizers
            ###################################
            self.schedulers = []
            self.optimizers = []

            self.optim_G = torch.optim.Adam(self.netG.parameters(),
                                            lr=opt.lr,
                                            betas=(opt.beta1, opt.beta2))
            self.optim_D = torch.optim.Adam(self.netD.parameters(),
                                            lr=opt.lr,
                                            betas=(opt.beta1, opt.beta2))
            self.optimizers.append(self.optim_G)
            self.optimizers.append(self.optim_D)

            for optim in self.optimizers:
                self.schedulers.append(networks.get_scheduler(optim, opt))

        # color transformation from std to imagenet
        # img_imagenet = img_std * a + b
        self.trans_std_to_imagenet = {
            'a':
            Variable(self.Tensor([0.5 / 0.229, 0.5 / 0.224, 0.5 / 0.225]),
                     requires_grad=False).view(3, 1, 1),
            'b':
            Variable(self.Tensor([(0.5 - 0.485) / 0.229, (0.5 - 0.456) / 0.224,
                                  (0.5 - 0.406) / 0.225]),
                     requires_grad=False).view(3, 1, 1)
        }
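
The trans_std_to_imagenet constants follow from re-normalizing: an image x normalized with mean 0.5 / std 0.5 has pixel value x * 0.5 + 0.5, so its ImageNet-normalized value is (x * 0.5 + 0.5 - mean) / std = x * (0.5 / std) + (0.5 - mean) / std, which is exactly the a and b above. A quick numerical check:

import torch

mean = torch.tensor([0.485, 0.456, 0.406]).view(3, 1, 1)
std = torch.tensor([0.229, 0.224, 0.225]).view(3, 1, 1)
a = 0.5 / std             # matches the 'a' constants above
b = (0.5 - mean) / std    # matches the 'b' constants above

x = torch.rand(3, 4, 4) * 2 - 1   # image in [-1, 1] (mean=std=0.5 normalization)
assert torch.allclose((x * 0.5 + 0.5 - mean) / std, x * a + b, atol=1e-6)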
Example #11
    def initialize(self, opt):
        super(VUnetPoseTransferModel, self).initialize(opt)
        ###################################
        # define transformer
        ###################################
        self.netT = networks.VariationalUnet(
            input_nc_dec=self.get_pose_dim(opt.pose_type),
            input_nc_enc=self.get_appearance_dim(opt.appearance_type),
            output_nc=self.get_output_dim(opt.output_type),
            nf=opt.vunet_nf,
            max_nf=opt.vunet_max_nf,
            input_size=opt.fine_size,
            n_latent_scales=opt.vunet_n_latent_scales,
            bottleneck_factor=opt.vunet_bottleneck_factor,
            box_factor=opt.vunet_box_factor,
            n_residual_blocks=2,
            norm_layer=networks.get_norm_layer(opt.norm),
            activation=nn.ReLU(False),
            use_dropout=False,
            gpu_ids=opt.gpu_ids,
            output_tanh=False,
        )
        if opt.gpu_ids:
            self.netT.cuda()
        networks.init_weights(self.netT, init_type=opt.init_type)
        ###################################
        # define discriminator
        ###################################
        self.use_GAN = self.is_train and opt.loss_weight_gan > 0
        if self.use_GAN:
            self.netD = networks.define_D_from_params(
                input_nc=(3 + self.get_pose_dim(opt.pose_type)) if opt.D_cond else 3,
                ndf=opt.D_nf,
                which_model_netD='n_layers',
                n_layers_D=opt.D_n_layer,
                norm=opt.norm,
                which_gan=opt.which_gan,
                init_type=opt.init_type,
                gpu_ids=opt.gpu_ids)
        else:
            self.netD = None
        ###################################
        # loss functions
        ###################################
        self.crit_psnr = networks.PSNR()
        self.crit_ssim = networks.SSIM()

        if self.is_train:
            self.optimizers = []
            self.crit_vgg = networks.VGGLoss_v2(self.gpu_ids, opt.content_layer_weight, opt.style_layer_weight, opt.shifted_style)
            # self.crit_vgg_old = networks.VGGLoss(self.gpu_ids)
            self.optim = torch.optim.Adam(self.netT.parameters(), lr=opt.lr, betas=(opt.beta1, opt.beta2), weight_decay=opt.weight_decay)
            self.optimizers += [self.optim]

            if self.use_GAN:
                self.crit_GAN = networks.GANLoss(use_lsgan=opt.which_gan=='lsgan', tensor=self.Tensor)
                self.optim_D = torch.optim.Adam(self.netD.parameters(), lr=opt.lr_D, betas=(opt.beta1, opt.beta2))
                self.optimizers.append(self.optim_D)
            # todo: add pose loss
            self.fake_pool = ImagePool(opt.pool_size)

        ###################################
        # load trained model
        ###################################
        if not self.is_train:
            self.load_network(self.netT, 'netT', opt.which_epoch)
        elif opt.continue_train:
            self.load_network(self.netT, 'netT', opt.which_epoch)
            self.load_optim(self.optim, 'optim', opt.which_epoch)
            if self.use_GAN:
                self.load_network(self.netD, 'netD', opt.which_epoch)
                self.load_optim(self.optim_D, 'optim_D', opt.which_epoch)
        ###################################
        # schedulers
        ###################################
        if self.is_train:
            self.schedulers = []
            for optim in self.optimizers:
                self.schedulers.append(networks.get_scheduler(optim, opt))
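
crit_psnr and crit_ssim serve as evaluation metrics rather than training losses. A minimal sketch of a PSNR module like networks.PSNR, assuming inputs scaled to [0, 1] (the actual value range used by the repo is not shown here):

import torch
import torch.nn as nn

class PSNR(nn.Module):
    def forward(self, output, target):
        # peak signal-to-noise ratio with peak value 1.0
        mse = ((output - target) ** 2).mean()
        return 10 * torch.log10(1.0 / (mse + 1e-12))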
Example #12
    def initialize(self, cfg):
        self.cfg = cfg

        ## set devices
        if cfg['GPU_IDS']:
            assert (torch.cuda.is_available())
            self.device = torch.device('cuda:{}'.format(cfg['GPU_IDS'][0]))
            torch.backends.cudnn.benchmark = True
            print('Using %d GPUs' % len(cfg['GPU_IDS']))
        else:
            self.device = torch.device('cpu')

        # define network
        if cfg['ARCHI'] == 'alexnet':
            self.netB = networks.netB_alexnet()
            self.netH = networks.netH_alexnet()
            if self.cfg['USE_DA'] and self.cfg['TRAIN']:
                self.netD = networks.netD_alexnet(self.cfg['DA_LAYER'])
        elif cfg['ARCHI'] == 'vgg16':
            # vgg16 backbone is not supported yet; the placeholder code below
            # is kept from the original but was unreachable after the raise.
            raise NotImplementedError('vgg16 backbone is not supported')
            # self.netB = networks.netB_vgg16()
            # self.netH = networks.netH_vgg16()
            # if self.cfg['USE_DA'] and self.cfg['TRAIN']:
            #     self.netD = networks.netD_vgg16(self.cfg['DA_LAYER'])
        elif 'resnet' in cfg['ARCHI']:
            self.netB = networks.netB_resnet34()
            self.netH = networks.netH_resnet34()
            if self.cfg['USE_DA'] and self.cfg['TRAIN']:
                self.netD = networks.netD_resnet(self.cfg['DA_LAYER'])
        else:
            raise ValueError('Un-supported network')

        ## initialize network param.
        self.netB = networks.init_net(self.netB, cfg['GPU_IDS'], 'xavier')
        self.netH = networks.init_net(self.netH, cfg['GPU_IDS'], 'xavier')

        if self.cfg['USE_DA'] and self.cfg['TRAIN']:
            self.netD = networks.init_net(self.netD, cfg['GPU_IDS'], 'xavier')

        # loss, optimizer, and scheduler
        if cfg['TRAIN']:
            self.total_steps = 0
            ## Output path
            self.save_dir = os.path.join(
                cfg['OUTPUT_PATH'], cfg['ARCHI'],
                datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))
            if not os.path.isdir(self.save_dir):
                os.makedirs(self.save_dir)
            # self.logger = Logger(self.save_dir)

            ## model names
            self.model_names = ['netB', 'netH']
            ## loss
            self.criterionGAN = networks.GANLoss().to(self.device)
            self.criterionDepth1 = torch.nn.MSELoss().to(self.device)
            self.criterionNorm = torch.nn.CosineEmbeddingLoss().to(self.device)
            self.criterionEdge = torch.nn.BCELoss().to(self.device)

            ## optimizers
            self.lr = cfg['LR']
            self.optimizers = []
            self.optimizer_B = torch.optim.Adam(self.netB.parameters(),
                                                lr=cfg['LR'],
                                                betas=(cfg['BETA1'],
                                                       cfg['BETA2']))
            self.optimizer_H = torch.optim.Adam(self.netH.parameters(),
                                                lr=cfg['LR'],
                                                betas=(cfg['BETA1'],
                                                       cfg['BETA2']))
            self.optimizers.append(self.optimizer_B)
            self.optimizers.append(self.optimizer_H)
            if cfg['USE_DA']:
                self.real_pool = ImagePool(cfg['POOL_SIZE'])
                self.syn_pool = ImagePool(cfg['POOL_SIZE'])
                self.model_names.append('netD')
                ## use SGD for discriminator
                self.optimizer_D = torch.optim.SGD(
                    self.netD.parameters(),
                    lr=cfg['LR'],
                    momentum=cfg['MOMENTUM'],
                    weight_decay=cfg['WEIGHT_DECAY'])
                self.optimizers.append(self.optimizer_D)
            ## LR scheduler
            self.schedulers = [
                networks.get_scheduler(optimizer, cfg)
                for optimizer in self.optimizers
            ]
        else:
            ## testing
            self.model_names = ['netB', 'netH']
            self.criterionDepth1 = torch.nn.MSELoss().to(self.device)
            self.criterionNorm = torch.nn.CosineEmbeddingLoss(
                reduction='none').to(self.device)

        self.load_dir = os.path.join(cfg['CKPT_PATH'])
        self.criterionNorm_eval = torch.nn.CosineEmbeddingLoss(
            reduction='none').to(self.device)

        if cfg['TEST'] or cfg['RESUME']:
            self.load_networks(cfg['EPOCH_LOAD'])
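
criterionNorm (CosineEmbeddingLoss) compares pairs of vectors and needs a target of +1 to pull predicted surface normals toward the ground truth. A sketch of how per-pixel normal maps are typically flattened for it (the NCHW layout is an assumption):

import torch

pred_n = torch.randn(4, 3, 64, 64)   # predicted normals, NCHW
gt_n = torch.randn(4, 3, 64, 64)     # ground-truth normals
pred_v = pred_n.permute(0, 2, 3, 1).reshape(-1, 3)  # one 3-vector per pixel
gt_v = gt_n.permute(0, 2, 3, 1).reshape(-1, 3)
target = torch.ones(pred_v.size(0))  # +1 => minimize 1 - cos(pred, gt)
loss = torch.nn.CosineEmbeddingLoss()(pred_v, gt_v, target)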
Example #13
    def __init__(self, hyperparameters):
        super(myMUNIT_Trainer, self).__init__()
        lr = hyperparameters['lr']
        # Initiate the networks
        self.style_dim = hyperparameters['gen']['style_dim']
        self.enc_a = networks.define_E(input_nc=3,
                                       output_nc=self.style_dim,
                                       ndf=64)  # encoder for domain a
        self.enc_b = networks.define_E(input_nc=3,
                                       output_nc=self.style_dim,
                                       ndf=64)  # encoder for domain b
        self.gen_a = networks.define_G(input_nc=3,
                                       output_nc=3,
                                       nz=self.style_dim,
                                       ngf=64)  # generator for domain a
        self.gen_b = networks.define_G(input_nc=3,
                                       output_nc=3,
                                       nz=self.style_dim,
                                       ngf=64)  # generator for domain b
        self.dis_a = networks.define_D(input_nc=3,
                                       ndf=64,
                                       norm='instance',
                                       num_Ds=2)  # discriminator for domain a
        self.dis_b = networks.define_D(input_nc=3,
                                       ndf=64,
                                       norm='instance',
                                       num_Ds=2)  # discriminator for domain b
        self.netVGGF = networks.define_VGGF()
        self.instancenorm = nn.InstanceNorm2d(512, affine=False)

        # Initiate the criterions or loss functions
        self.criterionGAN = networks.GANLoss(
            mse_loss=True,
            tensor=torch.cuda.FloatTensor)  # criterion GAN adversarial loss
        self.wGANloss = networks.wGANLoss(
            tensor=torch.cuda.FloatTensor)  # wGAN adversarial loss
        self.criterionL1 = torch.nn.L1Loss()  # L1 loss
        self.criterionL2 = networks.L2Loss()  # L2 loss
        self.criterionZ = torch.nn.L1Loss()  # L1 loss between code
        self.criterionC = networks.ContentLoss(
            vgg_features=self.netVGGF)  # content loss
        self.criterionS = networks.StyleLoss(
            vgg_features=self.netVGGF)  # style loss
        self.criterionC_l = networks.ContentLoss(
            vgg_features=self.netVGGF)  # local content loss
        self.criterionS_l = networks.StyleLoss(
            vgg_features=self.netVGGF)  # local style loss
        self.criterionHisogram = networks.HistogramLoss(
            vgg_features=self.netVGGF)  # histogram loss
        self.Feature_map_im = networks.Feature_map_im(
            vgg_features=self.netVGGF)  # show feature map

        # fix the noise used in sampling
        self.s_a = torch.randn(8, self.style_dim, 1, 1).cuda()
        self.s_b = torch.randn(8, self.style_dim, 1, 1).cuda()

        # Setup the optimizers
        beta1 = hyperparameters['beta1']
        beta2 = hyperparameters['beta2']
        dis_params = list(self.dis_a.parameters()) + list(
            self.dis_b.parameters())
        gen_params = list(self.gen_a.parameters()) + list(
            self.gen_b.parameters())
        self.dis_opt = torch.optim.Adam(
            [p for p in dis_params if p.requires_grad],
            lr=lr,
            betas=(beta1, beta2),
            weight_decay=hyperparameters['weight_decay'])
        self.gen_opt = torch.optim.Adam(
            [p for p in gen_params if p.requires_grad],
            lr=lr,
            betas=(beta1, beta2),
            weight_decay=hyperparameters['weight_decay'])
        self.dis_scheduler = get_scheduler(self.dis_opt, hyperparameters)
        self.gen_scheduler = get_scheduler(self.gen_opt, hyperparameters)

        # Load VGG model if needed
        if 'vgg_w' in hyperparameters.keys() and hyperparameters['vgg_w'] > 0:
            self.vgg = load_vgg16(hyperparameters['vgg_model_path'] +
                                  '/models')
            self.vgg.eval()
            for param in self.vgg.parameters():
                param.requires_grad = False
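
The style and histogram criteria built on netVGGF typically compare Gram matrices of VGG feature maps; a minimal sketch of the usual Gram computation (a common pattern, not necessarily the exact networks.StyleLoss internals):

import torch

def gram_matrix(feat):
    # feat: (B, C, H, W) VGG activations -> (B, C, C) normalized Gram matrix
    b, c, h, w = feat.size()
    f = feat.view(b, c, h * w)
    return torch.bmm(f, f.transpose(1, 2)) / (c * h * w)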
Example #14
    def __init__(self, opt):
        """Initialize the CycleGAN class.

                Parameters:
                    opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
                """

        self.opt = opt
        self.isTrain = opt.isTrain
        self.device = opt.device
        self.model_save_dir = opt.model_dir
        self.loss_names = []
        self.model_names = []
        self.optimizers = []
        self.image_paths = []

        self.epoch = 0
        self.num_epochs = opt.nr_epochs

        # specify the training losses you want to print out. The training/test scripts will call <BaseModel.get_current_losses>
        self.loss_names = ['D_A', 'G_A', 'cycle_A', 'D_B', 'G_B', 'cycle_B']
        # specify the images you want to save/display. The training/test scripts will call <BaseModel.get_current_visuals>

        if self.isTrain:
            self.model_names = ['G_A', 'G_B', 'D_A', 'D_B']
        else:  # during test time, only load Gs
            self.model_names = ['G_A', 'G_B']

        # define networks (both Generators and discriminators)
        # The naming is different from those used in the paper.
        # Code (vs. paper): G_A (G), G_B (F), D_A (D_Y), D_B (D_X)
        self.netG_A = networks.Generator(opt.input_nc, opt.output_nc, opt.ngf,
                                         opt.n_layers_G).to(self.device)
        self.netG_B = networks.Generator(opt.output_nc, opt.input_nc, opt.ngf,
                                         opt.n_layers_G).to(self.device)

        if self.isTrain:  # define discriminators
            self.netD_A = networks.NLayerDiscriminator(
                opt.output_nc, opt.ndf, opt.n_layers_D).to(self.device)
            self.netD_B = networks.NLayerDiscriminator(
                opt.input_nc, opt.ndf, opt.n_layers_D).to(self.device)
            # self.netD_A = networks.define_D(opt.output_nc, opt.ndf, opt.netD,
            #                                 opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids)
            # self.netD_B = networks.define_D(opt.input_nc, opt.ndf, opt.netD,
            #                                 opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids)

        if self.isTrain:
            self.fake_A_pool = networks.MotionPool(
                opt.pool_size
            )  # create image buffer to store previously generated images
            self.fake_B_pool = networks.MotionPool(
                opt.pool_size
            )  # create image buffer to store previously generated images
            # define loss functions
            self.criterionGAN = networks.GANLoss(opt.gan_mode).to(
                self.device)  # define GAN loss.
            self.criterionCycle = torch.nn.L1Loss()
            # self.criterionIdt = torch.nn.L1Loss()
            # initialize optimizers; schedulers will be automatically created by function <BaseModel.setup>.
            self.optimizer_G = torch.optim.Adam(itertools.chain(
                self.netG_A.parameters(), self.netG_B.parameters()),
                                                lr=opt.lr,
                                                betas=(opt.beta1, 0.999))
            self.optimizer_D = torch.optim.Adam(itertools.chain(
                self.netD_A.parameters(), self.netD_B.parameters()),
                                                lr=opt.lr,
                                                betas=(opt.beta1, 0.999))
            self.optimizers.append(self.optimizer_G)
            self.optimizers.append(self.optimizer_D)

            self.schedulers = [
                networks.get_scheduler(optimizer, opt)
                for optimizer in self.optimizers
            ]
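
The loss_names list above maps directly onto the standard CycleGAN generator objective; a sketch of the matching update (the method name and default lambda values are assumptions):

    def backward_G(self, real_A, real_B, lambda_A=10.0, lambda_B=10.0):
        fake_B = self.netG_A(real_A)     # G_A(A)
        rec_A = self.netG_B(fake_B)      # G_B(G_A(A)) should reconstruct A
        fake_A = self.netG_B(real_B)     # G_B(B)
        rec_B = self.netG_A(fake_A)      # G_A(G_B(B)) should reconstruct B
        loss_G_A = self.criterionGAN(self.netD_A(fake_B), True)
        loss_G_B = self.criterionGAN(self.netD_B(fake_A), True)
        loss_cycle_A = self.criterionCycle(rec_A, real_A) * lambda_A
        loss_cycle_B = self.criterionCycle(rec_B, real_B) * lambda_B
        loss_G = loss_G_A + loss_G_B + loss_cycle_A + loss_cycle_B
        loss_G.backward()
        return loss_G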
Example #15
device = torch.device("cuda:3")

# Shouldn't be patchGAN, add fc layer at end
#model = networks.define_D(6, 64, 'n_layers', n_layers_D=3, use_sigmoid=False, out_channels=256, glob=True)
model = networks.define_D(6, 64, 'n_layers', n_layers_D=3, use_sigmoid=False)
#model = networks.Siamese()
#model = networks.GlobalLocal()
#model = torch.nn.DataParallel(model, device_ids=(0,1,3,4))
#model = networks.SiameseResnet()
chkpt = torch.load('checkpoints/patch/179.pth')
#chkpt = torch.load('checkpoints/localGlobal/190.pth')
#chkpt = torch.load('checkpoints/SiameseResnet/17.pth')
model.load_state_dict(chkpt['state_dict'])
model.to(device)

patch_loss = networks.GANLoss(use_lsgan=False).to(device)

total_steps = 0

dataset = FrankensteinDataset()
#dataset.initialize('../datasets/street_view/sides/')
#dataset.initialize('../../../data/semanticLandscapes512/train_img')
dataset.initialize('../../../data/MITCVCL/imgs')

train_loader = DataLoader(dataset, batch_size=batch_size, shuffle=True)

total = 0
correct = 0
real = []
fake = []
fake_pred = []
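
Since the loaded model is a 3-layer PatchGAN over 6-channel input pairs, it outputs a map of patch logits rather than a single score. A hedged sketch of turning that map into a real/fake decision (input size and threshold are assumptions; use_sigmoid=False means the outputs are raw logits):

with torch.no_grad():
    pair = torch.randn(1, 6, 256, 256).to(device)  # concatenated image pair
    patch_logits = model(pair)                     # e.g. a (1, 1, 30, 30) map
    score = patch_logits.mean().item()             # average over all patches
    is_real = score > 0.0                          # logit threshold at zero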
Example #16
    def __init__(self, conf):
        # Acquire configuration
        self.conf = conf
        self.cur_iter = 0
        self.max_iters = conf.max_iters

        # Define input tensor
        self.input_tensor = torch.FloatTensor(1, 3, conf.input_crop_size,
                                              conf.input_crop_size).cuda()
        self.real_example = torch.FloatTensor(1, 3, conf.output_crop_size,
                                              conf.output_crop_size).cuda()

        # Define networks
        # self.G = networks.Generator(conf.G_base_channels, conf.G_num_resblocks, conf.G_num_downscales, conf.G_use_bias,
        #                            conf.G_skip)
        self.G = Generator(32, 32)
        self.D = networks.MultiScaleDiscriminator(conf.output_crop_size,
                                                  self.conf.D_max_num_scales,
                                                  self.conf.D_scale_factor,
                                                  self.conf.D_base_channels)
        self.GAN_loss_layer = networks.GANLoss()
        self.Reconstruct_loss = networks.WeightedMSELoss(use_L1=conf.use_L1)
        self.RandCrop = networks.RandomCrop(
            [conf.input_crop_size, conf.input_crop_size],
            must_divide=conf.must_divide)
        self.SwapCrops = networks.SwapCrops(conf.crop_swap_min_size,
                                            conf.crop_swap_max_size)

        # Make all networks run on GPU
        self.G.cuda()
        self.D.cuda()
        self.GAN_loss_layer.cuda()
        self.Reconstruct_loss.cuda()
        self.RandCrop.cuda()
        self.SwapCrops.cuda()

        # Define loss function
        self.criterionGAN = self.GAN_loss_layer.forward
        self.criterionReconstruction = self.Reconstruct_loss.forward

        # Keeping track of losses- prepare tensors
        self.losses_G_gan = torch.FloatTensor(conf.print_freq).cuda()
        self.losses_D_real = torch.FloatTensor(conf.print_freq).cuda()
        self.losses_D_fake = torch.FloatTensor(conf.print_freq).cuda()
        self.losses_G_reconstruct = torch.FloatTensor(conf.print_freq).cuda()
        if self.conf.reconstruct_loss_stop_iter > 0:
            self.losses_D_reconstruct = torch.FloatTensor(
                conf.print_freq).cuda()

        # Initialize networks
        self.G.apply(networks.weights_init)
        self.D.apply(networks.weights_init)

        # Initialize optimizers
        if self.conf.gopt_sgd:
            g_opt = torch.optim.SGD(self.G.parameters(), lr=conf.g_lr)
        else:
            g_opt = torch.optim.Adam(self.G.parameters(),
                                     lr=conf.g_lr,
                                     betas=(conf.beta1, 0.999))
        self.optimizer_G = g_opt
        self.optimizer_D = torch.optim.Adam(self.D.parameters(),
                                            lr=conf.d_lr,
                                            betas=(conf.beta1, 0.999))

        # Learning rate scheduler
        # First define linearly decaying functions (decay starts at a special iter)
        start_decay = conf.lr_start_decay_iter
        end_decay = conf.max_iters
        # def lr_function(n_iter):
        #     return 1 - max(0, 1.0 * (n_iter - start_decay) / (conf.max_iters - start_decay))
        lr_function = LRPolicy(start_decay, end_decay)
        # Define learning rate schedulers
        self.lr_scheduler_G = torch.optim.lr_scheduler.LambdaLR(
            self.optimizer_G, lr_function)
        self.lr_scheduler_D = torch.optim.lr_scheduler.LambdaLR(
            self.optimizer_D, lr_function)
Example #17
    def initialize(self, opt):
        super(TwoStagePoseTransferModel, self).initialize(opt)
        ###################################
        # load pretrained stage-1 (coarse) network
        ###################################
        self._create_stage_1_net(opt)
        ###################################
        # define stage-2 (refine) network
        ###################################
        # local patch encoder
        if opt.which_model_s2e == 'patch_embed':
            self.netT_s2e = networks.LocalPatchEncoder(
                n_patch=len(opt.patch_indices),
                input_nc=3,
                output_nc=opt.s2e_nof,
                nf=opt.s2e_nf,
                max_nf=opt.s2e_max_nf,
                input_size=opt.patch_size,
                bottleneck_factor=opt.s2e_bottleneck_factor,
                n_residual_blocks=2,
                norm_layer=networks.get_norm_layer(opt.norm),
                activation=nn.ReLU(False),
                use_dropout=False,
                gpu_ids=opt.gpu_ids,
            )
            s2e_nof = opt.s2e_nof
        elif opt.which_model_s2e == 'patch':
            self.netT_s2e = networks.LocalPatchRearranger(
                n_patch=len(opt.patch_indices),
                image_size=opt.fine_size,
            )
            s2e_nof = 3
        elif opt.which_model_s2e == 'seg_embed':
            self.netT_s2e = networks.SegmentRegionEncoder(
                seg_nc=self.opt.seg_nc,
                input_nc=3,
                output_nc=opt.s2e_nof,
                nf=opt.s2d_nf,
                input_size=opt.fine_size,
                n_blocks=3,
                norm_layer=networks.get_norm_layer(opt.norm),
                activation=nn.ReLU,
                use_dropout=False,
                grid_level=opt.s2e_grid_level,
                gpu_ids=opt.gpu_ids,
            )
            s2e_nof = opt.s2e_nof + opt.s2e_grid_level
        else:
            raise NotImplementedError()
        if opt.gpu_ids:
            self.netT_s2e.cuda()

        # decoder
        if self.opt.which_model_s2d == 'resnet':
            self.netT_s2d = networks.ResnetGenerator(
                input_nc=3 + s2e_nof,
                output_nc=3,
                ngf=opt.s2d_nf,
                norm_layer=networks.get_norm_layer(opt.norm),
                activation=nn.ReLU,
                use_dropout=False,
                n_blocks=opt.s2d_nblocks,
                gpu_ids=opt.gpu_ids,
                output_tanh=False,
            )
        elif self.opt.which_model_s2d == 'unet':
            self.netT_s2d = networks.UnetGenerator_v2(
                input_nc=3 + s2e_nof,
                output_nc=3,
                num_downs=8,
                ngf=opt.s2d_nf,
                max_nf=opt.s2d_nf * 2**3,
                norm_layer=networks.get_norm_layer(opt.norm),
                use_dropout=False,
                gpu_ids=opt.gpu_ids,
                output_tanh=False,
            )
        elif self.opt.which_model_s2d == 'rpresnet':
            self.netT_s2d = networks.RegionPropagationResnetGenerator(
                input_nc=3 + s2e_nof,
                output_nc=3,
                ngf=opt.s2d_nf,
                norm_layer=networks.get_norm_layer(opt.norm),
                activation=nn.ReLU,
                use_dropout=False,
                nblocks=opt.s2d_nblocks,
                gpu_ids=opt.gpu_ids,
                output_tanh=False)
        else:
            raise NotImplementedError()
        if opt.gpu_ids:
            self.netT_s2d.cuda()
        ###################################
        # define discriminator
        ###################################
        self.use_GAN = self.is_train and opt.loss_weight_gan > 0
        if self.use_GAN:
            self.netD = networks.define_D_from_params(
                input_nc=(3 + self.get_pose_dim(opt.pose_type)) if opt.D_cond else 3,
                ndf=opt.D_nf,
                which_model_netD='n_layers',
                n_layers_D=opt.D_n_layer,
                norm=opt.norm,
                which_gan=opt.which_gan,
                init_type=opt.init_type,
                gpu_ids=opt.gpu_ids)
        else:
            self.netD = None
        ###################################
        # loss functions
        ###################################
        self.crit_psnr = networks.PSNR()
        self.crit_ssim = networks.SSIM()

        if self.is_train:
            self.optimizers = []
            self.crit_vgg = networks.VGGLoss_v2(self.gpu_ids,
                                                opt.content_layer_weight,
                                                opt.style_layer_weight,
                                                opt.shifted_style)

            self.optim = torch.optim.Adam(
                [{'params': self.netT_s2e.parameters()},
                 {'params': self.netT_s2d.parameters()}],
                lr=opt.lr,
                betas=(opt.beta1, opt.beta2))
            self.optimizers.append(self.optim)

            if opt.train_s1:
                self.optim_s1 = torch.optim.Adam(self.netT_s1.parameters(),
                                                 lr=opt.lr_s1,
                                                 betas=(opt.beta1, opt.beta2))
                self.optimizers.append(self.optim_s1)

            if self.use_GAN:
                self.crit_GAN = networks.GANLoss(
                    use_lsgan=opt.which_gan == 'lsgan', tensor=self.Tensor)
                self.optim_D = torch.optim.Adam(self.netD.parameters(),
                                                lr=opt.lr_D,
                                                betas=(opt.beta1, opt.beta2))
                self.optimizers.append(self.optim_D)
                self.fake_pool = ImagePool(opt.pool_size)
        ###################################
        # init/load model
        ###################################
        if self.is_train:
            if not opt.continue_train:
                self.load_network(self.netT_s1, 'netT', 'latest',
                                  self.opt_s1.id)
                networks.init_weights(self.netT_s2e, init_type=opt.init_type)
                networks.init_weights(self.netT_s2d, init_type=opt.init_type)
                if self.use_GAN:
                    networks.init_weights(self.netD, init_type=opt.init_type)
            else:
                self.load_network(self.netT_s1, 'netT_s1', opt.which_epoch)
                self.load_network(self.netT_s2e, 'netT_s2e', opt.which_epoch)
                self.load_network(self.netT_s2d, 'netT_s2d', opt.which_epoch)
                self.load_optim(self.optim, 'optim', opt.which_epoch)
                if self.use_GAN:
                    self.load_network(self.netD, 'netD', opt.which_epoch)
                    self.load_optim(self.optim_D, 'optim_D', opt.which_epoch)
        else:
            self.load_network(self.netT_s1, 'netT_s1', opt.which_epoch)
            self.load_network(self.netT_s2e, 'netT_s2e', opt.which_epoch)
            self.load_network(self.netT_s2d, 'netT_s2d', opt.which_epoch)
        ###################################
        # schedulers
        ###################################
        if self.is_train:
            self.schedulers = []
            for optim in self.optimizers:
                self.schedulers.append(networks.get_scheduler(optim, opt))
Example #18
    def initialize(self, opt):
        super(SupervisedPoseTransferModel, self).initialize(opt)
        ###################################
        # define transformer
        ###################################
        if opt.which_model_T == 'resnet':
            self.netT = networks.ResnetGenerator(
                input_nc=3 + self.get_pose_dim(opt.pose_type),
                output_nc=3,
                ngf=opt.T_nf,
                norm_layer=networks.get_norm_layer(opt.norm),
                use_dropout=not opt.no_dropout,
                n_blocks=9,
                gpu_ids=opt.gpu_ids)
        elif opt.which_model_T == 'unet':
            self.netT = networks.UnetGenerator_v2(
                input_nc=3 + self.get_pose_dim(opt.pose_type),
                output_nc=3,
                num_downs=8,
                ngf=opt.T_nf,
                norm_layer=networks.get_norm_layer(opt.norm),
                use_dropout=not opt.no_dropout,
                gpu_ids=opt.gpu_ids)
        else:
            raise NotImplementedError()

        if opt.gpu_ids:
            self.netT.cuda()
        networks.init_weights(self.netT, init_type=opt.init_type)
        ###################################
        # define discriminator
        ###################################
        self.use_GAN = self.is_train and opt.loss_weight_gan > 0
        if self.use_GAN:
            self.netD = networks.define_D_from_params(
                input_nc=(3 + self.get_pose_dim(opt.pose_type)) if opt.D_cond else 3,
                ndf=opt.D_nf,
                which_model_netD='n_layers',
                n_layers_D=3,
                norm=opt.norm,
                which_gan=opt.which_gan,
                init_type=opt.init_type,
                gpu_ids=opt.gpu_ids)
        else:
            self.netD = None
        ###################################
        # loss functions
        ###################################
        if self.is_train:
            self.loss_functions = []
            self.schedulers = []
            self.optimizers = []

            self.crit_L1 = nn.L1Loss()
            self.crit_vgg = networks.VGGLoss_v2(self.gpu_ids)
            # self.crit_vgg_old = networks.VGGLoss(self.gpu_ids)
            self.crit_psnr = networks.PSNR()
            self.crit_ssim = networks.SSIM()
            self.loss_functions += [self.crit_L1, self.crit_vgg]
            self.optim = torch.optim.Adam(self.netT.parameters(),
                                          lr=opt.lr,
                                          betas=(opt.beta1, opt.beta2))
            self.optimizers += [self.optim]

            if self.use_GAN:
                self.crit_GAN = networks.GANLoss(
                    use_lsgan=opt.which_gan == 'lsgan', tensor=self.Tensor)
                self.optim_D = torch.optim.Adam(self.netD.parameters(),
                                                lr=opt.lr_D,
                                                betas=(opt.beta1, opt.beta2))
                self.loss_functions.append(self.crit_GAN)
                self.optimizers.append(self.optim_D)
            # todo: add pose loss
            for optim in self.optimizers:
                self.schedulers.append(networks.get_scheduler(optim, opt))

            self.fake_pool = ImagePool(opt.pool_size)

        ###################################
        # load trained model
        ###################################
        if not self.is_train:
            self.load_network(self.netT, 'netT', opt.which_model)
Example #19
    def initialize(self, opt):
        BaseModel.initialize(self, opt)
        if opt.resize_or_crop != 'none' or not opt.isTrain:  # when training at full res this causes OOM
            torch.backends.cudnn.benchmark = True
        self.isTrain = opt.isTrain
        input_nc = opt.label_nc if opt.label_nc != 0 else opt.input_nc

        ##### define networks
        # Generator network
        netG_input_nc = input_nc
        # Main Generator
        self.netG = networks.define_G(11,
                                      opt.output_nc,
                                      opt.ngf,
                                      opt.netG,
                                      opt.n_downsample_global,
                                      opt.n_blocks_global,
                                      opt.n_local_enhancers,
                                      opt.n_blocks_local,
                                      opt.norm,
                                      gpu_ids=self.gpu_ids)

        self.netP = networks.define_P(44,
                                      20,
                                      opt.ngf,
                                      opt.netG,
                                      opt.n_downsample_global,
                                      opt.n_blocks_global,
                                      opt.n_local_enhancers,
                                      opt.n_blocks_local,
                                      opt.norm,
                                      gpu_ids=self.gpu_ids)
        self.netP.load_state_dict(
            torch.load(
                os.path.dirname(os.path.realpath(__file__)) +
                "/checkpoints/generate/parse.pth"))

        # Discriminator network
        if self.isTrain:
            use_sigmoid = opt.no_lsgan
            netD_input_nc = input_nc + opt.output_nc
            netB_input_nc = opt.output_nc * 2
            self.netD = networks.define_D(netD_input_nc,
                                          opt.ndf,
                                          opt.n_layers_D,
                                          opt.norm,
                                          use_sigmoid,
                                          opt.num_D,
                                          not opt.no_ganFeat_loss,
                                          gpu_ids=self.gpu_ids)
            #self.netB = networks.define_B(netB_input_nc, opt.output_nc, 32, 3, 3, opt.norm, gpu_ids=self.gpu_ids)

        if self.opt.verbose:
            print('---------- Networks initialized -------------')

        # load networks
        if not self.isTrain or opt.continue_train or opt.load_pretrain:
            pretrained_path = '' if not self.isTrain else opt.load_pretrain
            self.load_network(self.netG, 'G', opt.which_epoch, pretrained_path)

            if self.isTrain:
                self.load_network(self.netD, 'D', opt.which_epoch,
                                  pretrained_path)

        # set loss functions and optimizers
        if self.isTrain:
            if opt.pool_size > 0 and len(self.gpu_ids) > 1:
                raise NotImplementedError(
                    "Fake Pool Not Implemented for MultiGPU")
            self.fake_pool = ImagePool(opt.pool_size)
            self.old_lr = opt.lr

            # define loss functions
            self.loss_filter = self.init_loss_filter(not opt.no_ganFeat_loss,
                                                     not opt.no_vgg_loss)

            self.criterionGAN = networks.GANLoss(use_lsgan=not opt.no_lsgan,
                                                 tensor=self.Tensor)
            self.criterionFeat = torch.nn.L1Loss()
            if not opt.no_vgg_loss:
                self.criterionVGG = networks.VGGLoss(self.gpu_ids)
            self.criterionStyle = networks.StyleLoss(self.gpu_ids)
            # Names so we can break out each loss term
            self.loss_names = self.loss_filter('G_GAN', 'G_GAN_Feat', 'G_VGG',
                                               'D_real', 'D_fake')
            # initialize optimizers
            # optimizer G
            if opt.niter_fix_global > 0:
                import sys
                if sys.version_info >= (3, 0):
                    finetune_list = set()
                else:
                    from sets import Set
                    finetune_list = Set()

                params_dict = dict(self.netG.named_parameters())
                params = []
                for key, value in params_dict.items():
                    if key.startswith('model' + str(opt.n_local_enhancers)):
                        params += [value]
                        finetune_list.add(key.split('.')[0])
                print(
                    '------------- Only training the local enhancer network (for %d epochs) ------------'
                    % opt.niter_fix_global)
                print('The layers that are finetuned are ',
                      sorted(finetune_list))
            else:
                params = (list(self.netG.parameters()) +
                          list(self.netimage.parameters()) +
                          list(self.netcolor.parameters()) +
                          list(self.netlabel.parameters()) +
                          list(self.netsketch.parameters()) +
                          list(self.classfier.parameters()))
                # params.extend(list(self.netimage.parameters()))

            self.optimizer_G = torch.optim.Adam(params,
                                                lr=opt.lr,
                                                betas=(opt.beta1, 0.999))

            # optimizer D
            params = list(self.netD.parameters())
            self.optimizer_D = torch.optim.Adam(params,
                                                lr=opt.lr,
                                                betas=(opt.beta1, 0.999))
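
The opt.niter_fix_global branch above picks parameters by name prefix so that only the local enhancer is optimized at first. The complementary step, freezing everything else so stale parameters stay untouched, uses the same naming trick; a sketch under the same 'model<N>' naming assumption:

import torch

# Train only the local enhancer: enable grads by name prefix, then hand
# the optimizer just the trainable subset (sketch; netG and opt are the
# objects from the example above).
prefix = 'model' + str(opt.n_local_enhancers)
for name, param in netG.named_parameters():
    param.requires_grad = name.startswith(prefix)
params = [p for p in netG.parameters() if p.requires_grad]
optimizer_G = torch.optim.Adam(params, lr=opt.lr, betas=(opt.beta1, 0.999))
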
Example #20
0
    def initialize(self, opt):
        super(PoseTransferModel, self).initialize(opt)
        ###################################
        # define generator
        ###################################
        if opt.which_model_G == 'unet':
            self.netG = networks.UnetGenerator(
                input_nc=self.get_tensor_dim('+'.join(
                    [opt.G_appearance_type, opt.G_pose_type])),
                output_nc=3,
                nf=opt.G_nf,
                max_nf=opt.G_max_nf,
                num_scales=opt.G_n_scale,
                n_residual_blocks=2,
                norm=opt.G_norm,
                activation=nn.LeakyReLU(0.1)
                if opt.G_activation == 'leaky_relu' else nn.ReLU(),
                use_dropout=opt.use_dropout,
                gpu_ids=opt.gpu_ids)
        elif opt.which_model_G == 'dual_unet':
            self.netG = networks.DualUnetGenerator(
                pose_nc=self.get_tensor_dim(opt.G_pose_type),
                appearance_nc=self.get_tensor_dim(opt.G_appearance_type),
                output_nc=3,
                aux_output_nc=[],
                nf=opt.G_nf,
                max_nf=opt.G_max_nf,
                num_scales=opt.G_n_scale,
                num_warp_scales=opt.G_n_warp_scale,
                n_residual_blocks=2,
                norm=opt.G_norm,
                vis_mode=opt.G_vis_mode,
                activation=nn.LeakyReLU(0.1)
                if opt.G_activation == 'leaky_relu' else nn.ReLU(),
                use_dropout=opt.use_dropout,
                no_end_norm=opt.G_no_end_norm,
                gpu_ids=opt.gpu_ids,
            )
        if opt.gpu_ids:
            self.netG.cuda()
        networks.init_weights(self.netG, init_type=opt.init_type)
        ###################################
        # define external pixel warper
        ###################################
        if opt.G_pix_warp:
            pix_warp_n_scale = opt.G_n_scale
            self.netPW = networks.UnetGenerator_MultiOutput(
                input_nc=self.get_tensor_dim(opt.G_pix_warp_input_type),
                output_nc=[1],  # only use one output branch (weight mask)
                nf=32,
                max_nf=128,
                num_scales=pix_warp_n_scale,
                n_residual_blocks=2,
                norm=opt.G_norm,
                activation=nn.ReLU(False),
                use_dropout=False,
                gpu_ids=opt.gpu_ids)
            if opt.gpu_ids:
                self.netPW.cuda()
            networks.init_weights(self.netPW, init_type=opt.init_type)
        ###################################
        # define discriminator
        ###################################
        self.use_gan = self.is_train and self.opt.loss_weight_gan > 0
        if self.use_gan:
            self.netD = networks.NLayerDiscriminator(
                input_nc=self.get_tensor_dim(opt.D_input_type_real),
                ndf=opt.D_nf,
                n_layers=opt.D_n_layers,
                use_sigmoid=(opt.gan_type == 'dcgan'),
                output_bias=True,
                gpu_ids=opt.gpu_ids,
            )
            if opt.gpu_ids:
                self.netD.cuda()
            networks.init_weights(self.netD, init_type=opt.init_type)
        ###################################
        # load optical flow model
        ###################################
        if opt.flow_on_the_fly:
            self.netF = load_flow_network(opt.pretrained_flow_id,
                                          opt.pretrained_flow_epoch,
                                          opt.gpu_ids)
            self.netF.eval()
            if opt.gpu_ids:
                self.netF.cuda()
        ###################################
        # loss and optimizers
        ###################################
        self.crit_psnr = networks.PSNR()
        self.crit_ssim = networks.SSIM()

        if self.is_train:
            self.crit_vgg = networks.VGGLoss(
                opt.gpu_ids,
                shifted_style=opt.shifted_style_loss,
                content_weights=opt.vgg_content_weights)
            if opt.G_pix_warp:
                # only optimize netPW
                self.optim = torch.optim.Adam(self.netPW.parameters(),
                                              lr=opt.lr,
                                              betas=(opt.beta1, opt.beta2),
                                              weight_decay=opt.weight_decay)
            else:
                self.optim = torch.optim.Adam(self.netG.parameters(),
                                              lr=opt.lr,
                                              betas=(opt.beta1, opt.beta2),
                                              weight_decay=opt.weight_decay)
            self.optimizers = [self.optim]
            if self.use_gan:
                self.crit_gan = networks.GANLoss(
                    use_lsgan=(opt.gan_type == 'lsgan'))
                if self.gpu_ids:
                    self.crit_gan.cuda()
                self.optim_D = torch.optim.Adam(
                    self.netD.parameters(),
                    lr=opt.lr_D,
                    betas=(opt.beta1, opt.beta2),
                    weight_decay=opt.weight_decay_D)
                self.optimizers += [self.optim_D]

        ###################################
        # load trained model
        ###################################
        if not self.is_train:
            # load trained model for testing
            self.load_network(self.netG, 'netG', opt.which_epoch)
            if opt.G_pix_warp:
                self.load_network(self.netPW, 'netPW', opt.which_epoch)
        elif opt.pretrained_G_id is not None:
            # load pretrained network
            self.load_network(self.netG, 'netG', opt.pretrained_G_epoch,
                              opt.pretrained_G_id)
        elif opt.resume_train:
            # resume training
            self.load_network(self.netG, 'netG', opt.which_epoch)
            self.load_optim(self.optim, 'optim', opt.which_epoch)
            if self.use_gan:
                self.load_network(self.netD, 'netD', opt.which_epoch)
                self.load_optim(self.optim_D, 'optim_D', opt.which_epoch)
            if opt.G_pix_warp:
                self.load_network(self.netPW, 'netPW', opt.which_epoch)
        ###################################
        # schedulers
        ###################################
        if self.is_train:
            self.schedulers = []
            for optim in self.optimizers:
                self.schedulers.append(networks.get_scheduler(optim, opt))
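
networks.GANLoss(use_lsgan=...) appears in nearly every example in this collection. A minimal sketch of the usual contract: switch between MSE (LSGAN) and binary cross-entropy with logits (vanilla GAN), and build a target tensor shaped like the discriminator output. Real implementations typically also cache the target buffers; this is an assumption-level sketch, not the exact code from any of these repositories.

import torch
import torch.nn as nn

class GANLoss(nn.Module):
    # Sketch of the GANLoss interface used in the examples above.
    def __init__(self, use_lsgan=True):
        super().__init__()
        self.loss = nn.MSELoss() if use_lsgan else nn.BCEWithLogitsLoss()

    def forward(self, pred, target_is_real):
        target = torch.full_like(pred, 1.0 if target_is_real else 0.0)
        return self.loss(pred, target)
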
Example #21
0
    def initialize(self, opt):
        super(MultimodalDesignerGAN_V2, self).initialize(opt)
        ###################################
        # define networks
        ###################################
        self.modules = {}
        # shape branch
        if opt.which_model_netG != 'unet':
            self.shape_encoder = networks.define_image_encoder(opt, 'shape')
            self.modules['shape_encoder'] = self.shape_encoder
        else:
            self.shape_encoder = None
        # edge branch
        if opt.use_edge:
            self.edge_encoder = networks.define_image_encoder(opt, 'edge')
            self.modules['edge_encoder'] = self.edge_encoder
        else:
            self.edge_encoder = None
        # color branch
        if opt.use_color:
            self.color_encoder = networks.define_image_encoder(opt, 'color')
            self.modules['color_encoder'] = self.color_encoder
        else:
            self.color_encoder = None

        # fusion model
        if opt.ftn_model == 'none':
            # shape_feat, edge_feat and color_feat will simply be upsampled to the same size (that of shape_feat) and concatenated
            pass
        elif opt.ftn_model == 'concat':
            assert opt.use_edge or opt.use_color
            if opt.use_edge:
                self.edge_trans_net = networks.define_feature_fusion_network(
                    name='FeatureConcatNetwork',
                    feat_nc=opt.edge_nof,
                    guide_nc=opt.shape_nof,
                    nblocks=opt.ftn_nblocks,
                    norm=opt.norm,
                    gpu_ids=self.gpu_ids,
                    init_type=opt.init_type)
                self.modules['edge_trans_net'] = self.edge_trans_net
            if opt.use_color:
                self.color_trans_net = networks.define_feature_fusion_network(
                    name='FeatureConcatNetwork',
                    feat_nc=opt.color_nof,
                    guide_nc=opt.shape_nof,
                    nblocks=opt.ftn_nblocks,
                    norm=opt.norm,
                    gpu_ids=self.gpu_ids,
                    init_type=opt.init_type)
                self.modules['color_trans_net'] = self.color_trans_net
        elif opt.ftn_model == 'reduce':
            assert opt.use_edge or opt.use_color
            if opt.use_edge:
                self.edge_trans_net = networks.define_feature_fusion_network(
                    name='FeatureReduceNetwork',
                    feat_nc=opt.edge_nof,
                    guide_nc=opt.shape_nof,
                    nblocks=opt.ftn_nblocks,
                    ndowns=opt.ftn_ndowns,
                    norm=opt.norm,
                    gpu_ids=self.gpu_ids,
                    init_type=opt.init_type)
                self.modules['edge_trans_net'] = self.edge_trans_net
            if opt.use_color:
                self.color_trans_net = networks.define_feature_fusion_network(
                    name='FeatureReduceNetwork',
                    feat_nc=opt.color_nof,
                    guide_nc=opt.shape_nof,
                    nblocks=opt.ftn_nblocks,
                    ndowns=opt.ftn_ndowns,
                    norm=opt.norm,
                    gpu_ids=self.gpu_ids,
                    init_type=opt.init_type)
                self.modules['color_trans_net'] = self.color_trans_net

        elif opt.ftn_model == 'trans':
            assert opt.use_edge or opt.use_color
            if opt.use_edge:
                self.edge_trans_net = networks.define_feature_fusion_network(
                    name='FeatureTransformNetwork',
                    feat_nc=opt.edge_nof,
                    guide_nc=opt.shape_nof,
                    nblocks=opt.ftn_nblocks,
                    feat_size=opt.feat_size_lr,
                    norm=opt.norm,
                    gpu_ids=self.gpu_ids,
                    init_type=opt.init_type)
                self.modules['edge_trans_net'] = self.edge_trans_net
            if opt.use_color:
                self.color_trans_net = networks.define_feature_fusion_network(
                    name='FeatureTransformNetwork',
                    feat_nc=opt.color_nof,
                    guide_nc=opt.shape_nof,
                    nblocks=opt.ftn_nblocks,
                    feat_size=opt.feat_size_lr,
                    norm=opt.norm,
                    gpu_ids=self.gpu_ids,
                    init_type=opt.init_type)
                self.modules['color_trans_net'] = self.color_trans_net

        # netG
        self.netG = networks.define_generator(opt)
        self.modules['netG'] = self.netG

        # netD
        if self.is_train:
            self.netD = networks.define_D(opt)
            self.modules['netD'] = self.netD

        ###################################
        # load weights
        ###################################
        if self.is_train:
            if opt.continue_train:
                for label, net in self.modules.items():
                    self.load_network(net, label, opt.which_epoch)
            else:
                if opt.which_model_init != 'none':
                    # load pretrained entire model
                    for label, net in self.modules.items():
                        self.load_network(net,
                                          label,
                                          'latest',
                                          opt.which_model_init,
                                          forced=False)
                else:
                    # load pretrained encoder
                    if opt.which_model_netG != 'unet' and opt.pretrain_shape:
                        self.load_network(self.shape_encoder, 'shape_encoder',
                                          'latest',
                                          opt.which_model_init_shape_encoder)
                    if opt.use_edge and opt.pretrain_edge:
                        self.load_network(self.edge_encoder, 'edge_encoder',
                                          'latest',
                                          opt.which_model_init_edge_encoder)
                    if opt.use_color and opt.pretrain_color:
                        self.load_network(self.color_encoder, 'color_encoder',
                                          'latest',
                                          opt.which_model_init_color_encoder)
        else:
            for label, net in self.modules.items():
                if label != 'netD':
                    self.load_network(net, label, opt.which_epoch)

        ###################################
        # prepare for training
        ###################################
        if self.is_train:
            self.fake_pool = ImagePool(opt.pool_size)
            ###################################
            # define loss functions
            ###################################
            self.loss_functions = []
            if opt.which_gan in {'dcgan', 'lsgan'}:
                self.crit_GAN = networks.GANLoss(
                    use_lsgan=opt.which_gan == 'lsgan', tensor=self.Tensor)
                self.loss_functions.append(self.crit_GAN)
            else:
                # WGAN loss will be calculated in self.backward_D_wgangp and self.backward_G
                self.crit_GAN = None

            self.crit_L1 = nn.L1Loss()
            self.loss_functions.append(self.crit_L1)

            if self.opt.loss_weight_vgg > 0:
                self.crit_vgg = networks.VGGLoss(self.gpu_ids)
                self.loss_functions.append(self.crit_vgg)

            if self.opt.G_output_seg:
                self.crit_CE = nn.CrossEntropyLoss()
                self.loss_functions.append(self.crit_CE)

            self.crit_psnr = networks.SmoothLoss(networks.PSNR())
            self.loss_functions.append(self.crit_psnr)
            ###################################
            # create optimizers
            ###################################
            self.schedulers = []
            self.optimizers = []

            # G optimizer
            G_module_list = [
                'shape_encoder', 'edge_encoder', 'color_encoder', 'netG'
            ]
            G_param_groups = [{
                'params': self.modules[m].parameters()
            } for m in G_module_list if m in self.modules]
            self.optim_G = torch.optim.Adam(G_param_groups,
                                            lr=opt.lr,
                                            betas=(opt.beta1, opt.beta2))
            self.optimizers.append(self.optim_G)
            # D optimizer
            self.optim_D = torch.optim.Adam(self.netD.parameters(),
                                            lr=opt.lr_D,
                                            betas=(opt.beta1, opt.beta2))
            self.optimizers.append(self.optim_D)
            # feature transfer network optimizer
            FTN_module_list = ['edge_trans_net', 'color_trans_net']
            FTN_param_groups = [{
                'params': self.modules[m].parameters()
            } for m in FTN_module_list if m in self.modules]
            if len(FTN_param_groups) > 0:
                self.optim_FTN = torch.optim.Adam(FTN_param_groups,
                                                  lr=opt.lr_FTN,
                                                  betas=(0.9, 0.999))
                self.optimizers.append(self.optim_FTN)
            else:
                self.optim_FTN = None
            # schedulers
            for optim in self.optimizers:
                self.schedulers.append(networks.get_scheduler(optim, opt))
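
Once optimizers and schedulers live in parallel lists, as above, the per-epoch bookkeeping reduces to a short loop. A usage sketch, where train_one_epoch is a hypothetical helper that runs the model's per-batch forward/backward updates:

for epoch in range(1, opt.niter + opt.niter_decay + 1):  # option names assumed
    train_one_epoch(model)               # hypothetical per-batch updates
    for scheduler in model.schedulers:   # decay every learning rate in lockstep
        scheduler.step()
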
Example #22
0
    def __init__(self, p):

        super(CycleGAN, self).__init__(p)
        nb = p.batchSize
        size = p.cropSize

        # load/define models
        # The naming convention differs from the one used in the paper
        # Code (paper): G_A (G), G_B (F), D_A (D_Y), D_B (D_X)

        self.netG_A = networks.define_G(p.input_nc, p.output_nc, p.ngf,
                                        p.which_model_netG, p.norm,
                                        not p.no_dropout, p.init_type,
                                        self.gpu_ids)
        self.netG_B = networks.define_G(p.output_nc, p.input_nc, p.ngf,
                                        p.which_model_netG, p.norm,
                                        not p.no_dropout, p.init_type,
                                        self.gpu_ids)

        if self.isTrain:
            use_sigmoid = p.no_lsgan
            self.netD_A = networks.define_D(p.output_nc, p.ndf,
                                            p.which_model_netD, p.n_layers_D,
                                            p.norm, use_sigmoid, p.init_type,
                                            self.gpu_ids)
            self.netD_B = networks.define_D(p.input_nc, p.ndf,
                                            p.which_model_netD, p.n_layers_D,
                                            p.norm, use_sigmoid, p.init_type,
                                            self.gpu_ids)

        if not self.isTrain or p.continue_train:
            which_epoch = p.which_epoch
            self.load_model(self.netG_A, 'G_A', which_epoch)
            self.load_model(self.netG_B, 'G_B', which_epoch)
            if self.isTrain:
                self.load_model(self.netD_A, 'D_A', which_epoch)
                self.load_model(self.netD_B, 'D_B', which_epoch)

        if self.isTrain:
            self.old_lr = p.lr
            self.fake_A_pool = ImagePool(p.pool_size)
            self.fake_B_pool = ImagePool(p.pool_size)
            # define loss functions
            self.criterionGAN = networks.GANLoss(use_lsgan=not p.no_lsgan,
                                                 tensor=self.Tensor)
            self.criterionCycle = torch.nn.L1Loss()
            self.criterionIdt = torch.nn.L1Loss()

            # initialize optimizers
            self.optimizer_G = torch.optim.Adam(itertools.chain(
                self.netG_A.parameters(), self.netG_B.parameters()),
                                                lr=p.lr,
                                                betas=(p.beta1, 0.999))
            self.optimizer_D_A = torch.optim.Adam(self.netD_A.parameters(),
                                                  lr=p.lr,
                                                  betas=(p.beta1, 0.999))
            self.optimizer_D_B = torch.optim.Adam(self.netD_B.parameters(),
                                                  lr=p.lr,
                                                  betas=(p.beta1, 0.999))

            self.optimizers = []
            self.schedulers = []
            self.optimizers.append(self.optimizer_G)
            self.optimizers.append(self.optimizer_D_A)
            self.optimizers.append(self.optimizer_D_B)
            for optimizer in self.optimizers:
                self.schedulers.append(networks.get_scheduler(optimizer, p))
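
The fake_A_pool/fake_B_pool buffers follow Shrivastava et al.'s history trick: the discriminators are updated on a mixture of current and past generator outputs, which stabilizes adversarial training. A minimal single-image sketch of ImagePool (the real class processes batches image by image):

import random

class ImagePool:
    # Sketch of the fake-image history buffer used in these examples.
    def __init__(self, pool_size):
        self.pool_size = pool_size
        self.images = []

    def query(self, image):
        if self.pool_size == 0:               # buffer disabled: pass through
            return image
        if len(self.images) < self.pool_size:
            self.images.append(image)         # fill the buffer first
            return image
        if random.random() > 0.5:             # 50%: return an old fake instead
            idx = random.randrange(self.pool_size)
            old = self.images[idx].clone()
            self.images[idx] = image
            return old
        return image
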
Example #23
0
    def initialize(self, opt):
        super(EncoderDecoderFramework_DFN, self).initialize(opt)
        ###################################
        # define encoder
        ###################################
        self.encoder = networks.define_encoder_v2(opt)
        ###################################
        # define decoder
        ###################################
        self.decoder = networks.define_decoder_v2(opt)
        ###################################
        # guide encoder
        ###################################        
        self.guide_encoder, self.opt_guide = networks.load_encoder_v2(
            opt, opt.which_model_guide)
        self.guide_encoder.eval()
        for p in self.guide_encoder.parameters():
            p.requires_grad = False
        ###################################
        # DFN Modules
        ###################################
        if self.opt.use_dfn:
            self.dfn = networks.define_DFN_from_params(
                nf=opt.nof,
                ng=self.opt_guide.nof,
                nmid=opt.dfn_nmid,
                feat_size=opt.feat_size,
                local_size=opt.dfn_local_size,
                nblocks=opt.dfn_nblocks,
                norm=opt.norm,
                gpu_ids=opt.gpu_ids,
                init_type=opt.init_type)
        else:
            self.dfn = None
        ###################################
        # Discriminator
        ###################################
        self.use_GAN = self.is_train and opt.loss_weight_gan > 0
        if self.is_train:
            if self.use_GAN:
                # if not self.opt.D_cond:
                #     input_nc = self.decoder.output_nc
                # else:
                #     input_nc = self.decoder.output_nc + self.encoder.input_nc
                if self.opt.gan_level == 'image':
                    input_nc = self.decoder.output_nc
                elif self.opt.gan_level == 'feature':
                    input_nc = self.opt.nof
                if self.opt.D_cond:
                    if self.opt.D_cond_type == 'cond':
                        input_nc += self.encoder.input_nc
                    elif self.opt.D_cond_type == 'pair':
                        input_nc += input_nc
                self.netD = networks.define_D_from_params(
                    input_nc=input_nc,
                    ndf=64,
                    which_model_netD='n_layers',
                    n_layers_D=3,
                    norm=opt.norm,
                    which_gan=opt.which_gan,
                    init_type=opt.init_type,
                    gpu_ids=opt.gpu_ids)
            else:
                self.netD = None
        ###################################
        # loss functions
        ###################################
        if self.is_train:
            self.loss_functions = []
            self.schedulers = []
            self.crit_image = nn.L1Loss()
            self.crit_seg = nn.CrossEntropyLoss()
            self.crit_edge = nn.BCELoss()
            self.loss_functions += [self.crit_image, self.crit_seg, self.crit_edge]
            if self.opt.use_dfn:
                self.optim = torch.optim.Adam(
                    [{'params': self.encoder.parameters()},
                     {'params': self.decoder.parameters()},
                     {'params': self.dfn.parameters()}],
                    lr=opt.lr,
                    betas=(opt.beta1, opt.beta2))
            else:
                self.optim = torch.optim.Adam(
                    [{'params': self.encoder.parameters()},
                     {'params': self.decoder.parameters()}],
                    lr=opt.lr,
                    betas=(opt.beta1, opt.beta2))
            self.optimizers = [self.optim]
            # GAN loss and optimizers
            if self.use_GAN:
                self.crit_GAN = networks.GANLoss(
                    use_lsgan=opt.which_gan == 'lsgan', tensor=self.Tensor)
                self.optim_D = torch.optim.Adam(self.netD.parameters(),
                                                lr=opt.lr_D,
                                                betas=(0.5, 0.999))
                self.loss_functions += [self.crit_GAN]
                self.optimizers += [self.optim_D]

            for optim in self.optimizers:
                self.schedulers.append(networks.get_scheduler(optim, opt))

        ###################################
        # load trained model
        ###################################
        if not self.is_train:
            self.load_network(self.encoder, 'encoder', opt.which_epoch)
            self.load_network(self.decoder, 'decoder', opt.which_epoch)
            if opt.use_dfn:
                self.load_network(self.dfn, 'dfn', opt.which_epoch)
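
The input_nc bookkeeping for the discriminator above is compact but easy to misread. A concrete trace, assuming gan_level == 'image' with a 3-channel decoder output and a 3-channel encoder input:

# gan_level 'image'                 -> input_nc = 3            (decoder output)
# + D_cond with D_cond_type 'cond'  -> input_nc = 3 + 3 = 6    (output + condition)
# + D_cond with D_cond_type 'pair'  -> input_nc = 3 + 3 = 6    (channels doubled: a real/fake pair)
# gan_level 'feature'               -> starts from opt.nof instead of 3
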
Example #24
0
netD_B = networks.define_D(opt.input_nc, opt.ndf, opt.netD, opt.n_layers_D,
                           opt.norm, False)

device = 'cpu'
if opt.cuda:
    netG.cuda()
    netD_B.cuda()
    device = 'cuda'

# netG.apply(weights_init_normal)
# netD_A.apply(weights_init_normal)
netD_B.apply(weights_init_normal)

# Losses

criterionGAN = networks.GANLoss().to(device)
criterion_MSE = torch.nn.MSELoss(reduction='none')  # elementwise; replaces the deprecated reduce=False
criterion_cycle = torch.nn.L1Loss()
criterion_identity = torch.nn.L1Loss()

############### only use B to A ###########################
optimizer_G = torch.optim.Adam(netG.parameters(),
                               lr=opt.lr,
                               betas=(0.5, 0.999))
optimizer_D_B = torch.optim.Adam(netD_B.parameters(),
                                 lr=opt.lr,
                                 betas=(0.5, 0.999))

lr_scheduler_G = torch.optim.lr_scheduler.LambdaLR(optimizer_G,
                                                   lr_lambda=LambdaLR(
                                                       opt.n_epochs, opt.epoch,
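
The call above is truncated mid-expression. In CycleGAN-style training scripts this LambdaLR helper usually keeps the rate flat until a decay epoch and then ramps it linearly to zero; a common definition, sketched to match the (n_epochs, starting epoch, ...) arguments above, with the decay epoch assumed as the missing third argument:

class LambdaLR:
    # Flat lr until decay_start_epoch, then linear decay to zero at n_epochs.
    def __init__(self, n_epochs, offset, decay_start_epoch):
        assert (n_epochs - decay_start_epoch) > 0, "decay must start before the end"
        self.n_epochs = n_epochs
        self.offset = offset
        self.decay_start_epoch = decay_start_epoch

    def step(self, epoch):
        overshoot = max(0, epoch + self.offset - self.decay_start_epoch)
        return 1.0 - overshoot / (self.n_epochs - self.decay_start_epoch)
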
Example #25
0
    def initialize(self, opt):
        super(MultimodalDesignerGAN, self).initialize(opt)
        ###################################
        # load/define networks
        ###################################

        # basic G
        self.netG = networks.define_G(opt)

        # encoders
        self.encoders = {}
        if opt.use_edge:
            self.edge_encoder = networks.define_image_encoder(opt, 'edge')
            self.encoders['edge_encoder'] = self.edge_encoder
        if opt.use_color:
            self.color_encoder = networks.define_image_encoder(opt, 'color')
            self.encoders['color_encoder'] = self.color_encoder
        if opt.use_attr:
            self.attr_encoder, self.opt_AE = network_loader.load_attribute_encoder_net(
                id=opt.which_model_AE, gpu_ids=opt.gpu_ids)

        # basic D and auxiliary Ds
        if self.is_train:
            # basic D
            self.netD = networks.define_D(opt)
            # auxiliary Ds
            self.auxiliaryDs = {}
            if opt.use_edge_D:
                assert opt.use_edge
                self.netD_edge = networks.define_D_from_params(
                    input_nc=opt.edge_nof + 3,
                    ndf=opt.ndf,
                    which_model_netD=opt.which_model_netD,
                    n_layers_D=opt.n_layers_D,
                    norm=opt.norm,
                    which_gan='dcgan',
                    init_type=opt.init_type,
                    gpu_ids=opt.gpu_ids)
                self.auxiliaryDs['D_edge'] = self.netD_edge
            if opt.use_color_D:
                assert opt.use_color
                self.netD_color = networks.define_D_from_params(
                    input_nc=opt.color_nof + 3,
                    ndf=opt.ndf,
                    which_model_netD=opt.which_model_netD,
                    n_layers_D=opt.n_layers_D,
                    norm=opt.norm,
                    which_gan='dcgan',
                    init_type=opt.init_type,
                    gpu_ids=opt.gpu_ids)
                self.auxiliaryDs['D_color'] = self.netD_color
            if opt.use_attr_D:
                assert opt.use_attr
                attr_nof = (opt.n_attr_feat
                            if opt.attr_cond_type in {'feat', 'feat_map'}
                            else opt.n_attr)
                self.netD_attr = networks.define_D_from_params(
                    input_nc=attr_nof + 3,
                    ndf=opt.ndf,
                    which_model_netD=opt.which_model_netD,
                    n_layers_D=opt.n_layers_D,
                    norm=opt.norm,
                    which_gan='dcgan',
                    init_type=opt.init_type,
                    gpu_ids=opt.gpu_ids)
                self.auxiliaryDs['D_attr'] = self.netD_attr
            # load weights
            if not opt.continue_train:
                if opt.which_model_init != 'none':
                    self.load_network(self.netG, 'G', 'latest',
                                      opt.which_model_init)
                    self.load_network(self.netD, 'D', 'latest',
                                      opt.which_model_init)
                    for l, net in self.encoders.items():
                        self.load_network(net, l, 'latest',
                                          opt.which_model_init)
                    for l, net in self.auxiliaryDs.items():
                        self.load_network(net, l, 'latest',
                                          opt.which_model_init)
            else:
                self.load_network(self.netG, 'G', opt.which_epoch)
                self.load_network(self.netD, 'D', opt.which_epoch)
                for l, net in self.encoders.items():
                    self.load_network(net, l, opt.which_epoch)
                for l, net in self.auxiliaryDs.items():
                    self.load_network(net, l, opt.which_epoch)
        else:
            self.load_network(self.netG, 'G', opt.which_epoch)
            for l, net in self.encoders.items():
                self.load_network(net, l, opt.which_epoch)

        if self.is_train:
            self.fake_pool = ImagePool(opt.pool_size)
            ###################################
            # define loss functions and loss buffers
            ###################################
            self.loss_functions = []
            if opt.which_gan in {'dcgan', 'lsgan'}:
                self.crit_GAN = networks.GANLoss(
                    use_lsgan=opt.which_gan == 'lsgan', tensor=self.Tensor)
            else:
                # WGAN loss will be calculated in self.backward_D_wgangp and self.backward_G
                self.crit_GAN = None

            self.loss_functions.append(self.crit_GAN)

            self.crit_L1 = nn.L1Loss()
            self.loss_functions.append(self.crit_L1)

            if self.opt.loss_weight_vgg > 0:
                self.crit_vgg = networks.VGGLoss(self.gpu_ids)
                self.loss_functions.append(self.crit_vgg)

            self.crit_psnr = networks.SmoothLoss(networks.PSNR())
            self.loss_functions.append(self.crit_psnr)
            ###################################
            # create optimizers
            ###################################
            self.schedulers = []
            self.optimizers = []

            # optim_G will optimize parameters of netG and all image encoders (except attr_encoder)
            G_param_groups = [{'params': self.netG.parameters()}]
            for l, net in self.encoders.items():
                G_param_groups.append({'params': net.parameters()})
            self.optim_G = torch.optim.Adam(G_param_groups,
                                            lr=opt.lr,
                                            betas=(opt.beta1, opt.beta2))
            self.optimizers.append(self.optim_G)
            # optim_D will optimize parameters of netD
            self.optim_D = torch.optim.Adam(self.netD.parameters(),
                                            lr=opt.lr_D,
                                            betas=(opt.beta1, opt.beta2))
            self.optimizers.append(self.optim_D)
            # optim_D_aux will optimize parameters of auxiliaryDs
            if len(self.auxiliaryDs) > 0:
                aux_D_param_groups = [{
                    'params': net.parameters()
                } for net in self.auxiliaryDs.values()]
                self.optim_D_aux = torch.optim.Adam(aux_D_param_groups,
                                                    lr=opt.lr_D,
                                                    betas=(opt.beta1,
                                                           opt.beta2))
                self.optimizers.append(self.optim_D_aux)
            for optim in self.optimizers:
                self.schedulers.append(networks.get_scheduler(optim, opt))

        # color transformation from std to imagenet
        # img_imagenet = img_std * a + b
        self.trans_std_to_imagenet = {
            'a': Variable(self.Tensor([0.5 / 0.229, 0.5 / 0.224, 0.5 / 0.225]),
                          requires_grad=False).view(3, 1, 1),
            'b': Variable(self.Tensor([(0.5 - 0.485) / 0.229,
                                       (0.5 - 0.456) / 0.224,
                                       (0.5 - 0.406) / 0.225]),
                          requires_grad=False).view(3, 1, 1),
        }
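
The 'a'/'b' constants encode the composition of two normalizations. If img_std = (img - 0.5) / 0.5 and img_imagenet = (img - mean) / std with the standard ImageNet statistics, substituting gives img_imagenet = img_std * (0.5 / std) + (0.5 - mean) / std, which is exactly the pair of tensors built above. A one-channel numeric check:

# Red channel: ImageNet mean 0.485, std 0.229 (the values used above).
mean, std = 0.485, 0.229
a, b = 0.5 / std, (0.5 - mean) / std
img = 0.7                        # any raw pixel value in [0, 1]
img_std = (img - 0.5) / 0.5      # after Normalize(0.5, 0.5)
assert abs(img_std * a + b - (img - mean) / std) < 1e-9
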
Example #26
0
    def TrainGenerator(real_A: tp.Numpy.Placeholder(
        (1, 3, args.crop_size, args.crop_size), dtype=flow.float32),
                       real_B: tp.Numpy.Placeholder(
                           (1, 3, args.crop_size, args.crop_size),
                           dtype=flow.float32)):
        with flow.scope.placement("gpu", "0:0-0"):
            # G_A(A)
            fake_B = networks.define_G(real_A,
                                       "netG_A",
                                       ngf=args.ngf,
                                       n_blocks=9,
                                       trainable=True,
                                       reuse=True)
            # G_B(G_A(A))
            rec_A = networks.define_G(fake_B,
                                      "netG_B",
                                      ngf=args.ngf,
                                      n_blocks=9,
                                      trainable=True,
                                      reuse=True)
            # G_B(B)
            fake_A = networks.define_G(real_B,
                                       "netG_B",
                                       ngf=args.ngf,
                                       n_blocks=9,
                                       trainable=True,
                                       reuse=True)
            # G_A(G_B(B))
            rec_B = networks.define_G(fake_A,
                                      "netG_A",
                                      ngf=args.ngf,
                                      n_blocks=9,
                                      trainable=True,
                                      reuse=True)

            # Identity loss
            # G_A should be identity if real_B is fed: ||G_A(B) - B||
            idt_A = networks.define_G(real_B,
                                      "netG_A",
                                      ngf=args.ngf,
                                      n_blocks=9,
                                      trainable=True,
                                      reuse=True)
            loss_idt_A = networks.L1Loss(
                idt_A - real_B) * args.lambda_B * args.lambda_identity

            # G_B should be identity if real_A is fed: ||G_B(A) - A||
            idt_B = networks.define_G(real_A,
                                      "netG_B",
                                      ngf=args.ngf,
                                      n_blocks=9,
                                      trainable=True,
                                      reuse=True)
            loss_idt_B = networks.L1Loss(
                idt_B - real_A) * args.lambda_A * args.lambda_identity

            # GAN loss D_A(G_A(A))
            netD_A_out = networks.define_D(fake_B,
                                           "netD_A",
                                           ndf=args.ndf,
                                           n_layers_D=3,
                                           trainable=False,
                                           reuse=True)
            loss_G_A = networks.GANLoss(netD_A_out, True)

            # GAN loss D_B(G_B(B))
            netD_B_out = networks.define_D(fake_A,
                                           "netD_B",
                                           ndf=args.ndf,
                                           n_layers_D=3,
                                           trainable=False,
                                           reuse=True)
            loss_G_B = networks.GANLoss(netD_B_out, True)

            # Forward cycle loss || G_B(G_A(A)) - A||
            loss_cycle_A = networks.L1Loss(rec_A - real_A) * args.lambda_A
            # Backward cycle loss || G_A(G_B(B)) - B||
            loss_cycle_B = networks.L1Loss(rec_B - real_B) * args.lambda_B
            # combined loss and calculate gradients
            loss_G = loss_G_A + loss_G_B + loss_cycle_A + loss_cycle_B + loss_idt_A + loss_idt_B

            flow.optimizer.Adam(flow.optimizer.PiecewiseConstantScheduler(
                [], [args.learning_rate]),
                                beta1=0.5).minimize(loss_G)

        return fake_B, rec_A, fake_A, rec_B, loss_G
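
A OneFlow global function like TrainGenerator is driven from ordinary Python. A minimal loop, sketched under the assumption of a hypothetical load_batch() that yields float32 numpy arrays of shape (1, 3, crop_size, crop_size); .get() here is assumed to materialize the returned blobs as numpy arrays:

for step in range(total_steps):                      # total_steps: assumed config
    real_A, real_B = load_batch()                    # hypothetical data loader
    fake_B, rec_A, fake_A, rec_B, loss_G = TrainGenerator(real_A, real_B).get()
    if step % 100 == 0:
        print("step %d  loss_G %.4f" % (step, loss_G.mean()))
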
Example #27
0
import torch.nn.functional as F
import torch
import numpy as np

# Panorama parameters
batch_size = 1
NUM_SLICES = 3
SAMPLES = 5  # Number of horizon offsets to sample before finetuning

# Set up deep learning stuff
device = torch.device("cuda")
model = networks.define_D(6, 64, 'n_layers', n_layers_D=3, use_sigmoid=False)
chkpt = torch.load('checkpoints/patch_horizon/49.pth')
model.load_state_dict(chkpt['state_dict'])
model.to(device)
patch_loss = networks.GANLoss()

# Create dataset
dataset = EvalDataset()
#dataset.initialize('../../../data/semanticLandscapes512/train_img', allrandom=True, return_idx=True)
#dataset.initialize('../../../data/MITCVCL/coast', allrandom=True)
dataset.initialize('../../../data/instagram_landscapes/anne_karin_69',
                   allrandom=True)


def convertImage(im):
    '''
    Takes the output of the dataset and returns a numpy image.
    '''
    # undo right image flip when cat-ing
    im = np.concatenate((im[:3, :, :], im[3:, :, ::-1]), 2)
Example #28
0
    def __init__(
        self, name="experiment", phase="train", which_epoch="latest",
        batch_size=1, image_size=128, map_nc=1, input_nc=3, output_nc=3,
        num_downs=7, ngf=64, ndf=64, norm_layer="batch", pool_size=50,
        lr=0.0002, beta1=0.5, lambda_D=0.5, lambda_MSE=10,
        lambda_P=5.0, use_dropout=True, gpu_ids=[], n_layers=3,
        use_sigmoid=False, use_lsgan=True, upsampling="nearest",
        continue_train=False, checkpoints_dir="checkpoints/"
    ):
        # Define input data that will be consumed by networks
        self.input_A = torch.FloatTensor(
            batch_size, 3, image_size, image_size
        )
        self.input_map = torch.FloatTensor(
            batch_size, map_nc, image_size, image_size
        )
        norm_layer = (nn.BatchNorm2d
                      if norm_layer == "batch" else nn.InstanceNorm2d)

        # Define netD and netG
        self.netG = networks.UnetGenerator(
            input_nc=input_nc, output_nc=map_nc,
            num_downs=num_downs, ngf=ngf,
            use_dropout=use_dropout, gpu_ids=gpu_ids, norm_layer=norm_layer,
            upsampling_layer=upsampling
        )
        self.netD = networks.NLayerDiscriminator(
            input_nc=input_nc + map_nc, ndf=ndf,
            n_layers=n_layers, use_sigmoid=use_sigmoid, gpu_ids=gpu_ids
        )

        # Transfer data to GPU
        if len(gpu_ids) > 0:
            self.input_A = self.input_A.cuda()
            self.input_map = self.input_map.cuda()
            self.netD.cuda()
            self.netG.cuda()

        # Initialize parameters of netD and netG
        self.netG.apply(networks.weights_init)
        self.netD.apply(networks.weights_init)

        # Load trained netD and netG
        if phase == "test" or continue_train:
            netG_checkpoint_file = os.path.join(
                checkpoints_dir, name, "netG_{}.pth".format(which_epoch)
            )
            self.netG.load_state_dict(
                torch.load(netG_checkpoint_file)
            )
            print("Restoring netG from {}".format(netG_checkpoint_file))

        if continue_train:
            netD_checkpoint_file = os.path.join(
                checkpoints_dir, name, "netD_{}.pth".format(which_epoch)
            )
            self.netD.load_state_dict(
                torch.load(netD_checkpoint_file)
            )
            print("Restoring netD from {}".format(netD_checkpoint_file))

        self.name = name
        self.gpu_ids = gpu_ids
        self.checkpoints_dir = checkpoints_dir

        # Criterions
        if phase == "train":
            self.count = 0
            self.lr = lr
            self.lambda_D = lambda_D
            self.lambda_MSE = lambda_MSE

            self.image_pool = ImagePool(pool_size)
            self.criterionGAN = networks.GANLoss(use_lsgan=use_lsgan)
            self.criterionL1 = torch.nn.L1Loss()
            self.criterionMSE = torch.nn.MSELoss()  # Landmark loss

            self.optimizer_G = torch.optim.Adam(
                self.netG.parameters(), lr=self.lr, betas=(beta1, 0.999)
            )
            self.optimizer_D = torch.optim.Adam(
                self.netD.parameters(), lr=self.lr, betas=(beta1, 0.999)
            )

            print('---------- Networks initialized -------------')
            networks.print_network(self.netG)
            networks.print_network(self.netD)
            print('-----------------------------------------------')
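
With the pool, criteria, and optimizers defined above, a conditional-GAN discriminator update typically pairs the input image with either the real map or a pooled fake map. A sketch using the attributes from this example (the forward-call details and helper placement are assumptions, not this model's exact method):

# One discriminator step (sketch; assumes input_A/input_map are filled
# and netG predicts a map from input_A).
fake_map = self.netG(self.input_A).detach()          # stop gradients into G
fake_AB = self.image_pool.query(
    torch.cat((self.input_A, fake_map), dim=1))
real_AB = torch.cat((self.input_A, self.input_map), dim=1)

loss_D = self.lambda_D * (
    self.criterionGAN(self.netD(fake_AB), False) +
    self.criterionGAN(self.netD(real_AB), True))

self.optimizer_D.zero_grad()
loss_D.backward()
self.optimizer_D.step()
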