Example #1
    def init_models(self):
        """ Models: G_UM, G_MU, D_M, D_U """
        # Networks
        self.G_UM = networks.define_G(input_nc=1, output_nc=1, ngf=self.config.g_conv_dim,
                                      which_model_netG=self.config.which_model_netG, norm='batch', init_type='normal',
                                      gpu_ids=self.gpu_ids)
        self.G_MU = networks.define_G(input_nc=1, output_nc=1, ngf=self.config.g_conv_dim,
                                      which_model_netG=self.config.which_model_netG, norm='batch', init_type='normal',
                                      gpu_ids=self.gpu_ids)
        self.D_M = networks.define_D(input_nc=1, ndf=self.config.d_conv_dim,
                                     which_model_netD=self.config.which_model_netD,
                                     n_layers_D=3, norm='instance', use_sigmoid=True, init_type='normal',
                                     gpu_ids=self.gpu_ids)
        self.D_U = networks.define_D(input_nc=1, ndf=self.config.d_conv_dim,
                                     which_model_netD=self.config.which_model_netD,
                                     n_layers_D=3, norm='instance', use_sigmoid=True, init_type='normal',
                                     gpu_ids=self.gpu_ids)

        # Optimisers
        # single optimiser for both generators
        self.G_optim = optim.Adam(itertools.chain(self.G_UM.parameters(), self.G_MU.parameters()),
                                  lr=self.config.lr, betas=(self.config.beta1, self.config.beta2))
        self.D_M_optim = optim.Adam(self.D_M.parameters(),
                                    lr=self.config.lr, betas=(self.config.beta1, self.config.beta2))
        self.D_U_optim = optim.Adam(self.D_U.parameters(),
                                    lr=self.config.lr, betas=(self.config.beta1, self.config.beta2))
        self.optimizers = [self.G_optim, self.D_M_optim, self.D_U_optim]

        # Schedulers
        self.schedulers = []
        for optimizer in self.optimizers:
            self.schedulers.append(networks.get_scheduler(optimizer, self.config))
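A minimal usage sketch to go with the schedulers above (an addition, assuming networks.get_scheduler returns standard torch.optim.lr_scheduler objects; update_learning_rate is a hypothetical helper name):

    def update_learning_rate(self):
        # Step every scheduler once per epoch, after the optimizer updates.
        for scheduler in self.schedulers:
            scheduler.step()
        # Report the current generator learning rate.
        print('learning rate = %.7f' % self.optimizers[0].param_groups[0]['lr'])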
Example #2
    def TrainDiscriminator(
        real_A: tp.Numpy.Placeholder((1, 3, args.crop_size, args.crop_size), dtype=flow.float32),
        fake_A: tp.Numpy.Placeholder((1, 3, args.crop_size, args.crop_size), dtype=flow.float32),
        real_B: tp.Numpy.Placeholder((1, 3, args.crop_size, args.crop_size), dtype=flow.float32),
        fake_B: tp.Numpy.Placeholder((1, 3, args.crop_size, args.crop_size), dtype=flow.float32),
    ):
        with flow.scope.placement("gpu", "0:0-0"):
            # Calculate GAN loss for discriminator D_A
            # Real
            pred_real_B = networks.define_D(real_B,
                                            "netD_A",
                                            ndf=args.ndf,
                                            n_layers_D=3,
                                            trainable=True,
                                            reuse=True)
            loss_D_A_real = networks.GANLoss(pred_real_B, True)
            # Fake
            pred_fake_B = networks.define_D(fake_B,
                                            "netD_A",
                                            ndf=args.ndf,
                                            n_layers_D=3,
                                            trainable=True,
                                            reuse=True)
            loss_D_A_fake = networks.GANLoss(pred_fake_B, False)
            # Combined loss and calculate gradients
            loss_D_A = (loss_D_A_real + loss_D_A_fake) * 0.5

            # Calculate GAN loss for discriminator D_B
            # Real
            pred_real_A = networks.define_D(real_A,
                                            "netD_B",
                                            ndf=args.ndf,
                                            n_layers_D=3,
                                            trainable=True,
                                            reuse=True)
            loss_D_B_real = networks.GANLoss(pred_real_A, True)
            # Fake
            pred_fake_A = networks.define_D(fake_A,
                                            "netD_B",
                                            ndf=args.ndf,
                                            n_layers_D=3,
                                            trainable=True,
                                            reuse=True)
            loss_D_B_fake = networks.GANLoss(pred_fake_A, False)
            # Combined loss and calculate gradients
            loss_D_B = (loss_D_B_real + loss_D_B_fake) * 0.5

            loss_D = loss_D_A + loss_D_B

            flow.optimizer.Adam(
                flow.optimizer.PiecewiseConstantScheduler([], [args.learning_rate]),
                beta1=0.5).minimize(loss_D)

        return loss_D
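For context (not shown in the snippet): in OneFlow's legacy lazy API, a function like this is normally registered with @flow.global_function(type="train") and then fed NumPy arrays matching the declared placeholder shapes. A hedged sketch of the call site, under that assumption:

import numpy as np

# Assumed registration; the function above would carry this decorator:
# @flow.global_function(type="train")

shape = (1, 3, args.crop_size, args.crop_size)
real_A = np.random.rand(*shape).astype(np.float32)
fake_A = np.random.rand(*shape).astype(np.float32)
real_B = np.random.rand(*shape).astype(np.float32)
fake_B = np.random.rand(*shape).astype(np.float32)

# Most 0.x releases return the loss directly as a NumPy array
# (earlier releases returned a future that needed .get()).
loss_D = TrainDiscriminator(real_A, fake_A, real_B, fake_B)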
Example #3
    def __init__(self, opt):
        """Initialize the CycleGAN class.

        Parameters:
            opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
        """
        BaseModel.__init__(self, opt)
        # specify the training losses you want to print out. The training/test scripts will call <BaseModel.get_current_losses>
        self.loss_names = ['D_A', 'G_A', 'cycle_A', 'idt_A', 'D_B', 'G_B', 'cycle_B', 'idt_B']
        # # specify the images you want to save/display. The training/test scripts will call <BaseModel.get_current_visuals>
        # visual_names_A = ['real_A', 'fake_B', 'rec_A']
        # visual_names_B = ['real_B', 'fake_A', 'rec_B']
        # if self.isTrain and self.opt.lambda_identity > 0.0:  # if identity loss is used, we also visualize idt_B=G_B(A) and idt_A=G_A(B)
        #     visual_names_A.append('idt_B')
        #     visual_names_B.append('idt_A')
        #
        # self.visual_names = visual_names_A + visual_names_B  # combine visualizations for A and B
        # specify the models you want to save to the disk. The training/test scripts will call <BaseModel.save_networks> and <BaseModel.load_networks>.
        if self.isTrain:
            self.model_names = ['G_A', 'G_B', 'D_A', 'D_B']
        else:  # during test time, only load Gs
            self.model_names = ['G_A', 'G_B']

        # define networks (both Generators and discriminators)
        # Code (vs. paper): G_A (G), G_B (F), D_A (D_Y), D_B (D_X)
        # input file shape (batch, length, dims-256)
        self.netG_A = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, opt.norm,
                                        not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)
        self.netG_B = networks.define_G(opt.output_nc, opt.input_nc, opt.ngf, opt.netG, opt.norm,
                                        not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)

        if self.isTrain:  # define discriminators
            self.netD_A = networks.define_D(opt.output_nc, opt.ndf, opt.netD,
                                            opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids)
            self.netD_B = networks.define_D(opt.input_nc, opt.ndf, opt.netD,
                                            opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids)

        if self.isTrain:
            if opt.lambda_identity > 0.0:  # only works when input and output images have the same number of channels
                assert(opt.input_nc == opt.output_nc)
            self.fake_A_pool = AudioPool(opt.pool_size)  # create image buffer to store previously generated images
            self.fake_B_pool = AudioPool(opt.pool_size)  # create image buffer to store previously generated images
            # define loss functions
            self.criterionGAN = networks.GANLoss(opt.gan_mode).to(self.device)  # define GAN loss.
            self.criterionCycle = nn.L1Loss()
            self.criterionIdt = nn.L1Loss()
            # initialize optimizers; schedulers will be automatically created by function <BaseModel.setup>.
            self.optimizer_G = torch.optim.Adam(itertools.chain(self.netG_A.parameters(), self.netG_B.parameters()), lr=opt.lr, betas=(opt.beta1, 0.999))
            self.optimizer_D = torch.optim.Adam(itertools.chain(self.netD_A.parameters(), self.netD_B.parameters()), lr=opt.lr, betas=(opt.beta1, 0.999))
            self.optimizers.append(self.optimizer_G)
            self.optimizers.append(self.optimizer_D)
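As a reminder of how the pools above are typically consumed (this mirrors the reference CycleGAN implementation rather than code shown here, and assumes AudioPool exposes the same query() interface as the stock ImagePool):

    def backward_D_A(self):
        # Query the buffer: returns either the fresh fake_B or a stored one,
        # so D_A sometimes sees older generator outputs (stabilises training).
        fake_B = self.fake_B_pool.query(self.fake_B)
        loss_D_real = self.criterionGAN(self.netD_A(self.real_B), True)
        loss_D_fake = self.criterionGAN(self.netD_A(fake_B.detach()), False)
        self.loss_D_A = (loss_D_real + loss_D_fake) * 0.5
        self.loss_D_A.backward()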
Example #4
    def initialize(self, opt, writer=None):
        BaseModel.initialize(self, opt)
        self.writer = writer
        self.num_step = 0
        self.opt = opt
        if self.opt.use_lbp_network:
            self.model_names = ['G', 'LBP', 'D', 'D2']
        else:
            self.model_names = ['G', 'D']

        self.netG = networks.define_G(self.opt)
        if self.opt.use_lbp_network:
            self.netLBP = networks.define_LBP(self.opt)
        self.netD = networks.define_D(
            opt.input_nc, opt.ndf, self.opt.device)  # Discriminator for netG
        if self.opt.use_lbp_network:
            self.netD2 = networks.define_D(
                opt.input_nc - 2, opt.ndf,
                self.opt.device)  # Discriminator for netLBP

        self.vgg16_extractor = util.VGG16FeatureExtractor().to(self.opt.device)

        self.criterionGAN = networks.GANLoss(gan_type=opt.gan_type).to(
            self.opt.device)
        self.criterionL1 = torch.nn.L1Loss()
        self.criterionL2 = torch.nn.MSELoss()
        self.criterionL1_mask = networks.Discounted_L1(opt).to(self.opt.device)

        self.criterionL2_style_loss = torch.nn.MSELoss()
        self.criterionL2_perceptual_loss = torch.nn.MSELoss()

        self.optimizer_G = torch.optim.Adam(self.netG.parameters(),
                                            lr=opt.lr,
                                            betas=(0.5, 0.999))
        if self.opt.use_lbp_network:
            self.optimizer_LBP = torch.optim.Adam(self.netLBP.parameters(),
                                                  lr=opt.lr,
                                                  betas=(0.5, 0.999))
        self.optimizer_D = torch.optim.Adam(self.netD.parameters(),
                                            lr=opt.lr,
                                            betas=(0.5, 0.999))

        if self.opt.use_lbp_network:
            self.optimizer_D2 = torch.optim.Adam(self.netD2.parameters(),
                                                 lr=opt.lr,
                                                 betas=(0.5, 0.999))

        _, self.rand_t, self.rand_l = util.create_rand_mask(self.opt)
Example #5
    def init_models(self):
        # Networks
        self.G_AB = networks.define_G(
            input_nc=self.config.input_nc,
            output_nc=self.config.output_nc,
            ngf=self.config.g_conv_dim,
            which_model_netG=self.config.which_model_netG,
            norm='batch',
            init_type='normal',
            gpu_ids=self.gpu_ids)
        self.D_B = networks.define_D(
            input_nc=self.config.input_nc,
            ndf=self.config.d_conv_dim,
            which_model_netD=self.config.which_model_netD,
            n_layers_D=3,
            norm='instance',
            use_sigmoid=True,
            init_type='normal',
            gpu_ids=self.gpu_ids,
            image_size=self.config.image_size)

        # Optimisers
        self.G_optim = optim.Adam(self.G_AB.parameters(),
                                  lr=self.config.lr,
                                  betas=(self.config.beta1, self.config.beta2))
        self.D_optim = optim.Adam(self.D_B.parameters(),
                                  lr=self.config.lr,
                                  betas=(self.config.beta1, self.config.beta2))
        self.optimizers = [self.G_optim, self.D_optim]

        # Schedulers
        self.schedulers = []
        for optimizer in self.optimizers:
            self.schedulers.append(
                networks.get_scheduler(optimizer, self.config))
Example #6
    def __init__(self, args):
        super().__init__(args)

        if args.mode == 'train':
            self.D = define_D(args)
            self.D = self.D.to(self.device)

            self.fake_right_pool = ImagePool(50)

            self.criterion = define_generator_loss(args)
            self.criterion = self.criterion.to(self.device)
            self.criterionGAN = define_discriminator_loss(args)
            self.criterionGAN = self.criterionGAN.to(self.device)

            self.optimizer_G = optim.Adam(self.G.parameters(),
                                          lr=args.learning_rate)
            self.optimizer_D = optim.SGD(self.D.parameters(),
                                         lr=args.learning_rate)

        # Load the correct networks, depending on which mode we are in.
        if args.mode == 'train':
            self.model_names = ['G', 'D']
            self.optimizer_names = ['G', 'D']
        else:
            self.model_names = ['G']

        self.loss_names = ['G', 'G_MonoDepth', 'G_GAN', 'D']
        self.losses = {}

        if self.args.resume:
            self.load_checkpoint()

        if 'cuda' in self.device:
            torch.cuda.synchronize()
Example #7
    def __init__(self, args):
        super().__init__(args)

        if args.mode == 'train':
            self.D = define_D(args)
            self.D = self.D.to(self.device)

            self.fake_right_pool = ImagePool(50)

            self.criterionMonoDepth = define_generator_loss(args)
            self.criterionMonoDepth = self.criterionMonoDepth.to(self.device)

            self.criterionGAN = define_discriminator_loss(args)
            self.criterionGAN = self.criterionGAN.to(self.device)

        # Load the correct networks, depending on which mode we are in.
        if args.mode == 'train':
            self.model_names = ['G', 'D']
            self.optimizer_names = ['G', 'D']
        else:
            self.model_names = ['G']

        self.loss_names = ['G', 'D']

        # Resume training is supported for this architecture.
        if args.resume != '':
            self.load_checkpoint(load_optim=False)

        if args.mode == 'train':
            # After resuming, set new optimizers.
            self.optimizer_G = optim.SGD(self.G.parameters(),
                                         lr=args.learning_rate)
            self.optimizer_D = optim.SGD(self.D.parameters(),
                                         lr=args.learning_rate)

            # Reset epoch.
            self.start_epoch = 0

        self.trainG = True
        self.count_trained_G = 0
        self.count_trained_D = 0
        self.regime = args.resume_regime

        if 'cuda' in self.device:
            torch.cuda.synchronize()
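The trainG flag and the two counters suggest an alternating G/D schedule driven by args.resume_regime. A speculative sketch of one such policy, assuming regime is a pair [g_steps, d_steps] (both the assumption and the helper name are hypothetical):

    def switch_training_target(self):
        # Train G for regime[0] consecutive steps, then D for regime[1].
        if self.trainG:
            self.count_trained_G += 1
            if self.count_trained_G >= self.regime[0]:
                self.trainG, self.count_trained_G = False, 0
        else:
            self.count_trained_D += 1
            if self.count_trained_D >= self.regime[1]:
                self.trainG, self.count_trained_D = True, 0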
Example #8
    def __init__(self, input_nc):
        super(FogNet, self).__init__()
        self.atmconv1x1 = nn.Conv2d(input_nc,
                                    input_nc,
                                    kernel_size=1,
                                    stride=1,
                                    padding=0)
        self.atmnet = define_D(input_nc,
                               64,
                               'n_estimator',
                               n_layers_D=5,
                               norm='batch',
                               use_sigmoid=True,
                               gpu_ids=[])
        self.transnet = TransUNet(input_nc, 1)
        self.htanh = nn.Hardtanh(0, 1)
        self.relu1 = ReLU1()
        self.relu = nn.ReLU()
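Given the branch names, the forward pass presumably composes fog via the standard atmospheric scattering model I = J·t + A·(1 − t), with transnet predicting the transmission map t and atmnet (a define_D network here) estimating the airlight A. A speculative sketch; the real forward() is not shown and may differ:

    def forward(self, clear_img):
        # Hypothetical composition under the scattering model (an assumption).
        t = self.relu1(self.transnet(clear_img))                  # transmission in [0, 1]
        A = self.htanh(self.atmnet(self.atmconv1x1(clear_img)))   # airlight estimate
        foggy = clear_img * t + A * (1.0 - t)
        return foggy, t, A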
Example #9
    def initialize(self, opt, train_mode=True):
        # Model transforms from A --> B and uses Adv as the
        # adversarial example.
        BaseModel.initialize(self, opt)
        self.train_mode = train_mode
        # define tensors
        self.input_B = self.Tensor(opt['batchSize'], opt['input_nc'],
                                   opt['B_height'], opt['B_width'])

        self.input_A = self.Tensor(opt['batchSize'], opt['output_nc'],
                                   opt['A_height'], opt['A_width'])

        # load/define networks
        self.netG = networks.define_G(opt['input_nc'], opt['output_nc'],
                                      opt['ngf'], opt['norm'], self.gpu_ids)

        if self.train_mode:
            use_sigmoid = opt['no_lsgan']
            self.netD = networks.define_D(opt['input_nc'] + opt['output_nc'],
                                          opt['ndf'], opt['which_model_netD'],
                                          opt['n_layers_D'], use_sigmoid,
                                          self.gpu_ids)

        if self.train_mode:
            # self.fake_AB_pool = ImagePool(opt['pool_size'])
            self.old_lr = opt['lr']
            # define loss functions
            self.criterionGAN = networks.GANLoss(use_lsgan=not opt['no_lsgan'],
                                                 tensor=self.Tensor)
            self.content_loss = torch.nn.MSELoss()

            # initialize optimizers
            self.optimizer_G = torch.optim.Adam(self.netG.parameters(),
                                                lr=opt['lr'],
                                                betas=(opt['beta1'], 0.999))
            self.optimizer_D = torch.optim.Adam(self.netD.parameters(),
                                                lr=opt['lr'],
                                                betas=(opt['beta1'], 0.999))

            print('---------- Networks initialized -------------')
            networks.print_network(self.netG)
            networks.print_network(self.netD)
            print('-----------------------------------------------')
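Note that netD takes opt['input_nc'] + opt['output_nc'] channels, the usual pix2pix-style conditional discriminator. In that pattern D scores (condition, output) pairs rather than the output alone; a hedged sketch (self.fake_B is a hypothetical attribute holding netG's output):

    def backward_D(self):
        # Concatenate source and translated image along the channel axis.
        fake_pair = torch.cat((self.input_A, self.fake_B), 1)
        loss_D_fake = self.criterionGAN(self.netD(fake_pair.detach()), False)
        real_pair = torch.cat((self.input_A, self.input_B), 1)
        loss_D_real = self.criterionGAN(self.netD(real_pair), True)
        self.loss_D = (loss_D_fake + loss_D_real) * 0.5
        self.loss_D.backward()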
Example #10
    def __init__(self, args):
        super().__init__(args)

        if args.mode == 'train':
            self.D = define_D(args)
            self.D = self.D.to(self.device)

            self.criterionMonoDepth = define_generator_loss(args)
            self.criterionMonoDepth = self.criterionMonoDepth.to(self.device)

            self.optimizer_G = optim.Adam(self.G.parameters(),
                                          lr=args.learning_rate)
            self.optimizer_D = optim.SGD(self.D.parameters(),
                                         lr=args.learning_rate)

            self.one = torch.tensor(1.0).to(self.device)
            self.mone = (self.one * -1).to(self.device)

            self.loader_iterator = None
            self.current_epoch = 0
            self.critic_iters = args.wgan_critics_num

        # Load the correct networks, depending on which mode we are in.
        if args.mode == 'train':
            self.model_names = ['G', 'D']
            self.optimizer_names = ['G', 'D']
        else:
            self.model_names = ['G']

        self.loss_names = ['G', 'G_MonoDepth', 'G_GAN', 'D', 'D_Wasserstein']
        self.losses = {}

        if self.args.resume:
            self.load_checkpoint()

        if 'cuda' in self.device:
            torch.cuda.synchronize()
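The one/mone pair and args.wgan_critics_num point at the classic WGAN recipe: the critic is stepped critic_iters times per generator step, pushing gradients in opposite directions via backward(self.one) / backward(self.mone). A hedged sketch after the reference WGAN implementations (real and fake are placeholder batches):

        # Speculative critic loop; names follow the fields initialised above.
        for _ in range(self.critic_iters):
            self.optimizer_D.zero_grad()
            d_real = self.D(real).mean()
            d_real.backward(self.mone)     # maximise D on real samples
            d_fake = self.D(fake.detach()).mean()
            d_fake.backward(self.one)      # minimise D on generated samples
            self.optimizer_D.step()
        loss_D_wasserstein = d_fake - d_real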
Example #11
train_set = get_dataset(path=opt.dataset)
training_data_loader = DataLoader(dataset=train_set,
                                  num_workers=opt.threads,
                                  batch_size=opt.batch_size,
                                  shuffle=True)
device = torch.device("cuda:0" if opt.cuda else "cpu")
print('===> Building models')
net_g = define_G(opt.input_nc,
                 opt.output_nc,
                 opt.ngf,
                 'batch',
                 False,
                 'normal',
                 0.02,
                 gpu_id=device)
net_d = define_D(opt.input_nc + opt.output_nc, opt.ndf, 'basic', gpu_id=device)

criterionGAN = GANLoss().to(device)
criterionL1 = nn.L1Loss().to(device)
criterionMSE = nn.MSELoss().to(device)

# setup optimizer
optimizer_g = optim.Adam(net_g.parameters(),
                         lr=opt.lr,
                         betas=(opt.beta1, 0.999))
optimizer_d = optim.Adam(net_d.parameters(),
                         lr=opt.lr,
                         betas=(opt.beta1, 0.999))
net_g_scheduler = get_scheduler(optimizer_g, opt)
net_d_scheduler = get_scheduler(optimizer_d, opt)
face_descriptor = FaceDescriptor().to(device)
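With this setup the generator step usually combines the adversarial term with a weighted L1 reconstruction term, pix2pix style. A hedged sketch, where opt.lamb is a hypothetical weight flag:

# Speculative generator update (sketch, not code from this script).
optimizer_g.zero_grad()
fake_b = net_g(real_a)
pred_fake = net_d(torch.cat((real_a, fake_b), 1))
loss_g = criterionGAN(pred_fake, True) + criterionL1(fake_b, real_b) * opt.lamb
loss_g.backward()
optimizer_g.step()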
Example #12
    def initialize(self, opt):
        super(DesignerGAN, self).initialize(opt)
        ###################################
        # define data tensors
        ###################################
        # self.input['img'] = self.Tensor()
        # self.input['img_attr'] = self.Tensor()
        # self.input['lm_map'] = self.Tensor()
        # self.input['seg_mask'] = self.Tensor()
        # self.input['attr_label'] = self.Tensor()
        # self.input['id'] = []

        ###################################
        # load/define networks
        ###################################

        # Todo modify networks.define_G
        # 1. add specified generator networks

        self.netG = networks.define_G(opt)
        self.netAE, self.opt_AE = network_loader.load_attribute_encoder_net(
            id=opt.which_model_AE, gpu_ids=opt.gpu_ids)
        if opt.which_model_FeatST != 'none':
            self.netFeatST, self.opt_FeatST = network_loader.load_feature_spatial_transformer_net(
                id=opt.which_model_FeatST, gpu_ids=opt.gpu_ids)
            self.use_FeatST = True
            # assert self.opt_FeatST.shape_encode == self.opt.shape_encode, 'GAN model and FeatST model has different shape encode mode'
            # assert self.opt_FeatST.input_mask_mode == self.opt.input_mask_mode, 'GAN model and FeatST model has different segmentation input mode'
        else:
            self.use_FeatST = False

        if self.is_train:
            self.netD = networks.define_D(opt)
            if opt.which_model_init_netG != 'none' and not opt.continue_train:
                self.load_network(self.netG, 'G', 'latest',
                                  opt.which_model_init_netG)

        if not self.is_train or opt.continue_train:
            self.load_network(self.netG, 'G', opt.which_epoch)
            if self.is_train:
                self.load_network(self.netD, 'D', opt.which_epoch)

        if self.is_train:
            self.fake_pool = ImagePool(opt.pool_size)

            ###################################
            # define loss functions and loss buffers
            ###################################
            if opt.which_gan in {'dcgan', 'lsgan'}:
                self.crit_GAN = networks.GANLoss(
                    use_lsgan=opt.which_gan == 'lsgan', tensor=self.Tensor)
            else:
                # WGAN loss will be calculated in self.backward_D_wgangp and self.backward_G
                self.crit_GAN = None
            self.crit_L1 = nn.L1Loss()
            self.crit_attr = nn.BCELoss()

            self.loss_functions = []
            self.loss_functions.append(self.crit_GAN)
            self.loss_functions.append(self.crit_L1)
            self.loss_functions.append(self.crit_attr)

            if self.opt.loss_weight_vgg > 0:
                self.crit_vgg = networks.VGGLoss(self.gpu_ids)
                self.loss_functions.append(self.crit_vgg)

            ###################################
            # create optimizers
            ###################################
            self.schedulers = []
            self.optimizers = []

            self.optim_G = torch.optim.Adam(self.netG.parameters(),
                                            lr=opt.lr,
                                            betas=(opt.beta1, opt.beta2))
            self.optim_D = torch.optim.Adam(self.netD.parameters(),
                                            lr=opt.lr,
                                            betas=(opt.beta1, opt.beta2))
            self.optimizers.append(self.optim_G)
            self.optimizers.append(self.optim_D)

            for optim in self.optimizers:
                self.schedulers.append(networks.get_scheduler(optim, opt))

        # color transformation from std to imagenet
        # img_imagenet = img_std * a + b
        self.trans_std_to_imagenet = {
            'a':
            Variable(self.Tensor([0.5 / 0.229, 0.5 / 0.224, 0.5 / 0.225]),
                     requires_grad=False).view(3, 1, 1),
            'b':
            Variable(self.Tensor([(0.5 - 0.485) / 0.229, (0.5 - 0.456) / 0.224,
                                  (0.5 - 0.406) / 0.225]),
                     requires_grad=False).view(3, 1, 1)
        }
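The closing dict encodes the affine map img_imagenet = img_std * a + b from (mean 0.5, std 0.5)-normalised images to ImageNet-normalised ones: since img_std = (x - 0.5)/0.5 and img_imagenet = (x - mean)/std, it follows that a = 0.5/std and b = (0.5 - mean)/std per channel. A short usage sketch (hypothetical call site, e.g. before a VGG-based loss):

        # img_std has shape (N, 3, H, W); a and b broadcast from (3, 1, 1).
        img_imagenet = (img_std * self.trans_std_to_imagenet['a'] +
                        self.trans_std_to_imagenet['b'])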
Example #13
print('===> Loading datasets')
root_path = "dataset/"
train_set = get_training_set(root_path + dataset)
test_set = get_test_set(root_path + dataset)
training_data_loader = DataLoader(dataset=train_set,
                                  num_workers=threads,
                                  batch_size=batchSize,
                                  shuffle=True)
testing_data_loader = DataLoader(dataset=test_set,
                                 num_workers=threads,
                                 batch_size=testBatchSize,
                                 shuffle=False)

print('===> Building model')
netG = define_G(input_nc, output_nc, ngf, 'batch', False, [0])
netD = define_D(input_nc + output_nc, ndf, 'batch', False, [0])

criterionGAN = GANLoss()
criterionL1 = nn.L1Loss()
criterionMSE = nn.MSELoss()

# setup optimizer
optimizerG = optim.Adam(netG.parameters(), lr=lr, betas=(beta1, 0.999))
optimizerD = optim.Adam(netD.parameters(), lr=lr, betas=(beta1, 0.999))

print('---------- Networks initialized -------------')
print_network(netG)
print_network(netD)
print('-----------------------------------------------')

real_a = torch.FloatTensor(batchSize, input_nc, 256, 256)
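real_a is a pre-allocated buffer in the style of the original pix2pix training script, where each batch is copied in place. A hedged sketch of that legacy (pre-0.4 PyTorch) idiom; modern code would simply move the batch to the device:

# Legacy buffer idiom (sketch).
for iteration, batch in enumerate(training_data_loader, 1):
    input_a, input_b = batch[0], batch[1]
    real_a.resize_(input_a.size()).copy_(input_a)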
Example #14
    def __init__(self, hyperparameters):
        super(myMUNIT_Trainer, self).__init__()
        lr = hyperparameters['lr']
        # Initiate the networks
        self.style_dim = hyperparameters['gen']['style_dim']
        self.enc_a = networks.define_E(input_nc=3,
                                       output_nc=self.style_dim,
                                       ndf=64)  # encoder for domain a
        self.enc_b = networks.define_E(input_nc=3,
                                       output_nc=self.style_dim,
                                       ndf=64)  # encoder for domain b
        self.gen_a = networks.define_G(input_nc=3,
                                       output_nc=3,
                                       nz=self.style_dim,
                                       ngf=64)  # generator for domain a
        self.gen_b = networks.define_G(input_nc=3,
                                       output_nc=3,
                                       nz=self.style_dim,
                                       ngf=64)  # generator for domain b
        self.dis_a = networks.define_D(input_nc=3,
                                       ndf=64,
                                       norm='instance',
                                       num_Ds=2)  # discriminator for domain a
        self.dis_b = networks.define_D(input_nc=3,
                                       ndf=64,
                                       norm='instance',
                                       num_Ds=2)  # discriminator for domain b
        self.netVGGF = networks.define_VGGF()
        self.instancenorm = nn.InstanceNorm2d(512, affine=False)

        # Initiate the criterions or loss functions
        self.criterionGAN = networks.GANLoss(
            mse_loss=True,
            tensor=torch.cuda.FloatTensor)  # criterion GAN adversarial loss
        self.wGANloss = networks.wGANLoss(
            tensor=torch.cuda.FloatTensor)  # wGAN adversarial loss
        self.criterionL1 = torch.nn.L1Loss()  # L1 loss
        self.criterionL2 = networks.L2Loss()  # L2 loss
        self.criterionZ = torch.nn.L1Loss()  # L1 loss between code
        self.criterionC = networks.ContentLoss(
            vgg_features=self.netVGGF)  # content loss
        self.criterionS = networks.StyleLoss(
            vgg_features=self.netVGGF)  # style loss
        self.criterionC_l = networks.ContentLoss(
            vgg_features=self.netVGGF)  # local content loss
        self.criterionS_l = networks.StyleLoss(
            vgg_features=self.netVGGF)  # local style loss
        self.criterionHisogram = networks.HistogramLoss(
            vgg_features=self.netVGGF)  # histogram loss
        self.Feature_map_im = networks.Feature_map_im(
            vgg_features=self.netVGGF)  # show feature map

        # fix the noise used in sampling
        self.s_a = torch.randn(8, self.style_dim, 1, 1).cuda()
        self.s_b = torch.randn(8, self.style_dim, 1, 1).cuda()

        # Setup the optimizers
        beta1 = hyperparameters['beta1']
        beta2 = hyperparameters['beta2']
        dis_params = list(self.dis_a.parameters()) + list(
            self.dis_b.parameters())
        gen_params = list(self.gen_a.parameters()) + list(
            self.gen_b.parameters())
        self.dis_opt = torch.optim.Adam(
            [p for p in dis_params if p.requires_grad],
            lr=lr,
            betas=(beta1, beta2),
            weight_decay=hyperparameters['weight_decay'])
        self.gen_opt = torch.optim.Adam(
            [p for p in gen_params if p.requires_grad],
            lr=lr,
            betas=(beta1, beta2),
            weight_decay=hyperparameters['weight_decay'])
        self.dis_scheduler = get_scheduler(self.dis_opt, hyperparameters)
        self.gen_scheduler = get_scheduler(self.gen_opt, hyperparameters)

        # Load VGG model if needed
        if 'vgg_w' in hyperparameters.keys() and hyperparameters['vgg_w'] > 0:
            self.vgg = load_vgg16(hyperparameters['vgg_model_path'] +
                                  '/models')
            self.vgg.eval()
            for param in self.vgg.parameters():
                param.requires_grad = False
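The instancenorm module next to netVGGF matches the MUNIT-style perceptual loss, which compares instance-normalised VGG features. A hedged sketch after the MUNIT reference code (compute_vgg_loss is a hypothetical name here, and VGG-specific preprocessing is omitted):

    def compute_vgg_loss(self, vgg, img, target):
        # Compare IN-normalised VGG features of generated and target images.
        img_fea = vgg(img)
        target_fea = vgg(target)
        return torch.mean(
            (self.instancenorm(img_fea) - self.instancenorm(target_fea)) ** 2)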
Example #15
training_data_loader = DataLoader(dataset=train_set,
                                  num_workers=opt.threads,
                                  batch_size=opt.batchSize,
                                  shuffle=True)
testing_data_loader = DataLoader(dataset=test_set,
                                 num_workers=opt.threads,
                                 batch_size=opt.testBatchSize,
                                 shuffle=False)

train_logger = Logger(opt.nEpochs, len(training_data_loader), opt.date)
test_logger = Logger(opt.nEpochs, len(testing_data_loader), opt.date)

print('===> Building model')
netG = define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.batch_mode, False,
                [0])
netD = define_D(opt.input_nc + opt.output_nc, opt.ndf, opt.batch_mode, False,
                [0])

criterionGAN = GANLoss()
criterionL1 = nn.L1Loss()
criterionMSE = nn.MSELoss()

# setup optimizer
optimizerG = optim.Adam(netG.parameters(),
                        lr=opt.glr,
                        betas=(opt.beta1, 0.999))
optimizerD = optim.Adam(netD.parameters(),
                        lr=opt.dlr,
                        betas=(opt.beta1, 0.999))

print('---------- Networks initialized -------------')
#print_network(netG)
Example #16
parser.add_argument('--data_order', nargs='+', default=['B', 'A'])

opt = parser.parse_args()
print(opt)

if torch.cuda.is_available() and not opt.cuda:
    print(
        "WARNING: You have a CUDA device, so you should probably run with --cuda"
    )

###### Definition of variables ######
# Networks
netG = Generator(opt.output_nc, opt.input_nc)
# netG = GeneratorNORM(opt.output_nc, opt.input_nc)
# netG = GeneratorCat(opt.output_nc, opt.input_nc)
netD_B = networks.define_D(opt.input_nc, opt.ndf, opt.netD, opt.n_layers_D,
                           opt.norm, False)

device = 'cpu'
if opt.cuda:
    netG.cuda()
    netD_B.cuda()
    device = 'cuda'

# netG.apply(weights_init_normal)
# netD_A.apply(weights_init_normal)
netD_B.apply(weights_init_normal)

# Losses

criterionGAN = networks.GANLoss().to(device)
criterion_MSE = torch.nn.MSELoss(reduction='none')  # per-element losses (no reduction)
Example #17
    def __init__(self, p):

        super(CycleGAN, self).__init__(p)
        nb = p.batchSize
        size = p.cropSize

        # load/define models
        # The naming convention is different from that used in the paper
        # Code (paper): G_A (G), G_B (F), D_A (D_Y), D_B (D_X)

        self.netG_A = networks.define_G(p.input_nc, p.output_nc, p.ngf,
                                        p.which_model_netG, p.norm,
                                        not p.no_dropout, p.init_type,
                                        self.gpu_ids)
        self.netG_B = networks.define_G(p.output_nc, p.input_nc, p.ngf,
                                        p.which_model_netG, p.norm,
                                        not p.no_dropout, p.init_type,
                                        self.gpu_ids)

        if self.isTrain:
            use_sigmoid = p.no_lsgan
            self.netD_A = networks.define_D(p.output_nc, p.ndf,
                                            p.which_model_netD, p.n_layers_D,
                                            p.norm, use_sigmoid, p.init_type,
                                            self.gpu_ids)
            self.netD_B = networks.define_D(p.input_nc, p.ndf,
                                            p.which_model_netD, p.n_layers_D,
                                            p.norm, use_sigmoid, p.init_type,
                                            self.gpu_ids)

        if not self.isTrain or p.continue_train:
            which_epoch = p.which_epoch
            self.load_model(self.netG_A, 'G_A', which_epoch)
            self.load_model(self.netG_B, 'G_B', which_epoch)
            if self.isTrain:
                self.load_model(self.netD_A, 'D_A', which_epoch)
                self.load_model(self.netD_B, 'D_B', which_epoch)

        if self.isTrain:
            self.old_lr = p.lr
            self.fake_A_pool = ImagePool(p.pool_size)
            self.fake_B_pool = ImagePool(p.pool_size)
            # define loss functions
            self.criterionGAN = networks.GANLoss(use_lsgan=not p.no_lsgan,
                                                 tensor=self.Tensor)
            self.criterionCycle = torch.nn.L1Loss()
            self.criterionIdt = torch.nn.L1Loss()

            # initialize optimizers
            self.optimizer_G = torch.optim.Adam(itertools.chain(
                self.netG_A.parameters(), self.netG_B.parameters()),
                                                lr=p.lr,
                                                betas=(p.beta1, 0.999))
            self.optimizer_D_A = torch.optim.Adam(self.netD_A.parameters(),
                                                  lr=p.lr,
                                                  betas=(p.beta1, 0.999))
            self.optimizer_D_B = torch.optim.Adam(self.netD_B.parameters(),
                                                  lr=p.lr,
                                                  betas=(p.beta1, 0.999))

            self.optimizers = []
            self.schedulers = []
            self.optimizers.append(self.optimizer_G)
            self.optimizers.append(self.optimizer_D_A)
            self.optimizers.append(self.optimizer_D_B)
            for optimizer in self.optimizers:
                self.schedulers.append(networks.get_scheduler(optimizer, p))
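For reference, get_scheduler in the CycleGAN family of repositories typically builds a LambdaLR that holds the learning rate constant for p.niter epochs and then decays it linearly to zero over p.niter_decay epochs. A sketch of that rule (an assumption about this particular repo's implementation):

from torch.optim import lr_scheduler

def get_scheduler(optimizer, p):
    # Constant for niter epochs, then linear decay over niter_decay epochs.
    def lambda_rule(epoch):
        return 1.0 - max(0, epoch + 1 - p.niter) / float(p.niter_decay + 1)
    return lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)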
Example #18
nz = int(opt.nz)
ngf = int(opt.ngf)
ndf = int(opt.ndf)
nc = 1

print('===> Loading datasets')
dataset = DatasetFromFolder(opt.dataroot, opt.imageSize)
assert dataset
dataloader = torch.utils.data.DataLoader(dataset,
                                         batch_size=opt.batchSize,
                                         shuffle=True,
                                         num_workers=int(opt.workers))

print('===> Building model')
netG = define_G(nc, nz, ngf, ngpu, device, opt.netG)
netD = define_D(nc, ndf, ngpu, device, opt.netD)

criterion = nn.BCELoss()

fixed_noise = torch.randn(opt.batchSize, nz, 1, 1, device=device)
real_label = 1
fake_label = 0

# setup optimizer
optimizerD = optim.Adam(netD.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
optimizerG = optim.Adam(netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))

print('---------- Networks initialized -------------')
print_network(netG)
print_network(netD)
print('-----------------------------------------------')
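real_label/fake_label and the fixed noise follow the standard DCGAN loop: D is trained on real batches labelled 1 and on netG(noise) labelled 0 under BCELoss. A hedged sketch of one discriminator step (real_images is a placeholder batch):

# One D step in the usual DCGAN pattern (sketch, not repo code).
netD.zero_grad()
label = torch.full((opt.batchSize,), real_label, dtype=torch.float, device=device)
errD_real = criterion(netD(real_images).view(-1), label)
errD_real.backward()

noise = torch.randn(opt.batchSize, nz, 1, 1, device=device)
fake = netG(noise)
label.fill_(fake_label)
errD_fake = criterion(netD(fake.detach()).view(-1), label)
errD_fake.backward()
optimizerD.step()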
Example #19
    def initialize(self, opt):
        super(MultimodalDesignerGAN_V2, self).initialize(opt)
        ###################################
        # define networks
        ###################################
        self.modules = {}
        # shape branch
        if opt.which_model_netG != 'unet':
            self.shape_encoder = networks.define_image_encoder(opt, 'shape')
            self.modules['shape_encoder'] = self.shape_encoder
        else:
            self.shape_encoder = None
        # edge branch
        if opt.use_edge:
            self.edge_encoder = networks.define_image_encoder(opt, 'edge')
            self.modules['edge_encoder'] = self.edge_encoder
        else:
            self.edge_encoder = None
        # color branch
        if opt.use_color:
            self.color_encoder = networks.define_image_encoder(opt, 'color')
            self.modules['color_encoder'] = self.color_encoder
        else:
            self.color_encoder = None

        # fusion model
        if opt.ftn_model == 'none':
            # shape_feat, edge_feat and color_feat are simply upsampled to the same size (that of shape_feat) and concatenated
            pass
        elif opt.ftn_model == 'concat':
            assert opt.use_edge or opt.use_color
            if opt.use_edge:
                self.edge_trans_net = networks.define_feature_fusion_network(
                    name='FeatureConcatNetwork',
                    feat_nc=opt.edge_nof,
                    guide_nc=opt.shape_nof,
                    nblocks=opt.ftn_nblocks,
                    norm=opt.norm,
                    gpu_ids=self.gpu_ids,
                    init_type=opt.init_type)
                self.modules['edge_trans_net'] = self.edge_trans_net
            if opt.use_color:
                self.color_trans_net = networks.define_feature_fusion_network(
                    name='FeatureConcatNetwork',
                    feat_nc=opt.color_nof,
                    guide_nc=opt.shape_nof,
                    nblocks=opt.ftn_nblocks,
                    norm=opt.norm,
                    gpu_ids=self.gpu_ids,
                    init_type=opt.init_type)
                self.modules['color_trans_net'] = self.color_trans_net
        elif opt.ftn_model == 'reduce':
            assert opt.use_edge or opt.use_color
            if opt.use_edge:
                self.edge_trans_net = networks.define_feature_fusion_network(
                    name='FeatureReduceNetwork',
                    feat_nc=opt.edge_nof,
                    guide_nc=opt.shape_nof,
                    nblocks=opt.ftn_nblocks,
                    ndowns=opt.ftn_ndowns,
                    norm=opt.norm,
                    gpu_ids=self.gpu_ids,
                    init_type=opt.init_type)
                self.modules['edge_trans_net'] = self.edge_trans_net
            if opt.use_color:
                self.color_trans_net = networks.define_feature_fusion_network(
                    name='FeatureReduceNetwork',
                    feat_nc=opt.color_nof,
                    guide_nc=opt.shape_nof,
                    nblocks=opt.ftn_nblocks,
                    ndowns=opt.ftn_ndowns,
                    norm=opt.norm,
                    gpu_ids=self.gpu_ids,
                    init_type=opt.init_type)
                self.modules['color_trans_net'] = self.color_trans_net

        elif opt.ftn_model == 'trans':
            assert opt.use_edge or opt.use_color
            if opt.use_edge:
                self.edge_trans_net = networks.define_feature_fusion_network(
                    name='FeatureTransformNetwork',
                    feat_nc=opt.edge_nof,
                    guide_nc=opt.shape_nof,
                    nblocks=opt.ftn_nblocks,
                    feat_size=opt.feat_size_lr,
                    norm=opt.norm,
                    gpu_ids=self.gpu_ids,
                    init_type=opt.init_type)
                self.modules['edge_trans_net'] = self.edge_trans_net
            if opt.use_color:
                self.color_trans_net = networks.define_feature_fusion_network(
                    name='FeatureTransformNetwork',
                    feat_nc=opt.color_nof,
                    guide_nc=opt.shape_nof,
                    nblocks=opt.ftn_nblocks,
                    feat_size=opt.feat_size_lr,
                    norm=opt.norm,
                    gpu_ids=self.gpu_ids,
                    init_type=opt.init_type)
                self.modules['color_trans_net'] = self.color_trans_net

        # netG
        self.netG = networks.define_generator(opt)
        self.modules['netG'] = self.netG

        # netD
        if self.is_train:
            self.netD = networks.define_D(opt)
            self.modules['netD'] = self.netD

        ###################################
        # load weights
        ###################################
        if self.is_train:
            if opt.continue_train:
                for label, net in self.modules.items():
                    self.load_network(net, label, opt.which_epoch)
            else:
                if opt.which_model_init != 'none':
                    # load pretrained entire model
                    for label, net in self.modules.items():
                        self.load_network(net,
                                          label,
                                          'latest',
                                          opt.which_model_init,
                                          forced=False)
                else:
                    # load pretrained encoder
                    if opt.which_model_netG != 'unet' and opt.pretrain_shape:
                        self.load_network(self.shape_encoder, 'shape_encoder',
                                          'latest',
                                          opt.which_model_init_shape_encoder)
                    if opt.use_edge and opt.pretrain_edge:
                        self.load_network(self.edge_encoder, 'edge_encoder',
                                          'latest',
                                          opt.which_model_init_edge_encoder)
                    if opt.use_color and opt.pretrain_color:
                        self.load_network(self.color_encoder, 'color_encoder',
                                          'latest',
                                          opt.which_model_init_color_encoder)
        else:
            for label, net in self.modules.items():
                if label != 'netD':
                    self.load_network(net, label, opt.which_epoch)

        ###################################
        # prepare for training
        ###################################
        if self.is_train:
            self.fake_pool = ImagePool(opt.pool_size)
            ###################################
            # define loss functions
            ###################################
            self.loss_functions = []
            if opt.which_gan in {'dcgan', 'lsgan'}:
                self.crit_GAN = networks.GANLoss(
                    use_lsgan=opt.which_gan == 'lsgan', tensor=self.Tensor)
                self.loss_functions.append(self.crit_GAN)
            else:
                # WGAN loss will be calculated in self.backward_D_wgangp and self.backward_G
                self.crit_GAN = None

            self.crit_L1 = nn.L1Loss()
            self.loss_functions.append(self.crit_L1)

            if self.opt.loss_weight_vgg > 0:
                self.crit_vgg = networks.VGGLoss(self.gpu_ids)
                self.loss_functions.append(self.crit_vgg)

            if self.opt.G_output_seg:
                self.crit_CE = nn.CrossEntropyLoss()
                self.loss_functions.append(self.crit_CE)

            self.crit_psnr = networks.SmoothLoss(networks.PSNR())
            self.loss_functions.append(self.crit_psnr)
            ###################################
            # create optimizers
            ###################################
            self.schedulers = []
            self.optimizers = []

            # G optimizer
            G_module_list = [
                'shape_encoder', 'edge_encoder', 'color_encoder', 'netG'
            ]
            G_param_groups = [{
                'params': self.modules[m].parameters()
            } for m in G_module_list if m in self.modules]
            self.optim_G = torch.optim.Adam(G_param_groups,
                                            lr=opt.lr,
                                            betas=(opt.beta1, opt.beta2))
            self.optimizers.append(self.optim_G)
            # D optimizer
            self.optim_D = torch.optim.Adam(self.netD.parameters(),
                                            lr=opt.lr_D,
                                            betas=(opt.beta1, opt.beta2))
            self.optimizers.append(self.optim_D)
            # feature transfer network optimizer
            FTN_module_list = ['edge_trans_net', 'color_trans_net']
            FTN_param_groups = [{
                'params': self.modules[m].parameters()
            } for m in FTN_module_list if m in self.modules]
            if len(FTN_param_groups) > 0:
                self.optim_FTN = torch.optim.Adam(FTN_param_groups,
                                                  lr=opt.lr_FTN,
                                                  betas=(0.9, 0.999))
                self.optimizers.append(self.optim_FTN)
            else:
                self.optim_FTN = None
            # schedulers
            for optim in self.optimizers:
                self.schedulers.append(networks.get_scheduler(optim, opt))
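The G_param_groups/FTN_param_groups construction is the standard PyTorch way of letting one optimizer own several modules: each dict becomes a parameter group that can also override hyperparameters per group. A minimal illustration (encoder/decoder are hypothetical modules):

# One Adam over two modules; the second group overrides the learning rate.
opt_shared = torch.optim.Adam(
    [{'params': encoder.parameters()},
     {'params': decoder.parameters(), 'lr': 1e-4}],
    lr=2e-4, betas=(0.5, 0.999))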
Example #20
    def initialize(self, opt):
        super(MultimodalDesignerGAN, self).initialize(opt)
        ###################################
        # load/define networks
        ###################################

        # basic G
        self.netG = networks.define_G(opt)

        # encoders
        self.encoders = {}
        if opt.use_edge:
            self.edge_encoder = networks.define_image_encoder(opt, 'edge')
            self.encoders['edge_encoder'] = self.edge_encoder
        if opt.use_color:
            self.color_encoder = networks.define_image_encoder(opt, 'color')
            self.encoders['color_encoder'] = self.color_encoder
        if opt.use_attr:
            self.attr_encoder, self.opt_AE = network_loader.load_attribute_encoder_net(
                id=opt.which_model_AE, gpu_ids=opt.gpu_ids)

        # basic D and auxiliary Ds
        if self.is_train:
            # basic D
            self.netD = networks.define_D(opt)
            # auxiliary Ds
            self.auxiliaryDs = {}
            if opt.use_edge_D:
                assert opt.use_edge
                self.netD_edge = networks.define_D_from_params(
                    input_nc=opt.edge_nof + 3,
                    ndf=opt.ndf,
                    which_model_netD=opt.which_model_netD,
                    n_layers_D=opt.n_layers_D,
                    norm=opt.norm,
                    which_gan='dcgan',
                    init_type=opt.init_type,
                    gpu_ids=opt.gpu_ids)
                self.auxiliaryDs['D_edge'] = self.netD_edge
            if opt.use_color_D:
                assert opt.use_color
                self.netD_color = networks.define_D_from_params(
                    input_nc=opt.color_nof + 3,
                    ndf=opt.ndf,
                    which_model_netD=opt.which_model_netD,
                    n_layers_D=opt.n_layers_D,
                    norm=opt.norm,
                    which_gan='dcgan',
                    init_type=opt.init_type,
                    gpu_ids=opt.gpu_ids)
                self.auxiliaryDs['D_color'] = self.netD_color
            if opt.use_attr_D:
                assert opt.use_attr
                attr_nof = opt.n_attr_feat if opt.attr_cond_type in {
                    'feat', 'feat_map'
                } else opt.n_attr
                self.netD_attr = networks.define_D_from_params(
                    input_nc=attr_nof + 3,
                    ndf=opt.ndf,
                    which_model_netD=opt.which_model_netD,
                    n_layers_D=opt.n_layers_D,
                    norm=opt.norm,
                    which_gan='dcgan',
                    init_type=opt.init_type,
                    gpu_ids=opt.gpu_ids)
                self.auxiliaryDs['D_attr'] = self.netD_attr
            # load weights
            if not opt.continue_train:
                if opt.which_model_init != 'none':
                    self.load_network(self.netG, 'G', 'latest',
                                      opt.which_model_init)
                    self.load_network(self.netD, 'D', 'latest',
                                      opt.which_model_init)
                    for l, net in self.encoders.items():
                        self.load_network(net, l, 'latest',
                                          opt.which_model_init)
                    for l, net in self.auxiliaryDs.items():
                        self.load_network(net, l, 'latest',
                                          opt.which_model_init)
            else:
                self.load_network(self.netG, 'G', opt.which_epoch)
                self.load_network(self.netD, 'D', opt.which_epoch)
                for l, net in self.encoders.items():
                    self.load_network(net, l, opt.which_epoch)
                for l, net in self.auxiliaryDs.items():
                    self.load_network(net, l, opt.which_epoch)
        else:
            self.load_network(self.netG, 'G', opt.which_epoch)
            for l, net in self.encoders.items():
                self.load_network(net, l, opt.which_epoch)

        if self.is_train:
            self.fake_pool = ImagePool(opt.pool_size)
            ###################################
            # define loss functions and loss buffers
            ###################################
            self.loss_functions = []
            if opt.which_gan in {'dcgan', 'lsgan'}:
                self.crit_GAN = networks.GANLoss(
                    use_lsgan=opt.which_gan == 'lsgan', tensor=self.Tensor)
            else:
                # WGAN loss will be calculated in self.backward_D_wgangp and self.backward_G
                self.crit_GAN = None

            self.loss_functions.append(self.crit_GAN)

            self.crit_L1 = nn.L1Loss()
            self.loss_functions.append(self.crit_L1)

            if self.opt.loss_weight_vgg > 0:
                self.crit_vgg = networks.VGGLoss(self.gpu_ids)
                self.loss_functions.append(self.crit_vgg)

            self.crit_psnr = networks.SmoothLoss(networks.PSNR())
            self.loss_functions.append(self.crit_psnr)
            ###################################
            # create optimizers
            ###################################
            self.schedulers = []
            self.optimizers = []

            # optim_G will optimize parameters of netG and all image encoders (except attr_encoder)
            G_param_groups = [{'params': self.netG.parameters()}]
            for l, net in self.encoders.items():
                G_param_groups.append({'params': net.parameters()})
            self.optim_G = torch.optim.Adam(G_param_groups,
                                            lr=opt.lr,
                                            betas=(opt.beta1, opt.beta2))
            self.optimizers.append(self.optim_G)
            # optim_D will optimize parameters of netD
            self.optim_D = torch.optim.Adam(self.netD.parameters(),
                                            lr=opt.lr_D,
                                            betas=(opt.beta1, opt.beta2))
            self.optimizers.append(self.optim_D)
            # optim_D_aux will optimize parameters of auxiliaryDs
            if len(self.auxiliaryDs) > 0:
                aux_D_param_groups = [{
                    'params': net.parameters()
                } for net in self.auxiliaryDs.values()]
                self.optim_D_aux = torch.optim.Adam(aux_D_param_groups,
                                                    lr=opt.lr_D,
                                                    betas=(opt.beta1,
                                                           opt.beta2))
                self.optimizers.append(self.optim_D_aux)
            for optim in self.optimizers:
                self.schedulers.append(networks.get_scheduler(optim, opt))

        # color transformation from std to imagenet
        # img_imagenet = img_std * a + b
        self.trans_std_to_imagenet = {
            'a':
            Variable(self.Tensor([0.5 / 0.229, 0.5 / 0.224, 0.5 / 0.225]),
                     requires_grad=False).view(3, 1, 1),
            'b':
            Variable(self.Tensor([(0.5 - 0.485) / 0.229, (0.5 - 0.456) / 0.224,
                                  (0.5 - 0.406) / 0.225]),
                     requires_grad=False).view(3, 1, 1)
        }
Example #21
    def initialize(self, opt):
        BaseModel.initialize(self, opt)
        if opt.resize_or_crop != 'none' or not opt.isTrain:  # when training at full res this causes OOM
            torch.backends.cudnn.benchmark = True
        self.isTrain = opt.isTrain
        input_nc = opt.label_nc if opt.label_nc != 0 else opt.input_nc

        ##### define networks
        # Generator network
        netG_input_nc = input_nc
        # Main Generator
        self.netG = networks.define_G(11,
                                      opt.output_nc,
                                      opt.ngf,
                                      opt.netG,
                                      opt.n_downsample_global,
                                      opt.n_blocks_global,
                                      opt.n_local_enhancers,
                                      opt.n_blocks_local,
                                      opt.norm,
                                      gpu_ids=self.gpu_ids)

        self.netP = networks.define_P(44,
                                      20,
                                      opt.ngf,
                                      opt.netG,
                                      opt.n_downsample_global,
                                      opt.n_blocks_global,
                                      opt.n_local_enhancers,
                                      opt.n_blocks_local,
                                      opt.norm,
                                      gpu_ids=self.gpu_ids)
        self.netP.load_state_dict(
            torch.load(
                os.path.dirname(os.path.realpath(__file__)) +
                "/checkpoints/generate/parse.pth"))

        # Discriminator network
        if self.isTrain:
            use_sigmoid = opt.no_lsgan
            netD_input_nc = input_nc + opt.output_nc
            netB_input_nc = opt.output_nc * 2
            self.netD = networks.define_D(netD_input_nc,
                                          opt.ndf,
                                          opt.n_layers_D,
                                          opt.norm,
                                          use_sigmoid,
                                          opt.num_D,
                                          not opt.no_ganFeat_loss,
                                          gpu_ids=self.gpu_ids)
            #self.netB = networks.define_B(netB_input_nc, opt.output_nc, 32, 3, 3, opt.norm, gpu_ids=self.gpu_ids)

        if self.opt.verbose:
            print('---------- Networks initialized -------------')

        # load networks
        if not self.isTrain or opt.continue_train or opt.load_pretrain:
            pretrained_path = '' if not self.isTrain else opt.load_pretrain
            self.load_network(self.netG, 'G', opt.which_epoch, pretrained_path)

            if self.isTrain:
                self.load_network(self.netD, 'D', opt.which_epoch,
                                  pretrained_path)

        # set loss functions and optimizers
        if self.isTrain:
            if opt.pool_size > 0 and (len(self.gpu_ids)) > 1:
                raise NotImplementedError(
                    "Fake Pool Not Implemented for MultiGPU")
            self.fake_pool = ImagePool(opt.pool_size)
            self.old_lr = opt.lr

            # define loss functions
            self.loss_filter = self.init_loss_filter(not opt.no_ganFeat_loss,
                                                     not opt.no_vgg_loss)

            self.criterionGAN = networks.GANLoss(use_lsgan=not opt.no_lsgan,
                                                 tensor=self.Tensor)
            self.criterionFeat = torch.nn.L1Loss()
            if not opt.no_vgg_loss:
                self.criterionVGG = networks.VGGLoss(self.gpu_ids)
            self.criterionStyle = networks.StyleLoss(self.gpu_ids)
            # Loss names, so individual terms can be reported separately
            self.loss_names = self.loss_filter('G_GAN', 'G_GAN_Feat', 'G_VGG',
                                               'D_real', 'D_fake')
            # initialize optimizers
            # optimizer G
            if opt.niter_fix_global > 0:
                finetune_list = set()

                params_dict = dict(self.netG.named_parameters())
                params = []
                for key, value in params_dict.items():
                    if key.startswith('model' + str(opt.n_local_enhancers)):
                        params += [value]
                        finetune_list.add(key.split('.')[0])
                print(
                    '------------- Only training the local enhancer network (for %d epochs) ------------'
                    % opt.niter_fix_global)
                print('The layers that are finetuned are ',
                      sorted(finetune_list))
            else:
                # netimage, netcolor, netlabel, netsketch and classfier are
                # auxiliary sub-networks presumably defined elsewhere in the
                # full model; only netG is constructed in this excerpt.
                params = (list(self.netG.parameters()) +
                          list(self.netimage.parameters()) +
                          list(self.netcolor.parameters()) +
                          list(self.netlabel.parameters()) +
                          list(self.netsketch.parameters()) +
                          list(self.classfier.parameters()))
                # params.extend(list(self.netimage.parameters()))

            self.optimizer_G = torch.optim.Adam(params,
                                                lr=opt.lr,
                                                betas=(opt.beta1, 0.999))

            # optimizer D
            params = list(self.netD.parameters())
            self.optimizer_D = torch.optim.Adam(params,
                                                lr=opt.lr,
                                                betas=(opt.beta1, 0.999))
Example #22
0
    def TrainGenerator(real_A: tp.Numpy.Placeholder(
        (1, 3, args.crop_size, args.crop_size), dtype=flow.float32),
                       real_B: tp.Numpy.Placeholder(
                           (1, 3, args.crop_size, args.crop_size),
                           dtype=flow.float32)):
        with flow.scope.placement("gpu", "0:0-0"):
            # G_A(A)
            fake_B = networks.define_G(real_A,
                                       "netG_A",
                                       ngf=args.ngf,
                                       n_blocks=9,
                                       trainable=True,
                                       reuse=True)
            # G_B(G_A(A))
            rec_A = networks.define_G(fake_B,
                                      "netG_B",
                                      ngf=args.ngf,
                                      n_blocks=9,
                                      trainable=True,
                                      reuse=True)
            # G_B(B)
            fake_A = networks.define_G(real_B,
                                       "netG_B",
                                       ngf=args.ngf,
                                       n_blocks=9,
                                       trainable=True,
                                       reuse=True)
            # G_A(G_B(B))
            rec_B = networks.define_G(fake_A,
                                      "netG_A",
                                      ngf=args.ngf,
                                      n_blocks=9,
                                      trainable=True,
                                      reuse=True)

            # Identity loss
            # G_A should be identity if real_B is fed: ||G_A(B) - B||
            idt_A = networks.define_G(real_B,
                                      "netG_A",
                                      ngf=args.ngf,
                                      n_blocks=9,
                                      trainable=True,
                                      reuse=True)
            loss_idt_A = networks.L1Loss(
                idt_A - real_B) * args.lambda_B * args.lambda_identity

            # G_B should be identity if real_A is fed: ||G_B(A) - A||
            idt_B = networks.define_G(real_A,
                                      "netG_B",
                                      ngf=args.ngf,
                                      n_blocks=9,
                                      trainable=True,
                                      reuse=True)
            loss_idt_B = networks.L1Loss(
                idt_B - real_A) * args.lambda_A * args.lambda_identity

            # GAN loss D_A(G_A(A))
            netD_A_out = networks.define_D(fake_B,
                                           "netD_A",
                                           ndf=args.ndf,
                                           n_layers_D=3,
                                           trainable=False,
                                           reuse=True)
            loss_G_A = networks.GANLoss(netD_A_out, True)

            # GAN loss D_B(G_B(B))
            netD_B_out = networks.define_D(fake_A,
                                           "netD_B",
                                           ndf=args.ndf,
                                           n_layers_D=3,
                                           trainable=False,
                                           reuse=True)
            loss_G_B = networks.GANLoss(netD_B_out, True)

            # Forward cycle loss || G_B(G_A(A)) - A||
            loss_cycle_A = networks.L1Loss(rec_A - real_A) * args.lambda_A
            # Backward cycle loss || G_A(G_B(B)) - B||
            loss_cycle_B = networks.L1Loss(rec_B - real_B) * args.lambda_B
            # combined loss and calculate gradients
            loss_G = loss_G_A + loss_G_B + loss_cycle_A + loss_cycle_B + loss_idt_A + loss_idt_B

            flow.optimizer.Adam(flow.optimizer.PiecewiseConstantScheduler(
                [], [args.learning_rate]),
                                beta1=0.5).minimize(loss_G)

        return fake_B, rec_A, fake_A, rec_B, loss_G
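
Note the asymmetry in the job above: the discriminators are built with trainable=False, so the Adam step only updates generator variables. The PyTorch ports of CycleGAN get the same effect by toggling requires_grad; a minimal sketch of that helper, an assumption on our part and not part of this OneFlow example:

def set_requires_grad(nets, requires_grad=False):
    # Enable or disable gradients for one network or a list of networks.
    if not isinstance(nets, (list, tuple)):
        nets = [nets]
    for net in nets:
        if net is not None:
            for param in net.parameters():
                param.requires_grad = requires_grad

# set_requires_grad([netD_A, netD_B], False)  # before the generator step
# set_requires_grad([netD_A, netD_B], True)   # before the discriminator step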
Example #23
0
import torch
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
from tqdm import tqdm

from tensorboardX import SummaryWriter

import networks  # local module providing define_D (assumed importable here)

batch_size = 1
ds_size = 200

writer = SummaryWriter()

device = torch.device("cuda:6")

net_D = networks.define_D(6, 64, 'n_layers', n_layers_D=3, use_sigmoid=False, out_channels=128)
net_D.to(device)
optimizer_D = optim.SGD(net_D.parameters(), lr=0.0001, momentum=0.5)
chkpt = torch.load('../vanilla/checkpoints_/63.pth')
net_D.load_state_dict(chkpt['state_dict'])

def softmax2d(tensor):
    '''
    PyTorch has no built-in 2-D softmax, so flatten the tensor,
    apply softmax along the single remaining dimension, and reshape back.
    '''
    return F.softmax(tensor.view(-1), dim=0).view_as(tensor)
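
For reference, the helper preserves shape and normalises all entries so they sum to one; a quick hypothetical check (not in the original):

probs = softmax2d(torch.randn(4, 4))
assert probs.shape == (4, 4)
assert torch.isclose(probs.sum(), torch.tensor(1.0))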

def normalize_G(net_G):
    '''
    Normalizes the off diagonal and on diagonal entries of net_G 
Example #24
0
def main(name_exp, segloss=False, cuda=True, finetune=False):
    # Training settings
    parser = argparse.ArgumentParser(description='pix2pix-PyTorch-implementation')
    parser.add_argument('--batchSize', type=int, default=8, help='training batch size')
    parser.add_argument('--testBatchSize', type=int, default=8, help='testing batch size')
    parser.add_argument('--nEpochs', type=int, default=100, help='number of epochs to train for')
    parser.add_argument('--input_nc', type=int, default=3, help='input image channels')
    parser.add_argument('--output_nc', type=int, default=3, help='output image channels')
    parser.add_argument('--ngf', type=int, default=64, help='generator filters in first conv layer')
    parser.add_argument('--ndf', type=int, default=64, help='discriminator filters in first conv layer')
    parser.add_argument('--lr', type=float, default=0.0002, help='Learning Rate. Default=0.0002')
    parser.add_argument('--beta1', type=float, default=0.5, help='beta1 for adam. default=0.5')
    parser.add_argument('--threads', type=int, default=8, help='number of threads for data loader to use')
    parser.add_argument('--seed', type=int, default=123, help='random seed to use. Default=123')
    parser.add_argument('--lamb', type=int, default=10, help='weight on L1 term in objective')
    opt = parser.parse_args()

    cudnn.benchmark = True



    def val():
        net_current = "path_exp/checkpoint/DFS/{}/netG_model_current.pth".format(name_exp)
        netVal = torch.load(net_current)
        netVal.eval()
        SEG_NET.eval()
        features.eval()
        with torch.no_grad():
            total_mse = 0
            total_mse2 = 0
            avg_psnr_depth = 0
            avg_psnr_dehaze = 0
            avg_ssim_depth = 0
            avg_ssim_dehaze = 0
            for batch in validation_data_loader:
                input, target, depth = Variable(batch[0]), Variable(batch[1]), Variable(batch[2])
                if cuda:
                    input = input.cuda()
                    target = target.cuda()
                    depth = depth.cuda()

                dehaze = netVal(input)
                prediction = SEG_NET(dehaze)

                avg_ssim_dehaze += pytorch_ssim.ssim(dehaze, target).item()

                mse = criterionMSE(prediction, depth)
                total_mse += mse.item()
                avg_psnr_depth += 10 * log10(1 / mse.item())

                mse2 = criterionMSE(dehaze, target)
                total_mse2 += mse2.item()
                avg_psnr_dehaze += 10 * log10(1 / mse2.item())

                avg_ssim_depth += pytorch_ssim.ssim(prediction, depth).item()


                visual_ret_val = OrderedDict()
                visual_ret_val['Haze'] = input
                visual_ret_val['Seg estimate'] = prediction
                visual_ret_val['Dehaze'] = dehaze
                visual_ret_val['GT dehaze'] = target
                visual_ret_val['GT Seg'] = depth

                visualizer.display_current_results(visual_ret_val, epoch, True)


            print("===> Validation")
            #f.write("===> Testing: \r\n")

            print("===> PSNR seg: {:.4f} ".format(avg_psnr_depth / len(validation_data_loader)))
            #f.write("===> PSNR depth: {:.4f} \r\n".format(avg_psnr_depth / len(validation_data_loader)))

            print("===> Mse seg: {:.4f} ".format(total_mse / len(validation_data_loader)))
            #f.write("===> Mse depth: {:.4f} \r\n".format(total_mse / len(validation_data_loader)))

            print("===> SSIM seg: {:.4f} ".format(avg_ssim_depth / len(validation_data_loader)))
            #f.write("===> SSIM depth: {:.4f} \r\n".format(avg_ssim_depth / len(validation_data_loader)))

            return total_mse / len(validation_data_loader)






    def testing():
        path = "path_exp/checkpoint/DFS/{}/netG_model_best.pth".format(name_exp)
        net = torch.load(path)
        net.eval()
        SEG_NET.eval()
        features.eval()
        with torch.no_grad():
            total_mse = 0
            total_mse2 = 0
            avg_psnr_depth = 0
            avg_psnr_dehaze = 0
            avg_ssim_depth = 0
            avg_ssim_dehaze = 0
            for batch in testing_data_loader:
                input, target, depth = Variable(batch[0]), Variable(batch[1]), Variable(batch[2])
                if cuda:
                    input = input.cuda()
                    target = target.cuda()
                    depth = depth.cuda()

                dehaze = net(input)
                prediction = SEG_NET(dehaze)

                avg_ssim_dehaze += pytorch_ssim.ssim(dehaze, target).item()

                mse = criterionMSE(prediction, depth)
                total_mse += mse.item()
                avg_psnr_depth += 10 * log10(1 / mse.item())

                mse2 = criterionMSE(dehaze, target)
                total_mse2 += mse2.item()
                avg_psnr_dehaze += 10 * log10(1 / mse2.item())

                avg_ssim_depth += pytorch_ssim.ssim(prediction, depth).item()

            print("===> Testing")
            print("===> PSNR seg: {:.4f} ".format(avg_psnr_depth / len(testing_data_loader)))
            print("===> Mse seg: {:.4f} ".format(total_mse / len(testing_data_loader)))
            print("===> SSIM seg: {:.4f} ".format(avg_ssim_depth / len(testing_data_loader)))
            print("===> PSNR dehaze: {:.4f} ".format(avg_psnr_dehaze / len(testing_data_loader)))
            print("===> SSIM dehaze: {:.4f} ".format(avg_ssim_dehaze / len(testing_data_loader)))





    def checkpoint():
        if not os.path.exists("checkpoint"):
            os.mkdir("checkpoint")
        if not os.path.exists(os.path.join("path_exp/checkpoint/DFS", name_exp)):
            os.mkdir(os.path.join("path_exp/checkpoint/DFS", name_exp))
        net_g_model_out_path = "path_exp/checkpoint/DFS/{}/netG_model_best.pth".format(name_exp)
        net_d_model_out_path = "path_exp/checkpoint/DFS/{}/netD_model_best.pth".format(name_exp)
        torch.save(netG, net_g_model_out_path)
        torch.save(netD, net_d_model_out_path)


    def checkpoint_current():
        if not os.path.exists(os.path.join("path_exp/checkpoint/DFS", name_exp)):
            os.mkdir(os.path.join("path_exp/checkpoint/DFS", name_exp))
        net_g_model_out_path = "path_exp/checkpoint/DFS/{}/netG_model_current.pth".format(name_exp)
        torch.save(netG, net_g_model_out_path)

    def checkpoint_seg():
        if not os.path.exists(os.path.join("path_exp/checkpoint/DFS", name_exp)):
            os.mkdir(os.path.join("path_exp/checkpoint/DFS", name_exp))
        net_g_model_out_path = "path_exp/checkpoint/DFS/{}/seg_net.pth".format(name_exp)
        torch.save(SEG_NET, net_g_model_out_path)



    torch.manual_seed(opt.seed)
    if cuda:
        torch.cuda.manual_seed(opt.seed)

    print(" ")
    print(name_exp)
    print(" ")

    print('===> Loading datasets')
    train_set = get_training_set('path_exp/cityscape/HAZE')
    val_set = get_val_set('path_exp/cityscape/HAZE')
    test_set = get_test_set('path_exp/cityscape/HAZE')


    training_data_loader = DataLoader(dataset=train_set, num_workers=opt.threads, batch_size=opt.batchSize, shuffle=True)
    validation_data_loader = DataLoader(dataset=val_set, num_workers=opt.threads, batch_size=opt.batchSize, shuffle=True)
    testing_data_loader= DataLoader(dataset=test_set, num_workers=opt.threads, batch_size=opt.testBatchSize, shuffle=False)

    print('===> Building model')
    netG = define_G(opt.input_nc, opt.output_nc, opt.ngf, 'batch', False, [0])
    netD = define_D(opt.input_nc + opt.output_nc, opt.ndf, 'batch', False, [0])

    criterionGAN = GANLoss()
    criterionL1 = nn.L1Loss()
    criterionMSE = nn.MSELoss()

    # setup optimizer
    optimizerG = optim.Adam(netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
    optimizerD = optim.Adam(netD.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))



    print('---------- Networks initialized -------------')
    print_network(netG)
    print_network(netD)
    print('-----------------------------------------------')


    real_a = torch.FloatTensor(opt.batchSize, opt.input_nc, 256, 256)
    real_b = torch.FloatTensor(opt.batchSize, opt.output_nc, 256, 256)
    real_c = torch.FloatTensor(opt.batchSize, opt.output_nc, 256, 256)

    if cuda:
        netD = netD.cuda()
        netG = netG.cuda()
        criterionGAN = criterionGAN.cuda()
        criterionL1 = criterionL1.cuda()
        criterionMSE = criterionMSE.cuda()
        real_a = real_a.cuda()
        real_b = real_b.cuda()
        real_c=real_c.cuda()

    real_a = Variable(real_a)
    real_b = Variable(real_b)
    real_c = Variable(real_c)



    SEG_NET = torch.load("path_exp/SEG_NET.pth")

    optimizerSeg = optim.Adam(SEG_NET.parameters(), lr=opt.lr/10, betas=(opt.beta1, 0.999))



    features = Vgg16()

    if cuda:
        SEG_NET.cuda()
        features.cuda()


    bon = 100000000  # best (lowest) validation MSE seen so far
    for epoch in range(opt.nEpochs):
        features.eval()

        if finetune and epoch > 50:
            SEG_NET.train()
        else:
            SEG_NET.eval()

        loss_epoch_gen = 0
        loss_epoch_dis = 0
        total_segloss = 0
        loss_seg = 0
        i = 0
        for iteration, batch in enumerate(training_data_loader, 1):

            netG.train()
            i += 1

            # forward
            real_a_cpu, real_b_cpu, real_c_cpu = batch[0], batch[1], batch[2]

            with torch.no_grad():
                real_a = real_a.resize_(real_a_cpu.size()).copy_(real_a_cpu)
                real_b = real_b.resize_(real_b_cpu.size()).copy_(real_b_cpu)
                real_c = real_c.resize_(real_c_cpu.size()).copy_(real_c_cpu)


            fake_b = netG(real_a)

            ############################
            # (1) Update D network: maximize log(D(x,y)) + log(1 - D(x,G(x)))
            ###########################

            optimizerD.zero_grad()

            # train with fake
            fake_ab = torch.cat((real_a, fake_b), 1)
            pred_fake = netD.forward(fake_ab.detach())
            loss_d_fake = criterionGAN(pred_fake, False)

            # train with real
            real_ab = torch.cat((real_a, real_b), 1)
            pred_real = netD.forward(real_ab)
            loss_d_real = criterionGAN(pred_real, True)

            # Combined loss
            loss_d = (loss_d_fake + loss_d_real) * 0.5

            loss_d.backward()

            optimizerD.step()

            ############################
            # (2) Update G network: maximize log(D(x,G(x))) + L1(y,G(x))
            ##########################
            optimizerG.zero_grad()
            # First, G(A) should fake the discriminator
            fake_ab = torch.cat((real_a, fake_b), 1)
            pred_fake = netD.forward(fake_ab)
            loss_g_gan = criterionGAN(pred_fake, True)


            # Second, G(A) = B
            loss_g_l1 = criterionL1(fake_b, real_b) * opt.lamb

            features_y = features(fake_b)
            features_x = features(real_b)

            loss_content = criterionMSE(features_y[1], features_x[1])*10


            if segloss:
                fake_seg = SEG_NET(fake_b)
                loss_seg = criterionMSE(fake_seg, real_c) * 10

                total_segloss += loss_seg.item()

                features_y = features(fake_seg)
                features_x = features(real_c)

                # VGG-feature MSE on the segmentation maps; computed here but
                # never added to loss_g below.
                seg_feat_loss = criterionMSE(features_y[1], features_x[1]) * 10

                loss_g = loss_g_gan + loss_g_l1 + loss_content + loss_seg


            else:
                loss_g = loss_g_gan + loss_g_l1+loss_content

            loss_epoch_gen += loss_g.item()
            loss_epoch_dis += loss_d.item()





            if finetune and epoch > 50:
                # Keep the graph alive so loss_seg can backpropagate after the
                # generator step (loss_seg must be a tensor, i.e. segloss=True).
                loss_g.backward(retain_graph=True)
                optimizerG.step()

                # zero_grad must precede backward() here, otherwise the freshly
                # computed segmentation gradients are wiped before step().
                optimizerSeg.zero_grad()
                loss_seg.backward()
                optimizerSeg.step()

            else:
                loss_g.backward()
                optimizerG.step()



            errors_ret = OrderedDict()
            errors_ret['Total_G'] = float(loss_g)
            errors_ret['Content'] = float(loss_content)
            errors_ret['GAN'] = float(loss_g_gan)
            errors_ret['L1'] = float(loss_g_l1)
            errors_ret['D'] = float(loss_d)



            if i % 10 == 0:  # log training losses every 10 iterations (i starts at 1)
                visualizer.plot_current_losses(
                    epoch, i / (len(training_data_loader) * opt.batchSize), errors_ret)




        print("===> Epoch[{}]: Loss_D: {:.4f} Loss_G: {:.4f} Loss Seg: {:.4f} ".format(epoch, loss_epoch_dis,loss_epoch_gen, total_segloss))
        checkpoint_current()
        MSE=val()
        if MSE < bon:
            bon = MSE
            checkpoint()
            checkpoint_seg()
            print("BEST EPOCH SAVED")

    testing()
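
The finetuning branch above exercises PyTorch's zero_grad / backward / step contract with two optimisers driven from one graph. Stripped of the surrounding training code, the pattern is the following minimal sketch (names hypothetical, not from the example):

opt_main.zero_grad()
loss_main.backward(retain_graph=True)  # keep the graph for the second backward
opt_main.step()

opt_aux.zero_grad()
loss_aux.backward()                    # reuses the retained graph
opt_aux.step()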
Example #25
0
        if opt.two_step:
            optimizer = None
            netG = define_G(3, 3, 64, 'batch', False)

            if opt.end:
                optimizerG = optim.Adam(list(model.parameters()) +
                                        list(netG.parameters()),
                                        opt.learning_rate,
                                        betas=(0.9, 0.999))
            else:
                optimizerG = optim.Adam(netG.parameters(),
                                        opt.learning_rate,
                                        betas=(0.9, 0.999))
            print(netG)
            if opt.use_gan:
                netD = define_D(3 + 3, 64, 'batch', use_sigmoid=False)
                optimizerD = optim.Adam(netD.parameters(),
                                        opt.learning_rate,
                                        betas=(0.9, 0.999))
                print(netD)
            else:
                netD = None
                optimizerD = None
        else:
            if opt.use_gan:
                netD = define_D(3 + 3, 64, 'batch', use_sigmoid=False)
                optimizerD = optim.Adam(netD.parameters(),
                                        opt.learning_rate,
                                        betas=(0.9, 0.999))
                print(netD)
            else:
Example #26
0
# testing_data_loader = DataLoader(dataset=DatasetFromFolder(dataroot_test, direction), num_workers=threads, batch_size=test_batch_size)
visualization_data_loader = DataLoader(dataset=visual_set,
                                       num_workers=threads,
                                       batch_size=test_batch_size,
                                       shuffle=False)

print('===> Building models')

# load the generator and discriminator
net_g = define_G(input_nc,
                 output_nc,
                 ngf,
                 netG=netG_type,
                 norm='batch',
                 use_dropout=False,
                 gpu_id=device)
net_d = define_D(input_nc + output_nc, ndf, 'n_layers', gpu_id=device)

# loss functions
criterionGAN = GANLoss().to(device)
criterionL1 = nn.L1Loss().to(device)
criterionMSE = nn.MSELoss().to(device)

# optimisers
optimizer_g = optim.Adam(net_g.parameters(), lr=lr, betas=(beta1, 0.999))
optimizer_d = optim.Adam(net_d.parameters(), lr=lr, betas=(beta1, 0.999))

# learning-rate adjustment policy
net_g_scheduler = get_scheduler(optimizer_g)
net_d_scheduler = get_scheduler(optimizer_d)

# training loop
for epoch in range(epoch_count, n_epoch + n_epoch_decay + 1):

    epoch_start_time = time.time()  # start timer in each epoch
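
The excerpt is cut off inside the epoch loop. In pix2pix-style loops the two schedulers created above are typically stepped once per epoch, after the pass over the batches; a minimal sketch of that tail, assumed rather than taken from the original:

    # ... forward/backward over the training batches goes here ...
    net_g_scheduler.step()  # advance both LR schedules once per epoch
    net_d_scheduler.step()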
Example #27
0
train_set = get_training_set(root_path + opt.dataset)
test_set = get_test_set(root_path + opt.dataset)
training_data_loader = DataLoader(dataset=train_set,
                                  num_workers=opt.threads,
                                  batch_size=opt.batchSize,
                                  shuffle=True)
testing_data_loader = DataLoader(dataset=test_set,
                                 num_workers=opt.threads,
                                 batch_size=opt.testBatchSize,
                                 shuffle=False)

print('===> Building model')

netG = define_G(opt.input_nc, opt.output_nc, opt.ngf, 'batch', False, [0])

netD = define_D(opt.input_nc + opt.output_nc, opt.ndf, 'batch', False, [0])

print('loading done')

criterionGAN = GANLoss()
criterionL1 = nn.L1Loss()
criterionMSE = nn.MSELoss()

# setup optimizer
optimizerG = optim.Adam(netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
optimizerD = optim.Adam(netD.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))

print('---------- Networks initialized -------------')
print_network(netG)
print_network(netD)
print('-----------------------------------------------')
Example #28
0
    test_set = PanColorDataset(mode='test', dataset=opt.dataset)
elif opt.model == 'PanSRGAN':
    train_set = PanSRDataset(mode='train', dataset=opt.dataset)
    test_set = PanSRDataset(mode='test', dataset=opt.dataset)

training_data_loader = DataLoader(
    dataset=train_set, num_workers=opt.threads, batch_size=opt.batchSize, shuffle=True)
testing_data_loader = DataLoader(
    dataset=test_set, num_workers=opt.threads, batch_size=opt.batchSize, shuffle=False)



## Define Network
netG = define_G(opt.input_nc, opt.output_nc, opt.ngf, 'batch', 'leakyrelu',
                opt.useDropout, opt.upConvType, opt.gtype, opt.blockType,
                opt.nblocks, gpus, n_downsampling=opt.ndowns)
netD = define_D(opt.input_nc + opt.output_nc, opt.ndf, 'batch',
                not opt.lsgan, opt.nlayers, gpus)

## Define Losses
criterionGAN = GANLoss(use_lsgan=opt.lsgan)
criterionL1 = nn.L1Loss()
criterionMSE = nn.MSELoss()

## Define Optimizers
optimizerG = optim.Adam(netG.parameters(), lr=opt.lr, betas=(
    opt.beta1, 0.999), weight_decay=opt.regTerm)
optimizerD = optim.Adam(netD.parameters(), lr=opt.lr, betas=(
    opt.beta1, 0.999), weight_decay=opt.regTerm)

## Continue from a checkpoint

if opt.cont:
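
The excerpt stops inside the resume branch. Purely as an illustration of what such a branch commonly contains (hypothetical, not the original code; the checkpoint flag and key names are assumptions):

    ckpt = torch.load(opt.ckptPath)
    netG.load_state_dict(ckpt['netG'])
    netD.load_state_dict(ckpt['netD'])
    optimizerG.load_state_dict(ckpt['optimizerG'])
    optimizerD.load_state_dict(ckpt['optimizerD'])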
Example #29
0
from data.eval_dataset import EvalDataset
import matplotlib.pyplot as plt
from scipy.misc import imsave  # removed in SciPy >= 1.2; imageio.imwrite is the usual replacement
from torch.utils.data import DataLoader
import torch.nn.functional as F
import torch
import numpy as np
import networks  # local module providing define_D and GANLoss (assumed importable here)

# Panorama parameters
batch_size = 1
NUM_SLICES = 3
SAMPLES = 5  # Number of horizon offsets to sample before finetuning

# Set up deep learning stuff
device = torch.device("cuda")
model = networks.define_D(6, 64, 'n_layers', n_layers_D=3, use_sigmoid=False)
chkpt = torch.load('checkpoints/patch_horizon/49.pth')
model.load_state_dict(chkpt['state_dict'])
model.to(device)
patch_loss = networks.GANLoss()

# Create dataset
dataset = EvalDataset()
#dataset.initialize('../../../data/semanticLandscapes512/train_img', allrandom=True, return_idx=True)
#dataset.initialize('../../../data/MITCVCL/coast', allrandom=True)
dataset.initialize('../../../data/instagram_landscapes/anne_karin_69',
                   allrandom=True)


def convertImage(im):
    '''
Example #30
0
train_set = get_training_set(root_path)
test_set = get_test_set(root_path)

training_data_loader = DataLoader(dataset=train_set, num_workers=opt.threads,
                                  batch_size=opt.batchSize, shuffle=True)

test_data_loader = DataLoader(dataset=test_set, num_workers=opt.threads,
                              batch_size=opt.testBatchSize, shuffle=False)

cprint('==> Preparing Data Set: Complete\n', 'green')

################################################################################
cprint('==> Building Models', 'yellow')
netG = define_G(opt.input_nc, opt.output_nc, opt.ngf, norm='batch', use_dropout=False, gpu_ids=gpu_ids)
netD = define_D(opt.input_nc + opt.output_nc, opt.ndf, norm='batch', use_sigmoid=False, gpu_ids=gpu_ids)

print('---------- Networks initialized -------------')
print_network(netG)
print_network(netD)
print('-----------------------------------------------\n')
cprint('==> Building Models: Complete\n', 'green')

################################################################################
criterionGAN = GANLoss()
criterionL1 = nn.L1Loss()
criterionMSE = nn.MSELoss()

# setup optimizer
optimizerG = optim.Adam(netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
optimizerD = optim.Adam(netD.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
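
The excerpt stops once the optimisers exist. For completeness, a minimal single-iteration sketch of how these pieces combine, following the conditional-GAN update pattern used throughout these examples (the paired real_a/real_b batch format and the L1 weight of 10 are assumptions, not part of this excerpt):

for real_a, real_b in training_data_loader:
    # discriminator update: real pairs vs. detached fakes
    optimizerD.zero_grad()
    fake_b = netG(real_a)
    loss_d_fake = criterionGAN(netD(torch.cat((real_a, fake_b), 1).detach()), False)
    loss_d_real = criterionGAN(netD(torch.cat((real_a, real_b), 1)), True)
    loss_d = 0.5 * (loss_d_fake + loss_d_real)
    loss_d.backward()
    optimizerD.step()

    # generator update: fool the discriminator and match the target in L1
    optimizerG.zero_grad()
    loss_g = (criterionGAN(netD(torch.cat((real_a, fake_b), 1)), True)
              + criterionL1(fake_b, real_b) * 10)
    loss_g.backward()
    optimizerG.step()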