コード例 #1
0
 def print_network(self):
     """Print structural summaries of the model's networks to stdout."""
     # Discriminator exists only when training with a non-zero GAN weight.
     show_discriminator = self.isTrain and self.opt.lambda_gan > 0
     print('--------------------- Model ---------------------')
     print('##################### NetG #####################')
     networks.print_network(self.net_i)
     if show_discriminator:
         print('##################### NetD #####################')
         networks.print_network(self.netD)
コード例 #2
0
 def print_network(self):
     """In debug mode, print summaries of the VGG (if any), NetG and NetD."""
     if not self.opt.debug:
         return
     print('--------------------- Model ---------------------')
     if self.vgg is not None:
         print('##################### VGG #####################')
         from torchsummary import summary
         # Summarise the torch VGG model for a 3x224x224 input.
         summary(self.vgg, input_size=(3, 224, 224))
     print('##################### NetG #####################')
     networks.print_network(self.net_i)
     # Discriminator summary only when training with a GAN objective.
     if self.isTrain and self.opt.lambda_gan > 0:
         print('##################### NetD #####################')
         networks.print_network(self.netD)
コード例 #3
0
  def __init__(self, opt):
    """Build the pose-GAN: generator, discriminator, optimizers and L1 loss."""
    super(Pose_GAN, self).__init__()

    # Encoder/decoder filter widths; extra layers when any image side >= 256.
    large_image = max(opt.image_size) >= 256
    if opt.checkMode == 0:
      nfilters_decoder = (512, 512, 512, 512, 256, 128, 3) if large_image else (512, 512, 512, 256, 128, 3)
      nfilters_encoder = (64, 128, 256, 512, 512, 512, 512) if large_image else (64, 128, 256, 512, 512, 512)
    else:
      # Shallow check-mode variant.
      nfilters_decoder = (256, 128, 3) if large_image else (128, 3)
      nfilters_encoder = (64, 128, 256) if large_image else (64, 128)

    # Input channels: RGB image plus one (or two, when the input pose is also
    # fed to the network) pose-map stacks.
    input_nc = 3 + (2 * opt.pose_dim if opt.use_input_pose else opt.pose_dim)

    self.num_stacks = opt.num_stacks
    self.batch_size = opt.batch_size
    self.pose_dim = opt.pose_dim

    if opt.gen_type == 'stacked':
      self.gen = Stacked_Generator(input_nc, opt.num_stacks, opt.pose_dim, nfilters_encoder, nfilters_decoder, use_input_pose=opt.use_input_pose)
    elif opt.gen_type == 'baseline':
      self.gen = Generator(input_nc, nfilters_encoder, nfilters_decoder, use_input_pose=opt.use_input_pose)
    else:
      raise Exception('Invalid gen_type')
    # discriminator also sees the output image for the target pose
    self.disc = Discriminator(input_nc + 3, use_input_pose=opt.use_input_pose, checkMode=opt.checkMode)
    print('---------- Networks initialized -------------')
    print_network(self.gen)
    print_network(self.disc)
    print('-----------------------------------------------')

    # One Adam optimizer per network.
    lr = opt.learning_rate
    self.disc_opt = torch.optim.Adam(self.disc.parameters(), lr=lr, betas=(0.5, 0.999))
    self.gen_opt = torch.optim.Adam(self.gen.parameters(), lr=lr, betas=(0.5, 0.999))

    # Move to GPU and apply Xavier weight initialization.
    self.gen.cuda()
    self.disc.cuda()
    self.disc.apply(xavier_weights_init)
    self.gen.apply(xavier_weights_init)

    # L1 reconstruction criterion.
    self.ll_loss_criterion = torch.nn.L1Loss()
コード例 #4
0
    def initialize(self, opt):
        """Set up generator/discriminator, losses, optimizers and schedulers."""
        BaseModel.initialize(self, opt)
        self.isTrain = opt.isTrain

        # Generator is always built; discriminator only while training.
        self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf,
                                      opt.which_model_netG, opt.norm, not opt.no_dropout, opt.init_type, self.gpu_ids)
        if self.isTrain:
            use_sigmoid = opt.no_lsgan
            # Discriminator sees the concatenated (input, output) pair.
            self.netD = networks.define_D(opt.input_nc + opt.output_nc, opt.ndf,
                                          opt.which_model_netD,
                                          opt.n_layers_D, opt.norm, use_sigmoid, opt.init_type, self.gpu_ids)
        # Resume weights when testing, or when explicitly continuing training.
        if not self.isTrain or opt.continue_train:
            self.load_network(self.netG, 'G', opt.which_epoch)
            if self.isTrain:
                self.load_network(self.netD, 'D', opt.which_epoch)

        if self.isTrain:
            self.fake_AB_pool = ImagePool(opt.pool_size)
            # GAN + L1 objectives.
            self.criterionGAN = networks.GANLoss(use_lsgan=not opt.no_lsgan, tensor=self.Tensor)
            self.criterionL1 = torch.nn.L1Loss()

            # One Adam optimizer per network, each with its own LR scheduler.
            self.optimizer_G = torch.optim.Adam(self.netG.parameters(),
                                                lr=opt.lr, betas=(opt.beta1, 0.999))
            self.optimizer_D = torch.optim.Adam(self.netD.parameters(),
                                                lr=opt.lr, betas=(opt.beta1, 0.999))
            self.optimizers = [self.optimizer_G, self.optimizer_D]
            self.schedulers = [networks.get_scheduler(optim, opt)
                               for optim in self.optimizers]

        print('---------- Networks initialized -------------')
        networks.print_network(self.netG)
        if self.isTrain:
            networks.print_network(self.netD)
        print('-----------------------------------------------')
コード例 #5
0
    def __init__(self, opt, _isTrain=False):
        """Build the hourglass depth-prediction network.

        Args:
            opt: options object. ``opt.input`` selects the input configuration,
                ``opt.mode`` must be 'Ours_Bilinear', ``opt.lr`` seeds the
                optimizer learning rate.
            _isTrain: when False (default), pretrained weights are loaded from
                the checkpoint matching the input channel count.

        Raises:
            ValueError: if ``opt.input`` is not one of the known configurations.
        """
        self.initialize(opt)

        self.mode = opt.mode
        # Number of input channels for each supported configuration.
        if opt.input == 'single_view':
            self.num_input = 3
        elif opt.input == 'two_view':
            self.num_input = 6
        elif opt.input == 'two_view_k':
            self.num_input = 7
        else:
            raise ValueError("Unknown input type %s" % opt.input)

        if self.mode == 'Ours_Bilinear':
            print(
                '======================================  DIW NETWORK TRAIN FROM %s======================='
                % self.mode)

            new_model = hourglass.HourglassModel(self.num_input)

            print(
                '===================Loading Pretrained Model OURS ==================================='
            )

            if not _isTrain:
                # num_input is guaranteed to be 3, 6 or 7 by the dispatch above,
                # so the checkpoint name can be derived directly (the original
                # if/elif chain with an unreachable error branch is folded).
                model_parameters = self.load_network(
                    new_model, 'G',
                    'best_depth_Ours_Bilinear_inc_%d' % self.num_input)
                new_model.load_state_dict(model_parameters)

            # Wrap for multi-GPU execution across all visible devices.
            new_model = torch.nn.parallel.DataParallel(
                new_model.cuda(), device_ids=range(torch.cuda.device_count()))

            self.netG = new_model

        else:
            print('ONLY SUPPORT Ours_Bilinear')
            sys.exit()

        self.old_lr = opt.lr
        self.netG.train()

        # Loss, optimizer and scheduler setup (the original wrapped this in a
        # dead `if True:` block, removed here).
        self.criterion_joint = networks.JointLoss(opt)
        # initialize optimizers
        self.optimizer_G = torch.optim.Adam(
            self.netG.parameters(), lr=opt.lr, betas=(0.9, 0.999))
        self.scheduler = networks.get_scheduler(self.optimizer_G, opt)
        print('---------- Networks initialized -------------')
        networks.print_network(self.netG)
        print('-----------------------------------------------')
コード例 #6
0
ファイル: pix2pix.py プロジェクト: arnoldqin/SR
    def initialize(self, opt):
        """Build the encoder/decoder generator stack, discriminator, loss
        functions, optimizers and schedulers for this SR pix2pix variant.

        NOTE(review): the ``continue_train``/``load_path`` branches below
        reference ``self.netG_B``, ``self.netG_A_running``,
        ``self.netG_B_running`` and ``self.netD_B``, whose definitions are
        commented out in this method -- those paths would raise
        AttributeError/NameError if taken.  Verify before enabling them.
        """
        BaseModel.initialize(self, opt)

        nb = opt.batchSize
        size = opt.fineSize
        self.target_weight = []
        # Pre-allocated input buffers: A (input), B (target), C and C_sr.
        self.input_A = self.Tensor(nb, opt.input_nc, size, size)
        self.input_B = self.Tensor(nb, opt.output_nc, size, size)
        self.input_C = self.Tensor(nb, opt.output_nc, size, size)
        self.input_C_sr = self.Tensor(nb, opt.output_nc, size, size)
        if opt.aux:
            # Auxiliary buffers mirror the main ones.
            self.A_aux = self.Tensor(nb, opt.input_nc, size, size)
            self.B_aux = self.Tensor(nb, opt.output_nc, size, size)
            self.C_aux = self.Tensor(nb, opt.output_nc, size, size)

        # Encoder for domain A (2 downsampling steps).
        self.netE_A = networks.define_G(opt.input_nc,
                                        opt.output_nc,
                                        opt.ngf,
                                        'ResnetEncoder_my',
                                        opt.norm,
                                        not opt.no_dropout,
                                        opt.init_type,
                                        self.gpu_ids,
                                        opt=opt,
                                        n_downsampling=2)

        mult = self.netE_A.get_mult()

        # Encoder for domain C (fixed 64 base filters, 3 downsampling steps).
        self.netE_C = networks.define_G(opt.input_nc,
                                        opt.output_nc,
                                        64,
                                        'ResnetEncoder_my',
                                        opt.norm,
                                        not opt.no_dropout,
                                        opt.init_type,
                                        self.gpu_ids,
                                        opt=opt,
                                        n_downsampling=3)

        # Shared decoder, built from netE_A's channel multiplier.
        self.net_D = networks.define_G(opt.input_nc,
                                       opt.output_nc,
                                       opt.ngf,
                                       'ResnetDecoder_my',
                                       opt.norm,
                                       not opt.no_dropout,
                                       opt.init_type,
                                       self.gpu_ids,
                                       opt=opt,
                                       mult=mult)

        mult = self.net_D.get_mult()

        # Second decoder stage with one upsampling step.
        self.net_Dc = networks.define_G(opt.input_nc,
                                        opt.output_nc,
                                        opt.ngf,
                                        'ResnetDecoder_my',
                                        opt.norm,
                                        not opt.no_dropout,
                                        opt.init_type,
                                        self.gpu_ids,
                                        opt=opt,
                                        mult=mult,
                                        n_upsampling=1)

        # NOTE(review): netG_A is built from net_D's multiplier (mult is only
        # refreshed from net_Dc below, before netG_C) -- confirm intentional.
        self.netG_A = networks.define_G(opt.input_nc,
                                        opt.output_nc,
                                        opt.ngf,
                                        'GeneratorLL',
                                        opt.norm,
                                        not opt.no_dropout,
                                        opt.init_type,
                                        self.gpu_ids,
                                        opt=opt,
                                        mult=mult)

        mult = self.net_Dc.get_mult()

        # Final generator head for the C branch.
        self.netG_C = networks.define_G(opt.input_nc,
                                        opt.output_nc,
                                        opt.ngf,
                                        'GeneratorLL',
                                        opt.norm,
                                        not opt.no_dropout,
                                        opt.init_type,
                                        self.gpu_ids,
                                        opt=opt,
                                        mult=mult)

        #        self.netG_A_running = networks.define_G(opt.input_nc, opt.output_nc,
        #                                       opt.ngf, opt.which_model_netG, opt.norm, not opt.no_dropout, opt.init_type, self.gpu_ids, opt=opt)
        #      set_eval(self.netG_A_running)
        #     accumulate(self.netG_A_running, self.netG_A, 0)
        #        self.netG_B = networks.define_G(opt.output_nc, opt.input_nc,
        #                                       opt.ngf, opt.which_model_netG, opt.norm, not opt.no_dropout, opt.init_type, self.gpu_ids, opt=opt)
        #    self.netG_B_running = networks.define_G(opt.output_nc, opt.input_nc,
        #                                   opt.ngf, opt.which_model_netG, opt.norm, not opt.no_dropout, opt.init_type, self.gpu_ids, opt=opt)
        #  set_eval(self.netG_B_running)
        # accumulate(self.netG_B_running, self.netG_B, 0)
        if self.isTrain:
            # no_lsgan => vanilla GAN, which needs a sigmoid output.
            use_sigmoid = opt.no_lsgan
            self.netD_A = networks.define_D(opt.output_nc,
                                            opt.ndf,
                                            opt.which_model_netD,
                                            opt.n_layers_D,
                                            opt.norm,
                                            use_sigmoid,
                                            opt.init_type,
                                            self.gpu_ids,
                                            opt=opt)
#         self.netD_B = networks.define_D(opt.input_nc, opt.ndf,
#                                          opt.which_model_netD,
#                                        opt.n_layers_D, opt.norm, use_sigmoid, opt.init_type, self.gpu_ids, opt=opt)
        print('---------- Networks initialized -------------')
        #        networks.print_network(self.netG_B, opt, (opt.input_nc, opt.fineSize, opt.fineSize))
        networks.print_network(self.netE_C, opt,
                               (opt.input_nc, opt.fineSize, opt.fineSize))
        networks.print_network(
            self.net_D, opt, (opt.ngf * 4, opt.fineSize / 4, opt.fineSize / 4))
        networks.print_network(self.net_Dc, opt,
                               (opt.ngf, opt.CfineSize / 2, opt.CfineSize / 2))
        # networks.print_network(self.netG_B, opt)
        if self.isTrain:
            networks.print_network(self.netD_A, opt)
            # networks.print_network(self.netD_B, opt)
        print('-----------------------------------------------')

        # Resume from checkpoints when testing or continuing a training run.
        if not self.isTrain or opt.continue_train:
            print('Loaded model')
            which_epoch = opt.which_epoch
            self.load_network(self.netG_A, 'G_A', which_epoch)
            self.load_network(self.netG_B, 'G_B', which_epoch)
            if self.isTrain:
                self.load_network(self.netG_A_running, 'G_A', which_epoch)
                self.load_network(self.netG_B_running, 'G_B', which_epoch)
                self.load_network(self.netD_A, 'D_A', which_epoch)
                self.load_network(self.netD_B, 'D_B', which_epoch)

        # Optionally warm-start from an external checkpoint directory.
        if self.isTrain and opt.load_path != '':
            print('Loaded model from load_path')
            which_epoch = opt.which_epoch
            load_network_with_path(self.netG_A,
                                   'G_A',
                                   opt.load_path,
                                   epoch_label=which_epoch)
            load_network_with_path(self.netG_B,
                                   'G_B',
                                   opt.load_path,
                                   epoch_label=which_epoch)
            load_network_with_path(self.netD_A,
                                   'D_A',
                                   opt.load_path,
                                   epoch_label=which_epoch)
            load_network_with_path(self.netD_B,
                                   'D_B',
                                   opt.load_path,
                                   epoch_label=which_epoch)

        if self.isTrain:
            self.old_lr = opt.lr
            # Image pools of previously generated samples for the discriminator.
            self.fake_A_pool = ImagePool(opt.pool_size)
            self.fake_B_pool = ImagePool(opt.pool_size)
            self.fake_C_pool = ImagePool(opt.pool_size)
            # define loss functions
            # Per-scale target weights are used only when one weight per
            # discriminator scale was collected.
            if len(self.target_weight) == opt.num_D:
                print(self.target_weight)
                self.criterionGAN = networks.GANLoss(
                    use_lsgan=not opt.no_lsgan,
                    tensor=self.Tensor,
                    target_weight=self.target_weight,
                    gan=opt.gan)
            else:
                self.criterionGAN = networks.GANLoss(
                    use_lsgan=not opt.no_lsgan,
                    tensor=self.Tensor,
                    gan=opt.gan)
            self.criterionCycle = torch.nn.L1Loss()
            self.criterionIdt = torch.nn.L1Loss()
            self.criterionColor = networks.ColorLoss()
            # initialize optimizers
            # Each optimizer updates a different subset of the sub-networks.
            self.optimizer_G = torch.optim.Adam(itertools.chain(
                self.netE_A.parameters(), self.net_D.parameters(),
                self.netG_A.parameters(), self.net_Dc.parameters(),
                self.netG_C.parameters()),
                                                lr=opt.lr,
                                                betas=(opt.beta1, 0.999))
            self.optimizer_AE = torch.optim.Adam(itertools.chain(
                self.netE_C.parameters(), self.net_D.parameters(),
                self.net_Dc.parameters(), self.netG_C.parameters()),
                                                 lr=opt.lr,
                                                 betas=(opt.beta1, 0.999))
            self.optimizer_G_A_sr = torch.optim.Adam(itertools.chain(
                self.netE_A.parameters(), self.net_D.parameters(),
                self.net_Dc.parameters(), self.netG_C.parameters()),
                                                     lr=opt.lr,
                                                     betas=(opt.beta1, 0.999))
            self.optimizer_AE_sr = torch.optim.Adam(itertools.chain(
                self.netE_C.parameters(), self.net_D.parameters(),
                self.netG_A.parameters()),
                                                    lr=opt.lr,
                                                    betas=(opt.beta1, 0.999))
            self.optimizer_D_A = torch.optim.Adam(self.netD_A.parameters(),
                                                  lr=opt.lr,
                                                  betas=(opt.beta1, 0.999))
            #       self.optimizer_D_B = torch.optim.Adam(self.netD_B.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
            # NOTE(review): optimizer_G_A_sr is built but never registered
            # below (its append is commented out), so it gets no scheduler.
            self.optimizers = []
            self.schedulers = []
            self.optimizers.append(self.optimizer_G)
            self.optimizers.append(self.optimizer_AE)
            # self.optimizers.append(self.optimizer_G_A_sr)
            self.optimizers.append(self.optimizer_AE_sr)
            self.optimizers.append(self.optimizer_D_A)
            #   self.optimizers.append(self.optimizer_D_B)
            for optimizer in self.optimizers:
                self.schedulers.append(networks.get_scheduler(optimizer, opt))
コード例 #7
0
        save_path = os.path.join(opt.checkpoints_dir, opt.name, save_filename)
        torch.save(network.cpu().state_dict(), save_path)
        if torch.cuda.is_available():
            network.cuda()

# Build a deblurring generator and a multi-input blur generator with matching
# options, then restore both from saved checkpoints.
netG_deblur = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf,
                                      opt.which_model_netG, opt.norm, not opt.no_dropout, opt.gpu_ids, False,
                                      opt.learn_residual)
netG_blur = multi_in_networks.define_G(opt.input_nc, opt.output_nc, opt.ngf,
                                      opt.which_model_netG, opt.norm, not opt.no_dropout, opt.gpu_ids, False,
                                      opt.learn_residual)

# Load pretrained weights for the requested epoch label.
load_network(netG_deblur, 'deblur_G', opt.which_epoch)
load_network(netG_blur, 'blur_G', opt.which_epoch)
print('------- Networks deblur_G initialized ---------')
networks.print_network(netG_deblur)
print('-----------------------------------------------')


# ### Freeze layers

# In[6]:


def freeze_single_input(model,num_layers_frozen=19):

    ct=0
    for child in list(model.children())[0]:
        ct+=1
        if ct<num_layers_frozen:
            for param in child.parameters():
コード例 #8
0
 def print_network(self):
     """Print a structural summary of the wrapped network."""
     banner = '--------------------- NetworkWrapper ---------------------'
     print(banner)
     networks.print_network(self.net)
コード例 #9
0
# visualizer = Visualizer(opt)
# if opt.distributed:
#         torch.cuda.set_device(args.local_rank)
#         torch.distributed.init_process_group(backend="nccl", init_method="env://")
#         synchronize()

# Pull the per-network optimizers off the (DataParallel-wrapped) model.
optimizer_G, optimizer_D = model.module.optimizer_G, model.module.optimizer_D

# Resume the global step counter from where the previous run stopped.
total_steps = (start_epoch - 1) * dataset_size + epoch_iter

# Offsets so that display/print/save keep firing on their usual boundaries
# after resuming mid-epoch.
display_delta = total_steps % opt.display_freq
print_delta = total_steps % opt.print_freq
save_delta = total_steps % opt.save_latest_freq
netVAE = networks.define_VAE(opt.input_nc)
networks.print_network(netVAE)

for epoch in range(start_epoch, opt.niter + opt.niter_decay + 1):
    epoch_start_time = time.time()
    if epoch != start_epoch:
        # Wrap the within-epoch iteration counter after a resume.
        epoch_iter = epoch_iter % dataset_size
    for i, data in enumerate(dataset, start=epoch_iter):
        if total_steps % opt.print_freq == print_delta:
            iter_start_time = time.time()

        total_steps += opt.batchSize
        epoch_iter += opt.batchSize

        # whether to collect output images
        save_fake = total_steps % opt.display_freq == display_delta
        # print("DATA.shape:  ", data)
コード例 #10
0
ファイル: ELD_model.py プロジェクト: szWingLee/ELD
 def print_network(self):
     """Emit a layer-by-layer summary of the generator network."""
     header = '--------------------- Model ---------------------'
     print(header)
     networks.print_network(self.netG)
コード例 #11
0
    def initialize(self):  # , opt
        """One-stop setup: hard-coded options, generator, checkpoint loading,
        loss functions and the generator optimizer.

        NOTE(review): options that normally come from ``opt`` are hard-coded
        locals here; the original ``opt`` parameter is commented out.
        """
        BaseModel.initialize(self)  # , opt

        # Hard-coded hyper-parameters (formerly command-line options).
        batchSize = 32
        fineSize = 256
        input_nc = 3
        output_nc = 3
        vgg = 0
        skip = 0.8
        ngf = 64
        pool_size = 50
        norm = 'instance'
        lr = 0.0001
        no_dropout = True
        no_lsgan = True
        continue_train = True
        use_wgan = 0.0
        use_mse = True
        beta1 = 0.5

        # batch size
        nb = batchSize
        # image size
        size = fineSize
        #self.opt = opt
        self.input_A = self.Tensor(nb, input_nc, size, size)
        self.input_B = self.Tensor(nb, output_nc, size, size)
        self.input_img = self.Tensor(nb, input_nc, size, size)
        self.input_A_gray = self.Tensor(nb, 1, size, size)

        # Default 0: perceptual (VGG) loss is only built when vgg > 0.
        if vgg > 0:
            self.vgg_loss = networks.PerceptualLoss()
            self.vgg_loss.cuda()
            self.vgg = networks.load_vgg16("./model")
            self.vgg.eval()
            for param in self.vgg.parameters():
                param.requires_grad = False
        # load/define networks
        # The naming conversion is different from those used in the paper
        # Code (paper): G_A (G), G_B (F), D_A (D_Y), D_B (D_X)

        # default=0.8, help='B = net.forward(A) + skip*A'
        skip = skip > 0

        # which_model_netG,  default = 'unet-256', selects model to use for netG
        # ngf, default = 64, of gen filters in first conv layer'
        # norm, default = 'instance', instance normalization or batch normalization
        # no_dropout, default = 'True', no dropout for the generator
        # NOTE(review): `which_model_netG` is not defined in this method; it
        # must exist as a module-level global for this call to work -- verify.
        # (The original `global which_direction` declaration was a no-op --
        # nothing here assigns it -- and has been removed.)
        self.netG_A = networks.define_G(input_nc,
                                        output_nc,
                                        ngf,
                                        which_model_netG,
                                        norm,
                                        not no_dropout,
                                        self.gpu_ids,
                                        skip=skip)

        if not self.isTrain or continue_train:
            # which epoch to load
            # NOTE(review): 'lastest' looks like a typo for 'latest', but it
            # must match the saved checkpoint filename -- do not change blindly.
            which_epoch = 'lastest'
            self.load_network(self.netG_A, 'G_A', which_epoch)

        # --pool_size', default=50, help='the size of image buffer that stores previously generated images'
        # lr, default=0.0001
        if self.isTrain:
            self.old_lr = lr
            self.fake_A_pool = ImagePool(pool_size)
            self.fake_B_pool = ImagePool(pool_size)
            # define loss functions
            if use_wgan:
                self.criterionGAN = networks.DiscLossWGANGP()
            else:
                # no_lsgan = True
                self.criterionGAN = networks.GANLoss(use_lsgan=not no_lsgan,
                                                     tensor=self.Tensor)
            self.criterionCycle = torch.nn.MSELoss() if use_mse else torch.nn.L1Loss()
            self.criterionL1 = torch.nn.L1Loss()
            self.criterionIdt = torch.nn.L1Loss()
            # initialize optimizers
            self.optimizer_G = torch.optim.Adam(self.netG_A.parameters(),
                                                lr=lr,
                                                betas=(beta1, 0.999))

        print('---------- Networks initialized -------------')
        networks.print_network(self.netG_A)
        # Bug fix: was `if isTrain:` (undefined name) -- the flag set on self
        # is what the rest of this method consistently checks.
        if self.isTrain:
            self.netG_A.train()
        else:
            self.netG_A.eval()
        print('-----------------------------------------------')