Example no. 1
    def __init__(self, p):
        super(CycleGAN_inference, self).__init__(p)
        size = p.cropSize

        self.netG_A = networks.define_G(p.input_nc, p.output_nc, p.ngf,
                                        p.which_model_netG, p.norm,
                                        not p.no_dropout, p.init_type,
                                        self.gpu_ids)
        self.netG_B = networks.define_G(p.output_nc, p.input_nc, p.ngf,
                                        p.which_model_netG, p.norm,
                                        not p.no_dropout, p.init_type,
                                        self.gpu_ids)

        def load_model(model, checkpoint_path):
            model.load_state_dict(torch.load(checkpoint_path))

        which_epoch = p.which_epoch
        G_A_checkpoint_path = os.path.join(p.checkpoints_dir, p.name,
                                           f'{which_epoch}_net_G_A.pth')
        G_B_checkpoint_path = os.path.join(p.checkpoints_dir, p.name,
                                           f'{which_epoch}_net_G_B.pth')
        load_model(self.netG_A, G_A_checkpoint_path)
        load_model(self.netG_B, G_B_checkpoint_path)
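One caveat in the load_model helper above: torch.load without a map_location argument tries to restore tensors onto the device they were saved from, which fails on CPU-only machines. A slightly more defensive variant of the helper, as a sketch:

        def load_model(model, checkpoint_path):
            # Load onto CPU first so CPU-only inference also works; the
            # caller can move the model to the target device afterwards.
            state_dict = torch.load(checkpoint_path, map_location='cpu')
            model.load_state_dict(state_dict)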
Example no. 2
    def init_models(self):
        """ Models: G_UM, G_MU, D_M, D_U """
        # Networks
        self.G_UM = networks.define_G(input_nc=1, output_nc=1, ngf=self.config.g_conv_dim,
                                      which_model_netG=self.config.which_model_netG, norm='batch', init_type='normal',
                                      gpu_ids=self.gpu_ids)
        self.G_MU = networks.define_G(input_nc=1, output_nc=1, ngf=self.config.g_conv_dim,
                                      which_model_netG=self.config.which_model_netG, norm='batch', init_type='normal',
                                      gpu_ids=self.gpu_ids)
        self.D_M = networks.define_D(input_nc=1, ndf=self.config.d_conv_dim,
                                     which_model_netD=self.config.which_model_netD,
                                     n_layers_D=3, norm='instance', use_sigmoid=True, init_type='normal',
                                     gpu_ids=self.gpu_ids)
        self.D_U = networks.define_D(input_nc=1, ndf=self.config.d_conv_dim,
                                     which_model_netD=self.config.which_model_netD,
                                     n_layers_D=3, norm='instance', use_sigmoid=True, init_type='normal',
                                     gpu_ids=self.gpu_ids)

        # Optimisers
        # single optimiser for both generators
        self.G_optim = optim.Adam(itertools.chain(self.G_UM.parameters(), self.G_MU.parameters()),
                                  self.config.lr, betas=(self.config.beta1, self.config.beta2))
        self.D_M_optim = optim.Adam(self.D_M.parameters(),
                                    lr=self.config.lr, betas=(self.config.beta1, self.config.beta2))
        self.D_U_optim = optim.Adam(self.D_U.parameters(),
                                    lr=self.config.lr, betas=(self.config.beta1, self.config.beta2))
        self.optimizers = [self.G_optim, self.D_M_optim, self.D_U_optim]

        # Schedulers
        self.schedulers = []
        for optimizer in self.optimizers:
            self.schedulers.append(networks.get_scheduler(optimizer, self.config))
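get_scheduler follows the CycleGAN repository convention of wrapping a standard torch.optim.lr_scheduler around each optimizer, so the training loop is expected to advance every scheduler once per epoch. A minimal sketch of such a method on the same class (the method name here is an assumption):

    def update_learning_rate(self):
        # Advance all learning-rate schedulers by one epoch.
        for scheduler in self.schedulers:
            scheduler.step()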
Example no. 3
    def __init__(self, opt):
        """Initialize the CycleGAN class.

        Parameters:
            opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
        """
        BaseModel.__init__(self, opt)
        # specify the training losses you want to print out. The training/test scripts will call <BaseModel.get_current_losses>
        self.loss_names = ['D_A', 'G_A', 'cycle_A', 'idt_A', 'D_B', 'G_B', 'cycle_B', 'idt_B']
        # # specify the images you want to save/display. The training/test scripts will call <BaseModel.get_current_visuals>
        # visual_names_A = ['real_A', 'fake_B', 'rec_A']
        # visual_names_B = ['real_B', 'fake_A', 'rec_B']
        # if self.isTrain and self.opt.lambda_identity > 0.0:  # if identity loss is used, we also visualize idt_A=G_A(B) and idt_B=G_B(A)
        #     visual_names_A.append('idt_B')
        #     visual_names_B.append('idt_A')
        #
        # self.visual_names = visual_names_A + visual_names_B  # combine visualizations for A and B
        # specify the models you want to save to the disk. The training/test scripts will call <BaseModel.save_networks> and <BaseModel.load_networks>.
        if self.isTrain:
            self.model_names = ['G_A', 'G_B', 'D_A', 'D_B']
        else:  # during test time, only load Gs
            self.model_names = ['G_A', 'G_B']

        # define networks (both Generators and discriminators)
        # Code (vs. paper): G_A (G), G_B (F), D_A (D_Y), D_B (D_X)
        # input file shape (batch, length, dims-256)
        self.netG_A = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, opt.norm,
                                        not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)
        self.netG_B = networks.define_G(opt.output_nc, opt.input_nc, opt.ngf, opt.netG, opt.norm,
                                        not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)

        if self.isTrain:  # define discriminators
            self.netD_A = networks.define_D(opt.output_nc, opt.ndf, opt.netD,
                                            opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids)
            self.netD_B = networks.define_D(opt.input_nc, opt.ndf, opt.netD,
                                            opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids)

        if self.isTrain:
            if opt.lambda_identity > 0.0:  # only works when input and output images have the same number of channels
                assert(opt.input_nc == opt.output_nc)
            self.fake_A_pool = AudioPool(opt.pool_size)  # create image buffer to store previously generated images
            self.fake_B_pool = AudioPool(opt.pool_size)  # create image buffer to store previously generated images
            # define loss functions
            self.criterionGAN = networks.GANLoss(opt.gan_mode).to(self.device)  # define GAN loss.
            self.criterionCycle = nn.L1Loss()
            self.criterionIdt = nn.L1Loss()
            # initialize optimizers; schedulers will be automatically created by function <BaseModel.setup>.
            self.optimizer_G = torch.optim.Adam(itertools.chain(self.netG_A.parameters(), self.netG_B.parameters()), lr=opt.lr, betas=(opt.beta1, 0.999))
            self.optimizer_D = torch.optim.Adam(itertools.chain(self.netD_A.parameters(), self.netD_B.parameters()), lr=opt.lr, betas=(opt.beta1, 0.999))
            self.optimizers.append(self.optimizer_G)
            self.optimizers.append(self.optimizer_D)
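The AudioPool buffers mirror the ImagePool trick from the reference CycleGAN implementation: each discriminator trains on a mix of current and previously generated samples, which stabilises adversarial training. Assuming AudioPool exposes the same query interface as ImagePool, the D_A update would use the pool roughly like this:

        # Sketch, assuming AudioPool.query matches ImagePool.query and the
        # usual backward_D_basic helper from the CycleGAN template exists:
        fake_B = self.fake_B_pool.query(self.fake_B)
        self.loss_D_A = self.backward_D_basic(self.netD_A, self.real_B, fake_B)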
Example no. 4
    def init_models(self):
        # Networks
        self.G_AB = networks.define_G(
            input_nc=self.config.input_nc,
            output_nc=self.config.output_nc,
            ngf=self.config.g_conv_dim,
            which_model_netG=self.config.which_model_netG,
            norm='batch',
            init_type='normal',
            gpu_ids=self.gpu_ids)
        self.D_B = networks.define_D(
            input_nc=self.config.input_nc,
            ndf=self.config.d_conv_dim,
            which_model_netD=self.config.which_model_netD,
            n_layers_D=3,
            norm='instance',
            use_sigmoid=True,
            init_type='normal',
            gpu_ids=self.gpu_ids,
            image_size=self.config.image_size)

        # Optimisers
        self.G_optim = optim.Adam(self.G_AB.parameters(),
                                  lr=self.config.lr,
                                  betas=(self.config.beta1, self.config.beta2))
        self.D_optim = optim.Adam(self.D_B.parameters(),
                                  lr=self.config.lr,
                                  betas=(self.config.beta1, self.config.beta2))
        self.optimizers = [self.G_optim, self.D_optim]

        # Schedulers
        self.schedulers = []
        for optimizer in self.optimizers:
            self.schedulers.append(
                networks.get_scheduler(optimizer, self.config))
Example no. 5
    def __init__(self, args):

        self.ckptdir = args.ckptdir
        self.datadir = args.datadir
        self.input_mode = args.input_mode

        self.in_size_w, self.in_size_h = args.in_size_w, args.in_size_h
        self.out_size_w, self.out_size_h = args.out_size_w, args.out_size_h

        self.skyboxengine = SkyBox(args)

        self.net_G = define_G(input_nc=3, output_nc=1, ngf=64,
                              netG=args.net_G).to(device)
        self.load_model()

        self.video_writer = cv2.VideoWriter('demo.mp4',
                                            cv2.VideoWriter_fourcc(*'MP4V'),
                                            20.0,
                                            (args.out_size_w, args.out_size_h))
        self.video_writer_cat = cv2.VideoWriter(
            'demo-cat.mp4', cv2.VideoWriter_fourcc(*'MP4V'), 20.0,
            (2 * args.out_size_w, args.out_size_h))

        if not os.path.exists(args.output_dir):
            os.mkdir(args.output_dir)

        self.save_jpgs = args.save_jpgs
Example no. 6
    def __init__(self, args):
        self.args = args
        self._name = args.model_name
        self.resume = args.resume
        self.device = args.device

        self.output_directory = args.output_dir
        self.model_dir = args.model_dir

        self.model_store_path = os.path.join(args.model_dir, args.model_name)
        if not os.path.exists(self.model_store_path) and args.mode == 'train':
            os.mkdir(self.model_store_path)
            # Also store all args in a text_file.
            self.write_args_string_to_file()
        if not os.path.exists(self.model_store_path) and args.mode == 'test':
            raise FileNotFoundError(
                'Model does not exist. Please check whether the model has been run yet.'
            )

        self.model_names = []
        self.loss_names = []
        self.optimizer_names = []

        self.losses = {}
        self.start_epoch = 0

        self.G = define_G(args)
        self.G = self.G.to(self.device)
        if args.use_multiple_gpu:
            self.G = torch.nn.DataParallel(self.G)
        print("G [{}] initiated with {} trainable parameters".format(
            args.generator_model, self.num_parameters))

        self.loader = None
Example no. 7
    def __init__(self, args, loader):
        self.args = args
        self._name = args.model_name
        self.device = args.device
        self.mode = args.mode

        self.model_dir = args.model_dir
        self.model_store_path = os.path.join(args.model_dir, args.model_name)
        if not os.path.exists(self.model_store_path) and args.mode == 'train':
            os.mkdir(self.model_store_path)
            # Also store all args in a text_file.
            self.write_args_string_to_file()
        if not os.path.exists(self.model_store_path) and args.mode == 'test':
            raise FileNotFoundError(
                'Model does not exist. Please check whether the model has been run yet.'
            )

        self.losses = {}
        self.loader = loader
        self.epochs = args.epochs

        # Define the generator network.
        self.G = define_G(args).to(self.device)
        print("[{}] initiated with {} trainable parameters".format(
            args.backbone, self.num_parameters))

        # Set the optimizer and scheduler, but wait for method-specific parameters.
        self.criterion = None
        self.optimizer = None
        self.learning_rate_scheduler = None
        if args.mode == 'train':
            self.learning_rate_scheduler = LearningRateScheduler(
                args.adjust_lr, args.lr_mode, args.learning_rate, args.epochs,
                args.num_iterations)
Example no. 8
    def __init__(self, opt):
        super().__init__()
        self.is_train = opt.is_train
        self.gpu_ids = opt.gpu_ids
        self.save_dir = opt.checkpoints_dir
        self.device = torch.device('cuda:{}'.format(
            self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu')

        # load/define networks
        #self.netG = networks.define_X(1, 3, opt.init_type, opt.init_gain, self.gpu_ids)
        self.netG = networks.define_G(3, 2, opt.ngf_S12, opt.netG,
                                      opt.norm_G_D, not opt.no_dropout,
                                      opt.init_type, opt.init_gain,
                                      self.gpu_ids)

        if self.is_train:
            # define loss functions
            self.criterionGAN = networks.GANLoss(
                use_lsgan=not opt.no_lsgan).to(self.device)
            self.criterionL1 = torch.nn.L1Loss()

            # initialize optimizers
            self.optimizers = []
            self.optimizer_G = torch.optim.Adam(self.netG.parameters(),
                                                lr=opt.lr,
                                                betas=(opt.beta1, 0.999))
            self.optimizers.append(self.optimizer_G)
Example no. 9
def TestGenerator(real: tp.Numpy.Placeholder(
        (1, 3, args.network_input_size, args.network_input_size),
        dtype=flow.float32)) -> tp.Numpy:
    with flow.scope.placement("gpu", "0:0-0"):
        fake = networks.define_G(real,
                                 netG_name,
                                 ngf=64,
                                 n_blocks=9,
                                 trainable=False,
                                 reuse=True)
    return fake
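The snippet omits the decorator that registers TestGenerator as a job function; under OneFlow's legacy lazy-mode API (oneflow 0.x), the definition would be preceded by something along these lines (an assumption, since the surrounding code is cut off):

@flow.global_function(type="predict")  # hypothetical registration; oneflow 0.x API
def TestGenerator(real: tp.Numpy.Placeholder(...)) -> tp.Numpy:
    ...  # body as above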
Example no. 10
    def __init__(self, p):

        super(TestModel, self).__init__(p)
        assert not p.isTrain
        self.netG = networks.define_G(p.input_nc, p.output_nc, p.ngf,
                                      p.which_model_netG, p.norm,
                                      not p.no_dropout, p.init_type,
                                      self.gpu_ids)
        which_epoch = p.which_epoch
        self.load_model(self.netG, 'G', which_epoch)
        print('---------- Networks initialized -------------')
        networks.print_network(self.netG)
        print('-----------------------------------------------')
Example no. 11
    def __init__(self, opt, ignore_noise=False, testing=False):

        self.ignore_noise = ignore_noise

        ##### model options
        self.old_lr = opt.lr
        opt.use_sigmoid = opt.no_lsgan

        self.opt = opt

        ##### define all networks we need here
        self.netG_A_B = networks.define_stochastic_G(nlatent=opt.nlatent, input_nc=opt.input_nc,
                                                     output_nc=opt.output_nc, ngf=opt.ngf,
                                                     which_model_netG=opt.which_model_netG,
                                                     norm=opt.norm, use_dropout=opt.use_dropout,
                                                     gpu_ids=opt.gpu_ids)

        self.netG_B_A = networks.define_G(input_nc=opt.output_nc,
                                          output_nc=opt.input_nc, ngf=opt.ngf,
                                          which_model_netG=opt.which_model_netG,
                                          norm=opt.norm, use_dropout=opt.use_dropout,
                                          gpu_ids=opt.gpu_ids)

        self.netD_A = networks.define_D_A(input_nc=opt.input_nc,
                                          ndf=32, which_model_netD=opt.which_model_netD,
                                          norm=opt.norm, use_sigmoid=opt.use_sigmoid, gpu_ids=opt.gpu_ids)

        self.netD_B = networks.define_D_B(input_nc=opt.output_nc,
                                          ndf=opt.ndf, which_model_netD=opt.which_model_netD,
                                          norm=opt.norm, use_sigmoid=opt.use_sigmoid, gpu_ids=opt.gpu_ids)

        ##### define all optimizers here
        self.optimizer_G = torch.optim.Adam(itertools.chain(self.netG_A_B.parameters(),
                                                            self.netG_B_A.parameters()),
                                            lr=opt.lr, betas=(opt.beta1, 0.999))
        self.optimizer_D = torch.optim.Adam(itertools.chain(self.netD_A.parameters(),
                                                            self.netD_B.parameters()),
                                            lr=opt.lr/5., betas=(opt.beta1, 0.999))

        self.criterionGAN = functools.partial(criterion_GAN, use_sigmoid=opt.use_sigmoid)
        self.criterionCycle = F.l1_loss

        if not testing:
            with open("%s/nets.txt" % opt.expr_dir, 'w') as nets_f:
                networks.print_network(self.netG_A_B, nets_f)
                networks.print_network(self.netG_B_A, nets_f)
                networks.print_network(self.netD_A, nets_f)
                networks.print_network(self.netD_B, nets_f)
Example no. 12
    def initialize(self, opt, writer=None):
        BaseModel.initialize(self, opt)
        self.writer = writer
        self.num_step = 0
        self.opt = opt
        if self.opt.use_lbp_network:
            self.model_names = ['G', 'LBP', 'D', 'D2']
        else:
            self.model_names = ['G', 'D']

        self.netG = networks.define_G(self.opt)
        if self.opt.use_lbp_network:
            self.netLBP = networks.define_LBP(self.opt)
        self.netD = networks.define_D(
            opt.input_nc, opt.ndf, self.opt.device)  # Discriminator for netG
        if self.opt.use_lbp_network:
            self.netD2 = networks.define_D(
                opt.input_nc - 2, opt.ndf,
                self.opt.device)  # Discriminator for netLBP

        self.vgg16_extractor = util.VGG16FeatureExtractor().to(self.opt.device)

        self.criterionGAN = networks.GANLoss(gan_type=opt.gan_type).to(
            self.opt.device)
        self.criterionL1 = torch.nn.L1Loss()
        self.criterionL2 = torch.nn.MSELoss()
        self.criterionL1_mask = networks.Discounted_L1(opt).to(self.opt.device)

        self.criterionL2_style_loss = torch.nn.MSELoss()
        self.criterionL2_perceptual_loss = torch.nn.MSELoss()

        self.optimizer_G = torch.optim.Adam(self.netG.parameters(),
                                            lr=opt.lr,
                                            betas=(0.5, 0.999))
        if self.opt.use_lbp_network:
            self.optimizer_LBP = torch.optim.Adam(self.netLBP.parameters(),
                                                  lr=opt.lr,
                                                  betas=(0.5, 0.999))
        self.optimizer_D = torch.optim.Adam(self.netD.parameters(),
                                            lr=opt.lr,
                                            betas=(0.5, 0.999))

        if self.opt.use_lbp_network:
            self.optimizer_D2 = torch.optim.Adam(self.netD2.parameters(),
                                                 lr=opt.lr,
                                                 betas=(0.5, 0.999))

        _, self.rand_t, self.rand_l = util.create_rand_mask(self.opt)
Example no. 13
    def __init__(self, base_model='unet_256', gpu_ids=[0]):
        self.gpu_ids = gpu_ids
        self.device = torch.device('cuda:{}'.format(
            self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu')
        print("Using device", self.device)
        self.netG = networks.define_G(3,
                                      3,
                                      64,
                                      netG=base_model,
                                      gpu_ids=self.gpu_ids)
        print("Network Initialized")
        self.opt = {
            'crop_size': 256,
            'no_flip': True,
            'load_size': 256,
            'preprocess': 'resize_and_crop'
        }
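To run this wrapper on an image, it would be paired with the usual resize/crop/[-1, 1] preprocessing that the opt dict above describes. A minimal inference sketch, where model and 'input.jpg' are illustrative names:

import torch
import torchvision.transforms as T
from PIL import Image

# Preprocessing matching the opt dict above: resize, centre-crop, scale to [-1, 1].
preprocess = T.Compose([
    T.Resize(256),
    T.CenterCrop(256),
    T.ToTensor(),
    T.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])

img = preprocess(Image.open('input.jpg').convert('RGB')).unsqueeze(0)
with torch.no_grad():
    out = model.netG(img.to(model.device))  # model: an instance of the class above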
Example no. 14
    def initialize(self, opt, train_mode=True):
        # Model transforms from A --> B and uses Adv as the
        # adversarial example.
        BaseModel.initialize(self, opt)
        self.train_mode = train_mode
        # define tensors
        self.input_B = self.Tensor(opt['batchSize'], opt['input_nc'],
                                   opt['B_height'], opt['B_width'])

        self.input_A = self.Tensor(opt['batchSize'], opt['output_nc'],
                                   opt['A_height'], opt['A_width'])

        # load/define networks
        self.netG = networks.define_G(opt['input_nc'], opt['output_nc'],
                                      opt['ngf'], opt['norm'], self.gpu_ids)

        if self.train_mode:
            use_sigmoid = opt['no_lsgan']
            self.netD = networks.define_D(opt['input_nc'] + opt['output_nc'],
                                          opt['ndf'], opt['which_model_netD'],
                                          opt['n_layers_D'], use_sigmoid,
                                          self.gpu_ids)

        if self.train_mode:
            # self.fake_AB_pool = ImagePool(opt['pool_size'])
            self.old_lr = opt['lr']
            # define loss functions
            self.criterionGAN = networks.GANLoss(use_lsgan=not opt['no_lsgan'],
                                                 tensor=self.Tensor)
            self.content_loss = torch.nn.MSELoss()

            # initialize optimizers
            self.optimizer_G = torch.optim.Adam(self.netG.parameters(),
                                                lr=opt['lr'],
                                                betas=(opt['beta1'], 0.999))
            self.optimizer_D = torch.optim.Adam(self.netD.parameters(),
                                                lr=opt['lr'],
                                                betas=(opt['beta1'], 0.999))

            print('---------- Networks initialized -------------')
            networks.print_network(self.netG)
            networks.print_network(self.netD)
            print('-----------------------------------------------')
Example no. 15
torch.manual_seed(opt.seed)
if opt.cuda:
    torch.cuda.manual_seed(opt.seed)

print('===> Loading datasets')
train_set = get_dataset(path=opt.dataset)
training_data_loader = DataLoader(dataset=train_set,
                                  num_workers=opt.threads,
                                  batch_size=opt.batch_size,
                                  shuffle=True)
device = torch.device("cuda:0" if opt.cuda else "cpu")
print('===> Building models')
net_g = define_G(opt.input_nc,
                 opt.output_nc,
                 opt.ngf,
                 'batch',
                 False,
                 'normal',
                 0.02,
                 gpu_id=device)
net_d = define_D(opt.input_nc + opt.output_nc, opt.ndf, 'basic', gpu_id=device)

criterionGAN = GANLoss().to(device)
criterionL1 = nn.L1Loss().to(device)
criterionMSE = nn.MSELoss().to(device)

# setup optimizer
optimizer_g = optim.Adam(net_g.parameters(),
                         lr=opt.lr,
                         betas=(opt.beta1, 0.999))
optimizer_d = optim.Adam(net_d.parameters(),
                         lr=opt.lr,
                         betas=(opt.beta1, 0.999))
Example no. 16
    def initialize(self, opt):
        super(DesignerGAN, self).initialize(opt)
        ###################################
        # define data tensors
        ###################################
        # self.input['img'] = self.Tensor()
        # self.input['img_attr'] = self.Tensor()
        # self.input['lm_map'] = self.Tensor()
        # self.input['seg_mask'] = self.Tensor()
        # self.input['attr_label'] = self.Tensor()
        # self.input['id'] = []

        ###################################
        # load/define networks
        ###################################

        # TODO: modify networks.define_G
        # 1. add specified generator networks

        self.netG = networks.define_G(opt)
        self.netAE, self.opt_AE = network_loader.load_attribute_encoder_net(
            id=opt.which_model_AE, gpu_ids=opt.gpu_ids)
        if opt.which_model_FeatST != 'none':
            self.netFeatST, self.opt_FeatST = network_loader.load_feature_spatial_transformer_net(
                id=opt.which_model_FeatST, gpu_ids=opt.gpu_ids)
            self.use_FeatST = True
            # assert self.opt_FeatST.shape_encode == self.opt.shape_encode, 'GAN model and FeatST model has different shape encode mode'
            # assert self.opt_FeatST.input_mask_mode == self.opt.input_mask_mode, 'GAN model and FeatST model has different segmentation input mode'
        else:
            self.use_FeatST = False

        if self.is_train:
            self.netD = networks.define_D(opt)
            if opt.which_model_init_netG != 'none' and not opt.continue_train:
                self.load_network(self.netG, 'G', 'latest',
                                  opt.which_model_init_netG)

        if not self.is_train or opt.continue_train:
            self.load_network(self.netG, 'G', opt.which_epoch)
            if self.is_train:
                self.load_network(self.netD, 'D', opt.which_epoch)

        if self.is_train:
            self.fake_pool = ImagePool(opt.pool_size)

            ###################################
            # define loss functions and loss buffers
            ###################################
            if opt.which_gan in {'dcgan', 'lsgan'}:
                self.crit_GAN = networks.GANLoss(
                    use_lsgan=opt.which_gan == 'lsgan', tensor=self.Tensor)
            else:
                # WGAN loss will be calculated in self.backward_D_wgangp and self.backward_G
                self.crit_GAN = None
            self.crit_L1 = nn.L1Loss()
            self.crit_attr = nn.BCELoss()

            self.loss_functions = []
            self.loss_functions.append(self.crit_GAN)
            self.loss_functions.append(self.crit_L1)
            self.loss_functions.append(self.crit_attr)

            if self.opt.loss_weight_vgg > 0:
                self.crit_vgg = networks.VGGLoss(self.gpu_ids)
                self.loss_functions.append(self.crit_vgg)

            ###################################
            # create optimizers
            ###################################
            self.schedulers = []
            self.optimizers = []

            self.optim_G = torch.optim.Adam(self.netG.parameters(),
                                            lr=opt.lr,
                                            betas=(opt.beta1, opt.beta2))
            self.optim_D = torch.optim.Adam(self.netD.parameters(),
                                            lr=opt.lr,
                                            betas=(opt.beta1, opt.beta2))
            self.optimizers.append(self.optim_G)
            self.optimizers.append(self.optim_D)

            for optim in self.optimizers:
                self.schedulers.append(networks.get_scheduler(optim, opt))

        # color transformation from std to imagenet
        # img_imagenet = img_std * a + b
        self.trans_std_to_imagenet = {
            'a':
            Variable(self.Tensor([0.5 / 0.229, 0.5 / 0.224, 0.5 / 0.225]),
                     requires_grad=False).view(3, 1, 1),
            'b':
            Variable(self.Tensor([(0.5 - 0.485) / 0.229, (0.5 - 0.456) / 0.224,
                                  (0.5 - 0.406) / 0.225]),
                     requires_grad=False).view(3, 1, 1)
        }
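For reference, the constants above follow from composing two affine normalizations: if img_std = (img - 0.5) / 0.5 and img_imagenet = (img - mean) / std with the ImageNet statistics mean = (0.485, 0.456, 0.406) and std = (0.229, 0.224, 0.225), then img_imagenet = img_std * (0.5 / std) + (0.5 - mean) / std, which is exactly the per-channel 'a' and 'b' tensors stored here.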
Example no. 17
    def __init__(self, hyperparameters):
        super(myMUNIT_Trainer, self).__init__()
        lr = hyperparameters['lr']
        # Initiate the networks
        self.style_dim = hyperparameters['gen']['style_dim']
        self.enc_a = networks.define_E(input_nc=3,
                                       output_nc=self.style_dim,
                                       ndf=64)  # encoder for domain a
        self.enc_b = networks.define_E(input_nc=3,
                                       output_nc=self.style_dim,
                                       ndf=64)  # encoder for domain b
        self.gen_a = networks.define_G(input_nc=3,
                                       output_nc=3,
                                       nz=self.style_dim,
                                       ngf=64)  # generator for domain a
        self.gen_b = networks.define_G(input_nc=3,
                                       output_nc=3,
                                       nz=self.style_dim,
                                       ngf=64)  # generator for domain b
        self.dis_a = networks.define_D(input_nc=3,
                                       ndf=64,
                                       norm='instance',
                                       num_Ds=2)  # discriminator for domain a
        self.dis_b = networks.define_D(input_nc=3,
                                       ndf=64,
                                       norm='instance',
                                       num_Ds=2)  # discriminator for domain b
        self.netVGGF = networks.define_VGGF()
        self.instancenorm = nn.InstanceNorm2d(512, affine=False)

        # Initiate the criterions or loss functions
        self.criterionGAN = networks.GANLoss(
            mse_loss=True,
            tensor=torch.cuda.FloatTensor)  # criterion GAN adversarial loss
        self.wGANloss = networks.wGANLoss(
            tensor=torch.cuda.FloatTensor)  # wGAN adversarial loss
        self.criterionL1 = torch.nn.L1Loss()  # L1 loss
        self.criterionL2 = networks.L2Loss()  # L2 loss
        self.criterionZ = torch.nn.L1Loss()  # L1 loss between code
        self.criterionC = networks.ContentLoss(
            vgg_features=self.netVGGF)  # content loss
        self.criterionS = networks.StyleLoss(
            vgg_features=self.netVGGF)  # style loss
        self.criterionC_l = networks.ContentLoss(
            vgg_features=self.netVGGF)  # local content loss
        self.criterionS_l = networks.StyleLoss(
            vgg_features=self.netVGGF)  # local style loss
        self.criterionHisogram = networks.HistogramLoss(
            vgg_features=self.netVGGF)  # histogram loss
        self.Feature_map_im = networks.Feature_map_im(
            vgg_features=self.netVGGF)  # show feature map

        # fix the noise used in sampling
        self.s_a = torch.randn(8, self.style_dim, 1, 1).cuda()
        self.s_b = torch.randn(8, self.style_dim, 1, 1).cuda()

        # Setup the optimizers
        beta1 = hyperparameters['beta1']
        beta2 = hyperparameters['beta2']
        dis_params = list(self.dis_a.parameters()) + list(
            self.dis_b.parameters())
        gen_params = list(self.gen_a.parameters()) + list(
            self.gen_b.parameters())
        self.dis_opt = torch.optim.Adam(
            [p for p in dis_params if p.requires_grad],
            lr=lr,
            betas=(beta1, beta2),
            weight_decay=hyperparameters['weight_decay'])
        self.gen_opt = torch.optim.Adam(
            [p for p in gen_params if p.requires_grad],
            lr=lr,
            betas=(beta1, beta2),
            weight_decay=hyperparameters['weight_decay'])
        self.dis_scheduler = get_scheduler(self.dis_opt, hyperparameters)
        self.gen_scheduler = get_scheduler(self.gen_opt, hyperparameters)

        # Load VGG model if needed
        if 'vgg_w' in hyperparameters.keys() and hyperparameters['vgg_w'] > 0:
            self.vgg = load_vgg16(hyperparameters['vgg_model_path'] +
                                  '/models')
            self.vgg.eval()
            for param in self.vgg.parameters():
                param.requires_grad = False
Example no. 18
ngpu = int(opt.ngpu)
nz = int(opt.nz)
ngf = int(opt.ngf)
ndf = int(opt.ndf)
nc = 1

print('===> Loading datasets')
dataset = DatasetFromFolder(opt.dataroot, opt.imageSize)
assert dataset
dataloader = torch.utils.data.DataLoader(dataset,
                                         batch_size=opt.batchSize,
                                         shuffle=True,
                                         num_workers=int(opt.workers))

print('===> Building model')
netG = define_G(nc, nz, ngf, ngpu, device, opt.netG)
netD = define_D(nc, ndf, ngpu, device, opt.netD)

criterion = nn.BCELoss()

fixed_noise = torch.randn(opt.batchSize, nz, 1, 1, device=device)
real_label = 1
fake_label = 0

# setup optimizer
optimizerD = optim.Adam(netD.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
optimizerG = optim.Adam(netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))

print('---------- Networks initialized -------------')
print_network(netG)
print_network(netD)
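Given the BCELoss criterion and the real/fake label constants above, a single discriminator update follows the standard DCGAN recipe. A condensed sketch, assuming netD ends in a sigmoid (as BCELoss requires) and real is a batch from the dataloader:

# One DCGAN-style discriminator step (sketch, not from the original script).
netD.zero_grad()
label = torch.full((real.size(0),), real_label, dtype=torch.float, device=device)
loss_real = criterion(netD(real).view(-1), label)
noise = torch.randn(real.size(0), nz, 1, 1, device=device)
fake = netG(noise)
label.fill_(fake_label)
loss_fake = criterion(netD(fake.detach()).view(-1), label)
(loss_real + loss_fake).backward()
optimizerD.step()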
Example no. 19
  torch.cuda.manual_seed(opt.seed)

print('===> Loading datasets')
root_path            = "dataset/"
train_set            = get_training_set(root_path + opt.dataset)
test_set             = get_test_set(root_path + opt.dataset)
training_data_loader = DataLoader(dataset=train_set, num_workers=opt.threads, batch_size=opt.batchSize, shuffle=False)
testing_data_loader  = DataLoader(dataset=test_set, num_workers=opt.threads, batch_size=opt.testBatchSize, shuffle=False)

max_dataset_num = 1500
# trim the datasets down to max_dataset_num elements
train_set.image_filenames = train_set.image_filenames[:max_dataset_num]
test_set.image_filenames = test_set.image_filenames[:max_dataset_num]

print('===> Building model')
netG = define_G(4, 3, opt.ngf, 'batch', False, [0])
# it would be better to build three separate NetDs
#netD = define_D(opt.input_nc + opt.output_nc, opt.ndf, 'batch', False, [0])
# how many input channels are there in the first place?
disc_input_nc = 6
disc_output_nc = 1024
netD_Global = define_D_Global(disc_input_nc, disc_output_nc, opt.ndf, [0])
netD_Local = define_D_Local(disc_input_nc, disc_output_nc, opt.ndf, [0])
netD_Edge = define_D_Edge(disc_input_nc, disc_output_nc, opt.ndf, [0])
#netD_Global = define_D_Global(hogehoge)
#netD_Local  = define_D_Local(hogehoge)
#netD_Edge   = define_D_Edge(hogehoge)


criterionGAN = GANLoss()
criterionL1 = nn.L1Loss()
Example no. 20
print('===> Loading datasets')
root_path = "dataset/"
train_set = get_training_set(root_path + opt.dataset)
test_set = get_test_set(root_path + opt.dataset)
training_data_loader = DataLoader(dataset=train_set,
                                  num_workers=opt.threads,
                                  batch_size=opt.batchSize,
                                  shuffle=True)
testing_data_loader = DataLoader(dataset=test_set,
                                 num_workers=opt.threads,
                                 batch_size=opt.testBatchSize,
                                 shuffle=False)

print('===> Building model')

netG = define_G(opt.input_nc, opt.output_nc, opt.ngf, 'batch', False, [0])

netD = define_D(opt.input_nc + opt.output_nc, opt.ndf, 'batch', False, [0])

print('loading done')

criterionGAN = GANLoss()
criterionL1 = nn.L1Loss()
criterionMSE = nn.MSELoss()

# setup optimizer
optimizerG = optim.Adam(netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
optimizerD = optim.Adam(netD.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))

print('---------- Networks initialized -------------')
print_network(netG)
Example no. 21
    def __init__(self, p):

        super(CycleGAN, self).__init__(p)
        nb = p.batchSize
        size = p.cropSize

        # load/define models
        # The naming convention is different from the one used in the paper
        # Code (paper): G_A (G), G_B (F), D_A (D_Y), D_B (D_X)

        self.netG_A = networks.define_G(p.input_nc, p.output_nc, p.ngf,
                                        p.which_model_netG, p.norm,
                                        not p.no_dropout, p.init_type,
                                        self.gpu_ids)
        self.netG_B = networks.define_G(p.output_nc, p.input_nc, p.ngf,
                                        p.which_model_netG, p.norm,
                                        not p.no_dropout, p.init_type,
                                        self.gpu_ids)

        if self.isTrain:
            use_sigmoid = p.no_lsgan
            self.netD_A = networks.define_D(p.output_nc, p.ndf,
                                            p.which_model_netD, p.n_layers_D,
                                            p.norm, use_sigmoid, p.init_type,
                                            self.gpu_ids)
            self.netD_B = networks.define_D(p.input_nc, p.ndf,
                                            p.which_model_netD, p.n_layers_D,
                                            p.norm, use_sigmoid, p.init_type,
                                            self.gpu_ids)

        if not self.isTrain or p.continue_train:
            which_epoch = p.which_epoch
            self.load_model(self.netG_A, 'G_A', which_epoch)
            self.load_model(self.netG_B, 'G_B', which_epoch)
            if self.isTrain:
                self.load_model(self.netD_A, 'D_A', which_epoch)
                self.load_model(self.netD_B, 'D_B', which_epoch)

        if self.isTrain:
            self.old_lr = p.lr
            self.fake_A_pool = ImagePool(p.pool_size)
            self.fake_B_pool = ImagePool(p.pool_size)
            # define loss functions
            self.criterionGAN = networks.GANLoss(use_lsgan=not p.no_lsgan,
                                                 tensor=self.Tensor)
            self.criterionCycle = torch.nn.L1Loss()
            self.criterionIdt = torch.nn.L1Loss()

            # initialize optimizers
            self.optimizer_G = torch.optim.Adam(itertools.chain(
                self.netG_A.parameters(), self.netG_B.parameters()),
                                                lr=p.lr,
                                                betas=(p.beta1, 0.999))
            self.optimizer_D_A = torch.optim.Adam(self.netD_A.parameters(),
                                                  lr=p.lr,
                                                  betas=(p.beta1, 0.999))
            self.optimizer_D_B = torch.optim.Adam(self.netD_B.parameters(),
                                                  lr=p.lr,
                                                  betas=(p.beta1, 0.999))

            self.optimizers = []
            self.schedulers = []
            self.optimizers.append(self.optimizer_G)
            self.optimizers.append(self.optimizer_D_A)
            self.optimizers.append(self.optimizer_D_B)
            for optimizer in self.optimizers:
                self.schedulers.append(networks.get_scheduler(optimizer, p))
Example no. 22
from networks import define_G
import torch
import numpy as np
from torch.autograd import Variable

if __name__ == '__main__':
    net = define_G(input_nc=3,
                   output_nc=3,
                   ngf=64,
                   which_model_netG='unet_80',
                   norm='batch')

    xfake = np.random.normal(0, 1, size=(2, 3, 80, 80))
    xfake = Variable(torch.from_numpy(xfake).float())
    dat = net(xfake)
    import pdb
    pdb.set_trace()
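As a quick sanity check before the pdb prompt, dat.shape should come back as torch.Size([2, 3, 80, 80]), assuming the unet_80 generator preserves spatial resolution (which U-Net-style generators in this codebase typically do).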
Example no. 23
def main(name_exp, segloss=False, cuda=True, finetune=False):
    # Training settings
    parser = argparse.ArgumentParser(description='pix2pix-PyTorch-implementation')
    parser.add_argument('--batchSize', type=int, default=8, help='training batch size')
    parser.add_argument('--testBatchSize', type=int, default=8, help='testing batch size')
    parser.add_argument('--nEpochs', type=int, default=100, help='number of epochs to train for')
    parser.add_argument('--input_nc', type=int, default=3, help='input image channels')
    parser.add_argument('--output_nc', type=int, default=3, help='output image channels')
    parser.add_argument('--ngf', type=int, default=64, help='generator filters in first conv layer')
    parser.add_argument('--ndf', type=int, default=64, help='discriminator filters in first conv layer')
    parser.add_argument('--lr', type=float, default=0.0002, help='Learning Rate. Default=0.0002')
    parser.add_argument('--beta1', type=float, default=0.5, help='beta1 for adam. default=0.5')
    parser.add_argument('--threads', type=int, default=8, help='number of threads for data loader to use')
    parser.add_argument('--seed', type=int, default=123, help='random seed to use. Default=123')
    parser.add_argument('--lamb', type=int, default=10, help='weight on L1 term in objective')
    opt = parser.parse_args()

    cudnn.benchmark = True

    def val():
        net_current = "path_exp/checkpoint/DFS/{}/netG_model_current.pth".format(name_exp)
        netVal = torch.load(net_current)
        netVal.eval()
        SEG_NET.eval()
        features.eval()
        with torch.no_grad():
            total_mse = 0
            total_mse2 = 0
            avg_psnr_depth = 0
            avg_psnr_dehaze = 0
            avg_ssim_depth = 0
            avg_ssim_dehaze = 0
            for batch in validation_data_loader:
                input, target, depth = Variable(batch[0]), Variable(batch[1]), Variable(batch[2])
                if cuda:
                    input = input.cuda()
                    target = target.cuda()
                    depth = depth.cuda()

                dehaze = netVal(input)
                prediction = SEG_NET(dehaze)

                avg_ssim_dehaze += pytorch_ssim.ssim(dehaze, target).item()

                mse = criterionMSE(prediction, depth)
                total_mse += mse.item()
                avg_psnr_depth += 10 * log10(1 / mse.item())

                mse2 = criterionMSE(dehaze, target)
                total_mse2 += mse2.item()
                avg_psnr_dehaze += 10 * log10(1 / mse2.item())

                avg_ssim_depth += pytorch_ssim.ssim(prediction, depth).item()


                visual_ret_val = OrderedDict()

                visual_ret_val['Haze'] = input
                visual_ret_val['Seg estimate'] = prediction
                visual_ret_val['Dehaze '] = dehaze
                visual_ret_val['GT dehaze'] = target
                visual_ret_val['GT Seg '] = depth

                visualizer.display_current_results(visual_ret_val, epoch, True)


            print("===> Validation")
            #f.write("===> Testing: \r\n")

            print("===> PSNR seg: {:.4f} ".format(avg_psnr_depth / len(validation_data_loader)))
            #f.write("===> PSNR depth: {:.4f} \r\n".format(avg_psnr_depth / len(validation_data_loader)))

            print("===> Mse seg: {:.4f} ".format(total_mse / len(validation_data_loader)))
            #f.write("===> Mse depth: {:.4f} \r\n".format(total_mse / len(validation_data_loader)))

            print("===> SSIM seg: {:.4f} ".format(avg_ssim_depth / len(validation_data_loader)))
            #f.write("===> SSIM depth: {:.4f} \r\n".format(avg_ssim_depth / len(validation_data_loader)))

            return total_mse / len(validation_data_loader)

    def testing():
        path = "path_exp/checkpoint/DFS/{}/netG_model_best.pth".format(name_exp)
        net = torch.load(path)
        net.eval()
        SEG_NET.eval()
        features.eval()
        with torch.no_grad():
            total_mse = 0
            total_mse2 = 0
            avg_psnr_depth = 0
            avg_psnr_dehaze = 0
            avg_ssim_depth = 0
            avg_ssim_dehaze = 0
            for batch in testing_data_loader:
                input, target, depth = Variable(batch[0]), Variable(batch[1]), Variable(batch[2])
                if cuda:
                    input = input.cuda()
                    target = target.cuda()
                    depth = depth.cuda()

                dehaze = net(input)
                prediction = SEG_NET(dehaze)

                avg_ssim_dehaze += pytorch_ssim.ssim(dehaze, target).item()

                mse = criterionMSE(prediction, depth)
                total_mse += mse.item()
                avg_psnr_depth += 10 * log10(1 / mse.item())

                mse2 = criterionMSE(dehaze, target)
                total_mse2 += mse2.item()
                avg_psnr_dehaze += 10 * log10(1 / mse2.item())

                avg_ssim_depth += pytorch_ssim.ssim(prediction, depth).item()

            print("===> Testing")
            print("===> PSNR seg: {:.4f} ".format(avg_psnr_depth / len(testing_data_loader)))
            print("===> Mse seg: {:.4f} ".format(total_mse / len(testing_data_loader)))
            print("===> SSIM seg: {:.4f} ".format(avg_ssim_depth / len(testing_data_loader)))
            print("===> PSNR dehaze: {:.4f} ".format(avg_psnr_dehaze / len(testing_data_loader)))
            print("===> SSIM dehaze: {:.4f} ".format(avg_ssim_dehaze / len(testing_data_loader)))





    def checkpoint():
        if not os.path.exists("checkpoint"):
            os.mkdir("checkpoint")
        if not os.path.exists(os.path.join("path_exp/checkpoint/DFS", name_exp)):
            os.mkdir(os.path.join("path_exp/checkpoint/DFS", name_exp))
        net_g_model_out_path = "path_exp/checkpoint/DFS/{}/netG_model_best.pth".format(name_exp)
        net_d_model_out_path = "path_exp/checkpoint/DFS/{}/netD_model_best.pth".format(name_exp)
        torch.save(netG, net_g_model_out_path)
        torch.save(netD, net_d_model_out_path)


    def checkpoint_current():
        if not os.path.exists(os.path.join("path_exp/checkpoint/DFS", name_exp)):
            os.mkdir(os.path.join("path_exp/checkpoint/DFS", name_exp))
        net_g_model_out_path = "path_exp/checkpoint/DFS/{}/netG_model_current.pth".format(name_exp)
        torch.save(netG, net_g_model_out_path)

    def checkpoint_seg():
        if not os.path.exists(os.path.join("path_exp/checkpoint/DFS", name_exp)):
            os.mkdir(os.path.join("path_exp/checkpoint/DFS", name_exp))
        net_g_model_out_path = "path_exp/checkpoint/DFS/{}/seg_net.pth".format(name_exp)
        torch.save(SEG_NET, net_g_model_out_path)

    torch.manual_seed(opt.seed)
    if cuda:
        torch.cuda.manual_seed(opt.seed)

    print(" ")
    print(name_exp)
    print(" ")

    print('===> Loading datasets')
    train_set = get_training_set('path_exp/cityscape/HAZE')
    val_set = get_val_set('path_exp/cityscape/HAZE')
    test_set = get_test_set('path_exp/cityscape/HAZE')


    training_data_loader = DataLoader(dataset=train_set, num_workers=opt.threads, batch_size=opt.batchSize, shuffle=True)
    validation_data_loader = DataLoader(dataset=val_set, num_workers=opt.threads, batch_size=opt.batchSize, shuffle=True)
    testing_data_loader= DataLoader(dataset=test_set, num_workers=opt.threads, batch_size=opt.testBatchSize, shuffle=False)

    print('===> Building model')
    netG = define_G(opt.input_nc, opt.output_nc, opt.ngf, 'batch', False, [0])
    netD = define_D(opt.input_nc + opt.output_nc, opt.ndf, 'batch', False, [0])

    criterionGAN = GANLoss()
    criterionL1 = nn.L1Loss()
    criterionMSE = nn.MSELoss()

    # setup optimizer
    optimizerG = optim.Adam(netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
    optimizerD = optim.Adam(netD.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))

    print('---------- Networks initialized -------------')
    print_network(netG)
    print_network(netD)
    print('-----------------------------------------------')


    real_a = torch.FloatTensor(opt.batchSize, opt.input_nc, 256, 256)
    real_b = torch.FloatTensor(opt.batchSize, opt.output_nc, 256, 256)
    real_c = torch.FloatTensor(opt.batchSize, opt.output_nc, 256, 256)

    if cuda:
        netD = netD.cuda()
        netG = netG.cuda()
        criterionGAN = criterionGAN.cuda()
        criterionL1 = criterionL1.cuda()
        criterionMSE = criterionMSE.cuda()
        real_a = real_a.cuda()
        real_b = real_b.cuda()
        real_c = real_c.cuda()

    real_a = Variable(real_a)
    real_b = Variable(real_b)
    real_c = Variable(real_c)

    SEG_NET = torch.load("path_exp/SEG_NET.pth")
    optimizerSeg = optim.Adam(SEG_NET.parameters(), lr=opt.lr/10, betas=(opt.beta1, 0.999))

    features = Vgg16()

    if cuda:
        SEG_NET.cuda()
        features.cuda()


    bon = 100000000  # best validation MSE so far
    for epoch in range(opt.nEpochs):
        features.eval()

        if finetune and epoch > 50:
            SEG_NET.train()
        else:
            SEG_NET.eval()

        loss_epoch_gen = 0
        loss_epoch_dis = 0
        total_segloss = 0
        loss_seg = 0
        i = 0
        for iteration, batch in enumerate(training_data_loader, 1):

            netG.train()
            i += 1

            # forward
            real_a_cpu, real_b_cpu, real_c_cpu = batch[0], batch[1], batch[2]

            with torch.no_grad():
                real_a = real_a.resize_(real_a_cpu.size()).copy_(real_a_cpu)
                real_b = real_b.resize_(real_b_cpu.size()).copy_(real_b_cpu)
                real_c = real_c.resize_(real_c_cpu.size()).copy_(real_c_cpu)


            fake_b = netG(real_a)

            ############################
            # (1) Update D network: maximize log(D(x,y)) + log(1 - D(x,G(x)))
            ###########################

            optimizerD.zero_grad()

            # train with fake
            fake_ab = torch.cat((real_a, fake_b), 1)
            pred_fake = netD.forward(fake_ab.detach())
            loss_d_fake = criterionGAN(pred_fake, False)

            # train with real
            real_ab = torch.cat((real_a, real_b), 1)
            pred_real = netD.forward(real_ab)
            loss_d_real = criterionGAN(pred_real, True)

            # Combined loss
            loss_d = (loss_d_fake + loss_d_real) * 0.5

            loss_d.backward()

            optimizerD.step()

            ############################
            # (2) Update G network: maximize log(D(x,G(x))) + L1(y,G(x))
            ##########################
            optimizerG.zero_grad()
            # First, G(A) should fake the discriminator
            fake_ab = torch.cat((real_a, fake_b), 1)
            pred_fake = netD.forward(fake_ab)
            loss_g_gan = criterionGAN(pred_fake, True)


            # Second, G(A) = B
            loss_g_l1 = criterionL1(fake_b, real_b) * opt.lamb

            features_y = features(fake_b)
            features_x = features(real_b)

            loss_content = criterionMSE(features_y[1], features_x[1]) * 10


            if segloss:
                fake_seg = SEG_NET(fake_b)
                loss_seg = criterionMSE(fake_seg, real_c) * 10

                total_segloss += loss_seg.item()

                features_y = features(fake_seg)
                features_x = features(real_c)

                ssim_seg = criterionMSE(features_y[1], features_x[1]) * 10

                loss_g = loss_g_gan + loss_g_l1 + loss_content + loss_seg


            else:
                loss_g = loss_g_gan + loss_g_l1 + loss_content

            loss_epoch_gen += loss_g.item()
            loss_epoch_dis += loss_d.item()

            if finetune and epoch > 50:
                # Zero the segmentation grads before backward, otherwise the
                # freshly computed gradients would be wiped out before step();
                # both backward passes also run before either optimizer step,
                # so neither step invalidates the other's graph.
                optimizerSeg.zero_grad()
                loss_g.backward(retain_graph=True)
                loss_seg.backward()
                optimizerG.step()
                optimizerSeg.step()
            else:
                loss_g.backward()
                optimizerG.step()

            errors_ret = OrderedDict()
            errors_ret['Total_G'] = float(loss_g)
            errors_ret['Content'] = float(loss_content)
            errors_ret['GAN'] = float(loss_g_gan)
            errors_ret['L1'] = float(loss_g_l1)
            errors_ret['D'] = float(loss_d)

            if i % 10 == 0:  # print training losses and save logging information to the disk
                visualizer.plot_current_losses(epoch, i / (len(training_data_loader) * opt.batchSize), errors_ret)




        print("===> Epoch[{}]: Loss_D: {:.4f} Loss_G: {:.4f} Loss Seg: {:.4f} ".format(epoch, loss_epoch_dis,loss_epoch_gen, total_segloss))
        checkpoint_current()
        MSE = val()
        if MSE < bon:
            bon = MSE
            checkpoint()
            checkpoint_seg()
            print("BEST EPOCH SAVED")

    testing()
Example no. 24
    def initialize(self, opt):
        super(MultimodalDesignerGAN, self).initialize(opt)
        ###################################
        # load/define networks
        ###################################

        # basic G
        self.netG = networks.define_G(opt)

        # encoders
        self.encoders = {}
        if opt.use_edge:
            self.edge_encoder = networks.define_image_encoder(opt, 'edge')
            self.encoders['edge_encoder'] = self.edge_encoder
        if opt.use_color:
            self.color_encoder = networks.define_image_encoder(opt, 'color')
            self.encoders['color_encoder'] = self.color_encoder
        if opt.use_attr:
            self.attr_encoder, self.opt_AE = network_loader.load_attribute_encoder_net(
                id=opt.which_model_AE, gpu_ids=opt.gpu_ids)

        # basic D and auxiliary Ds
        if self.is_train:
            # basic D
            self.netD = networks.define_D(opt)
            # auxiliary Ds
            self.auxiliaryDs = {}
            if opt.use_edge_D:
                assert opt.use_edge
                self.netD_edge = networks.define_D_from_params(
                    input_nc=opt.edge_nof + 3,
                    ndf=opt.ndf,
                    which_model_netD=opt.which_model_netD,
                    n_layers_D=opt.n_layers_D,
                    norm=opt.norm,
                    which_gan='dcgan',
                    init_type=opt.init_type,
                    gpu_ids=opt.gpu_ids)
                self.auxiliaryDs['D_edge'] = self.netD_edge
            if opt.use_color_D:
                assert opt.use_color
                self.netD_color = networks.define_D_from_params(
                    input_nc=opt.color_nof + 3,
                    ndf=opt.ndf,
                    which_model_netD=opt.which_model_netD,
                    n_layers_D=opt.n_layers_D,
                    norm=opt.norm,
                    which_gan='dcgan',
                    init_type=opt.init_type,
                    gpu_ids=opt.gpu_ids)
                self.auxiliaryDs['D_color'] = self.netD_color
            if opt.use_attr_D:
                assert opt.use_attr
                attr_nof = opt.n_attr_feat if opt.attr_cond_type in {
                    'feat', 'feat_map'
                } else opt.n_attr
                self.netD_attr = networks.define_D_from_params(
                    input_nc=attr_nof + 3,
                    ndf=opt.ndf,
                    which_model_netD=opt.which_model_netD,
                    n_layers_D=opt.n_layers_D,
                    norm=opt.norm,
                    which_gan='dcgan',
                    init_type=opt.init_type,
                    gpu_ids=opt.gpu_ids)
                self.auxiliaryDs['D_attr'] = self.netD_attr
            # load weights
            if not opt.continue_train:
                if opt.which_model_init != 'none':
                    self.load_network(self.netG, 'G', 'latest',
                                      opt.which_model_init)
                    self.load_network(self.netD, 'D', 'latest',
                                      opt.which_model_init)
                    for l, net in self.encoders.items():
                        self.load_network(net, l, 'latest',
                                          opt.which_model_init)
                    for l, net in self.auxiliaryDs.items():
                        self.load_network(net, l, 'latest',
                                          opt.which_model_init)
            else:
                self.load_network(self.netG, 'G', opt.which_epoch)
                self.load_network(self.netD, 'D', opt.which_epoch)
                for l, net in self.encoders.items():
                    self.load_network(net, l, opt.which_epoch)
                for l, net in self.auxiliaryDs.items():
                    self.load_network(net, l, opt.which_epoch)
        else:
            self.load_network(self.netG, 'G', opt.which_epoch)
            for l, net in self.encoders.items():
                self.load_network(net, l, opt.which_epoch)

        if self.is_train:
            self.fake_pool = ImagePool(opt.pool_size)
            ###################################
            # define loss functions and loss buffers
            ###################################
            self.loss_functions = []
            if opt.which_gan in {'dcgan', 'lsgan'}:
                self.crit_GAN = networks.GANLoss(
                    use_lsgan=opt.which_gan == 'lsgan', tensor=self.Tensor)
            else:
                # WGAN loss will be calculated in self.backward_D_wgangp and self.backward_G
                self.crit_GAN = None

            self.loss_functions.append(self.crit_GAN)

            self.crit_L1 = nn.L1Loss()
            self.loss_functions.append(self.crit_L1)

            if self.opt.loss_weight_vgg > 0:
                self.crit_vgg = networks.VGGLoss(self.gpu_ids)
                self.loss_functions.append(self.crit_vgg)

            self.crit_psnr = networks.SmoothLoss(networks.PSNR())
            self.loss_functions.append(self.crit_psnr)
            ###################################
            # create optimizers
            ###################################
            self.schedulers = []
            self.optimizers = []

            # optim_G will optimize parameters of netG and all image encoders (except attr_encoder)
            G_param_groups = [{'params': self.netG.parameters()}]
            for l, net in self.encoders.items():
                G_param_groups.append({'params': net.parameters()})
            self.optim_G = torch.optim.Adam(G_param_groups,
                                            lr=opt.lr,
                                            betas=(opt.beta1, opt.beta2))
            self.optimizers.append(self.optim_G)
            # optim_D will optimize parameters of netD
            self.optim_D = torch.optim.Adam(self.netD.parameters(),
                                            lr=opt.lr_D,
                                            betas=(opt.beta1, opt.beta2))
            self.optimizers.append(self.optim_D)
            # optim_D_aux will optimize parameters of auxiliaryDs
            if len(self.auxiliaryDs) > 0:
                aux_D_param_groups = [{
                    'params': net.parameters()
                } for net in self.auxiliaryDs.values()]
                self.optim_D_aux = torch.optim.Adam(aux_D_param_groups,
                                                    lr=opt.lr_D,
                                                    betas=(opt.beta1,
                                                           opt.beta2))
                self.optimizers.append(self.optim_D_aux)
            for optim in self.optimizers:
                self.schedulers.append(networks.get_scheduler(optim, opt))

        # color transformation from std to imagenet
        # img_imagenet = img_std * a + b
        self.trans_std_to_imagenet = {
            'a': Variable(self.Tensor([0.5 / 0.229, 0.5 / 0.224, 0.5 / 0.225]),
                          requires_grad=False).view(3, 1, 1),
            'b': Variable(self.Tensor([(0.5 - 0.485) / 0.229,
                                       (0.5 - 0.456) / 0.224,
                                       (0.5 - 0.406) / 0.225]),
                          requires_grad=False).view(3, 1, 1)
        }
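
The 'a' and 'b' constants above follow from composing the two affine normalizations: a tensor x normalized to [-1, 1] (mean 0.5, std 0.5 per channel) is mapped to ImageNet statistics by ((0.5 * x + 0.5) - mean) / std = x * (0.5 / std) + (0.5 - mean) / std. A minimal standalone check of that algebra (the tensor names here are illustrative, not part of the original model):

import torch

mean = torch.tensor([0.485, 0.456, 0.406]).view(3, 1, 1)  # ImageNet channel means
std = torch.tensor([0.229, 0.224, 0.225]).view(3, 1, 1)   # ImageNet channel stds

a = 0.5 / std           # matches 'a' above
b = (0.5 - mean) / std  # matches 'b' above

x = torch.rand(3, 8, 8) * 2 - 1          # a fake image in [-1, 1]
direct = ((x * 0.5 + 0.5) - mean) / std  # un-normalize, then ImageNet-normalize
assert torch.allclose(x * a + b, direct, atol=1e-6)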
Example no. 25
        # (reconstructed from the truncated snippet, mirroring train_batch_logger below)
        train_logger = Logger(
            os.path.join(opt.result_path, 'train.log'),
            ['epoch', 'loss', 'acc', 'lr'])
        train_batch_logger = Logger(
            os.path.join(opt.result_path, 'train_batch.log'),
            ['epoch', 'batch', 'iter', 'loss', 'acc', 'lr'])

        if opt.nesterov:
            dampening = 0
        else:
            dampening = opt.dampening

        optimizer = optim.Adam(model.parameters(),
                               lr=opt.learning_rate,
                               betas=(0.9, 0.999))
        if opt.two_step:
            optimizer = None
            netG = define_G(3, 3, 64, 'batch', False)

            if opt.end:
                optimizerG = optim.Adam(list(model.parameters()) +
                                        list(netG.parameters()),
                                        opt.learning_rate,
                                        betas=(0.9, 0.999))
            else:
                optimizerG = optim.Adam(netG.parameters(),
                                        opt.learning_rate,
                                        betas=(0.9, 0.999))
            print(netG)
            if opt.use_gan:
                netD = define_D(3 + 3, 64, 'batch', use_sigmoid=False)
                optimizerD = optim.Adam(netD.parameters(),
                                        opt.learning_rate,
                                        betas=(0.9, 0.999))  # completed to match optimizerG above; the snippet was cut mid-call
Example no. 26
def image_loader(image_name):
    image = cv2.imread(image_name)
    # replicate-pad 16 px per side, then resize to the 256x256 network input
    image = cv2.copyMakeBorder(image, 16, 16, 16, 16, cv2.BORDER_REPLICATE)
    image = cv2.resize(image, (256, 256))
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    # HWC uint8 -> CHW float in [-1, 1], plus a leading batch dimension
    image = torch.from_numpy(image.transpose((2, 0, 1)))
    image = image.float().div(255)
    image = image.mul_(2).add_(-1)
    return image.unsqueeze(0)
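
A quick sanity check of the normalization used by image_loader above (a minimal sketch; the random array is a stand-in for a real image):

import numpy as np
import torch

dummy = np.random.randint(0, 256, (64, 64, 3), dtype=np.uint8)  # fake RGB patch
t = torch.from_numpy(dummy.transpose((2, 0, 1))).float().div(255).mul_(2).add_(-1)
assert -1.0 <= t.min().item() and t.max().item() <= 1.0
# imsave() below inverts this mapping with tensor / 2 + 0.5 before scaling back to 255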


def imsave(tensor, title):
    tensor = tensor[0, :, :, :].data.cpu().numpy()
    tensor = tensor.transpose((1, 2, 0))
    tensor = cv2.resize(tensor, (288, 288))
    # drop the 16-px border added in image_loader and map [-1, 1] back to [0, 1]
    image = tensor[16:-16, 16:-16, :] / 2 + 0.5
    cv2.imwrite('%s.jpg' % (title), image * 255)


netSeg = networks.define_G(6, 3, ngf, 'unet_256').to(device)
netSeg.load_state_dict(
    torch.load('Segmentation/netSeg.pth',
               map_location=lambda storage, loc: storage))

input_content = image_loader(opt.img_content).to(device)
input_style = image_loader(opt.img).to(device)

result = netSeg(torch.cat([input_content, input_style], 1))
imsave(result, title='temp/mask_ori')
Example no. 27
parser.add_argument(
    '--glyph_path',
    help='path to the corresponding glyph of the input style image')
parser.add_argument('--content_path', help='path to the target content image')
parser.add_argument('--save_name', help='name to save')

plt.switch_backend('agg')

opt = parser.parse_args()

opt.cuda = (opt.gpu != -1)
cudnn.benchmark = True
device = torch.device("cuda:%d" % (opt.gpu) if opt.cuda else "cpu")

###############   Model  ####################
netG = networks.define_G(9, 3).to(device)
netG.load_state_dict(
    torch.load('cache/%s_netG.pth' % (opt.style_name),
               map_location=lambda storage, loc: storage))
netG.eval()
for p in netG.parameters():
    p.requires_grad = False
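
Since the generator here is inference-only, an equivalent and slightly more idiomatic alternative to flipping requires_grad per parameter is to run the forward pass under torch.no_grad() (a sketch only; the input name is a placeholder for the 9-channel tensor that define_G(9, 3) expects):

with torch.no_grad():
    fake = netG(inp)  # inp: a (1, 9, 256, 256) float tensor on `device`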


###############   Processing   ####################
def image_loader(image_name):
    image = cv2.imread(image_name)
    image = cv2.resize(image, (256, 256))
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    image = torch.from_numpy(image.transpose((2, 0, 1)))
    image = image.float().div(255)
    # snippet truncated here; presumably it normalizes to [-1, 1] and adds a
    # batch dimension, as in the image_loader of Example no. 26:
    image = image.mul_(2).add_(-1)
    return image.unsqueeze(0)
Example no. 28
train_set = get_training_set()
test_set = get_test_set()
training_data_loader = DataLoader(dataset=train_set,
                                  num_workers=opt.threads,
                                  batch_size=opt.batchSize,
                                  shuffle=True)
testing_data_loader = DataLoader(dataset=test_set,
                                 num_workers=opt.threads,
                                 batch_size=opt.testBatchSize,
                                 shuffle=False)

train_logger = Logger(opt.nEpochs, len(training_data_loader), opt.date)
test_logger = Logger(opt.nEpochs, len(testing_data_loader), opt.date)

print('===> Building model')
netG = define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.batch_mode, False,
                [0])
netD = define_D(opt.input_nc + opt.output_nc, opt.ndf, opt.batch_mode, False,
                [0])

criterionGAN = GANLoss()
criterionL1 = nn.L1Loss()
criterionMSE = nn.MSELoss()

# setup optimizer
optimizerG = optim.Adam(netG.parameters(),
                        lr=opt.glr,
                        betas=(opt.beta1, 0.999))
optimizerD = optim.Adam(netD.parameters(),
                        lr=opt.dlr,
                        betas=(opt.beta1, 0.999))
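
Example no. 28 stops after building the networks, losses, and optimizers. For context, one pix2pix-style training step over an aligned (real_a, real_b) batch would typically look like the sketch below; this is an illustrative outline, not part of the original script, and it assumes the GANLoss instance is called as criterion(prediction, target_is_real) as in the pix2pix reference code (train_step and lamb are hypothetical names):

def train_step(real_a, real_b, lamb=10.0):
    fake_b = netG(real_a)

    # D step: real pairs labelled True, generated pairs labelled False
    optimizerD.zero_grad()
    loss_d_real = criterionGAN(netD(torch.cat((real_a, real_b), 1)), True)
    loss_d_fake = criterionGAN(netD(torch.cat((real_a, fake_b.detach()), 1)), False)
    loss_d = (loss_d_real + loss_d_fake) * 0.5
    loss_d.backward()
    optimizerD.step()

    # G step: fool D and stay close to the target under L1
    optimizerG.zero_grad()
    loss_g = (criterionGAN(netD(torch.cat((real_a, fake_b), 1)), True)
              + criterionL1(fake_b, real_b) * lamb)  # L1 weight is illustrative
    loss_g.backward()
    optimizerG.step()
    return loss_d.item(), loss_g.item()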
Example no. 29
root_path = './dataset/'

train_set = get_training_set(root_path)
test_set = get_test_set(root_path)

training_data_loader = DataLoader(dataset=train_set,
                                  num_workers=opt.threads,
                                  batch_size=opt.batchSize,
                                  shuffle=True)

test_data_loader = DataLoader(dataset=test_set,
                              num_workers=opt.threads,
                              batch_size=opt.testBatchSize,
                              shuffle=False)

cprint('==> Preparing Data Set: Complete\n', 'green')

################################################################################
cprint('==> Building Models', 'yellow')
netG = define_G(opt.input_nc, opt.output_nc, opt.ngf, norm='batch', use_dropout=False, gpu_ids=gpu_ids)
netD = define_D(opt.input_nc + opt.output_nc, opt.ndf, norm='batch', use_sigmoid=False, gpu_ids=gpu_ids)

print('---------- Networks initialized -------------')
print_network(netG)
print_network(netD)
print('-----------------------------------------------\n')
cprint('==> Building Models: Complete\n', 'green')

################################################################################
criterionGAN = GANLoss()
criterionL1 = nn.L1Loss()
criterionMSE = nn.MSELoss()

# setup optimizer
optimizerG = optim.Adam(netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
Example no. 30
    def initialize(self, opt):
        BaseModel.initialize(self, opt)
        if opt.resize_or_crop != 'none' or not opt.isTrain:  # when training at full res this causes OOM
            torch.backends.cudnn.benchmark = True
        self.isTrain = opt.isTrain
        input_nc = opt.label_nc if opt.label_nc != 0 else opt.input_nc

        ##### define networks
        # Generator network
        netG_input_nc = input_nc
        # Main Generator
        # (note: input channels are hard-coded to 11 here, overriding the
        # netG_input_nc computed above)
        self.netG = networks.define_G(11,
                                      opt.output_nc,
                                      opt.ngf,
                                      opt.netG,
                                      opt.n_downsample_global,
                                      opt.n_blocks_global,
                                      opt.n_local_enhancers,
                                      opt.n_blocks_local,
                                      opt.norm,
                                      gpu_ids=self.gpu_ids)

        self.netP = networks.define_P(44,
                                      20,
                                      opt.ngf,
                                      opt.netG,
                                      opt.n_downsample_global,
                                      opt.n_blocks_global,
                                      opt.n_local_enhancers,
                                      opt.n_blocks_local,
                                      opt.norm,
                                      gpu_ids=self.gpu_ids)
        self.netP.load_state_dict(
            torch.load(
                os.path.dirname(os.path.realpath(__file__)) +
                "/checkpoints/generate/parse.pth"))

        # Discriminator network
        if self.isTrain:
            use_sigmoid = opt.no_lsgan
            netD_input_nc = input_nc + opt.output_nc
            netB_input_nc = opt.output_nc * 2
            self.netD = networks.define_D(netD_input_nc,
                                          opt.ndf,
                                          opt.n_layers_D,
                                          opt.norm,
                                          use_sigmoid,
                                          opt.num_D,
                                          not opt.no_ganFeat_loss,
                                          gpu_ids=self.gpu_ids)
            #self.netB = networks.define_B(netB_input_nc, opt.output_nc, 32, 3, 3, opt.norm, gpu_ids=self.gpu_ids)

        if self.opt.verbose:
            print('---------- Networks initialized -------------')

        # load networks
        if not self.isTrain or opt.continue_train or opt.load_pretrain:
            pretrained_path = '' if not self.isTrain else opt.load_pretrain
            self.load_network(self.netG, 'G', opt.which_epoch, pretrained_path)

            if self.isTrain:
                self.load_network(self.netD, 'D', opt.which_epoch,
                                  pretrained_path)

        # set loss functions and optimizers
        if self.isTrain:
            if opt.pool_size > 0 and (len(self.gpu_ids)) > 1:
                raise NotImplementedError(
                    "Fake Pool Not Implemented for MultiGPU")
            self.fake_pool = ImagePool(opt.pool_size)
            self.old_lr = opt.lr

            # define loss functions
            self.loss_filter = self.init_loss_filter(not opt.no_ganFeat_loss,
                                                     not opt.no_vgg_loss)

            self.criterionGAN = networks.GANLoss(use_lsgan=not opt.no_lsgan,
                                                 tensor=self.Tensor)
            self.criterionFeat = torch.nn.L1Loss()
            if not opt.no_vgg_loss:
                self.criterionVGG = networks.VGGLoss(self.gpu_ids)
            self.criterionStyle = networks.StyleLoss(self.gpu_ids)
            # Names so we can breakout loss
            self.loss_names = self.loss_filter('G_GAN', 'G_GAN_Feat', 'G_VGG',
                                               'D_real', 'D_fake')
            # initialize optimizers
            # optimizer G
            if opt.niter_fix_global > 0:
                import sys
                if sys.version_info >= (3, 0):
                    finetune_list = set()
                else:
                    from sets import Set
                    finetune_list = Set()

                params_dict = dict(self.netG.named_parameters())
                params = []
                for key, value in params_dict.items():
                    if key.startswith('model' + str(opt.n_local_enhancers)):
                        params += [value]
                        finetune_list.add(key.split('.')[0])
                print(
                    '------------- Only training the local enhancer network (for %d epochs) ------------'
                    % opt.niter_fix_global)
                print('The layers that are finetuned are ',
                      sorted(finetune_list))
            else:
                params = (list(self.netG.parameters()) +
                          list(self.netimage.parameters()) +
                          list(self.netcolor.parameters()) +
                          list(self.netlabel.parameters()) +
                          list(self.netsketch.parameters()) +
                          list(self.classfier.parameters()))
                # params.extend(list(self.netimage.parameters()))

            self.optimizer_G = torch.optim.Adam(params,
                                                lr=opt.lr,
                                                betas=(opt.beta1, 0.999))

            # optimizer D
            params = list(self.netD.parameters())
            self.optimizer_D = torch.optim.Adam(params,
                                                lr=opt.lr,
                                                betas=(opt.beta1, 0.999))
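
When opt.niter_fix_global > 0, only the local enhancer is trained at first; the pix2pixHD reference loop later unfreezes the global generator by rebuilding optimizer_G over all generator parameters. A hedged sketch of that step, following the update_fixed_params convention in pix2pixHD (treat the body as illustrative, not a verbatim quote of this model's code):

    def update_fixed_params(self):
        # after opt.niter_fix_global epochs, resume training the full generator
        params = list(self.netG.parameters())
        self.optimizer_G = torch.optim.Adam(params,
                                            lr=self.opt.lr,
                                            betas=(self.opt.beta1, 0.999))
        if self.opt.verbose:
            print('------------ Now finetuning all generator layers -----------')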