Example #1
    def __init__(self, args, dataloaders):

        self.dataloaders = dataloaders

        self.rderr = renderer.Renderer(renderer=args.renderer)

        # define G
        self.net_G = define_G(rdrr=self.rderr, netG=args.net_G).to(device)

        # Learning rate
        self.lr = args.lr

        # define optimizers
        self.optimizer_G = optim.Adam(self.net_G.parameters(),
                                      lr=self.lr,
                                      betas=(0.9, 0.999))

        # define lr schedulers
        self.exp_lr_scheduler_G = lr_scheduler.StepLR(self.optimizer_G,
                                                      step_size=100,
                                                      gamma=0.1)

        # define some other vars to record the training states
        self.running_acc = []
        self.epoch_acc = 0
        self.best_val_acc = 0.0
        self.best_epoch_id = 0
        self.epoch_to_start = 0
        self.max_num_epochs = args.max_num_epochs
        self.G_pred_foreground = None
        self.G_pred_alpha = None
        self.batch = None
        self.G_loss = None
        self.is_training = False
        self.batch_id = 0
        self.epoch_id = 0
        self.checkpoint_dir = args.checkpoint_dir
        self.vis_dir = args.vis_dir

        # define the loss functions
        self._pxl_loss = loss.PixelLoss(p=2)

        self.VAL_ACC = np.array([], np.float32)
        if os.path.exists(os.path.join(self.checkpoint_dir, 'val_acc.npy')):
            self.VAL_ACC = np.load(
                os.path.join(self.checkpoint_dir, 'val_acc.npy'))

        # check and create model dir
        if not os.path.exists(self.checkpoint_dir):
            os.mkdir(self.checkpoint_dir)
        if not os.path.exists(self.vis_dir):
            os.mkdir(self.vis_dir)

        # visualize model
        if args.print_models:
            self._visualize_models()
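
For context, the Adam + StepLR pairing above is the core training plumbing. A self-contained sketch of how the two interact over epochs (the linear layer is a stand-in for the real net_G; the training step itself is elided):

import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler

net_G = nn.Linear(8, 8)  # stand-in for the generator built by define_G()

optimizer_G = optim.Adam(net_G.parameters(), lr=1e-3, betas=(0.9, 0.999))
# StepLR multiplies the lr by gamma every step_size epochs, as above.
scheduler_G = lr_scheduler.StepLR(optimizer_G, step_size=100, gamma=0.1)

for epoch in range(300):
    # ... forward pass, loss, backward() would run here ...
    optimizer_G.step()   # placeholder update; normally follows backward()
    scheduler_G.step()

# The lr decayed at epochs 100, 200 and 300: 1e-3 -> 1e-4 -> 1e-5 -> 1e-6.
print(optimizer_G.param_groups[0]['lr'])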
Example #2
    def __init__(self, args, device=DEVICE):
        self.args = args
        self.device = device

        self.rderr = renderer.Renderer(renderer=args.renderer,
                                       CANVAS_WIDTH=args.canvas_size,
                                       canvas_color=args.canvas_color)

        # define G
        self.net_G = define_G(rdrr=self.rderr, netG=args.net_G).to(device)

        # define some other vars to record the training states
        self.x_ctt = None
        self.x_color = None
        self.x_alpha = None

        self.G_pred_foreground = None
        self.G_pred_alpha = None
        self.G_final_pred_canvas = torch.zeros([1, 3, 128, 128]).to(device)

        self.G_loss = torch.tensor(0.0)
        self.step_id = 0
        self.anchor_id = 0
        self.renderer_checkpoint_dir = args.renderer_checkpoint_dir
        self.output_dir = args.output_dir
        self.lr = args.lr

        # define the loss functions
        self._pxl_loss = loss.PixelLoss(p=1)
        self._sinkhorn_loss = loss.SinkhornLoss(epsilon=0.01,
                                                niter=5,
                                                normalize=False)

        # some other vars to be initialized in child classes
        self.input_aspect_ratio = None
        self.img_path = None
        self.img_batch = None
        self.img_ = None
        self.final_rendered_images = None
        self.m_grid = None
        self.m_strokes_per_block = None

        if not os.path.exists(self.output_dir):
            os.mkdir(self.output_dir)

        if os.path.exists(args.vector_file):
            npzfile = np.load(args.vector_file)
            print(npzfile['x_ctt'].shape)
            print(npzfile['x_color'].shape)
            print(npzfile['x_alpha'].shape)
            # Reverse the stroke order within each block (axis 1); np.copy()
            # makes the reversed, negative-stride views contiguous again.
            self.x_ctt = np.copy(npzfile['x_ctt'][:, ::-1, :])
            self.x_color = np.copy(npzfile['x_color'][:, ::-1, :])
            self.x_alpha = np.copy(npzfile['x_alpha'][:, ::-1, :])
            self.m_grid = int(np.sqrt(self.x_ctt.shape[0]))
            self.anchor_id = self.x_ctt.shape[1] - 1
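
The vector-file branch merits a note: the [:, ::-1, :] slice reverses the stroke order within each block, and np.copy() turns the resulting negative-stride view back into a contiguous array (torch.from_numpy rejects arrays with negative strides). A toy round-trip, with made-up shapes since the real ones depend on the renderer:

import numpy as np

# Made-up shapes: (m_grid**2 blocks, strokes per block, params per stroke).
x_ctt = np.random.rand(4, 5, 6).astype(np.float32)
x_color = np.random.rand(4, 5, 3).astype(np.float32)
x_alpha = np.random.rand(4, 5, 1).astype(np.float32)
np.savez('strokes.npz', x_ctt=x_ctt, x_color=x_color, x_alpha=x_alpha)

npzfile = np.load('strokes.npz')
x_ctt_rev = np.copy(npzfile['x_ctt'][:, ::-1, :])  # reverse stroke order

m_grid = int(np.sqrt(x_ctt_rev.shape[0]))  # 4 blocks -> a 2 x 2 grid
anchor_id = x_ctt_rev.shape[1] - 1         # index of the last stroke: 4
assert (x_ctt_rev[:, -1, :] == x_ctt[:, 0, :]).all()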
Example #3
    def __init__(self, args):
        self.args = args

        self.rderr = renderer.Renderer(
            renderer=args.renderer,
            CANVAS_WIDTH=args.canvas_size,
            canvas_color=args.canvas_color,
        )

        # define G
        self.net_G = define_G(rdrr=self.rderr, netG=args.net_G).to(device)

        # define some other vars to record the training states
        self.x_ctt = None
        self.x_color = None
        self.x_alpha = None

        self.G_pred_foreground = None
        self.G_pred_alpha = None
        self.G_final_pred_canvas = torch.zeros(
            [1, 3, self.net_G.out_size, self.net_G.out_size]).to(device)

        self.G_loss = torch.tensor(0.0)
        self.step_id = 0
        self.anchor_id = 0
        self.renderer_checkpoint_dir = args.renderer_checkpoint_dir
        self.output_dir = args.output_dir
        self.lr = args.lr

        # define the loss functions
        self._pxl_loss = loss.PixelLoss(p=1)
        self._sinkhorn_loss = loss.SinkhornLoss(epsilon=0.01,
                                                niter=5,
                                                normalize=False)

        # some other vars to be initialized in child classes
        self.input_aspect_ratio = None
        self.img_path = None
        self.img_batch = None
        self.img_ = None
        self.final_rendered_images = None
        self.m_grid = None
        self.m_strokes_per_block = None

        if not os.path.exists(self.output_dir):
            os.mkdir(self.output_dir)
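
Examples #2 and #3 both pair a pixel loss (p=1) with a Sinkhorn loss. loss.PixelLoss is project code, but a plausible minimal stand-in, assuming p simply selects the norm, looks like this (the real class may add masking or per-pixel weighting):

import torch
import torch.nn as nn

class PixelLossSketch(nn.Module):
    """Hypothetical stand-in for loss.PixelLoss: p=1 gives the mean absolute
    difference, p=2 the mean squared difference. Not the project's code."""
    def __init__(self, p=1):
        super().__init__()
        self.p = p

    def forward(self, pred, target):
        return (pred - target).abs().pow(self.p).mean()

pred = torch.zeros(1, 3, 128, 128)         # e.g. an all-zero initial canvas
target = torch.rand(1, 3, 128, 128)
print(PixelLossSketch(p=1)(pred, target))  # ~0.5 against uniform noise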
Example #4
    def __init__(self, args, dataloaders):

        self.dataloaders = dataloaders
        self.net_D1 = cycnet.define_D(input_nc=6,
                                      ndf=64,
                                      netD='n_layers',
                                      n_layers_D=2).to(device)
        self.net_D2 = cycnet.define_D(input_nc=6,
                                      ndf=64,
                                      netD='n_layers',
                                      n_layers_D=2).to(device)
        self.net_D3 = cycnet.define_D(input_nc=6,
                                      ndf=64,
                                      netD='n_layers',
                                      n_layers_D=3).to(device)
        self.net_G = cycnet.define_G(input_nc=3,
                                     output_nc=6,
                                     ngf=args.ngf,
                                     netG=args.net_G,
                                     use_dropout=False,
                                     norm='none').to(device)
        # M.Amintoosi: norm='instance' variant kept for reference:
        # self.net_G = cycnet.define_G(
        #     input_nc=3, output_nc=6, ngf=args.ngf, netG=args.net_G,
        #     use_dropout=False, norm='instance').to(device)

        # Learning rate and Beta1 for Adam optimizers
        self.lr = args.lr

        # define optimizers
        self.optimizer_G = optim.Adam(self.net_G.parameters(),
                                      lr=self.lr,
                                      betas=(0.5, 0.999))
        self.optimizer_D1 = optim.Adam(self.net_D1.parameters(),
                                       lr=self.lr,
                                       betas=(0.5, 0.999))
        self.optimizer_D2 = optim.Adam(self.net_D2.parameters(),
                                       lr=self.lr,
                                       betas=(0.5, 0.999))
        self.optimizer_D3 = optim.Adam(self.net_D3.parameters(),
                                       lr=self.lr,
                                       betas=(0.5, 0.999))

        # define lr schedulers
        self.exp_lr_scheduler_G = lr_scheduler.StepLR(
            self.optimizer_G,
            step_size=args.exp_lr_scheduler_stepsize,
            gamma=0.1)
        self.exp_lr_scheduler_D1 = lr_scheduler.StepLR(
            self.optimizer_D1,
            step_size=args.exp_lr_scheduler_stepsize,
            gamma=0.1)
        self.exp_lr_scheduler_D2 = lr_scheduler.StepLR(
            self.optimizer_D2,
            step_size=args.exp_lr_scheduler_stepsize,
            gamma=0.1)
        self.exp_lr_scheduler_D3 = lr_scheduler.StepLR(
            self.optimizer_D3,
            step_size=args.exp_lr_scheduler_stepsize,
            gamma=0.1)

        # coefficient to balance loss functions
        self.lambda_L1 = args.lambda_L1
        self.lambda_adv = args.lambda_adv

        # based on which metric to update the "best" ckpt
        self.metric = args.metric

        # define some other vars to record the training states
        self.running_acc = []
        self.epoch_acc = 0
        if 'mse' in self.metric:
            self.best_val_acc = 1e9  # for mse, rmse, a lower score is better
        else:
            self.best_val_acc = 0.0  # for others (ssim, psnr), a higher score is better
        self.best_epoch_id = 0
        self.epoch_to_start = 0
        self.max_num_epochs = args.max_num_epochs
        self.G_pred1 = None
        self.G_pred2 = None
        self.batch = None
        self.G_loss = None
        self.D_loss = None
        self.is_training = False
        self.batch_id = 0
        self.epoch_id = 0
        self.checkpoint_dir = args.checkpoint_dir
        self.vis_dir = args.vis_dir
        self.D1_fake_pool = utils.ImagePool(pool_size=50)
        self.D2_fake_pool = utils.ImagePool(pool_size=50)
        self.D3_fake_pool = utils.ImagePool(pool_size=50)

        # define the loss functions
        if args.pixel_loss == 'minimum_pixel_loss':
            self._pxl_loss = loss.MinimumPixelLoss(opt=1)  # 1 for L1, 2 for L2
        elif args.pixel_loss == 'pixel_loss':
            self._pxl_loss = loss.PixelLoss(opt=1)  # 1 for L1, 2 for L2
        else:
            raise NotImplementedError(
                'pixel loss function [%s] is not implemented' % args.pixel_loss)
        self._gan_loss = loss.GANLoss(gan_mode='vanilla').to(device)
        self._exclusion_loss = loss.ExclusionLoss()
        self._kurtosis_loss = loss.KurtosisLoss()
        # enable some losses?
        self.with_d1d2 = args.enable_d1d2
        self.with_d3 = args.enable_d3
        self.with_exclusion_loss = args.enable_exclusion_loss
        self.with_kurtosis_loss = args.enable_kurtosis_loss

        # m-th epoch to activate adversarial training
        self.m_epoch_activate_adv = int(self.max_num_epochs / 20) + 1

        # output auto-enhancement?
        self.output_auto_enhance = args.output_auto_enhance

        # use synfake to train D?
        self.synfake = args.enable_synfake

        # check and create model dir
        if not os.path.exists(self.checkpoint_dir):
            os.mkdir(self.checkpoint_dir)
        if not os.path.exists(self.vis_dir):
            os.mkdir(self.vis_dir)

        # visualize model
        if args.print_models:
            self._visualize_models()
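
One last note on Example #4: initializing best_val_acc to 1e9 for mse/rmse and to 0.0 otherwise implies a direction-aware "new best" test elsewhere in the trainer. A hedged reconstruction of that rule (the helper name is invented; the real code presumably does this inline when updating the best checkpoint):

def is_new_best(metric, epoch_acc, best_val_acc):
    # Hypothetical helper mirroring the init above: lower is better for
    # mse/rmse, higher is better for ssim/psnr-style metrics.
    if 'mse' in metric:                  # also matches 'rmse'
        return epoch_acc < best_val_acc  # best_val_acc starts at 1e9
    return epoch_acc > best_val_acc      # best_val_acc starts at 0.0

assert is_new_best('rmse', 0.05, 1e9)
assert is_new_best('psnr', 31.5, 0.0)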