Example #1
    def __init__(self, cfg):

        # build train dataloader
        self.train_dataloader = build_dataloader(cfg.dataset.train)
        
        if 'lr_scheduler' in cfg.optimizer:
            cfg.optimizer.lr_scheduler.step_per_epoch = len(self.train_dataloader)
        
        # build model
        self.model = build_model(cfg)
        # prepare for multi-GPU training
        if ParallelEnv().nranks > 1:
            self.distributed_data_parallel()

        self.logger = logging.getLogger(__name__)

        # base config
        self.output_dir = cfg.output_dir
        self.epochs = cfg.epochs
        self.start_epoch = 0
        self.current_epoch = 0
        self.batch_id = 0
        self.weight_interval = cfg.snapshot_config.interval
        self.log_interval = cfg.log_config.interval
        self.visual_interval = cfg.log_config.visual_interval
        self.cfg = cfg

        self.local_rank = ParallelEnv().local_rank

        # time count
        self.time_count = {}
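
A sketch of how this constructor might be driven. The class name `Trainer`, the `get_config` helper, and the `train()` entry point are assumptions for illustration and do not appear in the snippet:

    # Hypothetical usage; Trainer, get_config and train() are assumed names.
    cfg = get_config('configs/model.yaml')  # illustrative path
    trainer = Trainer(cfg)
    trainer.train()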
Example #2
import os
import time

import paddle
from paddle.fluid.dygraph import ParallelEnv

# setup_logger is the helper shown in Example #6


def setup(args, cfg):
    if args.evaluate_only:
        cfg.isTrain = False

    cfg.timestamp = time.strftime('-%Y-%m-%d-%H-%M', time.localtime())
    cfg.output_dir = os.path.join(cfg.output_dir,
                                  str(cfg.model.name) + cfg.timestamp)

    logger = setup_logger(cfg.output_dir)

    logger.info('Configs: {}'.format(cfg))

    place = paddle.fluid.CUDAPlace(ParallelEnv().dev_id) \
                    if ParallelEnv().nranks > 1 else paddle.fluid.CUDAPlace(0)
    paddle.enable_imperative(place)
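
`setup` mutates the config in place before training or evaluation starts: it clears `isTrain` for evaluation-only runs, timestamps the output directory, creates the logger, and switches Paddle into imperative (dygraph) mode on a CUDA device. A hedged call-site sketch; `parse_args` and `get_config` are assumed helpers:

    # Hypothetical call site; parse_args and get_config are assumed helpers.
    args = parse_args()
    cfg = get_config(args.config_file)
    setup(args, cfg)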
Example #3
    def __init__(self, cfg):

        # build train dataloader
        self.train_dataloader = build_dataloader(cfg.dataset.train)
        
        if 'lr_scheduler' in cfg.optimizer:
            cfg.optimizer.lr_scheduler.step_per_epoch = len(self.train_dataloader)
        
        # build model
        self.model = build_model(cfg)

        self.logger = logging.getLogger(__name__)
        # base config
        self.output_dir = cfg.output_dir
        self.epochs = cfg.epochs
        self.start_epoch = 0
        self.current_epoch = 0
        self.batch_id = 0
        self.weight_interval = cfg.snapshot_config.interval
        self.log_interval = cfg.log_config.interval
        self.visual_interval = cfg.log_config.visual_interval
        self.cfg = cfg

        self.local_rank = ParallelEnv().local_rank
Example #4
    def backward_D_basic(self, netD, real, fake):
        """Calculate GAN loss for the discriminator

        Parameters:
            netD (network)      -- the discriminator D
            real (tensor array) -- real images
            fake (tensor array) -- images generated by a generator

        Return the discriminator loss.
        We also call loss_D.backward() to calculate the gradients.
        """
        # Real
        pred_real = netD(real)
        loss_D_real = self.criterionGAN(pred_real, True)
        # Fake
        pred_fake = netD(fake.detach())
        loss_D_fake = self.criterionGAN(pred_fake, False)
        # Combined loss and calculate gradients
        loss_D = (loss_D_real + loss_D_fake) * 0.5
        # backward pass (parallel-aware on multiple GPUs)
        if ParallelEnv().nranks > 1:
            loss_D = netD.scale_loss(loss_D)
            loss_D.backward()
            netD.apply_collective_grads()
        else:
            loss_D.backward()
        return loss_D
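
Examples #4, #7, #8 and #9 all repeat the same multi-GPU idiom from Paddle's older imperative data-parallel API: the loss is scaled with `scale_loss` before `backward()` so gradient averaging across ranks comes out right, and `apply_collective_grads` then all-reduces the gradients. The generic shape of the pattern, with placeholder names:

    # Generic form of the data-parallel backward pass (placeholder names).
    if ParallelEnv().nranks > 1:
        loss = parallel_net.scale_loss(loss)   # rescale loss for multi-rank averaging
        loss.backward()                        # compute local gradients
        parallel_net.apply_collective_grads()  # all-reduce gradients across GPUs
    else:
        loss.backward()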
Example #5
    def __init__(self, dataset, batch_size, is_train, num_workers=4):

        self.dataset = DictDataset(dataset)

        place = paddle.fluid.CUDAPlace(ParallelEnv().dev_id) \
                    if ParallelEnv().nranks > 1 else paddle.fluid.CUDAPlace(0)

        sampler = DistributedBatchSampler(
            self.dataset,
            batch_size=batch_size,
            shuffle=is_train,
            drop_last=is_train)

        self.dataloader = paddle.io.DataLoader(self.dataset,
                                               batch_sampler=sampler,
                                               places=place,
                                               num_workers=num_workers)

        self.batch_size = batch_size
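
The enclosing class is not shown; assuming it is a wrapper named something like `DictDataLoader`, usage might look like this sketch:

    # Hypothetical usage; the wrapper class name is an assumption.
    loader = DictDataLoader(train_dataset, batch_size=4, is_train=True)
    for batch in loader.dataloader:
        pass  # each batch is a dict of tensors produced via DictDataset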
Example #6
import logging
import os
import sys

from paddle.fluid.dygraph import ParallelEnv


def setup_logger(output=None, name="ppgan"):
    """
    Initialize the ppgan logger and set its verbosity level to "DEBUG".

    Args:
        output (str): a file name or a directory to save log. If None, will not save log file.
            If ends with ".txt" or ".log", assumed to be a file name.
            Otherwise, logs will be saved to `output/log.txt`.
        name (str): the root module name of this logger

    Returns:
        logging.Logger: a logger
    """
    logger = logging.getLogger(name)
    logger.setLevel(logging.DEBUG)
    logger.propagate = False

    plain_formatter = logging.Formatter(
        "[%(asctime)s] %(name)s %(levelname)s: %(message)s",
        datefmt="%m/%d %H:%M:%S")
    # stdout logging: master only
    local_rank = ParallelEnv().local_rank
    if local_rank == 0:
        ch = logging.StreamHandler(stream=sys.stdout)
        ch.setLevel(logging.DEBUG)
        formatter = plain_formatter
        ch.setFormatter(formatter)
        logger.addHandler(ch)

    # file logging: all workers
    if output is not None:
        if output.endswith(".txt") or output.endswith(".log"):
            filename = output
        else:
            filename = os.path.join(output, "log.txt")
        if local_rank > 0:
            filename = filename + ".rank{}".format(local_rank)

        os.makedirs(os.path.dirname(filename), exist_ok=True)

        fh = logging.FileHandler(filename, mode='a')
        fh.setLevel(logging.DEBUG)
        fh.setFormatter(plain_formatter)
        logger.addHandler(fh)

    return logger
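
A minimal usage sketch, following the docstring above: with a directory argument, rank 0 logs to stdout and to `output/log.txt`, while every other worker appends to `log.txt.rank<N>`:

    # Directory form: logs land in ./output/log.txt (plus .rank<N> suffixes).
    logger = setup_logger(output='./output', name='ppgan')
    logger.info('logger initialized')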
Example #7
    def backward_G(self):
        """Calculate GAN and L1 loss for the generator"""
        # First, G(A) should fake the discriminator
        fake_AB = paddle.concat((self.real_A, self.fake_B), 1)
        pred_fake = self.netD(fake_AB)
        self.loss_G_GAN = self.criterionGAN(pred_fake, True)
        # Second, G(A) = B
        self.loss_G_L1 = self.criterionL1(self.fake_B,
                                          self.real_B) * self.opt.lambda_L1
        # combine loss and calculate gradients
        self.loss_G = self.loss_G_GAN + self.loss_G_L1

        if ParallelEnv().nranks > 1:
            self.loss_G = self.netG.scale_loss(self.loss_G)
            self.loss_G.backward()
            self.netG.apply_collective_grads()
        else:
            self.loss_G.backward()
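
In equation form this is the standard pix2pix generator objective, assuming `criterionGAN` wraps a GAN loss and `criterionL1` is an L1 loss: loss_G = L_GAN(D(A, G(A)), real) + lambda_L1 * ||G(A) - B||_1.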
Example #8
    def backward_D(self):
        """Calculate GAN loss for the discriminator"""
        # Fake; stop backprop to the generator by detaching fake_B.
        # With conditional GANs we need to feed both input and output to the discriminator.
        fake_AB = paddle.concat((self.real_A, self.fake_B), 1)
        pred_fake = self.netD(fake_AB.detach())
        self.loss_D_fake = self.criterionGAN(pred_fake, False)
        # Real
        real_AB = paddle.concat((self.real_A, self.real_B), 1)
        pred_real = self.netD(real_AB)
        self.loss_D_real = self.criterionGAN(pred_real, True)
        # combine loss and calculate gradients
        self.loss_D = (self.loss_D_fake + self.loss_D_real) * 0.5
        if ParallelEnv().nranks > 1:
            self.loss_D = self.netD.scale_loss(self.loss_D)
            self.loss_D.backward()
            self.netD.apply_collective_grads()
        else:
            self.loss_D.backward()
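
This is the matching conditional-discriminator objective, loss_D = 0.5 * (L_GAN(D(A, B), real) + L_GAN(D(A, G(A)), fake)); halving the loss slows the discriminator relative to the generator, as in the original pix2pix implementation.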
Example #9
    def backward_G(self):
        """Calculate the loss for generators G_A and G_B"""
        lambda_idt = self.opt.lambda_identity
        lambda_A = self.opt.lambda_A
        lambda_B = self.opt.lambda_B
        # Identity loss
        if lambda_idt > 0:
            # G_A should be identity if real_B is fed: ||G_A(B) - B||
            self.idt_A = self.netG_A(self.real_B)
            self.loss_idt_A = self.criterionIdt(
                self.idt_A, self.real_B) * lambda_B * lambda_idt
            # G_B should be identity if real_A is fed: ||G_B(A) - A||
            self.idt_B = self.netG_B(self.real_A)
            self.loss_idt_B = self.criterionIdt(
                self.idt_B, self.real_A) * lambda_A * lambda_idt
        else:
            self.loss_idt_A = 0
            self.loss_idt_B = 0

        # GAN loss D_A(G_A(A))
        self.loss_G_A = self.criterionGAN(self.netD_A(self.fake_B), True)
        # GAN loss D_B(G_B(B))
        self.loss_G_B = self.criterionGAN(self.netD_B(self.fake_A), True)
        # Forward cycle loss || G_B(G_A(A)) - A||
        self.loss_cycle_A = self.criterionCycle(self.rec_A,
                                                self.real_A) * lambda_A
        # Backward cycle loss || G_A(G_B(B)) - B||
        self.loss_cycle_B = self.criterionCycle(self.rec_B,
                                                self.real_B) * lambda_B
        # combined loss and calculate gradients
        self.loss_G = (self.loss_G_A + self.loss_G_B + self.loss_cycle_A +
                       self.loss_cycle_B + self.loss_idt_A + self.loss_idt_B)

        if ParallelEnv().nranks > 1:
            self.loss_G = self.netG_A.scale_loss(self.loss_G)
            self.loss_G.backward()
            self.netG_A.apply_collective_grads()
            self.netG_B.apply_collective_grads()
        else:
            self.loss_G.backward()
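
Written out, with `criterionCycle` and `criterionIdt` assumed to be L1 losses as in the reference CycleGAN implementation: loss_G = L_GAN(D_A(G_A(A))) + L_GAN(D_B(G_B(B))) + lambda_A * ||G_B(G_A(A)) - A||_1 + lambda_B * ||G_A(G_B(B)) - B||_1 + lambda_idt * (lambda_B * ||G_A(B) - B||_1 + lambda_A * ||G_B(A) - A||_1). Note that the combined loss is scaled through `netG_A.scale_loss` only, while `apply_collective_grads` runs on both generators.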