Ejemplo n.º 1
0
    def fit(self):
        """Run the training loop for ``opt.max_steps`` iterations.

        Cycles the train loader endlessly, applies the configured MoA
        augmentation, optimizes ``self.net``, and logs / evaluates at the
        configured intervals.
        """
        opt = self.opt

        self.t1 = time.time()
        data_iter = None
        for step in range(opt.max_steps):
            # Endless cycling over the train loader: (re)create the
            # iterator on the first step and whenever it is exhausted.
            if data_iter is None:
                data_iter = iter(self.train_loader)
            try:
                batch = next(data_iter)
            except StopIteration:
                data_iter = iter(self.train_loader)
                batch = next(data_iter)

            HR = batch[0].to(self.dev)
            LR = batch[1].to(self.dev)

            # CutBlur mixes HR/LR patches, so LR must match HR's spatial
            # size; upsample with nearest-neighbor when it does not.
            if HR.size() != LR.size():
                scale = HR.size(2) // LR.size(2)
                LR = F.interpolate(LR, scale_factor=scale, mode="nearest")

            HR, LR, mask, aug = augments.apply_augment(
                HR, LR, opt.augs, opt.prob, opt.alpha,
                opt.aux_alpha, opt.aux_alpha, opt.mix_p,
            )

            SR = self.net(LR)
            if isinstance(SR, (tuple, list)):
                # Multi-output network: accumulate the loss over every head.
                if aug == "cutout":
                    HR = HR * mask
                    SR = [out * mask for out in SR]
                loss = self.loss_fn(SR[0], HR)
                for out in SR[1:]:
                    loss = loss + self.loss_fn(out, HR)
            else:
                if aug == "cutout":
                    # Zero the cut region in both so the loss ignores it.
                    SR = SR * mask
                    HR = HR * mask
                loss = self.loss_fn(SR, HR)

            self.optim.zero_grad()
            loss.backward()

            # Optional element-wise gradient clipping before the update.
            if opt.gclip > 0:
                torch.nn.utils.clip_grad_value_(self.net.parameters(),
                                                opt.gclip)

            self.optim.step()
            self.scheduler.step()

            if (step + 1) % opt.log_intervals == 0:
                done_k = (step + 1) // 1000
                total_k = self.opt.max_steps // 1000
                logger.info(f"[{done_k}K/{total_k}K] {loss.data:.2f}")

            if (step + 1) % opt.eval_steps == 0:
                self.summary_and_save(step)
Ejemplo n.º 2
0
    def fit(self):
        """Train ``self.net`` for ``opt.max_steps`` iterations.

        Creates (or truncates) the per-evaluation PSNR log file up front,
        then runs the CutBlur training loop, calling
        ``self.summary_and_save`` every ``opt.eval_steps`` steps.
        """
        opt = self.opt

        self.t1 = time.time()

        # Create the log directory; exist_ok=True avoids the
        # check-then-create race of an os.path.exists() guard.
        os.makedirs(opt.save_log, exist_ok=True)
        log_file = os.path.join(opt.save_log, opt.log_file)
        # Truncate/create the PSNR log file; the context manager guarantees
        # the handle is closed even if open() partially fails.
        with open(log_file, 'w'):
            pass

        for step in range(opt.max_steps):
            # Endlessly cycle the train loader: the first pass raises
            # UnboundLocalError (iters not yet bound), later passes raise
            # StopIteration when the loader is exhausted.
            try:
                inputs = next(iters)
            except (UnboundLocalError, StopIteration):
                iters = iter(self.train_loader)
                inputs = next(iters)

            HR = inputs[0].to(self.dev)
            LR = inputs[1].to(self.dev)

            # match the resolution of (LR, HR) due to CutBlur
            if HR.size() != LR.size():
                scale = HR.size(2) // LR.size(2)
                LR = F.interpolate(LR, scale_factor=scale, mode="nearest")

            HR, LR, mask, aug = augments.apply_augment(
                HR, LR,
                opt.augs, opt.prob, opt.alpha,
                opt.aux_alpha, opt.aux_alpha, opt.mix_p
            )

            SR = self.net(LR)
            # Cutout zeroes the same region in SR and HR so the loss
            # ignores the masked pixels.
            if aug == "cutout":
                SR, HR = SR*mask, HR*mask

            loss = self.loss_fn(SR, HR)
            self.optim.zero_grad()
            loss.backward()

            # Optional element-wise gradient clipping before the update.
            if opt.gclip > 0:
                torch.nn.utils.clip_grad_value_(self.net.parameters(), opt.gclip)

            self.optim.step()
            self.scheduler.step()

            if (step+1) % opt.eval_steps == 0:
                self.summary_and_save(step, log_file)
Ejemplo n.º 3
0
    # Generator pre-training: minimize pixel-wise loss only (no adversarial
    # term yet). NOTE(review): the inner loop body continues beyond this
    # chunk — the optimizer step and epoch bookkeeping are not visible here.
    for epoch in range(start_epoch + 1, opt.nEpochs_pre + 1):
        mean_generator_pixel_loss = 0.0
        # data-lr target-hr

        for i, target in enumerate(train_dataloader):  # 16,700 samples; one epoch is 1045 iterations
            # Synthesize the LR input by Matlab-style downsampling of the HR target.
            data = DownSample2DMatlab(target, 1/(float(opt.upSampling)))
            data = torch.clamp(data, 0, 1)
            # MoA (mixture-of-augmentations) data augmentation
            if target.size() != data.size():
                scale = target.size(2) // data.size(2)
                data = F.interpolate(data, scale_factor=scale, mode="nearest")  # upsample along spatial dims (dims 0 and 1 are batch size and channel)

            HR, LR, mask, aug = apply_augment(
                target, data,
                opt.augs, opt.prob, opt.alpha,
                opt.aux_alpha, opt.aux_alpha, opt.mix_p
            )
            #        Train generator         #
            generator.zero_grad()

            # Generate real and fake inputs
            HR = Variable(HR.to(device))
            SR = generator(Variable(LR).to(device))

            # Cutout zeroes the same region in SR and HR so the loss ignores it.
            if aug == "cutout":
                SR, HR = SR * mask, HR * mask

            generator_pixel_loss = criterion_pixel(SR, HR).to(device)
            mean_generator_pixel_loss += generator_pixel_loss.item()  # item() extracts the Python scalar from the loss tensor