Example #1
def test_pix2pix_fid(model, opt):
    opt.phase = 'val'
    opt.num_threads = 0
    opt.batch_size = 1
    opt.serial_batches = True
    opt.no_flip = True
    opt.load_size = 256
    opt.display_id = -1
    dataset = create_dataset(opt)
    model.model_eval()

    result_dir = os.path.join(opt.checkpoints_dir, opt.name, 'test_results')
    util.mkdirs(result_dir)

    fake_B = {}
    for i, data in enumerate(dataset):
        model.set_input(data)
        with torch.no_grad():
            model.forward()
        visuals = model.get_current_visuals()
        fake_B[data['A_paths'][0]] = visuals['fake_B']
        util.save_images(visuals,
                         model.image_paths,
                         result_dir,
                         direction=opt.direction,
                         aspect_ratio=opt.aspect_ratio)

    block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[2048]
    inception_model = InceptionV3([block_idx])
    inception_model.to(model.device)
    inception_model.eval()
    npz = np.load(os.path.join(opt.dataroot, 'real_stat_B.npz'))
    fid = get_fid(list(fake_B.values()), inception_model, npz, model.device,
                  opt.batch_size)

    return fid
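The real_stat_B.npz file loaded above presumably holds precomputed Inception statistics of the real target-domain images. Below is a minimal sketch of how such a file could be produced, assuming the pytorch-fid style InceptionV3 wrapper used in these snippets and assuming 'mu'/'sigma' as the stored keys (both are assumptions, not confirmed by the snippet):

import numpy as np
import torch
from pytorch_fid.inception import InceptionV3


@torch.no_grad()
def save_real_stats(real_images, npz_path, device='cuda', batch_size=32):
    # real_images: float tensor of shape (N, 3, H, W) with values in [0, 1].
    block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[2048]
    model = InceptionV3([block_idx]).to(device).eval()
    feats = []
    for start in range(0, real_images.size(0), batch_size):
        batch = real_images[start:start + batch_size].to(device)
        pred = model(batch)[0]  # pooled features, shape (B, 2048, 1, 1)
        feats.append(pred.squeeze(-1).squeeze(-1).cpu().numpy())
    feats = np.concatenate(feats, axis=0)
    np.savez(npz_path, mu=np.mean(feats, axis=0),
             sigma=np.cov(feats, rowvar=False))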
    def evaluate_model(self, step):
        self.is_best = False
        save_dir = os.path.join(self.opt.log_dir, 'eval', str(step))
        os.makedirs(save_dir, exist_ok=True)
        self.netG_student.eval()
        fakes, names = [], []
        cnt = 0
        for i, data_i in enumerate(tqdm(self.eval_dataloader)):
            if self.opt.dataset_mode == 'aligned':
                self.set_input(data_i)
            else:
                self.set_single_input(data_i)
            self.test()
            fakes.append(self.Sfake_B.cpu())
            for j in range(len(self.image_paths)):
                short_path = ntpath.basename(self.image_paths[j])
                name = os.path.splitext(short_path)[0]
                names.append(name)
                if cnt < 10:
                    input_im = util.tensor2im(self.real_A[j])
                    Sfake_im = util.tensor2im(self.Sfake_B[j])
                    Tfake_im = util.tensor2im(self.Tfake_B[j])
                    util.save_image(input_im,
                                    os.path.join(save_dir, 'input',
                                                 '%s.png' % name),
                                    create_dir=True)
                    util.save_image(Sfake_im,
                                    os.path.join(save_dir, 'Sfake',
                                                 '%s.png' % name),
                                    create_dir=True)
                    util.save_image(Tfake_im,
                                    os.path.join(save_dir, 'Tfake',
                                                 '%s.png' % name),
                                    create_dir=True)
                    if self.opt.dataset_mode == 'aligned':
                        real_im = util.tensor2im(self.real_B[j])
                        util.save_image(real_im,
                                        os.path.join(save_dir, 'real',
                                                     '%s.png' % name),
                                        create_dir=True)
                cnt += 1

        fid = get_fid(fakes,
                      self.inception_model,
                      self.npz,
                      device=self.device,
                      batch_size=self.opt.eval_batch_size)
        if fid < self.best_fid:
            self.is_best = True
            self.best_fid = fid
        self.fids.append(fid)
        if len(self.fids) > 3:
            self.fids.pop(0)
        ret = {
            'metric/fid': fid,
            'metric/fid-mean': sum(self.fids) / len(self.fids),
            'metric/fid-best': self.best_fid
        }
        if 'cityscapes' in self.opt.dataroot and self.opt.direction == 'BtoA':
            mIoU = get_mIoU(fakes,
                            names,
                            self.drn_model,
                            self.device,
                            table_path=self.opt.table_path,
                            data_dir=self.opt.cityscapes_path,
                            batch_size=self.opt.eval_batch_size,
                            num_workers=self.opt.num_threads)
            if mIoU > self.best_mIoU:
                self.is_best = True
                self.best_mIoU = mIoU
            self.mIoUs.append(mIoU)
            if len(self.mIoUs) > 3:
                self.mIoUs = self.mIoUs[1:]
            ret['metric/mIoU'] = mIoU
            ret['metric/mIoU-mean'] = sum(self.mIoUs) / len(self.mIoUs)
            ret['metric/mIoU-best'] = self.best_mIoU
        self.netG_student.train()
        return ret
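Every evaluate_model variant in these examples repeats the same bookkeeping: keep a window of the last three scores, track the best value seen, and set an is_best flag. A minimal sketch of that pattern factored into a helper follows; this is a hypothetical refactor, not part of the original code:

class MetricTracker:
    """Rolling window of recent scores plus the best score seen so far."""

    def __init__(self, higher_is_better=False, window=3):
        self.higher_is_better = higher_is_better
        self.window = window
        self.history = []
        self.best = float('-inf') if higher_is_better else float('inf')

    def update(self, value):
        # Record a new score; return True if it improves on the best so far.
        improved = (value > self.best) if self.higher_is_better else (value < self.best)
        if improved:
            self.best = value
        self.history.append(value)
        if len(self.history) > self.window:
            self.history.pop(0)
        return improved

    @property
    def mean(self):
        return sum(self.history) / len(self.history)

# Usage sketch: fid_tracker = MetricTracker(); miou_tracker = MetricTracker(higher_is_better=True)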
Example #3
    def evaluate_model(self, step):
        ret = {}
        self.is_best = False
        save_dir = os.path.join(self.opt.log_dir, 'eval', str(step))
        os.makedirs(save_dir, exist_ok=True)
        if self.opt.eval_mode == 'both':
            settings = ('largest', 'smallest')
        else:
            settings = (self.opt.eval_mode, )
        for config_name in settings:
            config = self.configs(config_name)
            fakes, names = [], []
            self.modules_on_one_gpu.netG_student.train()
            self.calibrate(config, 2)
            tqdm_position = 2 + int(not self.opt.no_calibration)
            self.modules_on_one_gpu.netG_student.eval()
            torch.cuda.empty_cache()

            cnt = 0
            for i, data_i in enumerate(
                    tqdm(self.eval_dataloader,
                         desc='Eval       ',
                         position=tqdm_position,
                         leave=False)):
                self.set_input(data_i)
                self.test(config=config)
                fakes.append(self.Sfake_B.cpu())
                for j in range(len(self.image_paths)):
                    short_path = ntpath.basename(self.image_paths[j])
                    name = os.path.splitext(short_path)[0]
                    names.append(name)
                    if cnt < 10:
                        input_im = util.tensor2label(self.input_semantics[j],
                                                     self.opt.input_nc + 2)
                        real_im = util.tensor2im(self.real_B[j])
                        Tfake_im = util.tensor2im(self.Tfake_B[j])
                        Sfake_im = util.tensor2im(self.Sfake_B[j])
                        util.save_image(input_im,
                                        os.path.join(save_dir, 'input',
                                                     '%s.png' % name),
                                        create_dir=True)
                        util.save_image(real_im,
                                        os.path.join(save_dir, 'real',
                                                     '%s.png' % name),
                                        create_dir=True)
                        util.save_image(Tfake_im,
                                        os.path.join(save_dir, 'Tfake',
                                                     '%s.png' % name),
                                        create_dir=True)
                        util.save_image(Sfake_im,
                                        os.path.join(save_dir, 'Sfake',
                                                     '%s.png' % name),
                                        create_dir=True)
                    cnt += 1

            if not self.opt.no_fid:
                fid = get_fid(fakes,
                              self.inception_model,
                              self.npz,
                              device=self.device,
                              batch_size=self.opt.eval_batch_size,
                              tqdm_position=2)
                if fid < getattr(self, 'best_fid_%s' % config_name):
                    self.is_best = True
                    setattr(self, 'best_fid_%s' % config_name, fid)
                fids = getattr(self, 'fids_%s' % config_name)
                fids.append(fid)
                if len(fids) > 3:
                    fids.pop(0)
                ret['metric/fid_%s' % config_name] = fid
                ret['metric/fid_%s-mean' % config_name] = sum(
                    getattr(self, 'fids_%s' % config_name)) / len(
                        getattr(self, 'fids_%s' % config_name))
                ret['metric/fid_%s-best' % config_name] = getattr(
                    self, 'best_fid_%s' % config_name)
            if 'cityscapes' in self.opt.dataroot and not self.opt.no_mIoU:
                mIoU = get_cityscapes_mIoU(fakes,
                                           names,
                                           self.drn_model,
                                           self.device,
                                           table_path=self.opt.table_path,
                                           data_dir=self.opt.cityscapes_path,
                                           batch_size=self.opt.eval_batch_size,
                                           num_workers=self.opt.num_threads,
                                           tqdm_position=2)
                if mIoU > getattr(self, 'best_mIoU_%s' % config_name):
                    self.is_best = True
                    setattr(self, 'best_mIoU_%s' % config_name, mIoU)
                mIoUs = getattr(self, 'mIoUs_%s' % config_name)
                mIoUs.append(mIoU)
                if len(mIoUs) > 3:
                    mIoUs.pop(0)
                ret['metric/mIoU_%s' % config_name] = mIoU
                ret['metric/mIoU_%s-mean' % config_name] = sum(
                    getattr(self, 'mIoUs_%s' % config_name)) / len(
                        getattr(self, 'mIoUs_%s' % config_name))
                ret['metric/mIoU_%s-best' % config_name] = getattr(
                    self, 'best_mIoU_%s' % config_name)
            self.modules_on_one_gpu.netG_student.train()

        torch.cuda.empty_cache()
        return ret
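The per-configuration bookkeeping above (fids_largest, best_fid_smallest, and so on) relies on those attributes having been initialized elsewhere. A minimal sketch of the initialization this getattr/setattr pattern presupposes; the attribute names follow the snippet, while the function name and initial values are assumptions:

def init_eval_trackers(self, config_names=('largest', 'smallest')):
    # Hypothetical initializer for the rolling/best metric attributes used above.
    for config_name in config_names:
        setattr(self, 'fids_%s' % config_name, [])
        setattr(self, 'best_fid_%s' % config_name, 1e9)
        setattr(self, 'mIoUs_%s' % config_name, [])
        setattr(self, 'best_mIoU_%s' % config_name, -1e9)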
Example #4
    def evaluate_model(self, step):
        ret = {}
        self.is_best = False
        save_dir = os.path.join(self.opt.log_dir, 'eval', str(step))
        os.makedirs(save_dir, exist_ok=True)
        self.netG_A.eval()
        self.netG_B.eval()
        for direction in ['AtoB', 'BtoA']:
            eval_dataloader = getattr(self, 'eval_dataloader_' + direction)
            fakes, names = [], []
            cnt = 0
            # print(len(eval_dataset))
            for i, data_i in enumerate(tqdm(eval_dataloader)):
                self.set_single_input(data_i)
                self.test_single_side(direction)
                # print(self.image_paths)
                fakes.append(self.fake_B.cpu())
                for j in range(len(self.image_paths)):
                    short_path = ntpath.basename(self.image_paths[j])
                    name = os.path.splitext(short_path)[0]
                    names.append(name)
                    if cnt < 10:
                        input_im = util.tensor2im(self.real_A[j])
                        fake_im = util.tensor2im(self.fake_B[j])
                        util.save_image(input_im,
                                        os.path.join(save_dir, direction,
                                                     'input', '%s.png' % name),
                                        create_dir=True)
                        util.save_image(fake_im,
                                        os.path.join(save_dir, direction,
                                                     'fake', '%s.png' % name),
                                        create_dir=True)
                    cnt += 1

            suffix = direction[-1]
            fid = get_fid(fakes,
                          self.inception_model,
                          getattr(self, 'npz_%s' % direction[-1]),
                          device=self.device,
                          batch_size=self.opt.eval_batch_size)
            if fid < getattr(self, 'best_fid_%s' % suffix):
                self.is_best = True
                setattr(self, 'best_fid_%s' % suffix, fid)
            fids = getattr(self, 'fids_%s' % suffix)
            fids.append(fid)
            if len(fids) > 3:
                fids.pop(0)
            ret['metric/fid_%s' % suffix] = fid
            ret['metric/fid_%s-mean' %
                suffix] = sum(getattr(self, 'fids_%s' % suffix)) / len(
                    getattr(self, 'fids_%s' % suffix))
            ret['metric/fid_%s-best' % suffix] = getattr(
                self, 'best_fid_%s' % suffix)

            if 'cityscapes' in self.opt.dataroot and direction == 'BtoA':
                mIoU = get_mIoU(fakes,
                                names,
                                self.drn_model,
                                self.device,
                                table_path=self.opt.table_path,
                                data_dir=self.opt.cityscapes_path,
                                batch_size=self.opt.eval_batch_size,
                                num_workers=self.opt.num_threads)
                if mIoU > self.best_mIoU:
                    self.is_best = True
                    self.best_mIoU = mIoU
                self.mIoUs.append(mIoU)
                if len(self.mIoUs) > 3:
                    self.mIoUs = self.mIoUs[1:]
                ret['metric/mIoU'] = mIoU
                ret['metric/mIoU-mean'] = sum(self.mIoUs) / len(self.mIoUs)
                ret['metric/mIoU-best'] = self.best_mIoU

        self.netG_A.train()
        self.netG_B.train()
        return ret
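test_single_side above is presumably a thin wrapper that runs only the generator for the requested direction without gradients. A sketch under that assumption; the attribute names follow the snippet, but the body is guessed rather than taken from the source:

import torch

def test_single_side(self, direction):
    # Assumed behaviour: pick the generator matching the direction and run it
    # under no_grad on the input set by set_single_input().
    generator = self.netG_A if direction == 'AtoB' else self.netG_B
    with torch.no_grad():
        self.fake_B = generator(self.real_A)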
    def evaluate_model(self, step):
        self.is_best = False
        save_dir = os.path.join(self.opt.log_dir, 'eval', str(step))
        os.makedirs(save_dir, exist_ok=True)
        self.modules_on_one_gpu.netG_student.eval()
        torch.cuda.empty_cache()
        fakes, names = [], []
        ret = {}
        cnt = 0
        for i, data_i in enumerate(
                tqdm(self.eval_dataloader,
                     desc='Eval       ',
                     position=2,
                     leave=False)):
            self.set_input(data_i)
            self.test()
            fakes.append(self.Sfake_B.cpu())
            for j in range(len(self.image_paths)):
                short_path = ntpath.basename(self.image_paths[j])
                name = os.path.splitext(short_path)[0]
                names.append(name)
                if cnt < 10:
                    input_im = util.tensor2label(self.input_semantics[j],
                                                 self.opt.input_nc + 2)
                    real_im = util.tensor2im(self.real_B[j])
                    Tfake_im = util.tensor2im(self.Tfake_B[j])
                    Sfake_im = util.tensor2im(self.Sfake_B[j])
                    util.save_image(input_im,
                                    os.path.join(save_dir, 'input',
                                                 '%s.png' % name),
                                    create_dir=True)
                    util.save_image(real_im,
                                    os.path.join(save_dir, 'real',
                                                 '%s.png' % name),
                                    create_dir=True)
                    util.save_image(Tfake_im,
                                    os.path.join(save_dir, 'Tfake',
                                                 '%s.png' % name),
                                    create_dir=True)
                    util.save_image(Sfake_im,
                                    os.path.join(save_dir, 'Sfake',
                                                 '%s.png' % name),
                                    create_dir=True)
                cnt += 1
        if not self.opt.no_fid:
            fid = get_fid(fakes,
                          self.inception_model,
                          self.npz,
                          device=self.device,
                          batch_size=self.opt.eval_batch_size,
                          tqdm_position=2)
            if fid < self.best_fid:
                self.is_best = True
                self.best_fid = fid
            self.fids.append(fid)
            if len(self.fids) > 3:
                self.fids.pop(0)
            ret['metric/fid'] = fid
            ret['metric/fid-mean'] = sum(self.fids) / len(self.fids)
            ret['metric/fid-best'] = self.best_fid
        if 'cityscapes' in self.opt.dataroot and not self.opt.no_mIoU:
            mIoU = get_cityscapes_mIoU(fakes,
                                       names,
                                       self.drn_model,
                                       self.device,
                                       table_path=self.opt.table_path,
                                       data_dir=self.opt.cityscapes_path,
                                       batch_size=self.opt.eval_batch_size,
                                       num_workers=self.opt.num_threads,
                                       tqdm_position=2)
            if mIoU > self.best_mIoU:
                self.is_best = True
                self.best_mIoU = mIoU
            self.mIoUs.append(mIoU)
            if len(self.mIoUs) > 3:
                self.mIoUs = self.mIoUs[1:]
            ret['metric/mIoU'] = mIoU
            ret['metric/mIoU-mean'] = sum(self.mIoUs) / len(self.mIoUs)
            ret['metric/mIoU-best'] = self.best_mIoU

        self.modules_on_one_gpu.netG_student.train()
        # self.modules_on_one_gpu.train()
        torch.cuda.empty_cache()
        return ret
Example #6
    def evaluate_model(self, step):
        ret = {}
        self.is_best = False
        save_dir = os.path.join(self.opt.log_dir, 'eval', str(step))
        os.makedirs(save_dir, exist_ok=True)
        self.netG_student.eval()
        if self.opt.eval_mode == 'both':
            setting = ('largest', 'smallest')
        else:
            setting = (self.opt.eval_mode, )
        for config_name in setting:
            config = self.configs(config_name)
            fakes, names = [], []
            cnt = 0
            for i, data_i in enumerate(tqdm(self.eval_dataloader)):
                if self.opt.dataset_mode == 'aligned':
                    self.set_input(data_i)
                else:
                    self.set_single_input(data_i)
                self.test(config)
                fakes.append(self.Sfake_B.cpu())
                for j in range(len(self.image_paths)):
                    short_path = ntpath.basename(self.image_paths[j])
                    name = os.path.splitext(short_path)[0]
                    names.append(name)
                    if cnt < 10:
                        Sfake_im = util.tensor2im(self.Sfake_B[j])
                        real_im = util.tensor2im(self.real_B[j])
                        Tfake_im = util.tensor2im(self.Tfake_B[j])
                        util.save_image(real_im,
                                        os.path.join(save_dir, 'real',
                                                     '%s.png' % name),
                                        create_dir=True)
                        util.save_image(Sfake_im,
                                        os.path.join(save_dir,
                                                     'Sfake_%s' % config_name,
                                                     '%s.png' % name),
                                        create_dir=True)
                        util.save_image(Tfake_im,
                                        os.path.join(save_dir, 'Tfake',
                                                     '%s.png' % name),
                                        create_dir=True)
                        if self.opt.dataset_mode == 'aligned':
                            input_im = util.tensor2im(self.real_A[j])
                            util.save_image(
                                input_im,
                                os.path.join(save_dir, 'input',
                                             '%s.png' % name),
                                create_dir=True)
                    cnt += 1

            fid = get_fid(fakes,
                          self.inception_model,
                          self.npz,
                          device=self.device,
                          batch_size=self.opt.eval_batch_size)
            if fid < getattr(self, 'best_fid_%s' % config_name):
                self.is_best = True
                setattr(self, 'best_fid_%s' % config_name, fid)
            fids = getattr(self, 'fids_%s' % config_name)
            fids.append(fid)
            if len(fids) > 3:
                fids.pop(0)

            ret['metric/fid_%s' % config_name] = fid
            ret['metric/fid_%s-mean' % config_name] = sum(
                getattr(self, 'fids_%s' % config_name)) / len(
                    getattr(self, 'fids_%s' % config_name))
            ret['metric/fid_%s-best' % config_name] = getattr(
                self, 'best_fid_%s' % config_name)

            if 'cityscapes' in self.opt.dataroot:
                mIoU = get_mIoU(fakes,
                                names,
                                self.drn_model,
                                self.device,
                                table_path=self.opt.table_path,
                                data_dir=self.opt.cityscapes_path,
                                batch_size=self.opt.eval_batch_size,
                                num_workers=self.opt.num_threads)
                if mIoU > getattr(self, 'best_mIoU_%s' % config_name):
                    self.is_best = True
                    setattr(self, 'best_mIoU_%s' % config_name, mIoU)
                mIoUs = getattr(self, 'mIoUs_%s' % config_name)
                mIoUs.append(mIoU)
                if len(mIoUs) > 3:
                    mIoUs.pop(0)
                ret['metric/mIoU_%s' % config_name] = mIoU
                ret['metric/mIoU_%s-mean' % config_name] = sum(
                    getattr(self, 'mIoUs_%s' % config_name)) / len(
                        getattr(self, 'mIoUs_%s' % config_name))
                ret['metric/mIoU_%s-best' % config_name] = getattr(
                    self, 'best_mIoU_%s' % config_name)

        self.netG_student.train()
        return ret
    def evaluate_model(self, step):
        self.is_best = False

        save_dir = os.path.join(self.opt.log_dir, 'eval', str(step))
        os.makedirs(save_dir, exist_ok=True)
        self.netG.eval()

        fakes, names = [], []
        cnt = 0
        for i, data_i in enumerate(tqdm(self.eval_dataloader)):
            self.set_input(data_i)
            self.test()
            fakes.append(self.fake_B.cpu())
            for j in range(len(self.image_paths)):
                short_path = ntpath.basename(self.image_paths[j])
                name = os.path.splitext(short_path)[0]
                names.append(name)
                if cnt < 10:
                    input_im = util.tensor2im(self.real_A[j])
                    real_im = util.tensor2im(self.real_B[j])
                    fake_im = util.tensor2im(self.fake_B[j])
                    util.save_image(input_im,
                                    os.path.join(save_dir, 'input',
                                                 '%s.png' % name),
                                    create_dir=True)
                    util.save_image(real_im,
                                    os.path.join(save_dir, 'real',
                                                 '%s.png' % name),
                                    create_dir=True)
                    util.save_image(fake_im,
                                    os.path.join(save_dir, 'fake',
                                                 '%s.png' % name),
                                    create_dir=True)
                cnt += 1

        fid = get_fid(fakes,
                      self.inception_model,
                      self.npz,
                      device=self.device,
                      batch_size=self.opt.eval_batch_size)
        if fid < self.best_fid:
            self.is_best = True
            self.best_fid = fid
        self.fids.append(fid)
        if len(self.fids) > 3:
            self.fids.pop(0)

        ret = {
            'metric/fid': fid,
            'metric/fid-mean': sum(self.fids) / len(self.fids),
            'metric/fid-best': self.best_fid
        }
        if 'cityscapes' in self.opt.dataroot:
            mAP = get_mAP(fakes,
                          names,
                          self.drn_model,
                          self.device,
                          table_path=self.opt.table_path,
                          data_dir=self.opt.cityscapes_path,
                          batch_size=self.opt.eval_batch_size,
                          num_workers=self.opt.num_threads)
            if mAP > self.best_mAP:
                self.is_best = True
                self.best_mAP = mAP
            self.mAPs.append(mAP)
            if len(self.mAPs) > 3:
                self.mAPs = self.mAPs[1:]
            ret['metric/mAP'] = mAP
            ret['metric/mAP-mean'] = sum(self.mAPs) / len(self.mAPs)
            ret['metric/mAP-best'] = self.best_mAP

        self.netG.train()
        return ret
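get_fid in these snippets presumably reduces to the standard Fréchet distance between Gaussian fits of the Inception features of fake and real images. A minimal sketch of that core computation, written as a generic reference implementation rather than the repo's own helper:

import numpy as np
from scipy import linalg


def frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6):
    # FID between N(mu1, sigma1) and N(mu2, sigma2):
    # ||mu1 - mu2||^2 + Tr(sigma1 + sigma2 - 2 * sqrt(sigma1 @ sigma2))
    diff = mu1 - mu2
    covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)
    if not np.isfinite(covmean).all():
        # Fall back to a slightly regularized product if sqrtm is singular.
        offset = np.eye(sigma1.shape[0]) * eps
        covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset))
    if np.iscomplexobj(covmean):
        covmean = covmean.real
    return (diff.dot(diff) + np.trace(sigma1) + np.trace(sigma2)
            - 2 * np.trace(covmean))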
Example #8
        if qualified:
            for i, data_i in enumerate(dataloader):
                model.set_input(data_i)
                model.test(config)
                fakes.append(model.fake_B.cpu())
                for path in model.get_image_paths():
                    short_path = ntpath.basename(path)
                    name = os.path.splitext(short_path)[0]
                    names.append(name)

        result = {'config_str': encode_config(config), 'macs': macs}
        if not opt.no_fid:
            if qualified:
                fid = get_fid(fakes,
                              inception_model,
                              npz,
                              device,
                              opt.batch_size,
                              use_tqdm=False)
                result['fid'] = fid
            else:
                result['fid'] = 1e9
        if 'cityscapes' in opt.dataroot and opt.direction == 'BtoA':
            if qualified:
                mIoU = get_mIoU(fakes,
                                names,
                                drn_model,
                                device,
                                data_dir=opt.cityscapes_path,
                                batch_size=opt.batch_size,
                                num_workers=opt.num_threads,
                                use_tqdm=False)
Example #9
        for path in model.get_image_paths():
            short_path = ntpath.basename(path)
            name = os.path.splitext(short_path)[0]
            names.append(name)
        if i < opt.num_test:
            save_images(webpage, visuals, model.get_image_paths(), opt)
    webpage.save()  # save the HTML
    device = copy.deepcopy(model.device)
    del model
    torch.cuda.empty_cache()

    inception_model, drn_model, deeplabv2_model = create_metric_models(
        opt, device)
    if inception_model is not None:
        npz = np.load(opt.real_stat_path)
        fid = get_fid(fakes, inception_model, npz, device, opt.batch_size)
        print('fid score: %.2f' % fid, flush=True)

    if drn_model is not None:
        mIoU = get_cityscapes_mIoU(fakes,
                                   names,
                                   drn_model,
                                   device,
                                   table_path=opt.table_path,
                                   data_dir=opt.cityscapes_path,
                                   batch_size=opt.batch_size,
                                   num_workers=opt.num_threads)
        print('mIoU: %.2f' % mIoU)

    if deeplabv2_model is not None:
        accu, mIoU = get_coco_scores(fakes,
Example #10
def main(configs, opt, gpu_id, queue, verbose):
    opt.gpu_ids = [gpu_id]
    dataloader = create_dataloader(opt, verbose)
    model = create_model(opt, verbose)
    model.setup(opt, verbose)
    device = model.device
    if not opt.no_fid:
        block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[2048]
        inception_model = InceptionV3([block_idx])
        inception_model.to(device)
        inception_model.eval()
    if 'cityscapes' in opt.dataroot and opt.direction == 'BtoA':
        drn_model = DRNSeg('drn_d_105', 19, pretrained=False)
        util.load_network(drn_model, opt.drn_path, verbose=False)
        if len(opt.gpu_ids) > 0:
            drn_model = nn.DataParallel(drn_model, opt.gpu_ids)
        drn_model.eval()

    npz = np.load(opt.real_stat_path)
    results = []

    for data_i in dataloader:
        model.set_input(data_i)
        break

    for config in tqdm.tqdm(configs):
        macs, _ = model.profile(config)
        qualified = macs <= opt.budget

        fakes, names = [], []

        if qualified:
            for i, data_i in enumerate(dataloader):
                model.set_input(data_i)

                model.test(config)
                fakes.append(model.fake_B.cpu())
                for path in model.get_image_paths():
                    short_path = ntpath.basename(path)
                    name = os.path.splitext(short_path)[0]
                    names.append(name)

        result = {'config_str': encode_config(config), 'macs': macs}
        if not opt.no_fid:
            if qualified:
                fid = get_fid(fakes,
                              inception_model,
                              npz,
                              device,
                              opt.batch_size,
                              use_tqdm=False)
                result['fid'] = fid
            else:
                result['fid'] = 1e9
        if 'cityscapes' in opt.dataroot and opt.direction == 'BtoA':
            if qualified:
                mIoU = get_cityscapes_mIoU(fakes,
                                           names,
                                           drn_model,
                                           device,
                                           data_dir=opt.cityscapes_path,
                                           batch_size=opt.batch_size,
                                           num_workers=opt.num_threads,
                                           use_tqdm=False)
                result['mIoU'] = mIoU
            else:
                result['mIoU'] = 0
        print(result, flush=True)
        results.append(result)
    queue.put(results)
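main puts its per-config results on a queue, which suggests it is launched once per GPU from a parent process. Below is a minimal sketch of such a driver; the run_search name, the chunking scheme, and the overall orchestration are entirely assumptions and not taken from the snippet:

import torch.multiprocessing as mp


def run_search(all_configs, opt, gpu_ids):
    # Hypothetical driver: shard the configs across GPUs, run main() in one
    # process per GPU, and collect each worker's result list from the queue.
    mp.set_start_method('spawn', force=True)  # needed for CUDA in workers
    queue = mp.Queue()
    chunks = [all_configs[i::len(gpu_ids)] for i in range(len(gpu_ids))]
    procs = []
    for chunk, gpu_id in zip(chunks, gpu_ids):
        p = mp.Process(target=main, args=(chunk, opt, gpu_id, queue, False))
        p.start()
        procs.append(p)
    results = []
    for _ in procs:
        results.extend(queue.get())  # drain the queue before joining
    for p in procs:
        p.join()
    return results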
Example #11
    def training_loop(self,
                      dataloader,
                      num_iter,
                      batch_size,
                      image_size,
                      network_type='basic',
                      iter_per_tick=1000,
                      ticks_per_snapshot=10,
                      D_lr=1e-4,
                      G_lr=1e-4,
                      device=torch.device('cuda:0')):

        # NOTE: the three debug values below override the arguments passed in above.
        iter_per_tick = 10
        ticks_per_snapshot = 1
        batch_size = 256

        self.log('Starting training...')

        D, G = get_networks_by_type(network_type, image_size)

        D = D.to(device)
        G = G.to(device)

        D.apply(weights_init)
        G.apply(weights_init)

        D_optim = Adam(D.parameters(), lr=D_lr, weight_decay=5e-4)
        G_optim = Adam(G.parameters(), lr=G_lr, weight_decay=5e-4)

        D_criterion = D_logistic
        G_criterion = G_logistic_nonsaturating

        iter_counter = 0
        tick_counter = 0
        running_D_loss = 0
        running_G_loss = 0

        # Setting up grid
        grid_latents = get_grid_latents(6, 6).to(device)
        cur_iter = 0

        while cur_iter < num_iter:

            for real_batch, _ in dataloader:

                D.train()
                G.train()

                cur_iter += 1
                iter_counter += 1

                # Train discriminator
                D_optim.zero_grad()

                real_batch = real_batch.to(device)
                real_out = D(real_batch)

                latent = torch.randn(batch_size, 100)
                latent = latent.to(device)
                fake_batch = G(latent)
                fake_out = D(fake_batch.detach())

                D_loss = D_criterion(real_out, fake_out)
                D_loss.backward()
                D_optim.step()
                running_D_loss += D_loss.item()

                # Train generator
                G_optim.zero_grad()

                fake_out = D(fake_batch)
                G_loss = G_criterion(fake_out)
                G_loss.backward()
                G_optim.step()
                running_G_loss += G_loss.item()

                if iter_counter >= iter_per_tick:
                    # print(running_D_loss, iter_counter)
                    self.log(
                        '[Iteration {:07d}] D_loss: {:.5f} G_loss: {:.5f}'.
                        format(cur_iter, running_D_loss / iter_counter,
                               running_G_loss / iter_counter))
                    # Log to tensorboard
                    self.writer.add_scalar('loss/D_loss',
                                           running_D_loss / iter_counter,
                                           cur_iter)
                    self.writer.add_scalar('loss/G_loss',
                                           running_G_loss / iter_counter,
                                           cur_iter)

                    # Save grid
                    G.eval()
                    save_grid_images(G, grid_latents, 6, 6, self.run_dir,
                                     cur_iter)

                    iter_counter = 0
                    tick_counter += 1
                    running_D_loss = 0
                    running_G_loss = 0

                if tick_counter >= ticks_per_snapshot:
                    tick_counter = 0

                    # Evaluate
                    fid = get_fid(G, 'stats/fid_stats_lsun_train.npz')
                    self.log('FID: {}'.format(fid))
                    self.writer.add_scalar('loss/FID', fid, cur_iter)

                    real_fid = get_real_fid(real_batch)
                    print(real_batch.size())
                    print(real_fid)

                    # Save model
                    save_dict = {
                        'G': G.state_dict(),
                        'D': D.state_dict(),
                    }
                    torch.save(
                        save_dict,
                        os.path.join(
                            self.run_dir,
                            'network-snapshot-{:07d}.pkl'.format(cur_iter)))
                    if cur_iter >= num_iter:
                        self.close()
                        return

        self.close()
        return
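D_logistic and G_logistic_nonsaturating above are presumably the standard (StyleGAN-style) logistic GAN losses. A minimal sketch under that assumption:

import torch.nn.functional as F


def D_logistic(real_out, fake_out):
    # Discriminator loss: push scores on real samples up and on fakes down.
    return (F.softplus(-real_out) + F.softplus(fake_out)).mean()


def G_logistic_nonsaturating(fake_out):
    # Non-saturating generator loss: maximize the discriminator score on fakes.
    return F.softplus(-fake_out).mean()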
Example #12
    def evaluate_model(self, step):
        self.is_best = False
        save_dir = os.path.join(self.opt.log_dir, 'eval', str(step))
        os.makedirs(save_dir, exist_ok=True)
        self.netG_student.eval()
        fakes, names = [], []
        cnt = 0
        for i, data_i in enumerate(
                tqdm(self.eval_dataloader,
                     desc='Eval       ',
                     position=2,
                     leave=False)):
            if self.opt.dataset_mode == 'aligned':
                self.set_input(data_i)
            else:
                self.set_single_input(data_i)
            self.test()
            fakes.append(self.Sfake_B.cpu())
            for j in range(len(self.image_paths)):
                short_path = ntpath.basename(self.image_paths[j])
                name = os.path.splitext(short_path)[0]
                names.append(name)
                if cnt < 10:
                    input_im = util.tensor2im(self.real_A[j])
                    Sfake_im = util.tensor2im(self.Sfake_B[j])
                    Tfake_im = util.tensor2im(self.Tfake_B[j])
                    util.save_image(input_im,
                                    os.path.join(save_dir, 'input',
                                                 '%s.png' % name),
                                    create_dir=True)
                    util.save_image(Sfake_im,
                                    os.path.join(save_dir, 'Sfake',
                                                 '%s.png' % name),
                                    create_dir=True)
                    util.save_image(Tfake_im,
                                    os.path.join(save_dir, 'Tfake',
                                                 '%s.png' % name),
                                    create_dir=True)
                    if self.opt.dataset_mode == 'aligned':
                        real_im = util.tensor2im(self.real_B[j])
                        util.save_image(real_im,
                                        os.path.join(save_dir, 'real',
                                                     '%s.png' % name),
                                        create_dir=True)
                cnt += 1
        fid = get_fid(fakes,
                      self.inception_model,
                      self.npz,
                      device=self.device,
                      batch_size=self.opt.eval_batch_size,
                      tqdm_position=2)
        if fid < self.best_fid:
            self.is_best = True
            self.best_fid = fid
        self.fids.append(fid)
        if len(self.fids) > 3:
            self.fids.pop(0)
        ret = {
            'metric/fid': fid,
            'metric/fid-mean': sum(self.fids) / len(self.fids),
            'metric/fid-best': self.best_fid
        }
        self.netG_student.train()
        return ret
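util.tensor2im, used throughout these snippets, presumably follows the pix2pix convention of mapping a CHW float tensor in [-1, 1] to an HWC uint8 array. A sketch under that assumption:

import numpy as np


def tensor2im(t):
    # Assumed behaviour: CHW float tensor in [-1, 1] -> HWC uint8 image array.
    arr = t.detach().cpu().float().numpy()
    arr = (np.transpose(arr, (1, 2, 0)) + 1) / 2.0 * 255.0
    return arr.astype(np.uint8)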
Example #13
 fakes, names = [], []
 if qualified:
     if isinstance(model, SPADEModel):
         model.calibrate(config)
     for i, data_i in enumerate(dataloader):
         model.set_input(data_i)
         model.test(config)
         fakes.append(model.fake_B.cpu())
         for path in model.get_image_paths():
             short_path = ntpath.basename(path)
             name = os.path.splitext(short_path)[0]
             names.append(name)
 tqdm_position = 1
 if inception_model is not None:
     if qualified:
         result['fid'] = get_fid(fakes, inception_model, npz, device, opt.batch_size,
                                 tqdm_position=tqdm_position)
         tqdm_position += 1
     else:
         result['fid'] = 1e9
 if drn_model is not None:
     if qualified:
         result['mIoU'] = get_cityscapes_mIoU(fakes, names, drn_model, device, data_dir=opt.cityscapes_path,
                                              batch_size=opt.batch_size, num_workers=opt.num_threads,
                                              tqdm_position=tqdm_position)
         tqdm_position += 1
     else:
         result['mIoU'] = 0
 if deeplabv2_model is not None:
     if qualified:
         torch.cuda.empty_cache()
         result['accu'], result['mIoU'] = get_coco_scores(fakes, names, deeplabv2_model, device, opt.dataroot, 1,
    def evaluate_model(self, step):
        self.is_best = False
        save_dir = os.path.join(self.opt.log_dir, 'eval', str(step))
        os.makedirs(save_dir, exist_ok=True)
        self.netG_student.eval()
        fakes, names = [], []
        cnt = 0
        for i, data_i in enumerate(tqdm(self.eval_dataloader)):
            if self.opt.dataset_mode in ['aligned', 'triplet']:
                self.set_input(data_i)
            else:
                self.set_single_input(data_i)
            self.test()
            fakes.append(self.Sfake_B.cpu())
            for j in range(len(self.image_paths)):
                short_path = ntpath.basename(self.image_paths[j])
                name = os.path.splitext(short_path)[0]
                names.append(name)
                if cnt < 10:
                    if self.opt.input_nc == 6:
                        A, D = torch.split(self.real_A[j], 3, dim=0)
                        sample = [
                            A, D, self.real_B[j], self.Tfake_B[j],
                            self.Sfake_B[j]
                        ]
                        sample_im = util.tensor2im(torch.cat(sample, dim=2))
                        # save
                        util.save_image(sample_im,
                                        os.path.join(save_dir, 'sample',
                                                     '%s.png' % name),
                                        create_dir=True)
                        #                         if self.opt.use_motion:
                        #                             # dense motion field
                        #                             residual, identity = get_only_grids(self.netG, self.real_A[j][None,...])
                        #                             util.visualize_grid(
                        #                                 identity, residual,
                        #                                 wandb=wandb, name=name,
                        #                                 step=step, img_size=None
                        #                             )
                        if self.opt.use_wandb:
                            wandb.log(
                                {
                                    f'sample_{step}/{name}':
                                    [wandb.Image(sample_im)]
                                },
                                step=step)
                    else:
                        input_im = util.tensor2im(self.real_A[j])
                        Sfake_im = util.tensor2im(self.Sfake_B[j])
                        Tfake_im = util.tensor2im(self.Tfake_B[j])
                        util.save_image(
                            input_im,
                            os.path.join(save_dir, 'input', '%s.png' % name),
                            create_dir=True)
                        util.save_image(Sfake_im,
                                        os.path.join(save_dir, 'Sfake',
                                                     '%s.png' % name),
                                        create_dir=True)
                        util.save_image(Tfake_im,
                                        os.path.join(save_dir, 'Tfake',
                                                     '%s.png' % name),
                                        create_dir=True)
                    if self.opt.dataset_mode == 'aligned':
                        real_im = util.tensor2im(self.real_B[j])
                        util.save_image(real_im,
                                        os.path.join(save_dir, 'real',
                                                     '%s.png' % name),
                                        create_dir=True)
                cnt += 1

        fid = get_fid(fakes,
                      self.inception_model,
                      self.npz,
                      device=self.device,
                      batch_size=self.opt.eval_batch_size)
        if fid < self.best_fid:
            self.is_best = True
            self.best_fid = fid
        self.fids.append(fid)
        if len(self.fids) > 3:
            self.fids.pop(0)
        ret = {
            'metric/fid': fid,
            'metric/fid-mean': sum(self.fids) / len(self.fids),
            'metric/fid-best': self.best_fid
        }
        if 'cityscapes' in self.opt.dataroot and self.opt.direction == 'BtoA':
            mAP = get_mAP(fakes,
                          names,
                          self.drn_model,
                          self.device,
                          table_path=self.opt.table_path,
                          data_dir=self.opt.cityscapes_path,
                          batch_size=self.opt.eval_batch_size,
                          num_workers=self.opt.num_threads)
            if mAP > self.best_mAP:
                self.is_best = True
                self.best_mAP = mAP
            self.mAPs.append(mAP)
            if len(self.mAPs) > 3:
                self.mAPs = self.mAPs[1:]
            ret['metric/mAP'] = mAP
            ret['metric/mAP-mean'] = sum(self.mAPs) / len(self.mAPs)
            ret['metric/mAP-best'] = self.best_mAP
        self.netG_student.train()
        return ret