Example #1
def main(opt):
    if opt.model == 'mobile_resnet':
        from models.modules.resnet_architecture.mobile_resnet_generator import MobileResnetGenerator as SuperModel
        from models.modules.resnet_architecture.sub_mobile_resnet_generator import SubMobileResnetGenerator as SubModel
    elif opt.model == 'mobile_spade':
        # TODO
        raise NotImplementedError
    else:
        raise NotImplementedError('Unknown architecture [%s]!' % opt.model)

    config = decode_config(opt.config_str)

    input_nc, output_nc = opt.input_nc, opt.output_nc
    super_model = SuperModel(input_nc,
                             output_nc,
                             ngf=opt.ngf,
                             norm_layer=nn.InstanceNorm2d,
                             n_blocks=9)
    sub_model = SubModel(input_nc,
                         output_nc,
                         config=config,
                         norm_layer=nn.InstanceNorm2d,
                         n_blocks=9)

    load_network(super_model, opt.input_path)
    transfer_weight(super_model, sub_model)

    output_dir = os.path.dirname(opt.output_path)
    os.makedirs(output_dir, exist_ok=True)
    torch.save(sub_model.state_dict(), opt.output_path)
    print('Successfully exported the subnet at [%s].' % opt.output_path)
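These export scripts rely on a load_network helper to restore the supernet checkpoint before transferring weights. A minimal sketch of what such a helper could look like (an assumption for illustration, not the project's actual implementation): it loads a state dict saved with torch.save and strips the 'module.' prefixes that nn.DataParallel adds.

import torch


def load_network(net, load_path, verbose=True):
    # Illustrative helper (assumed): restore weights saved with torch.save().
    state_dict = torch.load(load_path, map_location='cpu')
    # Drop the 'module.' prefix that nn.DataParallel adds to parameter names.
    state_dict = {k.replace('module.', '', 1): v for k, v in state_dict.items()}
    net.load_state_dict(state_dict)
    if verbose:
        print('Loaded network weights from [%s].' % load_path)
    return net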
Example #2
def create_metric_models(opt, device):
    if not opt.no_fid:
        block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[2048]
        inception_model = InceptionV3([block_idx])
        if len(opt.gpu_ids) > 1:
            inception_model = nn.DataParallel(inception_model, opt.gpu_ids)
        inception_model.to(device)
        inception_model.eval()
    else:
        inception_model = None
    if 'cityscapes' in opt.dataroot and opt.direction == 'BtoA':
        drn_model = DRNSeg('drn_d_105', 19, pretrained=False)
        util.load_network(drn_model, opt.drn_path, verbose=False)
        if len(opt.gpu_ids) > 0:
            drn_model = nn.DataParallel(drn_model, opt.gpu_ids)
        drn_model.to(device)
        drn_model.eval()
    else:
        drn_model = None
    if 'coco' in opt.dataroot and not opt.no_mIoU and opt.direction == 'BtoA':
        deeplabv2_model = MSC(DeepLabV2(n_classes=182,
                                        n_blocks=[3, 4, 23, 3],
                                        atrous_rates=[6, 12, 18, 24]),
                              scales=[0.5, 0.75])
        util.load_network(deeplabv2_model, opt.deeplabv2_path, verbose=False)
        if len(opt.gpu_ids) > 1:
            deeplabv2_model = nn.DataParallel(deeplabv2_model, opt.gpu_ids)
        deeplabv2_model.to(device)
        deeplabv2_model.eval()
    else:
        deeplabv2_model = None
    return inception_model, drn_model, deeplabv2_model
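A usage sketch for the metric models returned above (assumed, not taken from the repository; it presumes the get_fid helper and the opt.real_stat_path statistics file that appear in the later examples, plus a list fakes of generated image tensors):

# Hypothetical usage: compute FID over a list of generated images `fakes`.
inception_model, drn_model, deeplabv2_model = create_metric_models(opt, device)
if inception_model is not None:
    npz = np.load(opt.real_stat_path)  # pre-computed statistics of real images
    fid = get_fid(fakes, inception_model, npz, device,
                  opt.batch_size, use_tqdm=False)
    print('FID: %.2f' % fid)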
Example #3
 def __init__(self, opt):
     super(SPADEModel, self).__init__(opt)
     self.model_names = ['G']
     self.visual_names = ['labels', 'fake_B', 'real_B']
     self.modules = SPADEModelModules(opt).to(self.device)
     if len(opt.gpu_ids) > 0:
         self.modules = DataParallelWithCallback(self.modules, device_ids=opt.gpu_ids)
         self.modules_on_one_gpu = self.modules.module
     else:
         self.modules_on_one_gpu = self.modules
     if opt.isTrain:
         self.model_names.append('D')
         self.loss_names = ['G_gan', 'G_feat', 'G_vgg', 'D_real', 'D_fake']
         self.optimizer_G, self.optimizer_D = self.modules_on_one_gpu.create_optimizers()
         self.optimizers = [self.optimizer_G, self.optimizer_D]
         if not opt.no_fid:
             block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[2048]
             self.inception_model = InceptionV3([block_idx])
             self.inception_model.to(self.device)
             self.inception_model.eval()
         if 'cityscapes' in opt.dataroot and not opt.no_mIoU:
             self.drn_model = DRNSeg('drn_d_105', 19, pretrained=False)
             util.load_network(self.drn_model, opt.drn_path, verbose=False)
             self.drn_model.to(self.device)
             self.drn_model.eval()
         self.eval_dataloader = create_eval_dataloader(self.opt)
         self.best_fid = 1e9
         self.best_mIoU = -1e9
         self.fids, self.mIoUs = [], []
         self.is_best = False
         self.npz = np.load(opt.real_stat_path)
     else:
         self.modules.eval()
     self.train_dataloader = create_train_dataloader(opt)
Example #4
def main(opt):
    config = decode_config(opt.config_str)
    if opt.model == 'mobile_resnet':
        from models.modules.resnet_architecture.mobile_resnet_generator import MobileResnetGenerator as SuperModel
        from models.modules.resnet_architecture.sub_mobile_resnet_generator import SubMobileResnetGenerator as SubModel
        input_nc, output_nc = opt.input_nc, opt.output_nc
        super_model = SuperModel(input_nc,
                                 output_nc,
                                 ngf=opt.ngf,
                                 norm_layer=nn.InstanceNorm2d,
                                 n_blocks=9)
        sub_model = SubModel(input_nc,
                             output_nc,
                             config=config,
                             norm_layer=nn.InstanceNorm2d,
                             n_blocks=9)
    elif opt.model == 'mobile_spade':
        from models.modules.spade_architecture.mobile_spade_generator import MobileSPADEGenerator as SuperModel
        from models.modules.spade_architecture.sub_mobile_spade_generator import SubMobileSPADEGenerator as SubModel
        opt.norm_G = 'spadesyncbatch3x3'
        opt.num_upsampling_layers = 'more'
        opt.semantic_nc = opt.input_nc + (1 if opt.contain_dontcare_label else
                                          0) + (0 if opt.no_instance else 1)
        super_model = SuperModel(opt)
        sub_model = SubModel(opt, config)
    else:
        raise NotImplementedError('Unknown architecture [%s]!' % opt.model)

    load_network(super_model, opt.input_path)
    transfer_weight(super_model, sub_model)

    output_dir = os.path.dirname(opt.output_path)
    os.makedirs(output_dir, exist_ok=True)
    torch.save(sub_model.state_dict(), opt.output_path)
    print('Successfully exported the subnet at [%s].' % opt.output_path)
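The export scripts call transfer_weight(super_model, sub_model), whose body is not shown in these snippets. Purely as an illustration of the general idea (an assumption, not the repository's implementation), transferring a convolution from a wider supernet into a narrower sub-network can be done by copying its leading input/output channels:

import torch
import torch.nn as nn


def transfer_conv_weight(super_conv: nn.Conv2d, sub_conv: nn.Conv2d):
    # Illustrative sketch (assumed): initialize a narrower conv layer from a
    # wider one by copying the leading output/input channels of its weights.
    oc, ic, kh, kw = sub_conv.weight.shape
    with torch.no_grad():
        sub_conv.weight.copy_(super_conv.weight[:oc, :ic, :kh, :kw])
        if sub_conv.bias is not None and super_conv.bias is not None:
            sub_conv.bias.copy_(super_conv.bias[:oc])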
Example #5
 def load_networks(self, verbose=True):
     if self.opt.restore_pretrained_G_path is not None:
         util.load_network(self.netG_pretrained, self.opt.restore_pretrained_G_path, verbose)
         load_pretrained_weight(self.opt.pretrained_netG, self.opt.student_netG,
                                self.netG_pretrained, self.netG_student,
                                self.opt.pretrained_ngf, self.opt.student_ngf)
         del self.netG_pretrained
     super(ResnetDistiller, self).load_networks()
Example #6
File: base_model.py Project: deJQK/CAT
 def load_networks(self,
                   verbose=True,
                   teacher_only=False,
                   restore_pretrain=True):
     for name in self.model_names:
         net = getattr(self, 'net' + name, None)
         path = getattr(self.opt, 'restore_%s_path' % name, None)
         if path is not None:
             util.load_network(net, path, verbose)
Example #7
def main(configs, opt, gpu_id, queue, verbose):
    opt.gpu_ids = [gpu_id]
    dataloader = create_dataloader(opt, verbose)
    model = create_model(opt, verbose)
    model.setup(opt, verbose)
    device = model.device
    if not opt.no_fid:
        block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[2048]
        inception_model = InceptionV3([block_idx])
        inception_model.to(device)
        inception_model.eval()
    if 'cityscapes' in opt.dataroot and opt.direction == 'BtoA':
        drn_model = DRNSeg('drn_d_105', 19, pretrained=False)
        util.load_network(drn_model, opt.drn_path, verbose=False)
        if len(opt.gpu_ids) > 0:
            drn_model = nn.DataParallel(drn_model, opt.gpu_ids)
        drn_model.eval()

    npz = np.load(opt.real_stat_path)
    results = []
    for config in tqdm.tqdm(configs):
        fakes, names = [], []
        for i, data_i in enumerate(dataloader):
            model.set_input(data_i)
            if i == 0:
                macs, _ = model.profile(config)
            model.test(config)
            fakes.append(model.fake_B.cpu())
            for path in model.get_image_paths():
                short_path = ntpath.basename(path)
                name = os.path.splitext(short_path)[0]
                names.append(name)

        result = {'config_str': encode_config(config), 'macs': macs}
        if not opt.no_fid:
            fid = get_fid(fakes,
                          inception_model,
                          npz,
                          device,
                          opt.batch_size,
                          use_tqdm=False)
            result['fid'] = fid
        if 'cityscapes' in opt.dataroot and opt.direction == 'BtoA':
            mAP = get_mAP(fakes,
                          names,
                          drn_model,
                          device,
                          data_dir=opt.cityscapes_path,
                          batch_size=opt.batch_size,
                          num_workers=opt.num_threads,
                          use_tqdm=False)
            result['mAP'] = mAP
        print(result, flush=True)
        # print('Time Cost: %.2fmin' % ((time.time() - start_time) / 60), flush=True)
        results.append(result)
    queue.put(results)
Example #8
def main(opt):
    # define the generator with spectral normalization. Only the last argument counts
    netG = networks.define_G(opt.netG, opt=opt)
    util.load_network(netG, opt.restore_G_path, True)
    print(netG)
    netG.remove_spectral_norm()
    dirname = os.path.dirname(opt.output_path)
    os.makedirs(dirname, exist_ok=True)
    torch.save(netG.cpu().state_dict(), opt.output_path)
    print('Successfully exported the model at [%s]!' % opt.output_path)
Example #9
    def __init__(self, opt):
        super(SPADEModel, self).__init__(opt)
        self.model_names = ['G_student', 'G_teacher', 'D']
        self.visual_names = ['labels', 'Tfake_B', 'Sfake_B', 'real_B']
        self.model_names.append('D')
        self.loss_names = [
            'G_gan', 'G_feat', 'G_vgg', 'G_distill', 'D_real', 'D_fake'
        ]
        if hasattr(opt, 'distiller'):
            self.modules = SPADEDistillerModules(opt).to(self.device)
            if len(opt.gpu_ids) > 0:
                self.modules = DataParallelWithCallback(self.modules,
                                                        device_ids=opt.gpu_ids)
                self.modules_on_one_gpu = self.modules.module
            else:
                self.modules_on_one_gpu = self.modules
        for i in range(len(self.modules_on_one_gpu.mapping_layers)):
            self.loss_names.append('G_distill%d' % i)
        self.optimizer_G, self.optimizer_D = self.modules_on_one_gpu.create_optimizers(
        )
        self.optimizers = [self.optimizer_G, self.optimizer_D]
        if not opt.no_fid:
            block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[2048]
            self.inception_model = InceptionV3([block_idx])
            self.inception_model.to(self.device)
            self.inception_model.eval()
        if 'cityscapes' in opt.dataroot and not opt.no_mIoU:
            self.drn_model = DRNSeg('drn_d_105', 19, pretrained=False)
            util.load_network(self.drn_model, opt.drn_path, verbose=False)
            self.drn_model.to(self.device)
            self.drn_model.eval()
        self.eval_dataloader = create_eval_dataloader(self.opt)
        self.best_fid = 1e9
        self.best_mIoU = -1e9
        self.fids, self.mIoUs = [], []
        self.is_best = False
        self.npz = np.load(opt.real_stat_path)

        model_profiling(self.modules_on_one_gpu.netG_teacher,
                        self.opt.data_height,
                        self.opt.data_width,
                        channel=self.opt.data_channel,
                        num_forwards=0,
                        verbose=False)
        model_profiling(self.modules_on_one_gpu.netG_student,
                        self.opt.data_height,
                        self.opt.data_width,
                        channel=self.opt.data_channel,
                        num_forwards=0,
                        verbose=False)
        print(
            f'Teacher FLOPs: {self.modules_on_one_gpu.netG_teacher.n_macs}, Student FLOPs: {self.modules_on_one_gpu.netG_student.n_macs}.'
        )
Example #10
 def load_networks(self, verbose=True):
     for name in self.model_names:
         net = getattr(self, 'net' + name, None)
         path = getattr(self.opt, 'restore_%s_path' % name, None)
         if path is not None:
             util.load_network(net, path, verbose)
     if self.isTrain:
         if self.opt.restore_O_path is not None:
             for i, optimizer in enumerate(self.optimizers):
                 path = '%s-%d.pth' % (self.opt.restore_O_path, i)
                 util.load_optimizer(optimizer, path, verbose)
                 for param_group in optimizer.param_groups:
                     param_group['lr'] = self.opt.lr
Example #11
 def __init__(self, opt):
     assert opt.isTrain
     valid_netGs = [
         'spade', 'mobile_spade', 'super_mobile_spade', 'sub_mobile_spade'
     ]
     assert opt.teacher_netG in valid_netGs and opt.student_netG in valid_netGs
     super(SPADEModel, self).__init__(opt)
     self.model_names = ['G_student', 'G_teacher', 'D']
     self.visual_names = ['labels', 'Tfake_B', 'Sfake_B', 'real_B']
     self.model_names.append('D')
     self.loss_names = [
         'G_gan', 'G_feat', 'G_vgg', 'G_distill', 'D_real', 'D_fake'
     ]
     if hasattr(opt, 'distiller'):
         self.modules = SPADEDistillerModules(opt).to(self.device)
         if len(opt.gpu_ids) > 0:
             self.modules = DataParallelWithCallback(self.modules,
                                                     device_ids=opt.gpu_ids)
             self.modules_on_one_gpu = self.modules.module
         else:
             self.modules_on_one_gpu = self.modules
     else:
         self.modules = SPADESupernetModules(opt).to(self.device)
         if len(opt.gpu_ids) > 0:
             self.modules = DataParallelWithCallback(self.modules,
                                                     device_ids=opt.gpu_ids)
             self.modules_on_one_gpu = self.modules.module
         else:
             self.modules_on_one_gpu = self.modules
     for i in range(len(self.modules_on_one_gpu.mapping_layers)):
         self.loss_names.append('G_distill%d' % i)
     self.optimizer_G, self.optimizer_D = self.modules_on_one_gpu.create_optimizers(
     )
     self.optimizers = [self.optimizer_G, self.optimizer_D]
     if not opt.no_fid:
         block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[2048]
         self.inception_model = InceptionV3([block_idx])
         self.inception_model.to(self.device)
         self.inception_model.eval()
     if 'cityscapes' in opt.dataroot and not opt.no_mIoU:
         self.drn_model = DRNSeg('drn_d_105', 19, pretrained=False)
         util.load_network(self.drn_model, opt.drn_path, verbose=False)
         self.drn_model.to(self.device)
         self.drn_model.eval()
     self.eval_dataloader = create_eval_dataloader(self.opt)
     self.best_fid = 1e9
     self.best_mIoU = -1e9
     self.fids, self.mIoUs = [], []
     self.is_best = False
     self.npz = np.load(opt.real_stat_path)
Example #12
File: test.py Project: 25thengineer/DMAD
def test_pix2pix_mIoU(model, opt):
    opt.phase = 'val'
    opt.num_threads = 0
    opt.batch_size = 1
    opt.serial_batches = True
    opt.no_flip = True
    opt.load_size = 256
    opt.display_id = -1
    dataset = create_dataset(opt)
    model.model_eval()

    result_dir = os.path.join(opt.checkpoints_dir, opt.name, 'test_results')
    util.mkdirs(result_dir)

    fake_B = {}
    names = []
    for i, data in enumerate(dataset):
        model.set_input(data)

        with torch.no_grad():
            model.forward()

        visuals = model.get_current_visuals()
        fake_B[data['A_paths'][0]] = visuals['fake_B']

        for path in model.image_paths:
            short_path = ntpath.basename(path)
            name = os.path.splitext(short_path)[0]
            if name not in names:
                names.append(name)
        util.save_images(visuals,
                         model.image_paths,
                         result_dir,
                         direction=opt.direction,
                         aspect_ratio=opt.aspect_ratio)

    drn_model = DRNSeg('drn_d_105', 19, pretrained=False).to(model.device)
    util.load_network(drn_model, opt.drn_path, verbose=False)
    drn_model.eval()

    mIoU = get_mIoU(list(fake_B.values()),
                    names,
                    drn_model,
                    model.device,
                    table_path=os.path.join(opt.dataroot, 'table.txt'),
                    data_dir=opt.dataroot,
                    batch_size=opt.batch_size,
                    num_workers=opt.num_threads)
    return mIoU
Example #13
File: eval.py Project: ferrophile/fentl
def eval_data():
    opt = EvalOptions().parse()
    train_dataloader, test_dataloader = create_dataloader(opt)
    model = create_model(opt)

    if opt.which_epoch is not None:
        model = load_network(model, opt)

    print("Extracting train set features...")
    train_data = extract_features(train_dataloader, model)
    print("Extracting test set features...")
    test_data = extract_features(test_dataloader, model)

    classifiers = load_classifiers(opt)
    '''
    if opt.num_splits > 0:
        train_features, train_labels = train_data
        split_ids = np.linspace(0, len(train_labels), opt.num_splits + 1, dtype=np.int)
        for i in range(opt.num_splits):
            test_mask = np.zeros_like(train_labels, dtype=np.int)
            test_mask[split_ids[i]:split_ids[i+1]] = 1
            train_mask = 1 - test_mask

            train_split_data = (train_features[train_mask], train_labels[train_mask])
            test_split_data = (train_features[test_mask], train_labels[test_mask])

            print('Running split {:d}...'.format(i+1))
            run_classifiers(classifiers, train_split_data, test_split_data)
    else:
    '''
    run_classifiers(classifiers, train_data, test_data)

    stats, measures = get_stats(train_data)
    for m in measures.keys():
        print('{}: '.format(m), measures[m])
Example #14
 def load_networks(self, verbose=True):
     util.load_network(self.netG_teacher, self.opt.restore_teacher_G_path, verbose)
     if self.opt.restore_student_G_path is not None:
         util.load_network(self.netG_student, self.opt.restore_student_G_path, verbose)
         if hasattr(self, 'netG_student_tmp'):
             util.load_network(self.netG_student_tmp, self.opt.restore_student_G_path, verbose)
     if self.opt.restore_D_path is not None:
         util.load_network(self.netD, self.opt.restore_D_path, verbose)
     if self.opt.restore_A_path is not None:
         for i, netA in enumerate(self.netAs):
             path = '%s-%d.pth' % (self.opt.restore_A_path, i)
             util.load_network(netA, path, verbose)
     if self.opt.restore_O_path is not None:
         for i, optimizer in enumerate(self.optimizers):
             path = '%s-%d.pth' % (self.opt.restore_O_path, i)
             util.load_optimizer(optimizer, path, verbose)
             for param_group in optimizer.param_groups:
                 param_group['lr'] = self.opt.lr
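load_optimizer is used alongside load_network when resuming training. A minimal sketch of such a helper (assumed, not the repository's code) simply restores an optimizer state dict saved with torch.save:

import torch


def load_optimizer(optimizer, load_path, verbose=True):
    # Illustrative helper (assumed): restore an optimizer state dict.
    state_dict = torch.load(load_path, map_location='cpu')
    optimizer.load_state_dict(state_dict)
    if verbose:
        print('Loaded optimizer state from [%s].' % load_path)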
Example #15
 def load_networks(self, verbose=True):
     util.load_network(self.netG_teacher, self.opt.restore_teacher_G_path,
                       verbose)
     if self.opt.restore_student_G_path is not None:
         util.load_network(self.netG_student,
                           self.opt.restore_student_G_path, verbose)
     if self.opt.restore_D_path is not None:
         util.load_network(self.netD, self.opt.restore_D_path, verbose)
     if self.opt.restore_A_path is not None:
         for i, netA in enumerate(self.netAs):
             path = '%s-%d.pth' % (self.opt.restore_A_path, i)
             util.load_network(netA, path, verbose)
Example #16
def main(args):
    fluid.enable_imperative()
    config = decode_config(args.config_str)
    if args.model == 'mobile_resnet':
        from model.mobile_generator import MobileResnetGenerator as SuperModel
        from model.sub_mobile_generator import SubMobileResnetGenerator as SubModel
        input_nc, output_nc = args.input_nc, args.output_nc
        super_model = SuperModel(input_nc, output_nc, ngf=args.ngf, norm_layer=InstanceNorm, n_blocks=9)
        sub_model = SubModel(input_nc, output_nc, config=config, norm_layer=InstanceNorm, n_blocks=9)
    else:
        raise NotImplementedError

    load_network(super_model, args.input_path)
    transfer_weight(super_model, sub_model)

    if not os.path.exists(args.output_path):
        os.makedirs(args.output_path)
    save_path = os.path.join(args.output_path, 'final_net')
    fluid.save_dygraph(sub_model.state_dict(), save_path)
    print('Successfully exported the subnet at [%s].' % save_path)
Example #17
    def load_networks(self, model_weight=None):
        if self.cfgs.restore_pretrained_G_path is not False:
            if self.cfgs.restore_pretrained_G_path is not None:
                pretrained_G_path = self.cfgs.restore_pretrained_G_path
                util.load_network(self.netG_pretrained, pretrained_G_path)
            else:
                assert len(
                    model_weight
                ) != 0, "restore_pretrained_G_path and model_weight can not be None at the same time"
                if self.cfgs.direction == 'AtoB':
                    self.netG_pretrained.set_dict(
                        model_weight['netG_A'] or model_weight['netG_teacher'])
                else:
                    self.netG_pretrained.set_dict(
                        model_weight['netG_B'] or model_weight['netG_teacher'])

            load_pretrained_weight(self.cfgs.pretrained_netG,
                                   self.cfgs.distiller_student_netG,
                                   self.netG_pretrained, self.netG_student,
                                   self.cfgs.pretrained_ngf,
                                   self.cfgs.student_ngf)
            del self.netG_pretrained

        super(ResnetDistiller, self).load_networks(model_weight)
Example #18
    def load_networks(self, model_weight=None):
        if self.cfgs.restore_teacher_G_path is None:
            assert len(
                model_weight
            ) != 0, "restore_teacher_G_path and model_weight cannot be None at the same time."
            if self.cfgs.direction == 'AtoB':
                key = 'netG_A' if 'netG_A' in model_weight else 'netG_teacher'
                self.netG_teacher.set_dict(model_weight[key])
            else:
                key = 'netG_B' if 'netG_B' in model_weight else 'netG_teacher'
                self.netG_teacher.set_dict(model_weight[key])
        else:
            util.load_network(self.netG_teacher, self.cfgs.restore_teacher_G_path)

        if self.cfgs.restore_student_G_path is not None:
            util.load_network(self.netG_student,
                              self.cfgs.restore_student_G_path)
        else:
            if self.task == 'supernet':
                self.netG_student.set_dict(model_weight['netG_student'])

        if self.cfgs.restore_D_path is not None:
            util.load_network(self.netD, self.cfgs.restore_D_path)
        else:
            if self.cfgs.direction == 'AtoB':
                key = 'netD_A' if 'netD_A' in model_weight else 'netD'
                self.netD.set_dict(model_weight[key])
            else:
                key = 'netD_B' if 'netD_B' in model_weight else 'netD'
                self.netD.set_dict(model_weight[key])

        if self.cfgs.restore_A_path is not None:
            for i, netA in enumerate(self.netAs):
                netA_path = '%s-%d.pth' % (self.cfgs.restore_A_path, i)
                util.load_network(netA, netA_path)

        if self.cfgs.restore_O_path is not None:
            util.load_optimizer(self.optimizer_G,
                                self.cfgs.restore_G_optimizer_path)
            util.load_optimizer(self.optimizer_D,
                                self.cfgs.restore_D_optimizer_path)
Example #19
    def __init__(self, opt):
        """Initialize the pix2pix class.

        Parameters:
            opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
        """
        assert opt.isTrain
        BaseModel.__init__(self, opt)
        # specify the training losses you want to print out. The training/test scripts will call <BaseModel.get_current_losses>
        self.loss_names = ['G_gan', 'G_recon', 'D_real', 'D_fake']
        # specify the images you want to save/display. The training/test scripts will call <BaseModel.get_current_visuals>
        self.visual_names = ['real_A', 'fake_B', 'real_B']
        # specify the models you want to save to the disk. The training/test scripts will call <BaseModel.save_networks> and <BaseModel.load_networks>
        self.model_names = ['G', 'D']
        # define networks (both generator and discriminator)
        self.netG = networks.define_G(opt.input_nc,
                                      opt.output_nc,
                                      opt.ngf,
                                      opt.netG,
                                      opt.norm,
                                      opt.dropout_rate,
                                      opt.init_type,
                                      opt.init_gain,
                                      self.gpu_ids,
                                      opt=opt)

        self.netD = networks.define_D(opt.input_nc + opt.output_nc, opt.ndf,
                                      opt.netD, opt.n_layers_D, opt.norm,
                                      opt.init_type, opt.init_gain,
                                      self.gpu_ids)

        # define loss functions
        self.criterionGAN = GANLoss(opt.gan_mode).to(self.device)
        if opt.recon_loss_type == 'l1':
            self.criterionRecon = torch.nn.L1Loss()
        elif opt.recon_loss_type == 'l2':
            self.criterionRecon = torch.nn.MSELoss()
        elif opt.recon_loss_type == 'smooth_l1':
            self.criterionRecon = torch.nn.SmoothL1Loss()
        else:
            raise NotImplementedError(
                'Unknown reconstruction loss type [%s]!' % opt.recon_loss_type)
        # initialize optimizers; schedulers will be automatically created by function <BaseModel.setup>.
        self.optimizer_G = torch.optim.Adam(self.netG.parameters(),
                                            lr=opt.lr,
                                            betas=(opt.beta1, 0.999))
        self.optimizer_D = torch.optim.Adam(self.netD.parameters(),
                                            lr=opt.lr,
                                            betas=(opt.beta1, 0.999))
        self.optimizers = []
        self.optimizers.append(self.optimizer_G)
        self.optimizers.append(self.optimizer_D)

        self.eval_dataloader = create_eval_dataloader(self.opt)

        block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[2048]
        self.inception_model = InceptionV3([block_idx])
        self.inception_model.to(self.device)
        self.inception_model.eval()

        if 'cityscapes' in opt.dataroot:
            self.drn_model = DRNSeg('drn_d_105', 19, pretrained=False)
            util.load_network(self.drn_model, opt.drn_path, verbose=False)
            if len(opt.gpu_ids) > 0:
                self.drn_model.to(self.device)
                self.drn_model = nn.DataParallel(self.drn_model, opt.gpu_ids)
            self.drn_model.eval()

        self.best_fid = 1e9
        self.best_mIoU = -1e9
        self.fids, self.mIoUs = [], []
        self.is_best = False
        self.Tacts, self.Sacts = {}, {}
        self.npz = np.load(opt.real_stat_path)
Example #20
    def __init__(self, opt):
        """Initialize the CycleGAN class.

        Parameters:
            opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
        """
        assert opt.isTrain
        assert opt.direction == 'AtoB'
        assert opt.dataset_mode == 'unaligned'
        BaseModel.__init__(self, opt)
        # specify the training losses you want to print out. The training/test scripts will call <BaseModel.get_current_losses>
        self.loss_names = [
            'D_A', 'G_A', 'G_cycle_A', 'G_idt_A', 'D_B', 'G_B', 'G_cycle_B',
            'G_idt_B'
        ]
        # specify the images you want to save/display. The training/test scripts will call <BaseModel.get_current_visuals>
        visual_names_A = ['real_A', 'fake_B', 'rec_A']
        visual_names_B = ['real_B', 'fake_A', 'rec_B']
        if self.opt.lambda_identity > 0.0:  # if identity loss is used, we also visualize idt_B=G_B(A) and idt_A=G_A(B)
            visual_names_A.append('idt_B')
            visual_names_B.append('idt_A')

        self.visual_names = visual_names_A + visual_names_B  # combine visualizations for A and B
        # specify the models you want to save to the disk. The training/test scripts will call <BaseModel.save_networks> and <BaseModel.load_networks>.
        self.model_names = ['G_A', 'G_B', 'D_A', 'D_B']

        # define networks (both Generators and discriminators)
        # The naming is different from those used in the paper.
        # Code (vs. paper): G_A (G), G_B (F), D_A (D_Y), D_B (D_X)
        self.netG_A = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf,
                                        opt.netG, opt.norm, opt.dropout_rate,
                                        opt.init_type, opt.init_gain,
                                        self.gpu_ids)
        self.netG_B = networks.define_G(opt.output_nc, opt.input_nc, opt.ngf,
                                        opt.netG, opt.norm, opt.dropout_rate,
                                        opt.init_type, opt.init_gain,
                                        self.gpu_ids)

        self.netD_A = networks.define_D(opt.output_nc, opt.ndf, opt.netD,
                                        opt.n_layers_D, opt.norm,
                                        opt.init_type, opt.init_gain,
                                        self.gpu_ids)
        self.netD_B = networks.define_D(opt.input_nc, opt.ndf, opt.netD,
                                        opt.n_layers_D, opt.norm,
                                        opt.init_type, opt.init_gain,
                                        self.gpu_ids)

        if opt.lambda_identity > 0.0:  # only works when input and output images have the same number of channels
            assert (opt.input_nc == opt.output_nc)
        self.fake_A_pool = ImagePool(
            opt.pool_size
        )  # create image buffer to store previously generated images
        self.fake_B_pool = ImagePool(
            opt.pool_size
        )  # create image buffer to store previously generated images

        # define loss functions
        self.criterionGAN = models.modules.loss.GANLoss(opt.gan_mode).to(
            self.device)  # define GAN loss.
        self.criterionCycle = torch.nn.L1Loss()
        self.criterionIdt = torch.nn.L1Loss()

        # initialize optimizers; schedulers will be automatically created by function <BaseModel.setup>.
        self.optimizer_G = torch.optim.Adam(itertools.chain(
            self.netG_A.parameters(), self.netG_B.parameters()),
                                            lr=opt.lr,
                                            betas=(opt.beta1, 0.999))
        self.optimizer_D = torch.optim.Adam(itertools.chain(
            self.netD_A.parameters(), self.netD_B.parameters()),
                                            lr=opt.lr,
                                            betas=(opt.beta1, 0.999))

        self.optimizers = []
        self.optimizers.append(self.optimizer_G)
        self.optimizers.append(self.optimizer_D)

        self.eval_dataloader_AtoB = create_eval_dataloader(self.opt,
                                                           direction='AtoB')
        self.eval_dataloader_BtoA = create_eval_dataloader(self.opt,
                                                           direction='BtoA')

        block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[2048]
        self.inception_model = InceptionV3([block_idx])
        self.inception_model.to(self.device)
        self.inception_model.eval()

        if 'cityscapes' in opt.dataroot:
            self.drn_model = DRNSeg('drn_d_105', 19, pretrained=False)
            util.load_network(self.drn_model, opt.drn_path, verbose=False)
            if len(opt.gpu_ids) > 0:
                self.drn_model = nn.DataParallel(self.drn_model, opt.gpu_ids)
            self.drn_model.eval()

        self.best_fid_A, self.best_fid_B = 1e9, 1e9
        self.best_mIoU = -1e9
        self.fids_A, self.fids_B = [], []
        self.mIoUs = []
        self.is_best = False
        self.npz_A = np.load(opt.real_stat_A_path)
        self.npz_B = np.load(opt.real_stat_B_path)
Example #21
    configs = get_configs(config_name=opt.config_set)
    configs = list(configs.all_configs())

    dataloader = create_dataloader(opt)
    model = create_model(opt)
    model.setup(opt)
    device = model.device

    if not opt.no_fid:
        block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[2048]
        inception_model = InceptionV3([block_idx])
        inception_model.to(device)
        inception_model.eval()
    if 'cityscapes' in opt.dataroot and opt.direction == 'BtoA':
        drn_model = DRNSeg('drn_d_105', 19, pretrained=False)
        util.load_network(drn_model, opt.drn_path, verbose=False)
        if len(opt.gpu_ids) > 0:
            drn_model = nn.DataParallel(drn_model, opt.gpu_ids)
        drn_model.eval()

    npz = np.load(opt.real_stat_path)
    results = []

    for data_i in dataloader:
        model.set_input(data_i)
        break

    for config in tqdm.tqdm(configs):
        qualified = True
        macs, _ = model.profile(config)
        if macs > opt.budget:
Example #22
    def __init__(self, opt):
        assert opt.isTrain
        super(BaseResnetDistiller, self).__init__(opt)
        self.loss_names = ['G_gan', 'G_distill', 'G_recon', 'D_fake', 'D_real']
        self.optimizers = []
        self.image_paths = []
        self.visual_names = ['real_A', 'Sfake_B', 'Tfake_B', 'real_B']
        self.model_names = ['netG_student', 'netG_teacher', 'netD']
        self.netG_teacher = networks.define_G(opt.input_nc, opt.output_nc, opt.teacher_ngf,
                                              opt.teacher_netG, opt.norm, opt.teacher_dropout_rate,
                                              opt.init_type, opt.init_gain, self.gpu_ids, opt=opt)
        self.netG_student = networks.define_G(opt.input_nc, opt.output_nc, opt.student_ngf,
                                              opt.student_netG, opt.norm, opt.student_dropout_rate,
                                              opt.init_type, opt.init_gain, self.gpu_ids, opt=opt)

        if getattr(opt, 'sort_channels', False) and opt.restore_student_G_path is not None:
            self.netG_student_tmp = networks.define_G(opt.input_nc, opt.output_nc, opt.student_ngf,
                                                      opt.student_netG.replace('super_', ''), opt.norm,
                                                      opt.student_dropout_rate, opt.init_type, opt.init_gain,
                                                      self.gpu_ids, opt=opt)
        if hasattr(opt, 'distiller'):
            self.netG_pretrained = networks.define_G(opt.input_nc, opt.output_nc, opt.pretrained_ngf,
                                                     opt.pretrained_netG, opt.norm, 0,
                                                     opt.init_type, opt.init_gain, self.gpu_ids, opt=opt)

        if opt.dataset_mode == 'aligned':
            self.netD = networks.define_D(opt.input_nc + opt.output_nc, opt.ndf, opt.netD,
                                          opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids)
        elif opt.dataset_mode == 'unaligned':
            self.netD = networks.define_D(opt.output_nc, opt.ndf, opt.netD,
                                          opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids)
        else:
            raise NotImplementedError('Unknown dataset mode [%s]!!!' % opt.dataset_mode)

        self.netG_teacher.eval()
        self.criterionGAN = models.modules.loss.GANLoss(opt.gan_mode).to(self.device)
        if opt.recon_loss_type == 'l1':
            self.criterionRecon = torch.nn.L1Loss()
        elif opt.recon_loss_type == 'l2':
            self.criterionRecon = torch.nn.MSELoss()
        elif opt.recon_loss_type == 'smooth_l1':
            self.criterionRecon = torch.nn.SmoothL1Loss()
        elif opt.recon_loss_type == 'vgg':
            self.criterionRecon = models.modules.loss.VGGLoss(self.device)
        else:
            raise NotImplementedError('Unknown reconstruction loss type [%s]!' % opt.recon_loss_type)

        if isinstance(self.netG_teacher, nn.DataParallel):
            self.mapping_layers = ['module.model.%d' % i for i in range(9, 21, 3)]
        else:
            self.mapping_layers = ['model.%d' % i for i in range(9, 21, 3)]

        self.netAs = []
        self.Tacts, self.Sacts = {}, {}

        G_params = [self.netG_student.parameters()]
        for i, n in enumerate(self.mapping_layers):
            ft, fs = self.opt.teacher_ngf, self.opt.student_ngf
            if hasattr(opt, 'distiller'):
                netA = nn.Conv2d(in_channels=fs * 4, out_channels=ft * 4, kernel_size=1). \
                    to(self.device)
            else:
                netA = SuperConv2d(in_channels=fs * 4, out_channels=ft * 4, kernel_size=1). \
                    to(self.device)
            networks.init_net(netA)
            G_params.append(netA.parameters())
            self.netAs.append(netA)
            self.loss_names.append('G_distill%d' % i)

        self.optimizer_G = torch.optim.Adam(itertools.chain(*G_params), lr=opt.lr, betas=(opt.beta1, 0.999))
        self.optimizer_D = torch.optim.Adam(self.netD.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
        self.optimizers.append(self.optimizer_G)
        self.optimizers.append(self.optimizer_D)

        self.eval_dataloader = create_eval_dataloader(self.opt, direction=opt.direction)

        block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[2048]
        self.inception_model = InceptionV3([block_idx])
        self.inception_model.to(self.device)
        self.inception_model.eval()

        if 'cityscapes' in opt.dataroot:
            self.drn_model = DRNSeg('drn_d_105', 19, pretrained=False)
            util.load_network(self.drn_model, opt.drn_path, verbose=False)
            if len(opt.gpu_ids) > 0:
                self.drn_model.to(self.device)
                self.drn_model = nn.DataParallel(self.drn_model, opt.gpu_ids)
            self.drn_model.eval()

        self.npz = np.load(opt.real_stat_path)
        self.is_best = False
Example #23
    def __init__(self, opt):
        """Initialize the pix2pix class.

        Parameters:
            opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
        """
        assert opt.isTrain
        BaseModel.__init__(self, opt)
        self.loss_names = [
            'G_gan', 'G_recon', 'D_real', 'D_fake', 'G_comp_cost'
        ]
        self.visual_names = ['real_A', 'fake_B', 'real_B']
        self.model_names = ['G', 'D']
        self.netG = networks.define_G(opt.input_nc,
                                      opt.output_nc,
                                      opt.ngf,
                                      opt.netG,
                                      opt.norm,
                                      opt.dropout_rate,
                                      opt.init_type,
                                      opt.init_gain,
                                      self.gpu_ids,
                                      opt=opt)

        self.netD = networks.define_D(opt.input_nc + opt.output_nc,
                                      opt.ndf,
                                      opt.netD,
                                      opt.n_layers_D,
                                      opt.norm,
                                      opt.init_type,
                                      opt.init_gain,
                                      self.gpu_ids,
                                      opt=opt)

        self.criterionGAN = GANLoss(opt.gan_mode).to(self.device)
        if opt.recon_loss_type == 'l1':
            self.criterionRecon = torch.nn.L1Loss()
        elif opt.recon_loss_type == 'l2':
            self.criterionRecon = torch.nn.MSELoss()
        elif opt.recon_loss_type == 'smooth_l1':
            self.criterionRecon = torch.nn.SmoothL1Loss()
        else:
            raise NotImplementedError(
                'Unknown reconstruction loss type [%s]!' % opt.recon_loss_type)
        self.optimizer_G = torch.optim.Adam(self.netG.parameters(),
                                            lr=opt.lr,
                                            betas=(opt.beta1, 0.999))
        self.optimizer_D = torch.optim.Adam(self.netD.parameters(),
                                            lr=opt.lr,
                                            betas=(opt.beta1, 0.999))
        self.optimizers = []
        self.optimizers.append(self.optimizer_G)
        self.optimizers.append(self.optimizer_D)

        self.eval_dataloader = create_eval_dataloader(self.opt)

        block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[2048]
        self.inception_model = InceptionV3([block_idx])
        self.inception_model.to(self.device)
        self.inception_model.eval()

        if 'cityscapes' in opt.dataroot:
            self.drn_model = DRNSeg('drn_d_105', 19, pretrained=False)
            util.load_network(self.drn_model, opt.drn_path, verbose=False)
            if len(opt.gpu_ids) > 0:
                self.drn_model.to(self.device)
                self.drn_model = nn.DataParallel(self.drn_model, opt.gpu_ids)
            self.drn_model.eval()

        self.best_fid = 1e9
        self.best_mIoU = -1e9
        self.fids, self.mIoUs = [], []
        self.is_best = False
        self.Tacts, self.Sacts = {}, {}
        self.npz = np.load(opt.real_stat_path)
Example #24
        default='/home/hy/vscode/reid-custom/experiments/Market1501')
    parser.add_argument('--batch_size',
                        default=512,
                        type=int,
                        help='batchsize')
    parser.add_argument('--share_conv', default=False, action='store_true')
    args = parser.parse_args()

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Make saving directory
    save_dir_path = os.path.join(args.save_path, args.dataset)
    os.makedirs(save_dir_path, exist_ok=True)

    logger = util.Logger(save_dir_path)
    logger.info(vars(args))

    model = build_model(args.experiment,
                        num_classes=1,
                        share_conv=args.share_conv)
    model = util.load_network(model, args.checkpoint, args.which_epoch)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = model.to(device)

    CMC, mAP = test(model, args.dataset, args.dataset_path, args.batch_size)

    logger.info('Testing: top1:%.2f top5:%.2f top10:%.2f mAP:%.2f' %
                (CMC[0], CMC[4], CMC[9], mAP))

    # torch.cuda.empty_cache()
Example #25
 def load_network(self):
     for name in self.model_names:
         net = getattr(self, name, None)
         path = getattr(self.cfgs, 'restore_%s_path' % name[3:], None)
         if path is not None:
             util.load_network(net, path)