Example #1
# Load experiment setting
with open(opts.config, 'r') as stream:
    config = yaml.load(stream, Loader=yaml.FullLoader)

# Activate GPUs
config['gpu_ids'] = opts.gpu_ids
gpu_info = trainer_util.activate_gpus(config)

# Get data loaders
cfg_val_loader = config['val_dataloader']
cfg_test_loader1 = config['test_dataloader1']
cfg_test_loader2 = config['test_dataloader2']
cfg_test_loader3 = config['test_dataloader3']
cfg_test_loader4 = config['test_dataloader4']

val_loader = trainer_util.get_dataloader(cfg_val_loader['dataset_args'],
                                         cfg_val_loader['dataloader_args'])
test_loader1 = trainer_util.get_dataloader(cfg_test_loader1['dataset_args'],
                                           cfg_test_loader1['dataloader_args'])
test_loader2 = trainer_util.get_dataloader(cfg_test_loader2['dataset_args'],
                                           cfg_test_loader2['dataloader_args'])
test_loader3 = trainer_util.get_dataloader(cfg_test_loader3['dataset_args'],
                                           cfg_test_loader3['dataloader_args'])
test_loader4 = trainer_util.get_dataloader(cfg_test_loader4['dataset_args'],
                                           cfg_test_loader4['dataloader_args'])

# Get model
if 'vgg' in config['model']['architecture'] and 'guided' in config['model']['architecture']:
    diss_model = GuidedDissimNet(**config['model']).cuda()
if 'vgg' in config['model']['architecture'] and 'correlated' in config['model']['architecture']:
Example #2
import os

import torch
import torchvision
import yaml
from torchvision.transforms import ToPILImage

import trainer_util  # project-specific helper providing get_dataloader(); adjust the import to your repository layout


def mae_features(config_file_path, gpu_ids, dataroot, data_origin):
    
    soft_fdr = os.path.join(dataroot, 'mae_features_' + data_origin)
    
    if not os.path.exists(soft_fdr):
        os.makedirs(soft_fdr)

    # load experiment setting
    with open(config_file_path, 'r') as stream:
        config = yaml.load(stream, Loader=yaml.FullLoader)
    
    # activate GPUs
    config['gpu_ids'] = gpu_ids
    gpu = int(gpu_ids)
    
    # get data_loaders
    cfg_test_loader = config['test_dataloader']
    cfg_test_loader['dataset_args']['dataroot'] = dataroot
    test_loader = trainer_util.get_dataloader(cfg_test_loader['dataset_args'], cfg_test_loader['dataloader_args'])
    
    class VGG19(torch.nn.Module):
        def __init__(self, requires_grad=False):
            super().__init__()
            vgg_pretrained_features = torchvision.models.vgg19(pretrained=True).features
    
            self.slice1 = torch.nn.Sequential()
            self.slice2 = torch.nn.Sequential()
            self.slice3 = torch.nn.Sequential()
            self.slice4 = torch.nn.Sequential()
            self.slice5 = torch.nn.Sequential()
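            # The five slices end at relu1_1, relu2_1, relu3_1, relu4_1 and relu5_1 of VGG19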
            for x in range(2):
                self.slice1.add_module(str(x), vgg_pretrained_features[x])
            for x in range(2, 7):
                self.slice2.add_module(str(x), vgg_pretrained_features[x])
            for x in range(7, 12):
                self.slice3.add_module(str(x), vgg_pretrained_features[x])
            for x in range(12, 21):
                self.slice4.add_module(str(x), vgg_pretrained_features[x])
            for x in range(21, 30):
                self.slice5.add_module(str(x), vgg_pretrained_features[x])
            if not requires_grad:
                for param in self.parameters():
                    param.requires_grad = False
    
        def forward(self, X):
            h_relu1 = self.slice1(X)
            h_relu2 = self.slice2(h_relu1)
            h_relu3 = self.slice3(h_relu2)
            h_relu4 = self.slice4(h_relu3)
            h_relu5 = self.slice5(h_relu4)
            out = [h_relu1, h_relu2, h_relu3, h_relu4, h_relu5]
            return out
        
    from torch.nn.modules.upsampling import Upsample
    up5 = Upsample(scale_factor=16, mode='bicubic')
    up4 = Upsample(scale_factor=8, mode='bicubic')
    up3 = Upsample(scale_factor=4, mode='bicubic')
    up2 = Upsample(scale_factor=2, mode='bicubic')
    up1 = Upsample(scale_factor=1, mode='bicubic')
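    # Scale factors 16/8/4/2/1 undo the VGG poolings so each stage's map matches the input resolution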
    to_pil = ToPILImage()
    
    # Going through visualization loader
    weights = [1.0/32, 1.0/16, 1.0/8, 1.0/4, 1.0]
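    # Per-stage fusion weights: deeper, more semantic VGG stages contribute more to the combined map below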
    vgg = VGG19().cuda(gpu)
    
    with torch.no_grad():
        for i, data_i in enumerate(test_loader):
            print('Generating image %i out of %i'%(i+1, len(test_loader)))
            img_name = os.path.basename(data_i['original_path'][0])
            original = data_i['original'].cuda(gpu)
            synthesis = data_i['synthesis'].cuda(gpu)
            
            x_vgg, y_vgg = vgg(original), vgg(synthesis)
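            # Mean absolute feature difference per VGG stage, reduced over channels to a 1-channel map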
            feat5 = torch.mean(torch.abs(x_vgg[4] - y_vgg[4]), dim=1).unsqueeze(1)
            feat4 = torch.mean(torch.abs(x_vgg[3] - y_vgg[3]), dim=1).unsqueeze(1)
            feat3 = torch.mean(torch.abs(x_vgg[2] - y_vgg[2]), dim=1).unsqueeze(1)
            feat2 = torch.mean(torch.abs(x_vgg[1] - y_vgg[1]), dim=1).unsqueeze(1)
            feat1 = torch.mean(torch.abs(x_vgg[0] - y_vgg[0]), dim=1).unsqueeze(1)
            
            img_5 = up5(feat5)
            img_4 = up4(feat4)
            img_3 = up3(feat3)
            img_2 = up2(feat2)
            img_1 = up1(feat1)
            
            combined = (weights[0] * img_1 + weights[1] * img_2 + weights[2] * img_3
                        + weights[3] * img_4 + weights[4] * img_5)
            min_v = torch.min(combined.squeeze())
            max_v = torch.max(combined.squeeze())
            combined = (combined.squeeze() - min_v) / (max_v - min_v)
    
            combined = to_pil(combined.cpu())
            pred_name = 'mea_' + img_name
            combined.save(os.path.join(soft_fdr, pred_name))
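
For reference, the loop above takes the per-stage mean absolute differences between the VGG19 features of the original and the synthesized image, upsamples each map back to the input resolution, fuses them with the weights defined earlier, min-max normalizes the result, and writes one heatmap per test image into <dataroot>/mae_features_<data_origin>. A hedged usage sketch follows; the config path, dataroot and data_origin values are hypothetical placeholders rather than values from the original project:

if __name__ == '__main__':
    # Placeholder arguments; the config must provide a 'test_dataloader' entry
    # with 'dataset_args' and 'dataloader_args', as read at the top of the function.
    mae_features(config_file_path='configs/test_config.yaml',
                 gpu_ids='0',
                 dataroot='/path/to/dataset',
                 data_origin='cityscapes')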
Example #3
if not os.path.isdir(os.path.join(store_fdr_exp, 'pred')):
    os.makedirs(os.path.join(store_fdr_exp, 'label'), exist_ok=True)
    os.makedirs(os.path.join(store_fdr_exp, 'pred'), exist_ok=True)
    os.makedirs(os.path.join(store_fdr_exp, 'soft'), exist_ok=True)

# Activate GPUs
config['gpu_ids'] = opts.gpu_ids
gpu_info = trainer_util.activate_gpus(config)

# Check whether we are using prior images
prior = config['model']['prior']
# Get data loaders
cfg_test_loader = config['test_dataloader']
# Propagate the prior flag to the dataloader config (avoids repetition in the config file)
cfg_test_loader['dataset_args']['prior'] = prior
test_loader = trainer_util.get_dataloader(cfg_test_loader['dataset_args'],
                                          cfg_test_loader['dataloader_args'])

# Get model
if config['model']['prior']:
    diss_model = DissimNetPrior(**config['model']).cuda()
elif 'vgg' in config['model']['architecture']:
    diss_model = DissimNet(**config['model']).cuda()
else:
    raise NotImplementedError()

diss_model.eval()
model_path = os.path.join(save_fdr, exp_name,
                          '%s_net_%s.pth' % (epoch, exp_name))
model_weights = torch.load(model_path)
diss_model.load_state_dict(model_weights)
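
One common adjustment to the restore step, sketched with the same save_fdr, exp_name and epoch placeholders used above: passing map_location to torch.load lets a checkpoint written from a CUDA model be restored on a CPU-only machine (where the .cuda() calls above would be dropped as well):

import os
import torch

model_path = os.path.join(save_fdr, exp_name, '%s_net_%s.pth' % (epoch, exp_name))
# Load GPU-saved weights onto the CPU, then restore the state dict and switch to eval mode
model_weights = torch.load(model_path, map_location='cpu')
diss_model.load_state_dict(model_weights)
diss_model.eval()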