Example #1
def get_loader(config, split):
    if config.norm_ways is None:
        normalizations = None
    else:
        normalizations = image_normalizations(config.norm_ways)

    if split == 'test':
        test_dataset = dataset_generalize(config,
                                          split=split,
                                          normalizations=normalizations)
        test_loader = TD.DataLoader(dataset=test_dataset,
                                    batch_size=config.batch_size,
                                    shuffle=False,
                                    drop_last=False)

        return test_loader
    else:
        assert split in ['train', 'val']
        if config.augmentation and split == 'train':
            augmentations = Augmentations(config)
        else:
            augmentations = None

        dataset = dataset_generalize(config,
                                     split=split,
                                     augmentations=augmentations,
                                     normalizations=normalizations)
        loader = TD.DataLoader(dataset=dataset,
                               batch_size=config.batch_size,
                               shuffle=(split == 'train'),
                               drop_last=(split == 'train'),
                           num_workers=config.batch_size)  # note: the source reuses batch_size as the worker count

        return loader
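
A minimal call sketch for get_loader; the config fields below are the ones the function reads, and the dataset-specific fields are assumed to be filled in by get_dataset_generalize_config (as in the later examples):

from easydict import EasyDict as edict

config = edict()
config.norm_ways = None       # skip normalization (see the branch above)
config.augmentation = False   # no Augmentations in this sketch
config.batch_size = 4
config = get_dataset_generalize_config(config, 'Cityscapes_Fine')

train_loader = get_loader(config, 'train')   # shuffled, drop_last=True
test_loader = get_loader(config, 'test')     # unshuffled, keeps all samples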
Example #2
def test_dataset(dataset_name):
    config = edict()
    config.dataset_name = dataset_name
    config.with_path = True
    config.input_shape = (512, 1024)
    config.max_crop_size = (1024, 2048)
    config.aug_library = 'imgaug'

    config = get_dataset_generalize_config(config, dataset_name)

    config = get_default_augmentor_config(config)
    augmentations = Augmentations(config)
    dataset = dataset_generalize(config,
                                 split='train',
                                 augmentations=augmentations)

    N = len(dataset)

    idx = np.random.randint(0, N)

    data = dataset[idx]
    # data['image'] holds the (image, annotation) pair; with_path=True
    # additionally exposes the file paths under data['filename']
    img, ann = data['image']
    img = img.transpose((1, 2, 0))  # CHW -> HWC for display
    ann = add_color_map(ann)
    img_path, ann_path = data['filename']

    ori_img = cv2.imread(img_path, cv2.IMREAD_COLOR)
    ori_img = cv2.cvtColor(ori_img, cv2.COLOR_BGR2RGB)

    ori_ann = read_ann_file(ann_path)
    ori_ann = add_color_map(ori_ann)
    show_images([img, ann, ori_img, ori_ann],
                ['img', 'ann', 'origin img', 'origin ann'])
Example #3
def class_weight_count(dataset_name, split='train'):
    config = get_dataset_generalize_config(None, dataset_name)
    dataset = dataset_generalize(config, split=split, bchw=False)

    N = len(dataset)
    class_number = len(config.foreground_class_ids)
    COUNT = dataset_class_count(class_number)
    for idx in trange(N):
        img, ann = dataset[idx]
        COUNT.update(ann)

    count = COUNT.summary()
    return count
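
The per-class pixel counts returned above are commonly converted into loss weights. Below is a sketch using median-frequency balancing; the 1-D shape of count is an assumption about what COUNT.summary() returns:

import numpy as np

def counts_to_weights(count):
    # count[i] = number of pixels labelled with class i
    freq = count / count.sum()
    # rare classes (low frequency) receive weights above 1
    return np.median(freq) / np.maximum(freq, 1e-12)

count = np.array([5e6, 8e5, 5e4])   # toy counts for three classes
print(counts_to_weights(count))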
Example #4
def static_dataset(dataset_name, split='train'):
    config = get_dataset_generalize_config(None, dataset_name)
    dataset = dataset_generalize(config, split=split, bchw=False)

    N = len(dataset)
    MEAN = dataset_mean()
    for idx in trange(N):
        img, ann = dataset[idx]
        MEAN.update(img)

    mean = MEAN.summary()

    STD = dataset_std(mean)
    for idx in trange(N):
        img, ann = dataset[idx]
        STD.update(img)

    std = STD.summary()

    return mean, std
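
A sketch of applying the computed statistics to a raw image with plain NumPy; the per-channel mean/std values here are placeholders for what static_dataset returns:

import numpy as np

def normalize_image(img, mean, std):
    # img: HWC uint8 image; mean/std: per-channel statistics
    return (img.astype(np.float32) - np.asarray(mean)) / np.asarray(std)

img = np.random.randint(0, 256, (512, 1024, 3), dtype=np.uint8)
norm = normalize_image(img, mean=[104.0, 117.0, 123.0], std=[58.0, 57.0, 57.5])
print(norm.mean(axis=(0, 1)), norm.std(axis=(0, 1)))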
Example #5
def test_dataset_loader(dataset_name):
    config = edict()
    config.dataset_name = dataset_name
    config.print_path = True
    config.input_shape = (224, 224)
    config.ignore_index = 255
    config.with_edge = False
    config.batch_size = 2
    config = get_dataset_generalize_config(config, dataset_name)

    config = get_default_augmentor_config(config)
    augmentations = Augmentations(config)
    dataset = dataset_generalize(config,
                                 split='train',
                                 augmentations=augmentations)
    loader = TD.DataLoader(dataset=dataset,
                           batch_size=config.batch_size,
                           shuffle=True,
                           drop_last=False)
    plt.ion()
    for i, data in enumerate(loader):
        imgs, labels = data
        print(i, imgs.shape, labels.shape)

        show_tensor_list([imgs], ['img'])
        show_tensor_list([labels], ['labels'])

        np_labels = labels.data.cpu().numpy()
        print('label id: ', np.unique(np_labels))

        # np.split needs an evenly divisible first axis; the final batch of a
        # drop_last=False loader may be smaller, but the loop stops early
        image_list = np.split(np_labels, config.batch_size)
        image_list = [np.squeeze(img) for img in image_list]
        image_list = [add_color_map(img) for img in image_list]
        show_images(image_list, ['label'] * config.batch_size)
        if i > 1:
            break

    plt.ioff()
    plt.show()
Example #6
    def test_cityscapes_category(self):
        """
        The Cityscapes category id differs from the HuaWei class index, so
        the ids need to be remapped.

        The Cityscapes dataset may also ignore some objects, such as parking
        (class id=9, category=flat, ...).

        Returns
        -------
        None.
        """
        config = get_dataset_generalize_config(None, 'Cityscapes_Category')
        config.with_path = True
        cat_val_dataset = dataset_generalize(config,
                                             split='val',
                                             augmentations=None,
                                             normalizations=None)

        config = get_dataset_generalize_config(None, 'Cityscapes_Fine')
        config.with_path = True
        class_val_dataset = dataset_generalize(config,
                                               split='val',
                                               augmentations=None,
                                               normalizations=None)

        assert len(cat_val_dataset) == len(class_val_dataset)
        N = len(cat_val_dataset)

        for key, value in id2catId.items():
            print('key={}, value={}'.format(key, value))

        for idx in range(min(N, 3)):
            cat_data = cat_val_dataset[idx]
            cat_img, cat_ann = cat_data['image']
            class_data = class_val_dataset[idx]
            class_img, class_ann = class_data['image']

            # both datasets must refer to the same annotation file
            assert cat_data['filename'][1] == class_data['filename'][1]
            origin_ann = Image.open(class_data['filename'][1])
            origin_ann = np.array(origin_ann, np.uint8)

            # remap the original class ids to category ids
            class2cat = origin_ann.copy()
            for key, value in id2catId.items():
                if key >= 0:
                    class2cat[origin_ann == key] = value

            for class_id in config.foreground_class_ids:
                print('class_id={}, catId={}'.format(class_id,
                                                     id2catId[class_id]))

            class2cat[origin_ann == 0] = 255
            class2cat[origin_ann != 255] -= 1

            diff = (class2cat != cat_ann)
            negative = (origin_ann == 255)
            print(np.unique(origin_ann[diff]))
            cat_img = cat_img.transpose((1, 2, 0))
            class_img = class_img.transpose((1, 2, 0))
            cat_ann = add_color_map(cat_ann)
            class_ann = add_color_map(class_ann)
            class2cat = add_color_map(class2cat)
            show_images([cat_img, cat_ann, class_img, class_ann,
                         class2cat, diff, negative],
                        ['cat', 'cat', 'class', 'class',
                         'class2cat', 'diff', 'negative'])
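
The per-key remap loop above can be collapsed into one vectorized lookup-table pass; a sketch assuming the label ids fit into uint8 and that id2catId maps class id to category id:

import numpy as np

def remap_with_lut(ann, id2catId, ignore_index=255):
    # 256-entry LUT: every id not covered by id2catId maps to ignore_index
    lut = np.full(256, ignore_index, dtype=np.uint8)
    for key, value in id2catId.items():
        if key >= 0:
            lut[key] = value
    return lut[ann]   # one fancy-indexing pass over the whole annotation

ann = np.array([[0, 9, 255]], dtype=np.uint8)
print(remap_with_lut(ann, {0: 0, 9: 1}))   # -> [[0 1 255]]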
Example #7
        if config.norm_ways is None:
            normalizations = None
        else:
            normalizations = image_normalizations(config.norm_ways)

        if config.augmentation:
            augmentations = Augmentations(config)
        else:
            augmentations = None

        train_datasets = []
        for dataset_name in ['Cityscapes_Category', 'HuaWei']:
            config = get_dataset_generalize_config(config, dataset_name)
            train_dataset = dataset_generalize(config,
                                               split='train',
                                               augmentations=augmentations,
                                               normalizations=normalizations)
            train_datasets.append(train_dataset)

        merge_dataset = TD.ConcatDataset(train_datasets)
        train_loader = TD.DataLoader(dataset=merge_dataset,
                                     batch_size=batch_size,
                                     shuffle=True,
                                     drop_last=True,
                                     num_workers=8)

        config = get_dataset_generalize_config(config, "HuaWei")
        val_dataset = dataset_generalize(config,
                                         split='val',
                                         augmentations=None,
                                         normalizations=normalizations)
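
For reference, TD.ConcatDataset simply chains the member datasets, so the merged length is the sum of the parts and indexing runs through them in order; a self-contained toy sketch:

import torch
import torch.utils.data as TD

a = TD.TensorDataset(torch.zeros(10, 3))
b = TD.TensorDataset(torch.ones(4, 3))
merged = TD.ConcatDataset([a, b])
assert len(merged) == len(a) + len(b)   # 14: indices 0-9 from a, 10-13 from b
print(merged[12])                       # a sample from b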
Example #8
"""
experiment on downsampling and mIoU
"""

from torchseg.dataset.dataset_generalize import dataset_generalize, \
    get_dataset_generalize_config

from torchseg.utils.metrics import runningScore
import cv2
from easydict import EasyDict as edict
if __name__ == '__main__':
    config = edict()
    config.with_edge = False
    config = get_dataset_generalize_config(config, 'HuaWei')

    val_dataset = dataset_generalize(config,
                                     split='val',
                                     augmentations=None,
                                     normalizations=None)

    N = len(val_dataset)

    if config.ignore_index == 0:
        config.class_number = len(config.foreground_class_ids) + 1
    else:
        config.class_number = len(config.foreground_class_ids)

    metric = runningScore(config.class_number)
    print('| scale | miou |')
    print('| -\t | -\t |')
    for scale in range(2, 9):
        metric.reset()
        for i in range(N):