def test_dataset(dataset_name):
    """Visually sanity-check one random training sample of *dataset_name*.

    Builds an augmented dataset, pulls a random sample, and shows the
    augmented image/annotation side by side with the originals re-read
    from disk. Display-only; returns None.
    """
    cfg = edict()
    cfg.dataset_name = dataset_name
    cfg.with_path = True  # make the dataset also return file paths
    cfg.input_shape = (512, 1024)
    cfg.max_crop_size = (1024, 2048)
    cfg.aug_library = 'imgaug'
    cfg = get_dataset_generalize_config(cfg, dataset_name)
    cfg = get_default_augmentor_config(cfg)
    dataset = dataset_generalize(cfg, split='train',
                                 augmentations=Augmentations(cfg))
    sample = dataset[np.random.randint(0, len(dataset))]
    img, ann = sample['image']
    img = img.transpose((1, 2, 0))  # CHW -> HWC for display
    ann = add_color_map(ann)
    img_path, ann_path = sample['filename']
    # reload the untouched originals for comparison (OpenCV loads BGR)
    raw_img = cv2.cvtColor(cv2.imread(img_path, cv2.IMREAD_COLOR),
                           cv2.COLOR_BGR2RGB)
    raw_ann = add_color_map(read_ann_file(ann_path))
    show_images([img, ann, raw_img, raw_ann],
                ['img', 'ann', 'origin img', 'origin ann'])
def class_weight_count(dataset_name, split='train'):
    """Accumulate per-class pixel counts over one split of *dataset_name*.

    Iterates every annotation in the split (with a tqdm progress bar),
    feeds it to a ``dataset_class_count`` accumulator sized to the number
    of foreground classes, and returns the accumulator's summary.
    """
    cfg = get_dataset_generalize_config(None, dataset_name)
    ds = dataset_generalize(cfg, split=split, bchw=False)
    counter = dataset_class_count(len(cfg.foreground_class_ids))
    for i in trange(len(ds)):
        _, ann = ds[i]
        counter.update(ann)
    return counter.summary()
def static_dataset(dataset_name, split='train'):
    """Compute the image mean and std of one split of *dataset_name*.

    Two passes over the split: the first accumulates the mean, the second
    accumulates the std around that mean (the std accumulator needs the
    finished mean, so a single pass is not possible here).

    Returns
    -------
    (mean, std) as produced by the accumulators' ``summary()`` methods.
    """
    cfg = get_dataset_generalize_config(None, dataset_name)
    ds = dataset_generalize(cfg, split=split, bchw=False)
    n = len(ds)
    # pass 1: mean
    mean_acc = dataset_mean()
    for i in trange(n):
        img, _ = ds[i]
        mean_acc.update(img)
    mean = mean_acc.summary()
    # pass 2: std, centred on the pass-1 mean
    std_acc = dataset_std(mean)
    for i in trange(n):
        img, _ = ds[i]
        std_acc.update(img)
    return mean, std_acc.summary()
def test_dataset_loader(dataset_name):
    """Interactively visualize a few augmented batches of *dataset_name*.

    Wraps the dataset in a shuffled DataLoader, shows the image/label
    tensors and the color-mapped labels for the first couple of batches,
    then leaves matplotlib in blocking mode. Display-only; returns None.
    """
    cfg = edict()
    cfg.dataset_name = dataset_name
    cfg.print_path = True
    cfg.input_shape = (224, 224)
    cfg.ignore_index = 255
    cfg.with_edge = False
    cfg.batch_size = 2
    cfg = get_dataset_generalize_config(cfg, dataset_name)
    cfg = get_default_augmentor_config(cfg)
    dataset = dataset_generalize(cfg, split='train',
                                 augmentations=Augmentations(cfg))
    loader = TD.DataLoader(dataset=dataset,
                           batch_size=cfg.batch_size,
                           shuffle=True,
                           drop_last=False)
    plt.ion()  # non-blocking figures while looping
    for batch_idx, (imgs, labels) in enumerate(loader):
        print(batch_idx, imgs.shape, labels.shape)
        show_tensor_list([imgs], ['img'])
        show_tensor_list([labels], ['labels'])
        np_labels = labels.data.cpu().numpy()
        print('label id: ', np.unique(np_labels))
        # split the batch back into single color-mapped label images
        label_imgs = [add_color_map(np.squeeze(part))
                      for part in np.split(np_labels, cfg.batch_size)]
        show_images(label_imgs, ['label'] * cfg.batch_size)
        if batch_idx > 1:  # only preview the first few batches
            break
    plt.ioff()
    plt.show()
def test_cityscapes_category(self):
    """
    Cross-check Cityscapes category annotations against class annotations.

    The cityscapes category id != huawei class index, so they need to be
    remapped; the cityscapes dataset may also ignore some objects, e.g.
    parking (class id=9, category=flat, ...). This loads the 'val' split
    twice (category view and fine-class view), rebuilds the category map
    from the raw class annotation via ``id2catId``, and visualizes where
    the rebuilt map disagrees with the dataset's own category annotation.

    Returns
    -------
    None.
    """
    # category-level view of the val split (with file paths returned)
    config=get_dataset_generalize_config(None,'Cityscapes_Category')
    config.with_path=True
    cat_val_dataset = dataset_generalize(config, split='val',
                                         augmentations=None,
                                         normalizations=None)
    # fine class-level view of the same split
    config=get_dataset_generalize_config(None,'Cityscapes_Fine')
    config.with_path=True
    class_val_dataset = dataset_generalize(config, split='val',
                                           augmentations=None,
                                           normalizations=None)
    # both views must enumerate the same samples in the same order
    assert len(cat_val_dataset)==len(class_val_dataset)
    N=len(cat_val_dataset)
    # dump the class-id -> category-id table for reference
    for key,value in id2catId.items():
        print('key={}, value={}'.format(key,value))
    # only inspect the first few samples
    for idx in range(min(N,3)):
        cat_data=cat_val_dataset.__getitem__(idx)
        cat_img,cat_ann=cat_data['image']
        class_data=class_val_dataset.__getitem__(idx)
        class_img,class_ann=class_data['image']
        # same underlying annotation file must back both samples
        assert cat_data['filename'][1]==class_data['filename'][1]
        # raw (unremapped) class annotation straight from disk
        origin_ann=Image.open(class_data['filename'][1])
        origin_ann=np.array(origin_ann,np.uint8)
        # rebuild the category map: replace every class id with its catId
        class2cat=origin_ann.copy()
        for key,value in id2catId.items():
            if key>=0:
                class2cat[origin_ann==key]=value
        for class_id in config.foreground_class_ids:
            print('class_id={},catId={}'.format(class_id,id2catId[class_id]))
        # class id 0 is treated as ignore, then shift ids down by one
        # NOTE(review): pixels with origin_ann==0 are first set to 255 and
        # then also decremented by the next line (origin_ann!=255 still
        # holds there), ending at 254 — confirm this is intended.
        class2cat[origin_ann==0]=255
        class2cat[origin_ann!=255]-=1
        # disagreement mask between rebuilt and dataset category maps
        diff=(class2cat!=cat_ann)
        # mask of pixels the raw annotation marks as ignore
        negative=(origin_ann==255)
        print(np.unique(origin_ann[diff]))
        # CHW -> HWC for display
        cat_img=cat_img.transpose((1,2,0))
        class_img=class_img.transpose((1,2,0))
        cat_ann=add_color_map(cat_ann)
        class_ann=add_color_map(class_ann)
        class2cat=add_color_map(class2cat)
        show_images([cat_img,cat_ann,class_img,class_ann,class2cat,diff,negative],
                    ['cat','cat','class','class','class2cat','diff','negative'])
fractal_fusion_type = 'mean' config.before_upsample = before_upsample config.fractal_depth = fractal_depth config.fractal_fusion_type = fractal_fusion_type location_str = 'before' if before_upsample else 'after' config.note = '_'.join([ config.note, location_str, 'depth', str(fractal_depth), 'fusion', fractal_fusion_type ]) net = psp_fractal(config) keras_fit(net, train_loader, val_loader) elif test == 'coarse': net = get_net(config) for dataset_name in ['Cityscapes_Coarse', 'Cityscapes_Fine']: config = get_dataset_generalize_config(config, dataset_name) #config.dataset_name = dataset_name.lower() coarse_train_loader, coarse_val_loader = get_loaders(config) keras_fit(net, coarse_train_loader, coarse_val_loader) elif test == 'summary': net = get_net(config) config_str = json.dumps(config, indent=2, sort_keys=True) print(config_str) print('args is ' + '*' * 30) print(args) height, width = config.input_shape device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') torchsummary.summary(net.to(device), (3, height, width)) elif test == 'huawei': # train on cityscapes, validation on huawei
# -*- coding: utf-8 -*- """ experiment on downsample and miou """ from torchseg.dataset.dataset_generalize import dataset_generalize, \ get_dataset_generalize_config from torchseg.utils.metrics import runningScore import cv2 from easydict import EasyDict as edict if __name__ == '__main__': config = edict() config.with_edge = False config = get_dataset_generalize_config(config, 'HuaWei') val_dataset = dataset_generalize(config, split='val', augmentations=None, normalizations=None) N = len(val_dataset) if config.ignore_index == 0: config.class_number = len(config.foreground_class_ids) + 1 else: config.class_number = len(config.foreground_class_ids) metric = runningScore(config.class_number) print('| scale | miou |') print('| -\t | -\t |')