def test_albu_transform():
    """Albu ChannelShuffle followed by Normalize must yield a float32 image."""
    results = dict(
        img_prefix=osp.join(osp.dirname(__file__), '../data'),
        img_info=dict(filename='color.jpg'))

    # Build each pipeline step from its config.
    loader = build_from_cfg(dict(type='LoadImageFromFile'), PIPELINES)
    albu = build_from_cfg(
        dict(type='Albu', transforms=[dict(type='ChannelShuffle', p=1)]),
        PIPELINES)
    norm = build_from_cfg(
        dict(type='Normalize', mean=[0] * 3, std=[0] * 3, to_rgb=True),
        PIPELINES)

    # Run the steps in order.
    for step in (loader, albu, norm):
        results = step(results)

    assert results['img'].dtype == np.float32
def __init__(self, transforms):
    """Store a list of callable transforms, building dict configs first.

    Args:
        transforms (Sequence): Each item is either a config dict (built
            through ``build_from_cfg`` against ``PIPELINES``) or an
            already-callable transform.

    Raises:
        TypeError: If an item is neither a dict nor callable.
    """
    assert isinstance(transforms, Sequence)
    self.transforms = []
    for t in transforms:
        if isinstance(t, dict):
            self.transforms.append(build_from_cfg(t, PIPELINES))
        elif callable(t):
            self.transforms.append(t)
        else:
            raise TypeError('transform must be callable or a dict, but got'
                            f' {type(t)}')
def build_activation_layer(cfg):
    """Create an activation layer from its config.

    Args:
        cfg (dict): The activation layer config; must hold ``type`` (the
            layer class name) plus any args that layer needs.

    Returns:
        nn.Module: The instantiated activation layer.
    """
    layer = build_from_cfg(cfg, ACTIVATION_LAYERS)
    return layer
def build(cfg, registry, default_args=None):
    """Build one module, or an ``nn.Sequential`` of modules.

    Args:
        cfg (dict | list[dict]): A single module config, or a list of them.
        registry (:obj:`Registry`): The registry the module belongs to.
        default_args (dict, optional): Default arguments to build the
            module. Defaults to None.

    Returns:
        nn.Module: The built module (a ``Sequential`` when ``cfg`` is a
        list).
    """
    if not isinstance(cfg, list):
        return build_from_cfg(cfg, registry, default_args)
    modules = [build_from_cfg(item, registry, default_args) for item in cfg]
    return nn.Sequential(*modules)
def test_aug_test_size():
    """MultiScaleFlipAug must emit one image per (scale, flip) combination."""
    results = dict(
        img_prefix=osp.join(osp.dirname(__file__), '../../../data'),
        img_info=dict(filename='color.jpg'))
    # Define simple pipeline
    load = dict(type='LoadImageFromFile')
    load = build_from_cfg(load, PIPELINES)

    # get config
    transform = dict(
        type='MultiScaleFlipAug',
        transforms=[],
        img_scale=[(1333, 800), (800, 600), (640, 480)],
        flip=True,
        flip_direction=['horizontal', 'vertical', 'diagonal'])
    multi_aug_test_module = build_from_cfg(transform, PIPELINES)

    # BUG FIX: the original ran ``load`` twice -- once on the raw results
    # and again on the already-loaded results -- reloading the image for
    # no reason. Load once, then augment.
    results = load(results)
    results = multi_aug_test_module(results)
    # len(["original", "horizontal", "vertical", "diagonal"]) *
    # len([(1333, 800), (800, 600), (640, 480)])
    assert len(results['img']) == 12
def test_flip():
    """RandomFlip: config validation, plus a double flip restores the image."""
    # flip_ratio outside [0, 1] must be rejected
    with pytest.raises(AssertionError):
        build_from_cfg(dict(type='RandomFlip', flip_ratio=1.5), PIPELINES)

    # an unknown direction must be rejected
    with pytest.raises(AssertionError):
        build_from_cfg(
            dict(type='RandomFlip', flip_ratio=1, direction='horizonta'),
            PIPELINES)

    transform = dict(type='RandomFlip', flip_ratio=1)
    flip_module = build_from_cfg(transform, PIPELINES)

    img = mmcv.imread(
        osp.join(osp.dirname(__file__), '../data/color.jpg'), 'color')
    original_img = copy.deepcopy(img)
    results = dict(
        img=img,
        img2=copy.deepcopy(img),
        img_shape=img.shape,
        ori_shape=img.shape,
        # Set initial values for default meta_keys
        pad_shape=img.shape,
        scale_factor=1.0,
        img_fields=['img', 'img2'])

    results = flip_module(results)
    assert np.equal(results['img'], results['img2']).all()

    # Flipping a second time (flip_ratio=1) must restore the original.
    flip_module = build_from_cfg(transform, PIPELINES)
    results = flip_module(results)
    assert np.equal(results['img'], results['img2']).all()
    assert np.equal(original_img, results['img']).all()
def build_dataset(cfg, default_args=None):
    """Build a dataset, recursing into wrapper configs.

    Args:
        cfg (dict | list | tuple): Dataset config, or a sequence of them.
        default_args (dict, optional): Default init args forwarded to the
            built dataset(s). Defaults to None.

    Returns:
        Dataset: The constructed (possibly wrapped) dataset.
    """
    from .dataset_wrappers import ConcatDataset, RepeatDataset

    if isinstance(cfg, (list, tuple)):
        # A sequence of configs -> concatenation of the built datasets.
        return ConcatDataset([build_dataset(c, default_args) for c in cfg])
    if cfg['type'] == 'RepeatDataset':
        inner = build_dataset(cfg['dataset'], default_args)
        return RepeatDataset(inner, cfg['times'])
    if isinstance(cfg.get('ann_file'), (list, tuple)):
        # Several annotation files -> concatenate per-file datasets.
        return _concat_dataset(cfg, default_args)
    return build_from_cfg(cfg, DATASETS, default_args)
def build_dataset(cfg, default_args=None):
    """Build a dataset from config dict.

    Args:
        cfg (dict): Config dict. It should at least contain the key "type".
        default_args (dict | None, optional): Default initialization
            arguments. Default: None.

    Returns:
        Dataset: The constructed dataset.
    """
    return build_from_cfg(cfg, DATASETS, default_args)
def test_resize():
    """Resize: config validation, keep_ratio and fixed-size resizing."""
    # a flat list img_scale is invalid (must be tuple or list of tuples)
    with pytest.raises(AssertionError):
        build_from_cfg(
            dict(type='Resize', img_scale=[1333, 800], keep_ratio=True),
            PIPELINES)

    # multiple scales are incompatible with ratio_range
    with pytest.raises(AssertionError):
        build_from_cfg(
            dict(
                type='Resize',
                img_scale=[(1333, 800), (1333, 600)],
                ratio_range=(0.9, 1.1),
                keep_ratio=True), PIPELINES)

    # an unknown multiscale_mode is invalid
    with pytest.raises(AssertionError):
        build_from_cfg(
            dict(
                type='Resize',
                img_scale=[(1333, 800), (1333, 600)],
                keep_ratio=True,
                multiscale_mode='2333'), PIPELINES)

    resize_module = build_from_cfg(
        dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
        PIPELINES)

    img = mmcv.imread(
        osp.join(osp.dirname(__file__), '../data/color.jpg'), 'color')
    results = dict(
        img=img,
        img2=copy.deepcopy(img),
        img_shape=img.shape,
        ori_shape=img.shape,
        # Set initial values for default meta_keys
        pad_shape=img.shape,
        scale_factor=1.0,
        img_fields=['img', 'img2'])

    results = resize_module(results)
    assert np.equal(results['img'], results['img2']).all()

    # Re-run with a fixed-size (keep_ratio=False) resize.
    results.pop('scale')
    resize_module = build_from_cfg(
        dict(
            type='Resize',
            img_scale=(1280, 800),
            multiscale_mode='value',
            keep_ratio=False), PIPELINES)
    results = resize_module(results)
    assert np.equal(results['img'], results['img2']).all()
    assert results['img_shape'] == (800, 1280, 3)
def test_seq_color_jitter(self):
    """SeqPhotoMetricDistortion: per-frame vs shared jitter parameters."""
    results = self.results.copy()
    transform = build_from_cfg(
        dict(type='SeqPhotoMetricDistortion', share_params=False), PIPELINES)

    # Independent parameters: the two frames get different jitter.
    outs = transform(results)
    assert outs[0]['img_info']['color_jitter'] != outs[1]['img_info'][
        'color_jitter']

    # Shared parameters: both frames get the same jitter.
    transform.share_params = True
    outs = transform(results)
    assert outs[0]['img_info']['color_jitter'] == outs[1]['img_info'][
        'color_jitter']
def test_seq_pad(self):
    """SeqPad pads every frame so H and W are multiples of size_divisor."""
    results = copy.deepcopy(self.results)
    pad_module = build_from_cfg(
        dict(type='SeqPad', size_divisor=32), PIPELINES)
    results = pad_module(results)
    for result in results:
        h, w = result['img'].shape[:2]
        assert h % 32 == 0
        assert w % 32 == 0

    # Padding after a keep-ratio resize must still yield x32 dims.
    resize_module = build_from_cfg(
        dict(type='SeqResize', img_scale=(1333, 800), keep_ratio=True),
        PIPELINES)
    results = pad_module(resize_module(results))
    for result in results:
        h, w = result['img'].shape[:2]
        assert h % 32 == 0
        assert w % 32 == 0
def test_pad():
    """Pad: needs size or size_divisor; output dims become multiples of 32."""
    # neither size nor size_divisor given -> invalid config
    with pytest.raises(AssertionError):
        build_from_cfg(dict(type='Pad'), PIPELINES)

    pad_module = build_from_cfg(dict(type='Pad', size_divisor=32), PIPELINES)

    img = mmcv.imread(
        osp.join(osp.dirname(__file__), '../../../data/color.jpg'), 'color')
    original_img = copy.deepcopy(img)
    results = dict(
        img=img,
        img2=copy.deepcopy(img),
        img_shape=img.shape,
        ori_shape=img.shape,
        # Set initial values for default meta_keys
        pad_shape=img.shape,
        scale_factor=1.0,
        img_fields=['img', 'img2'])

    results = pad_module(results)
    assert np.equal(results['img'], results['img2']).all()
    # original img already divisible by 32
    assert np.equal(results['img'], original_img).all()
    assert results['img'].shape[0] % 32 == 0
    assert results['img'].shape[1] % 32 == 0

    # Padding after a keep-ratio resize must still yield x32 dims.
    resize_module = build_from_cfg(
        dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
        PIPELINES)
    results = pad_module(resize_module(results))
    assert np.equal(results['img'], results['img2']).all()
    assert results['img'].shape[0] % 32 == 0
    assert results['img'].shape[1] % 32 == 0
def test_albu_transform():
    """Albumentation ToFloat must leave the loaded image as float32."""
    data_prefix = 'tests/data/coco/'
    results = dict(image_file=osp.join(data_prefix, '000000000785.jpg'))

    # Build the pipeline steps from their configs.
    loader = build_from_cfg(dict(type='LoadImageFromFile'), PIPELINES)
    albu = build_from_cfg(
        dict(
            type='Albumentation',
            transforms=[
                dict(type='RandomBrightnessContrast', p=0.2),
                dict(type='ToFloat')
            ]), PIPELINES)

    # Execute transforms
    results = albu(loader(results))
    assert results['img'].dtype == np.float32
def test_rotate():
    """RandomRotate: degree validation, repr format and output shapes."""
    # degree must be a float or a 2-tuple: a negative scalar is invalid
    with pytest.raises(AssertionError):
        build_from_cfg(
            dict(type='RandomRotate', prob=0.5, degree=-10), PIPELINES)

    # a 3-tuple degree is also invalid
    with pytest.raises(AssertionError):
        build_from_cfg(
            dict(type='RandomRotate', prob=0.5, degree=(10., 20., 30.)),
            PIPELINES)

    transform = build_from_cfg(
        dict(type='RandomRotate', degree=10., prob=1.), PIPELINES)
    assert str(transform) == f'RandomRotate(' \
                             f'prob={1.}, ' \
                             f'degree=({-10.}, {10.}), ' \
                             f'pad_val={0}, ' \
                             f'seg_pad_val={255}, ' \
                             f'center={None}, ' \
                             f'auto_bound={False})'

    img = mmcv.imread(
        osp.join(osp.dirname(__file__), '../data/color.jpg'), 'color')
    h, w, _ = img.shape
    seg = np.array(
        Image.open(osp.join(osp.dirname(__file__), '../data/seg.png')))

    results = dict(
        img=img,
        gt_semantic_seg=seg,
        seg_fields=['gt_semantic_seg'],
        img_shape=img.shape,
        ori_shape=img.shape,
        # Set initial values for default meta_keys
        pad_shape=img.shape,
        scale_factor=1.0)

    # With prob=1 a rotation always happens; shapes must stay (h, w).
    results = transform(results)
    assert results['img'].shape[:2] == (h, w)
    assert results['gt_semantic_seg'].shape[:2] == (h, w)
def test_mosaic():
    """Mosaic: config checks, mix_results requirement, output dtypes."""
    # invalid img_scale (must be a tuple)
    with pytest.raises(AssertionError):
        build_from_cfg(dict(type='Mosaic', img_scale=640), PIPELINES)

    # invalid probability (must be within [0, 1])
    with pytest.raises(AssertionError):
        build_from_cfg(dict(type='Mosaic', prob=1.5), PIPELINES)

    img = mmcv.imread(
        osp.join(osp.dirname(__file__), '../../../data/color.jpg'), 'color')
    h, w, _ = img.shape
    gt_bboxes = create_random_bboxes(8, w, h)
    gt_bboxes_ignore = create_random_bboxes(2, w, h)

    results = dict(
        img=img,
        # TODO: add img_fields test
        bbox_fields=['gt_bboxes', 'gt_bboxes_ignore'],
        gt_labels=np.ones(gt_bboxes.shape[0], dtype=np.int64),
        gt_bboxes=gt_bboxes,
        gt_bboxes_ignore=gt_bboxes_ignore)

    mosaic_module = build_from_cfg(
        dict(type='Mosaic', img_scale=(10, 12)), PIPELINES)

    # calling without mix_results must fail
    with pytest.raises(AssertionError):
        mosaic_module(results)

    results['mix_results'] = [copy.deepcopy(results)] * 3
    results = mosaic_module(results)
    # Mosaic output is twice the configured scale.
    assert results['img'].shape[:2] == (20, 24)
    assert results['gt_labels'].shape[0] == results['gt_bboxes'].shape[0]
    assert results['gt_labels'].dtype == np.int64
    assert results['gt_bboxes'].dtype == np.float32
    assert results['gt_bboxes_ignore'].dtype == np.float32
def test_random_shift():
    """RandomShift: config validation and shape preservation after shift."""
    # test assertion for invalid shift_ratio
    with pytest.raises(AssertionError):
        transform = dict(type='RandomShift', shift_ratio=1.5)
        build_from_cfg(transform, PIPELINES)

    # test assertion for invalid max_shift_px
    with pytest.raises(AssertionError):
        transform = dict(type='RandomShift', max_shift_px=-1)
        build_from_cfg(transform, PIPELINES)

    results = dict()
    img = mmcv.imread(
        osp.join(osp.dirname(__file__), '../../../data/color.jpg'), 'color')
    results['img'] = img
    # TODO: add img_fields test
    results['bbox_fields'] = ['gt_bboxes', 'gt_bboxes_ignore']

    def create_random_bboxes(num_bboxes, img_w, img_h):
        # Random boxes with top-left in the upper-left image quadrant and
        # bottom-right in the lower-right quadrant.
        bboxes_left_top = np.random.uniform(0, 0.5, size=(num_bboxes, 2))
        bboxes_right_bottom = np.random.uniform(0.5, 1, size=(num_bboxes, 2))
        bboxes = np.concatenate((bboxes_left_top, bboxes_right_bottom), 1)
        # BUG FIX: the deprecated alias ``np.int`` was removed in
        # NumPy 1.24; use the explicit np.int64 dtype instead.
        bboxes = (bboxes * np.array([img_w, img_h, img_w, img_h])).astype(
            np.int64)
        return bboxes

    h, w, _ = img.shape
    gt_bboxes = create_random_bboxes(8, w, h)
    gt_bboxes_ignore = create_random_bboxes(2, w, h)
    results['gt_labels'] = torch.ones(gt_bboxes.shape[0])
    results['gt_bboxes'] = gt_bboxes
    results['gt_bboxes_ignore'] = gt_bboxes_ignore
    transform = dict(type='RandomShift', shift_ratio=1.0)
    random_shift_module = build_from_cfg(transform, PIPELINES)
    results = random_shift_module(results)

    assert results['img'].shape[:2] == (h, w)
    assert results['gt_labels'].shape[0] == results['gt_bboxes'].shape[0]
def test_solarize_add():
    """SolarizeAdd: config validation plus exact pixel-level outputs."""
    # magnitude must be a scalar, not a tuple
    with pytest.raises(AssertionError):
        build_from_cfg(dict(type='SolarizeAdd', magnitude=(1, 2)), PIPELINES)

    # thr must be a scalar, not a tuple
    with pytest.raises(AssertionError):
        build_from_cfg(
            dict(type='SolarizeAdd', magnitude=100, thr=(1, 2)), PIPELINES)

    # prob=0 -> the image is untouched
    results = construct_toy_data_photometric()
    solarize = build_from_cfg(
        dict(type='SolarizeAdd', magnitude=100, thr=128, prob=0.), PIPELINES)
    results = solarize(results)
    assert (results['img'] == results['ori_img']).all()

    # thr=0 -> no pixel is below the threshold, image untouched
    results = construct_toy_data_photometric()
    solarize = build_from_cfg(
        dict(type='SolarizeAdd', magnitude=100, thr=0, prob=1.), PIPELINES)
    results = solarize(results)
    assert (results['img'] == results['ori_img']).all()

    # thr=128, magnitude=100
    results = construct_toy_data_photometric()
    solarize = build_from_cfg(
        dict(type='SolarizeAdd', magnitude=100, thr=128, prob=1.), PIPELINES)
    results = solarize(results)
    expected = np.array(
        [[100, 128, 255], [101, 227, 254], [102, 129, 253]], dtype=np.uint8)
    expected = np.stack([expected] * 3, axis=-1)
    assert (results['img'] == expected).all()
    assert (results['img'] == results['img2']).all()

    # thr=100, magnitude=50
    results = construct_toy_data_photometric()
    solarize = build_from_cfg(
        dict(type='SolarizeAdd', magnitude=50, thr=100, prob=1.), PIPELINES)
    results = solarize(results)
    expected = np.array(
        [[50, 128, 255], [51, 127, 254], [52, 129, 253]], dtype=np.uint8)
    expected = np.stack([expected] * 3, axis=-1)
    assert (results['img'] == expected).all()
    assert (results['img'] == results['img2']).all()
def test_posterize():
    """Posterize: config validation plus exact pixel-level outputs."""
    # bits must be an int, not a float
    with pytest.raises(AssertionError):
        build_from_cfg(dict(type='Posterize', bits=4.5), PIPELINES)

    # bits above 8 is out of range
    with pytest.raises(AssertionError):
        build_from_cfg(dict(type='Posterize', bits=10), PIPELINES)

    # prob=0 -> the image is untouched
    results = construct_toy_data_photometric()
    posterize = build_from_cfg(
        dict(type='Posterize', bits=4, prob=0.), PIPELINES)
    results = posterize(results)
    assert (results['img'] == results['ori_img']).all()

    # bits=8 keeps all bit planes -> image untouched
    results = construct_toy_data_photometric()
    posterize = build_from_cfg(
        dict(type='Posterize', bits=8, prob=1.), PIPELINES)
    results = posterize(results)
    assert (results['img'] == results['ori_img']).all()

    # bits=1
    results = construct_toy_data_photometric()
    posterize = build_from_cfg(
        dict(type='Posterize', bits=1, prob=1.), PIPELINES)
    results = posterize(results)
    expected = np.array(
        [[0, 128, 128], [0, 0, 128], [0, 128, 128]], dtype=np.uint8)
    expected = np.stack([expected] * 3, axis=-1)
    assert (results['img'] == expected).all()
    assert (results['img'] == results['img2']).all()

    # bits=3
    results = construct_toy_data_photometric()
    posterize = build_from_cfg(
        dict(type='Posterize', bits=3, prob=1.), PIPELINES)
    results = posterize(results)
    expected = np.array(
        [[0, 128, 224], [0, 96, 224], [0, 128, 224]], dtype=np.uint8)
    expected = np.stack([expected] * 3, axis=-1)
    assert (results['img'] == expected).all()
    assert (results['img'] == results['img2']).all()
def test_rename_keys():
    """RenameKeys maps old dict keys to new ones and drops the originals."""
    results = dict(
        joints_3d=np.ones([17, 3]), joints_3d_visible=np.ones([17, 3]))
    pipeline = build_from_cfg(
        dict(
            type='RenameKeys',
            key_pairs=[('joints_3d', 'target'),
                       ('joints_3d_visible', 'target_weight')]), PIPELINES)
    results = pipeline(results)

    # The old keys are gone...
    assert 'joints_3d' not in results
    assert 'joints_3d_visible' not in results
    # ...and the new keys carry the same-shaped arrays.
    assert 'target' in results
    assert 'target_weight' in results
    assert results['target'].shape == (17, 3)
    assert results['target_weight'].shape == (17, 3)
def test_seq_normalize(self):
    """SeqNormalize must match a manual (img[..., ::-1] - mean) / std."""
    results = copy.deepcopy(self.results)
    img_norm_cfg = dict(
        mean=[123.675, 116.28, 103.53],
        std=[58.395, 57.12, 57.375],
        to_rgb=True)
    normalize = build_from_cfg(
        dict(type='SeqNormalize', **img_norm_cfg), PIPELINES)
    results = normalize(results)

    mean = np.array(img_norm_cfg['mean'])
    std = np.array(img_norm_cfg['std'])
    for i, result in enumerate(results):
        # to_rgb=True flips channel order (BGR -> RGB) before normalizing.
        expected = (self.results[i]['img'][..., ::-1] - mean) / std
        assert np.allclose(result['img'], expected)
def test_seq_color_aug(self):
    """SeqColorAug must leave each frame's shape unchanged."""
    results = copy.deepcopy(self.results)
    imgs_shape = [result['img'].shape for result in results]
    transform = dict(
        type='SeqColorAug',
        prob=[1.0, 1.0],
        rgb_var=[[-0.55919361, 0.98062831, -0.41940627],
                 [1.72091413, 0.19879334, -1.82968581],
                 [4.64467907, 4.73710203, 4.88324118]])
    seq_color_aug = build_from_cfg(transform, PIPELINES)
    results = seq_color_aug(results)

    assert results[0]['img'].shape == imgs_shape[0]
    # BUG FIX: the second frame must be checked against its own recorded
    # shape; the original compared it against frame 0's shape, which only
    # passed when both frames happened to share one shape.
    assert results[1]['img'].shape == imgs_shape[1]
def test_seq_bbox_jitter(self):
    """SeqBboxJitter emits one jittered box per input box."""
    results = copy.deepcopy(self.results)
    for res in results:
        res['gt_bboxes'] = random_boxes(1, 256)
        res['bbox_fields'] = ['gt_bboxes']

    seq_bbox_jitter = build_from_cfg(
        dict(
            type='SeqBboxJitter',
            center_jitter_factor=[0, 4.5],
            scale_jitter_factor=[0, 0.5],
            crop_size_factor=[2, 5]), PIPELINES)
    results = seq_bbox_jitter(results)

    assert results[0]['jittered_bboxes'].shape == (1, 4)
    assert results[1]['jittered_bboxes'].shape == (1, 4)
def test_mosaic():
    """Mosaic: invalid scale is rejected; output is twice the target scale."""
    # test assertion for invalid img_scale
    with pytest.raises(AssertionError):
        transform = dict(type='Mosaic', img_scale=640)
        build_from_cfg(transform, PIPELINES)

    results = dict()
    img = mmcv.imread(
        osp.join(osp.dirname(__file__), '../../../data/color.jpg'), 'color')
    results['img'] = img
    # TODO: add img_fields test
    results['bbox_fields'] = ['gt_bboxes', 'gt_bboxes_ignore']

    def create_random_bboxes(num_bboxes, img_w, img_h):
        # Random boxes with top-left in the upper-left image quadrant and
        # bottom-right in the lower-right quadrant.
        bboxes_left_top = np.random.uniform(0, 0.5, size=(num_bboxes, 2))
        bboxes_right_bottom = np.random.uniform(0.5, 1, size=(num_bboxes, 2))
        bboxes = np.concatenate((bboxes_left_top, bboxes_right_bottom), 1)
        # BUG FIX: the deprecated alias ``np.int`` was removed in
        # NumPy 1.24; use the explicit np.int64 dtype instead.
        bboxes = (bboxes * np.array([img_w, img_h, img_w, img_h])).astype(
            np.int64)
        return bboxes

    h, w, _ = img.shape
    gt_bboxes = create_random_bboxes(8, w, h)
    gt_bboxes_ignore = create_random_bboxes(2, w, h)
    results['gt_labels'] = torch.ones(gt_bboxes.shape[0])
    results['gt_bboxes'] = gt_bboxes
    results['gt_bboxes_ignore'] = gt_bboxes_ignore
    transform = dict(type='Mosaic', img_scale=(10, 12))
    mosaic_module = build_from_cfg(transform, PIPELINES)

    # test assertion for invalid mix_results
    with pytest.raises(AssertionError):
        mosaic_module(results)

    results['mix_results'] = [copy.deepcopy(results)] * 3
    results = mosaic_module(results)
    assert results['img'].shape[:2] == (20, 24)
def __call__(self, model):
    """Build an optimizer whose ``conv1`` parameters use a scaled LR.

    Args:
        model (nn.Module): Model whose named parameters are grouped; a
            ``.module`` wrapper (e.g. DataParallel) is unwrapped first.

    Returns:
        Optimizer: Built from the stored optimizer config with per-group
        learning rates.
    """
    if hasattr(model, 'module'):
        model = model.module
    # BUG FIX: ``optimizer_cfg`` was referenced below without ever being
    # defined, raising NameError at runtime. Take a copy of the stored
    # config (presumably set by the constructor's __init__ -- TODO confirm)
    # so the original dict is not mutated.
    optimizer_cfg = self.optimizer_cfg.copy()
    conv1_lr_mult = self.paramwise_cfg.get('conv1_lr_mult', 1.)
    params = []
    for name, param in model.named_parameters():
        param_group = {'params': [param]}
        # Only trainable conv1 parameters get the multiplied LR.
        if name.startswith('conv1') and param.requires_grad:
            param_group['lr'] = self.base_lr * conv1_lr_mult
        params.append(param_group)
    optimizer_cfg['params'] = params
    return build_from_cfg(optimizer_cfg, OPTIMIZERS)
def build_dataset(cfg, default_args=None):
    """Build a dataset, recursing into wrapper dataset configs.

    Args:
        cfg (dict | list | tuple): Dataset config or a sequence of them.
        default_args (dict, optional): Default init args forwarded to the
            built dataset(s). Defaults to None.

    Returns:
        Dataset: The constructed (possibly wrapped) dataset.
    """
    from .dataset_wrappers import (ConcatDataset, RepeatDataset,
                                   ClassBalancedDataset)

    # A sequence of configs builds a concatenation of the parts.
    if isinstance(cfg, (list, tuple)):
        return ConcatDataset([build_dataset(c, default_args) for c in cfg])
    if cfg['type'] == 'RepeatDataset':
        return RepeatDataset(
            build_dataset(cfg['dataset'], default_args), cfg['times'])
    if cfg['type'] == 'ClassBalancedDataset':
        return ClassBalancedDataset(
            build_dataset(cfg['dataset'], default_args),
            cfg['oversample_thr'])
    return build_from_cfg(cfg, DATASETS, default_args)
def test_seq_shift_scale_aug(self):
    """SeqShiftScaleAug resizes the two frames to their target sizes."""
    results = copy.deepcopy(self.results)
    for res in results:
        res['gt_bboxes'] = random_boxes(1, 256).numpy()
        res['bbox_fields'] = ['gt_bboxes']

    aug = build_from_cfg(
        dict(
            type='SeqShiftScaleAug',
            target_size=[127, 255],
            shift=[4, 64],
            scale=[0.05, 0.18]), PIPELINES)
    results = aug(results)

    assert results[0]['img'].shape == (127, 127, 3)
    assert results[1]['img'].shape == (255, 255, 3)
def test_seq_crop_like_siamfc(self):
    """SeqCropLikeSiamFC crops every frame to crop_size x crop_size."""
    results = copy.deepcopy(self.results)
    for res in results:
        res['gt_bboxes'] = random_boxes(1, 256)
        res['bbox_fields'] = ['gt_bboxes']

    crop = build_from_cfg(
        dict(
            type='SeqCropLikeSiamFC',
            context_amount=0.5,
            exemplar_size=127,
            crop_size=511), PIPELINES)
    results = crop(results)

    assert results[0]['img'].shape == (511, 511, 3)
    assert results[1]['img'].shape == (511, 511, 3)
def test_seq_random_crop(self):
    """SeqRandomCrop: config validation, crop shapes and shared offsets."""
    # test assertion for invalid random crop
    with pytest.raises(AssertionError):
        transform = dict(
            type='SeqRandomCrop', crop_size=(-1, 0), share_params=False)
        build_from_cfg(transform, PIPELINES)

    crop_size = (256, 384)
    transform = dict(
        type='SeqRandomCrop', crop_size=crop_size, share_params=False)
    crop_module = build_from_cfg(transform, PIPELINES)

    results = copy.deepcopy(self.results)
    for res in results:
        res['gt_bboxes'] = random_boxes(8, 256)
        res['gt_labels'] = np.random.randint(8)
        res['gt_instance_ids'] = np.random.randint(8)
        res['gt_bboxes_ignore'] = random_boxes(2, 256)

    outs = crop_module(results)
    assert len(outs) == len(results)
    # BUG FIX: assert on the transform's outputs (``outs``), not its inputs
    # (``results``); the original only passed because the crop happens to
    # mutate the input dicts in place.
    for res in outs:
        assert res['img'].shape[:2] == crop_size
        # All bboxes should be reserved after crop
        assert res['img_shape'][:2] == crop_size
        assert res['gt_bboxes'].shape[0] == 8
        assert res['gt_bboxes_ignore'].shape[0] == 2
    assert outs[0]['img_info']['crop_offsets'] != outs[1]['img_info'][
        'crop_offsets']

    # With shared params both frames must get identical crop offsets.
    crop_module.share_params = True
    outs = crop_module(results)
    assert outs[0]['img_info']['crop_offsets'] == outs[1]['img_info'][
        'crop_offsets']
def test_color_jitter():
    """ColorJitter: zero factors are a no-op; non-zero factors alter img."""
    # read test image
    img = mmcv.imread(
        osp.join(osp.dirname(__file__), '../../data/color.jpg'), 'color')
    original_img = copy.deepcopy(img)
    results = dict(
        img=img,
        img2=copy.deepcopy(img),
        img_shape=img.shape,
        ori_shape=img.shape,
        img_fields=['img', 'img2'])

    def reset_results(results, original_img):
        # Restore the image fields so each case starts from the same input.
        results['img'] = copy.deepcopy(original_img)
        results['img2'] = copy.deepcopy(original_img)
        results['img_shape'] = original_img.shape
        results['ori_shape'] = original_img.shape
        return results

    # All factors zero -> identity transform.
    colorjitter_module = build_from_cfg(
        dict(type='ColorJitter', brightness=0., contrast=0., saturation=0.),
        PIPELINES)
    results = colorjitter_module(results)
    assert np.equal(results['img'], original_img).all()
    assert np.equal(results['img'], results['img2']).all()

    # Non-zero factors -> the image is expected to change.
    results = reset_results(results, original_img)
    colorjitter_module = build_from_cfg(
        dict(
            type='ColorJitter', brightness=0.3, contrast=0.3,
            saturation=0.3), PIPELINES)
    results = colorjitter_module(results)
    assert not np.equal(results['img'], original_img).all()
def build_dataset(cfg, default_args=None):
    """Build a dataset, handling wrapper dataset configs recursively.

    Args:
        cfg (dict | list | tuple): Dataset config, or a sequence of them
            (concatenated into one dataset).
        default_args (dict, optional): Default init args forwarded to the
            built dataset(s). Defaults to None.

    Returns:
        Dataset: The constructed (possibly wrapped) dataset.
    """
    if isinstance(cfg, (list, tuple)):
        return ConcatDataset([build_dataset(c, default_args) for c in cfg])

    wrapper = cfg["type"]
    if wrapper == "RepeatDataset":
        return RepeatDataset(
            build_dataset(cfg["dataset"], default_args), cfg["times"])
    if wrapper == "ClassBalancedDataset":
        return ClassBalancedDataset(
            build_dataset(cfg["dataset"], default_args),
            cfg["oversample_thr"])
    if wrapper == "SourceBalancedDataset":
        return SourceBalancedDataset(
            build_dataset(cfg["dataset"], default_args),
            cfg["oversample_thr"])
    if isinstance(cfg.get("ann_file"), (list, tuple)):
        # Several annotation files -> concatenate per-file datasets.
        return _concat_dataset(cfg, default_args)
    return build_from_cfg(cfg, DATASETS, default_args)