def build_transform(self):
    """
    Creates a basic transformation that was used to train the models
    """
    cfg = self.cfg

    # we are loading images with OpenCV, so we don't need to convert them
    # to BGR, they are already! So all we need to do is to normalize
    # by 255 if we want to convert to BGR255 format, or flip the channels
    # if we want it to be in RGB in [0-1] range.
    if cfg.INPUT.TO_BGR255:
        to_bgr_transform = T.Lambda(lambda x: x * 255)
    else:
        to_bgr_transform = T.Lambda(lambda x: x[[2, 1, 0]])

    normalize_transform = T.ImageNormalize(
        mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD
    )
    min_size = cfg.INPUT.MIN_SIZE_TEST
    max_size = cfg.INPUT.MAX_SIZE_TEST
    transform = T.Compose(
        [
            T.ToPILImage(),
            Resize(min_size, max_size),
            T.ToTensor(),
            to_bgr_transform,
            normalize_transform,
        ]
    )
    return transform
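# A small, self-contained sketch of the two channel-handling branches above.
# Assumption: plain NumPy is used here purely to illustrate the arithmetic;
# in the real pipeline the same lambdas run on the CHW tensor produced by
# T.ToTensor() inside the Compose.
import numpy as np

chw = np.arange(12, dtype=np.float32).reshape(3, 2, 2) / 255.0  # fake CHW image in [0, 1]
bgr255 = chw * 255        # TO_BGR255 branch: keep BGR channel order, rescale to [0, 255]
rgb01 = chw[[2, 1, 0]]    # other branch: flip the channel axis BGR -> RGB, stay in [0, 1]
assert rgb01.shape == chw.shape and bgr255.max() <= 255.0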
def get_transform(params, gray=False, mask=False):
    transform_ = []
    # resize
    transform_.append(
        transform.Resize((params['load_h'], params['load_w']), Image.BICUBIC))
    # flip
    if params['flip']:
        transform_.append(transform.Lambda(lambda img: transform.hflip(img)))
    if gray:
        transform_.append(transform.Gray())
    if mask:
        transform_.append(transform.ImageNormalize([0.], [1.]))
    else:
        if not gray:
            transform_.append(
                transform.ImageNormalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]))
        else:
            transform_.append(transform.ImageNormalize([0.5], [0.5]))
    return transform.Compose(transform_)
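# Minimal usage sketch for get_transform above. Assumptions: `transform` is
# jittor.transform and `Image` is PIL.Image, as the surrounding snippets
# suggest; the params values below are illustrative only (the real dict is
# produced elsewhere in the repo).
params = {'load_h': 256, 'load_w': 256, 'flip': True}
img_tfm = get_transform(params)                          # 3-channel image: (x - 0.5) / 0.5
mask_tfm = get_transform(params, gray=True, mask=True)   # grayscale mask: mean 0, std 1 (no shift/scale)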
def build_transform():
    # keep the OpenCV-loaded BGR image and rescale it to [0, 255], or flip
    # the channels to RGB and stay in [0, 1], depending on the config
    if cfg.INPUT.TO_BGR255:
        to_bgr_transform = T.Lambda(lambda x: x * 255)
    else:
        to_bgr_transform = T.Lambda(lambda x: x[[2, 1, 0]])
    normalize_transform = T.Normalize(mean=cfg.INPUT.PIXEL_MEAN,
                                      std=cfg.INPUT.PIXEL_STD)
    min_size = cfg.INPUT.MIN_SIZE_TEST
    max_size = cfg.INPUT.MAX_SIZE_TEST
    transform = T.Compose([
        T.ToPILImage(),
        Resize(min_size, max_size),
        T.ToTensor(),
        to_bgr_transform,
        normalize_transform,
    ])
    return transform
def test_lambda(self):
    trans = transform.Lambda(lambda x: x.add(10))
    x = jt.random([10])
    y = trans(x)
    self.assertTrue(np.allclose(y.data, jt.add(x, 10).data))