def __init__(self, quality, mode, maxSkip=0, joint_transform_list=None,
             transform=None, target_transform=None, dump_images=False,
             class_uniform_pct=0, class_uniform_tile=0, test=False,
             cv_split=None, scf=None, hardnm=0):
    """CamVid dataset with optional class-uniform sampling.

    Loads the (image, mask) file lists, optionally computes or loads the
    cached per-class centroids used for class-uniform sampling, and builds
    the first epoch.

    Args:
        quality: dataset quality selector forwarded to make_dataset.
        mode: dataset split ('train', 'val', ...).
        maxSkip: if > 0, frame-skip augmented images are loaded and their
            centroids merged into self.centroids.
        joint_transform_list: joint image+mask transforms.
        transform: image-only transform.
        target_transform: mask-only transform.
        dump_images: debug-image dumping flag (consumed elsewhere).
        class_uniform_pct: fraction of class-uniform samples per epoch;
            0 skips centroid computation entirely.
        class_uniform_tile: tile size used when computing centroids.
        test: unused here; kept for interface compatibility.
        cv_split: cross-validation split index (may be None).
        scf: unused here; kept for interface compatibility.
        hardnm: hard-negative-mining selector forwarded to make_dataset.
    """
    self.quality = quality
    self.mode = mode
    self.maxSkip = maxSkip
    self.joint_transform_list = joint_transform_list
    self.transform = transform
    self.target_transform = target_transform
    self.dump_images = dump_images
    self.class_uniform_pct = class_uniform_pct
    self.class_uniform_tile = class_uniform_tile
    self.scf = scf
    self.hardnm = hardnm
    self.cv_split = cv_split
    self.centroids = []

    self.imgs, self.aug_imgs = make_dataset(quality, mode, self.maxSkip,
                                            cv_split=self.cv_split,
                                            hardnm=self.hardnm)
    assert len(self.imgs), 'Found 0 images, please check the data set'

    # Centroids for GT data
    if self.class_uniform_pct > 0:
        json_fn = 'camvid_tile{}_cv{}_{}.json'.format(
            self.class_uniform_tile, self.cv_split, self.mode)
        if os.path.isfile(json_fn):
            with open(json_fn, 'r') as json_data:
                centroids = json.load(json_data)
            # JSON object keys are strings; restore integer class ids.
            self.centroids = {int(idx): centroids[idx] for idx in centroids}
        else:
            self.centroids = uniform.class_centroids_all(
                self.imgs,
                num_classes,
                id2trainid=None,
                tile_size=class_uniform_tile)
            with open(json_fn, 'w') as outfile:
                json.dump(self.centroids, outfile, indent=4)

        # BUGFIX: deep-copy instead of the former shallow .copy().  A
        # shallow copy shares the inner per-class lists, so the extend()
        # below silently polluted self.fine_centroids with the augmented
        # centroids as well.  The *WithPos variant of this class already
        # uses copy.deepcopy here.
        self.fine_centroids = copy.deepcopy(self.centroids)

        # Centroids for the frame-skip augmented data, merged on top of
        # the fine centroids.
        if self.maxSkip > 0:
            json_fn = 'camvid_tile{}_cv{}_{}_skip{}.json'.format(
                self.class_uniform_tile, self.cv_split, self.mode,
                self.maxSkip)
            if os.path.isfile(json_fn):
                with open(json_fn, 'r') as json_data:
                    centroids = json.load(json_data)
                self.aug_centroids = {int(idx): centroids[idx]
                                      for idx in centroids}
            else:
                self.aug_centroids = uniform.class_centroids_all(
                    self.aug_imgs,
                    num_classes,
                    id2trainid=None,
                    tile_size=class_uniform_tile)
                with open(json_fn, 'w') as outfile:
                    json.dump(self.aug_centroids, outfile, indent=4)

            for class_id in range(num_classes):
                self.centroids[class_id].extend(self.aug_centroids[class_id])

    self.build_epoch()
def __init__(self, quality, mode, joint_transform_list=None, transform=None,
             target_transform=None, target_aux_transform=None, image_in=False,
             dump_images=False, class_uniform_pct=0, class_uniform_tile=768,
             test=False):
    """Mapillary dataset constructor.

    class_uniform_pct: percent of class-uniform samples; 1.0 means fully
        uniform, 0.0 means fully random.
    class_uniform_tile: tile size used for class-uniform sampling.
    """
    gen_id_to_ignore()

    # Plain capture of the configuration onto the instance.
    self.quality = quality
    self.mode = mode
    self.joint_transform_list = joint_transform_list
    self.transform = transform
    self.target_transform = target_transform
    self.image_in = image_in
    self.target_aux_transform = target_aux_transform
    self.dump_images = dump_images
    self.class_uniform_pct = class_uniform_pct
    self.class_uniform_tile = class_uniform_tile
    self.id2name = gen_colormap()
    self.imgs_uniform = None

    # Discover all images for this quality/mode.
    self.imgs = make_dataset(quality, mode)
    if not self.imgs:
        raise RuntimeError('Found 0 images, please check the data set')

    if test:
        # Keep a small random subset for quick testing runs.
        np.random.shuffle(self.imgs)
        self.imgs = self.imgs[:200]

    if self.class_uniform_pct:
        cache_fn = f'mapillary_tile{self.class_uniform_tile}.json'
        if os.path.isfile(cache_fn):
            # Reuse previously computed centroids from the JSON cache;
            # JSON keys come back as strings, so restore int class ids.
            with open(cache_fn, 'r') as fp:
                raw = json.load(fp)
            self.centroids = {int(key): raw[key] for key in raw}
        else:
            # centroids is a dict (indexed by class) of lists of centroids.
            self.centroids = uniform.class_centroids_all(
                self.imgs,
                num_classes,
                id2trainid=None,
                tile_size=self.class_uniform_tile)
            with open(cache_fn, 'w') as fp:
                json.dump(self.centroids, fp, indent=4)
    else:
        self.centroids = []

    self.build_epoch()
def __init__(self, quality, mode, maxSkip=0, joint_transform_list=None,
             transform=None, target_transform=None, dump_images=False,
             class_uniform_pct=0, class_uniform_tile=0, test=False,
             cv_split=None, scf=None, hardnm=0):
    """KITTI dataset with optional class-uniform sampling.

    Loads the image list (a separate loader for 'test' mode), optionally
    computes or loads the cached per-class centroids, and builds the first
    epoch.

    Args:
        quality: dataset quality selector forwarded to the loaders.
        mode: dataset split; 'test' selects make_test_dataset.
        maxSkip: frame-skip amount forwarded to the loaders.
        joint_transform_list: joint image+mask transforms.
        transform: image-only transform.
        target_transform: mask-only transform.
        dump_images: debug-image dumping flag (consumed elsewhere).
        class_uniform_pct: fraction of class-uniform samples per epoch;
            0 skips centroid computation entirely.
        class_uniform_tile: tile size used when computing centroids.
        test: unused here; kept for interface compatibility.
        cv_split: cross-validation split index; must be < CV_SPLITS.
        scf: if truthy, use the scf centroid cache / kitti_uniform path.
        hardnm: hard-negative-mining selector forwarded to make_dataset.
    """
    self.quality = quality
    self.mode = mode
    self.maxSkip = maxSkip
    self.joint_transform_list = joint_transform_list
    self.transform = transform
    self.target_transform = target_transform
    self.dump_images = dump_images
    self.class_uniform_pct = class_uniform_pct
    self.class_uniform_tile = class_uniform_tile
    self.scf = scf
    self.hardnm = hardnm

    if cv_split:
        self.cv_split = cv_split
        assert cv_split < cfg.DATASET.CV_SPLITS, \
            'expected cv_split {} to be < CV_SPLITS {}'.format(
                cv_split, cfg.DATASET.CV_SPLITS)
    else:
        self.cv_split = 0

    # 'test' mode has no ground truth, hence the dedicated loader.
    if self.mode == 'test':
        self.imgs, _ = make_test_dataset(quality, mode, self.maxSkip,
                                         cv_split=self.cv_split)
    else:
        self.imgs, _ = make_dataset(quality, mode, self.maxSkip,
                                    cv_split=self.cv_split,
                                    hardnm=self.hardnm)
    assert len(self.imgs), 'Found 0 images, please check the data set'

    # Centroids for GT data
    if self.class_uniform_pct > 0:
        if self.scf:
            json_fn = 'kitti_tile{}_cv{}_scf.json'.format(
                self.class_uniform_tile, self.cv_split)
        else:
            json_fn = 'kitti_tile{}_cv{}_{}_hardnm{}.json'.format(
                self.class_uniform_tile, self.cv_split, self.mode,
                self.hardnm)
        if os.path.isfile(json_fn):
            with open(json_fn, 'r') as json_data:
                centroids = json.load(json_data)
            # JSON object keys are strings; restore integer class ids.
            self.centroids = {int(idx): centroids[idx] for idx in centroids}
        else:
            # CLEANUP: removed a leftover debug print ('!!!!...') that was
            # emitted before the non-scf centroid computation.
            if self.scf:
                self.centroids = kitti_uniform.class_centroids_all(
                    self.imgs,
                    num_classes,
                    id2trainid=id_to_trainid,
                    tile_size=class_uniform_tile)
            else:
                self.centroids = uniform.class_centroids_all(
                    self.imgs,
                    num_classes,
                    id2trainid=id_to_trainid,
                    tile_size=class_uniform_tile)
            with open(json_fn, 'w') as outfile:
                json.dump(self.centroids, outfile, indent=4)

    self.build_epoch()
def __init__(self, mode, maxSkip=0, joint_transform_list=None,
             sliding_crop=None, transform=None, target_transform=None,
             target_aux_transform=None, dump_images=False, cv_split=None,
             class_uniform_pct=0.5, class_uniform_tile=1024, test=False,
             coarse_boost_classes=None):
    """BDD100K dataset with class-uniform sampling.

    Loads fine (and frame-skip augmented) image lists, computes or loads
    the cached per-class centroids, and builds the first epoch.

    Args:
        mode: dataset split ('train', 'val', ...).
        maxSkip: frame-skip amount forwarded to make_dataset.
        joint_transform_list: joint image+mask transforms.
        sliding_crop: sliding-crop transform (consumed elsewhere).
        transform: image-only transform.
        target_transform: mask-only transform.
        target_aux_transform: auxiliary-target transform.
        dump_images: debug-image dumping flag (consumed elsewhere).
        cv_split: cross-validation split index; must be < CV_SPLITS.
        class_uniform_pct: fraction of class-uniform samples per epoch.
        class_uniform_tile: tile size used when computing centroids.
        test: unused here; kept for interface compatibility.
        coarse_boost_classes: stored; not consumed in this constructor.
    """
    self.mode = mode
    self.maxSkip = maxSkip
    self.joint_transform_list = joint_transform_list
    self.sliding_crop = sliding_crop
    self.transform = transform
    self.target_transform = target_transform
    self.target_aux_transform = target_aux_transform
    self.dump_images = dump_images
    self.class_uniform_pct = class_uniform_pct
    self.class_uniform_tile = class_uniform_tile
    self.coarse_boost_classes = coarse_boost_classes

    if cv_split:
        self.cv_split = cv_split
        assert cv_split < cfg.DATASET.CV_SPLITS, \
            'expected cv_split {} to be < CV_SPLITS {}'.format(
                cv_split, cfg.DATASET.CV_SPLITS)
    else:
        self.cv_split = 0

    self.imgs, self.aug_imgs = make_dataset(mode, self.maxSkip,
                                            cv_split=self.cv_split)
    assert len(self.imgs), 'Found 0 images, please check the data set'

    # Centroids for fine data
    json_fn = 'bdd100k_{}_cv{}_tile{}.json'.format(self.mode,
                                                   self.cv_split,
                                                   self.class_uniform_tile)
    if os.path.isfile(json_fn):
        with open(json_fn, 'r') as json_data:
            centroids = json.load(json_data)
        # JSON object keys are strings; restore integer class ids.
        self.centroids = {int(idx): centroids[idx] for idx in centroids}
    else:
        self.centroids = uniform.class_centroids_all(
            self.imgs,
            num_classes,
            id2trainid=trainid_to_trainid,
            tile_size=class_uniform_tile)
        with open(json_fn, 'w') as outfile:
            json.dump(self.centroids, outfile, indent=4)

    # FIX: deep copy rather than the former shallow .copy().  Nothing
    # extends self.centroids in this constructor today, so the shallow
    # copy was only a latent aliasing hazard, but the sibling variants
    # that do merge augmented centroids use copy.deepcopy -- keep the
    # snapshot semantics consistent.
    self.fine_centroids = copy.deepcopy(self.centroids)
    self.build_epoch()
def __init__(self, quality, mode, maxSkip=0, joint_transform_list=None,
             sliding_crop=None, transform=None, target_transform=None,
             dump_images=False, cv_split=None, class_uniform_pct=0.5,
             class_uniform_tile=1024, test=False, coarse_boost_classes=None,
             edge_map=False):
    """Cityscapes dataset with class-uniform sampling.

    Loads fine (and frame-skip augmented) image lists, computes or loads
    per-class centroids for fine, augmented, and optionally coarse data,
    merges them, and builds the first epoch.

    Args:
        quality: dataset quality selector forwarded to make_dataset.
        mode: dataset split ('train', 'val', ...).
        maxSkip: if > 0, augmented centroids are merged into
            self.centroids.
        joint_transform_list: joint image+mask transforms.
        sliding_crop: sliding-crop transform (consumed elsewhere).
        transform: image-only transform.
        target_transform: mask-only transform.
        dump_images: debug-image dumping flag (consumed elsewhere).
        cv_split: cross-validation split index; must be < CV_SPLITS.
        class_uniform_pct: fraction of class-uniform samples per epoch.
        class_uniform_tile: tile size used when computing centroids.
        test: unused here; kept for interface compatibility.
        coarse_boost_classes: class ids whose centroid lists are boosted
            with coarse-data centroids (None disables).
        edge_map: stored; not consumed in this constructor.
    """
    self.quality = quality
    self.mode = mode
    self.maxSkip = maxSkip
    self.joint_transform_list = joint_transform_list
    self.sliding_crop = sliding_crop
    self.transform = transform
    self.target_transform = target_transform
    self.dump_images = dump_images
    self.class_uniform_pct = class_uniform_pct
    self.class_uniform_tile = class_uniform_tile
    self.coarse_boost_classes = coarse_boost_classes
    self.edge_map = edge_map

    if cv_split:
        self.cv_split = cv_split
        assert cv_split < cfg.DATASET.CV_SPLITS, \
            'expected cv_split {} to be < CV_SPLITS {}'.format(
                cv_split, cfg.DATASET.CV_SPLITS)
    else:
        self.cv_split = 0

    self.imgs, self.aug_imgs = make_dataset(quality, mode, self.maxSkip,
                                            cv_split=self.cv_split)
    assert len(self.imgs), 'Found 0 images, please check the data set'

    # Centroids for fine data
    json_fn = 'cityscapes_{}_cv{}_tile{}.json'.format(
        self.mode, self.cv_split, self.class_uniform_tile)
    if os.path.isfile(json_fn):
        with open(json_fn, 'r') as json_data:
            centroids = json.load(json_data)
        # JSON object keys are strings; restore integer class ids.
        self.centroids = {int(idx): centroids[idx] for idx in centroids}
    else:
        self.centroids = uniform.class_centroids_all(
            self.imgs,
            num_classes,
            id2trainid=id_to_trainid,
            tile_size=class_uniform_tile)
        with open(json_fn, 'w') as outfile:
            json.dump(self.centroids, outfile, indent=4)

    # BUGFIX: deep copy instead of the former shallow .copy().  A shallow
    # copy shares the inner per-class lists, so the extend() calls below
    # (augmented and coarse-boost centroids) also mutated
    # self.fine_centroids.  The *WithPos variant of this class already
    # uses copy.deepcopy here.
    self.fine_centroids = copy.deepcopy(self.centroids)

    # Centroids for augmented data
    if self.maxSkip > 0:
        json_fn = 'cityscapes_{}_cv{}_tile{}_skip{}.json'.format(
            self.mode, self.cv_split, self.class_uniform_tile, self.maxSkip)
        if os.path.isfile(json_fn):
            with open(json_fn, 'r') as json_data:
                centroids = json.load(json_data)
            self.aug_centroids = {int(idx): centroids[idx]
                                  for idx in centroids}
        else:
            self.aug_centroids = uniform.class_centroids_all(
                self.aug_imgs,
                num_classes,
                id2trainid=id_to_trainid,
                tile_size=class_uniform_tile)
            with open(json_fn, 'w') as outfile:
                json.dump(self.aug_centroids, outfile, indent=4)

        # add centroids for augmented data
        # TODO: later, we can also pick classes for augmented data
        for class_id in range(num_classes):
            self.centroids[class_id].extend(self.aug_centroids[class_id])

    # Add in coarse centroids for certain classes
    if self.coarse_boost_classes is not None:
        json_fn = 'cityscapes_coarse_{}_tile{}.json'.format(
            self.mode, self.class_uniform_tile)
        if os.path.isfile(json_fn):
            with open(json_fn, 'r') as json_data:
                centroids = json.load(json_data)
            self.coarse_centroids = {int(idx): centroids[idx]
                                     for idx in centroids}
        else:
            # NOTE(review): the *WithPos variant hard-codes 'train' here
            # instead of `mode` -- confirm which is intended.
            self.coarse_imgs, _ = make_dataset('coarse', mode, cv_split=0)
            self.coarse_centroids = uniform.class_centroids_all(
                self.coarse_imgs,
                num_classes,
                id2trainid=id_to_trainid,
                tile_size=class_uniform_tile)
            with open(json_fn, 'w') as outfile:
                json.dump(self.coarse_centroids, outfile, indent=4)

        # add centroids for boost classes
        for class_id in self.coarse_boost_classes:
            self.centroids[class_id].extend(
                self.coarse_centroids[class_id])

    self.build_epoch()
def __init__(self, mode, maxSkip=0, joint_transform_list=None,
             sliding_crop=None, transform=None, target_transform=None,
             target_aux_transform=None, dump_images=False, cv_split=None,
             class_uniform_pct=0.5, class_uniform_tile=1024, test=False,
             coarse_boost_classes=None, pos_rfactor=8):
    """BDD100K dataset with class-uniform sampling plus positional maps.

    Builds per-pixel row/column index images (self.pos_h / self.pos_w),
    loads the fine and augmented image lists, computes or loads the
    cached per-class centroids, and builds the first epoch.
    """
    self.mode = mode
    self.maxSkip = maxSkip
    self.joint_transform_list = joint_transform_list
    self.sliding_crop = sliding_crop
    self.transform = transform
    self.target_transform = target_transform
    self.target_aux_transform = target_aux_transform
    self.dump_images = dump_images
    self.class_uniform_pct = class_uniform_pct
    self.class_uniform_tile = class_uniform_tile
    self.coarse_boost_classes = coarse_boost_classes
    self.pos_rfactor = pos_rfactor

    # position information: 1024x2048 maps of downscaled row (//8) and
    # column (//16) indices, stored as 8-bit 'L' images.  The divisors
    # are fixed constants here (pos_rfactor is only stored).
    row_idx = torch.arange(0, 1024).unsqueeze(0).unsqueeze(2)
    col_idx = torch.arange(0, 2048).unsqueeze(0).unsqueeze(1)
    self.pos_h = row_idx.expand(-1, -1, 2048) // 8
    self.pos_w = col_idx.expand(-1, 1024, -1) // 16
    self.pos_h = self.pos_h[0].byte().numpy()
    self.pos_w = self.pos_w[0].byte().numpy()
    # pos index to image
    self.pos_h = Image.fromarray(self.pos_h, mode="L")
    self.pos_w = Image.fromarray(self.pos_w, mode="L")

    if cv_split:
        self.cv_split = cv_split
        assert cv_split < cfg.DATASET.CV_SPLITS, \
            'expected cv_split {} to be < CV_SPLITS {}'.format(
                cv_split, cfg.DATASET.CV_SPLITS)
    else:
        self.cv_split = 0

    self.imgs, self.aug_imgs = make_dataset(mode, self.maxSkip,
                                            cv_split=self.cv_split)
    assert len(self.imgs), 'Found 0 images, please check the data set'

    # Fine-data centroids, cached on disk as JSON.
    cache_fn = 'bdd100k_{}_cv{}_tile{}.json'.format(
        self.mode, self.cv_split, self.class_uniform_tile)
    if os.path.isfile(cache_fn):
        # JSON keys come back as strings; restore integer class ids.
        with open(cache_fn, 'r') as fp:
            loaded = json.load(fp)
        self.centroids = {int(key): loaded[key] for key in loaded}
    else:
        self.centroids = uniform.class_centroids_all(
            self.imgs,
            num_classes,
            id2trainid=trainid_to_trainid,
            tile_size=class_uniform_tile)
        with open(cache_fn, 'w') as fp:
            json.dump(self.centroids, fp, indent=4)

    self.fine_centroids = self.centroids.copy()
    self.build_epoch()
def __init__(self, quality, mode, maxSkip=0, joint_transform_list=None,
             transform=None, target_transform=None,
             target_aux_transform=None, dump_images=False,
             class_uniform_pct=0, class_uniform_tile=0, test=False,
             cv_split=None, scf=None, hardnm=0, pos_rfactor=8):
    """CamVid dataset with class-uniform sampling plus positional maps.

    Builds per-pixel row/column index images (self.pos_h / self.pos_w),
    loads the fine and augmented image lists, computes or loads the
    cached per-class centroids (merging augmented centroids when
    maxSkip > 0), and builds the first epoch.
    """
    self.quality = quality
    self.mode = mode
    self.maxSkip = maxSkip
    self.joint_transform_list = joint_transform_list
    self.transform = transform
    self.target_transform = target_transform
    self.target_aux_transform = target_aux_transform
    self.dump_images = dump_images
    self.class_uniform_pct = class_uniform_pct
    self.class_uniform_tile = class_uniform_tile
    self.scf = scf
    self.hardnm = hardnm
    self.cv_split = cv_split
    self.centroids = []
    self.pos_rfactor = pos_rfactor

    # position information: 1024x2048 maps of downscaled row (//8) and
    # column (//16) indices, converted to 8-bit 'L' images.
    row_idx = torch.arange(0, 1024).unsqueeze(0).unsqueeze(2)
    col_idx = torch.arange(0, 2048).unsqueeze(0).unsqueeze(1)
    self.pos_h = row_idx.expand(-1, -1, 2048) // 8
    self.pos_w = col_idx.expand(-1, 1024, -1) // 16
    self.pos_h = self.pos_h[0].byte().numpy()
    self.pos_w = self.pos_w[0].byte().numpy()
    # pos index to image
    self.pos_h = Image.fromarray(self.pos_h, mode="L")
    self.pos_w = Image.fromarray(self.pos_w, mode="L")

    self.imgs, self.aug_imgs = make_dataset(quality, mode, self.maxSkip,
                                            cv_split=self.cv_split,
                                            hardnm=self.hardnm)
    assert len(self.imgs), 'Found 0 images, please check the data set'

    # Centroids for GT data
    if self.class_uniform_pct > 0:
        cache_fn = 'camvid_tile{}_cv{}_{}.json'.format(
            self.class_uniform_tile, self.cv_split, self.mode)
        if os.path.isfile(cache_fn):
            # JSON keys come back as strings; restore integer class ids.
            with open(cache_fn, 'r') as fp:
                loaded = json.load(fp)
            self.centroids = {int(key): loaded[key] for key in loaded}
        else:
            self.centroids = uniform.class_centroids_all(
                self.imgs,
                num_classes,
                id2trainid=None,
                tile_size=class_uniform_tile)
            with open(cache_fn, 'w') as fp:
                json.dump(self.centroids, fp, indent=4)

        # Deep snapshot of the fine centroids so the merge below cannot
        # mutate it through shared inner lists.
        self.fine_centroids = copy.deepcopy(self.centroids)

        if self.maxSkip > 0:
            cache_fn = 'camvid_tile{}_cv{}_{}_skip{}.json'.format(
                self.class_uniform_tile, self.cv_split, self.mode,
                self.maxSkip)
            if os.path.isfile(cache_fn):
                with open(cache_fn, 'r') as fp:
                    loaded = json.load(fp)
                self.aug_centroids = {int(key): loaded[key]
                                      for key in loaded}
            else:
                self.aug_centroids = uniform.class_centroids_all(
                    self.aug_imgs,
                    num_classes,
                    id2trainid=None,
                    tile_size=class_uniform_tile)
                with open(cache_fn, 'w') as fp:
                    json.dump(self.aug_centroids, fp, indent=4)

            # Merge augmented centroids into the working set.
            for class_id in range(num_classes):
                self.centroids[class_id].extend(
                    self.aug_centroids[class_id])

    self.build_epoch()
def __init__(self, quality, mode, maxSkip=0, joint_transform_list=None,
             sliding_crop=None, transform=None, target_transform=None,
             target_aux_transform=None, dump_images=False, cv_split=None,
             class_uniform_pct=0.5, class_uniform_tile=1024, test=False,
             coarse_boost_classes=None, pos_rfactor=8):
    """Cityscapes dataset with class-uniform sampling plus positional maps.

    Builds per-pixel row/column index images (self.pos_h / self.pos_w),
    loads fine and augmented image lists, computes or loads per-class
    centroids for fine, augmented, and optionally coarse data, merges
    them, and builds the first epoch.
    """
    print("######## CityScapesUniformWithPos #########")
    self.quality = quality
    self.mode = mode
    self.maxSkip = maxSkip
    self.joint_transform_list = joint_transform_list
    self.sliding_crop = sliding_crop
    self.transform = transform
    self.target_transform = target_transform
    self.target_aux_transform = target_aux_transform
    self.dump_images = dump_images
    self.class_uniform_pct = class_uniform_pct
    self.class_uniform_tile = class_uniform_tile
    self.coarse_boost_classes = coarse_boost_classes
    self.pos_rfactor = pos_rfactor

    # position information: 1024x2048 maps of downscaled row (//8) and
    # column (//16) indices, converted to 8-bit 'L' images.
    row_idx = torch.arange(0, 1024).unsqueeze(0).unsqueeze(2)
    col_idx = torch.arange(0, 2048).unsqueeze(0).unsqueeze(1)
    self.pos_h = row_idx.expand(-1, -1, 2048) // 8
    self.pos_w = col_idx.expand(-1, 1024, -1) // 16
    self.pos_h = self.pos_h[0].byte().numpy()
    self.pos_w = self.pos_w[0].byte().numpy()
    # pos index to image
    self.pos_h = Image.fromarray(self.pos_h, mode="L")
    self.pos_w = Image.fromarray(self.pos_w, mode="L")

    if cv_split:
        self.cv_split = cv_split
        assert cv_split < cfg.DATASET.CV_SPLITS, \
            'expected cv_split {} to be < CV_SPLITS {}'.format(
                cv_split, cfg.DATASET.CV_SPLITS)
    else:
        self.cv_split = 0

    self.imgs, self.aug_imgs = make_dataset(quality, mode, self.maxSkip,
                                            cv_split=self.cv_split)
    assert len(self.imgs), 'Found 0 images, please check the data set'

    # Fine-data centroids, cached on disk as JSON.
    cache_fn = 'cityscapes_{}_cv{}_tile{}.json'.format(
        self.mode, self.cv_split, self.class_uniform_tile)
    if os.path.isfile(cache_fn):
        # JSON keys come back as strings; restore integer class ids.
        with open(cache_fn, 'r') as fp:
            loaded = json.load(fp)
        self.centroids = {int(key): loaded[key] for key in loaded}
    else:
        self.centroids = uniform.class_centroids_all(
            self.imgs,
            num_classes,
            id2trainid=id_to_trainid,
            tile_size=class_uniform_tile)
        with open(cache_fn, 'w') as fp:
            json.dump(self.centroids, fp, indent=4)

    # Deep snapshot of the fine centroids so the merges below cannot
    # mutate it through shared inner lists.
    self.fine_centroids = copy.deepcopy(self.centroids)

    # Augmented-data centroids, merged into the working set.
    if self.maxSkip > 0:
        cache_fn = 'cityscapes_{}_cv{}_tile{}_skip{}.json'.format(
            self.mode, self.cv_split, self.class_uniform_tile, self.maxSkip)
        if os.path.isfile(cache_fn):
            with open(cache_fn, 'r') as fp:
                loaded = json.load(fp)
            self.aug_centroids = {int(key): loaded[key] for key in loaded}
        else:
            self.aug_centroids = uniform.class_centroids_all(
                self.aug_imgs,
                num_classes,
                id2trainid=id_to_trainid,
                tile_size=class_uniform_tile)
            with open(cache_fn, 'w') as fp:
                json.dump(self.aug_centroids, fp, indent=4)

        # add centroids for augmented data
        # TODO: later, we can also pick classes for augmented data
        for class_id in range(num_classes):
            self.centroids[class_id].extend(self.aug_centroids[class_id])

    # Coarse-data centroids for selected boost classes.
    if self.coarse_boost_classes is not None:
        cache_fn = 'cityscapes_coarse_{}_tile{}.json'.format(
            self.mode, self.class_uniform_tile)
        if os.path.isfile(cache_fn):
            with open(cache_fn, 'r') as fp:
                loaded = json.load(fp)
            self.coarse_centroids = {int(key): loaded[key]
                                     for key in loaded}
        else:
            self.coarse_imgs, _ = make_dataset('coarse', 'train',
                                               cv_split=0)
            self.coarse_centroids = uniform.class_centroids_all(
                self.coarse_imgs,
                num_classes,
                id2trainid=id_to_trainid,
                tile_size=class_uniform_tile)
            with open(cache_fn, 'w') as fp:
                json.dump(self.coarse_centroids, fp, indent=4)

        # add centroids for boost classes
        for class_id in self.coarse_boost_classes:
            self.centroids[class_id].extend(
                self.coarse_centroids[class_id])

    self.build_epoch()