def get_dict(name, data_path, nasbench_file, num_samples, num_operations, val_acc_threshold=0., seed=1234, **kwargs):
    """Load NAS-Bench-101 records into a list of dataset dicts and register metadata.

    Args:
        name: dataset name under which metadata is registered in MetadataCatalog.
        data_path: directory containing the nasbench database file.
        nasbench_file: filename of the NAS-Bench-101 database.
        num_samples: forwarded to ``api.NASBench``.
        num_operations: collection of allowed operation counts; architectures
            whose operation count is not in this collection are skipped.
        val_acc_threshold: architectures with validation accuracy strictly below
            this value are skipped (default 0., i.e. keep all).
        seed: RNG seed forwarded to ``api.NASBench``.
        **kwargs: unused; accepted for interface compatibility.

    Returns:
        List of per-architecture query dicts, each augmented with an ``"id"``
        key (the architecture's index in the full hash iteration order).
    """
    nasbench_file_abs = os.path.join(data_path, nasbench_file)
    print(f'Loading nasbench101: {nasbench_file_abs}')
    nasbench = api.NASBench(nasbench_file_abs, num_samples=num_samples, seed=seed)
    all_keys = list(nasbench.hash_iterator())
    dataset_dicts = []
    # Track extrema of validation accuracy plus the corresponding records.
    min_val_acc = float('inf')
    max_val_acc = 0
    min_data = max_data = None
    for idx, key in enumerate(all_keys):
        fixed_stat, computed_stat = nasbench.get_metrics_from_hash(key)
        # Filter by the number of operations in the architecture.
        if len(fixed_stat['module_operations']) not in num_operations:
            continue
        arch = api.ModelSpec(matrix=fixed_stat['module_adjacency'],
                             ops=fixed_stat['module_operations'])
        data = nasbench.query(arch)
        if data['validation_accuracy'] < val_acc_threshold:
            continue
        if min_val_acc > data['validation_accuracy']:
            min_val_acc = data['validation_accuracy']
            min_data = data
        if max_val_acc < data['validation_accuracy']:
            max_val_acc = data['validation_accuracy']
            max_data = data
        # NOTE: ids are indices into the *unfiltered* hash order, so they may
        # be non-contiguous after filtering.
        data["id"] = idx
        dataset_dicts.append(data)
    meta_dict = {
        'num_samples': len(dataset_dicts),
        'num_operations': num_operations,
        'min_val_acc': min_val_acc,
        'max_val_acc': max_val_acc,
        'min_data': min_data,
        'max_data': max_data,
    }
    print(f'min_val_acc: {min_val_acc}, max_val_acc: {max_val_acc}')
    # Register metadata only once per dataset name.
    if name not in MetadataCatalog.list():
        MetadataCatalog.get(name).set(**meta_dict)
    return dataset_dicts
def register_dataset(self):
    """Register the roboflow train/val/test COCO datasets (idempotent).

    Registration must happen exactly once per process, so any split whose
    name is already in the MetadataCatalog is skipped.
    """
    # NOTE(review): the image roots start with "/" (absolute) while the
    # annotation paths are relative — looks inconsistent; confirm intended.
    splits = (
        ("wb_train", "roboflow/train/_annotations.coco.json", "/roboflow/train"),
        ("wb_val", "roboflow/valid/_annotations.coco.json", "/roboflow/valid"),
        ("wb_test", "roboflow/test/_annotations.coco.json", "/roboflow/test"),
    )
    already_registered = MetadataCatalog.list()
    for split_name, json_file, image_root in splits:
        if split_name not in already_registered:
            register_coco_instances(split_name, {}, json_file, image_root)
cv2_imshow(masked_image) else: if save_out: cv2.imwrite(os.path.join(image_outpath, filename), im_input) else: cv2_imshow(im_input) check_annotations(image_infile, im_input) print("\tSaving to ", os.path.join(image_outpath, filename)) return predictions, vis_output """ python model_segm.py --input """ if __name__ == "__main__": print("Metadata List: ", MetadataCatalog.list()) input_folders = _PREDEFINED_SPLITS_GRC_MD["rdd2020_source"][ "rdd2020_train"] try: for folder in input_folders: print("\n----------- ", folder, "------------\n") #image_filepath = os.path.join(ROADDAMAGE_DATASET, "train/Japan/images", "Japan_000000.jpg") # single image image_filepath = os.path.join(ROADDAMAGE_DATASET, folder, "images") # in directory image_outpath = os.path.join(image_filepath, "../images_segm") # out directory os.makedirs(image_outpath, exist_ok=True) if os.path.isdir(image_filepath): for id, imfile in enumerate( sorted(glob.glob(os.path.join( image_filepath, '*.jpg')))): # assuming jpg images
from detectron2.data import DatasetCatalog, MetadataCatalog
import copy

from .build import DATASET_MAPPER_REGISTRY


@DATASET_MAPPER_REGISTRY.register()
class NoneMapper(object):
    """No-op dataset mapper: returns a deep copy of the input dict unchanged."""

    def __init__(self, cfg, **kwargs):
        # No configuration needed; signature kept for registry compatibility.
        pass

    def __call__(self, dataset_dict):
        """Return a deep copy so downstream mutation cannot affect the source dict."""
        dataset_dict = copy.deepcopy(
            dataset_dict)  # it will be modified by code below
        return dataset_dict


# Register a trivial 10-sample dummy dataset. Guard BOTH catalogs: the original
# guarded only MetadataCatalog, so importing this module twice would raise from
# the duplicate DatasetCatalog.register('NoneDataset', ...) call.
if 'NoneDataset' not in DatasetCatalog.list():
    DatasetCatalog.register('NoneDataset', (lambda: [{'test': 'test'}] * 10))
if 'NoneDataset' not in MetadataCatalog.list():
    MetadataCatalog.get('NoneDataset').set(**{'num_samples': 10})