def do_evaluate(pred_config, output_file):
    num_tower = max(cfg.TRAIN.NUM_GPUS, 1)
    graph_funcs = MultiTowerOfflinePredictor(
        pred_config, list(range(num_tower))).get_predictors()

    for dataset in cfg.DATA.VAL:
        logger.info("Evaluating {} ...".format(dataset))
        dataflows = [
            get_eval_dataflow(dataset, shard=k, num_shards=num_tower)
            for k in range(num_tower)
        ]
        all_results = multithread_predict_dataflow(dataflows, graph_funcs)
        output = output_file + '-' + dataset
        DatasetRegistry.get(dataset).eval_inference_results(all_results, output)
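# Hedged sketch of how `do_evaluate` is typically driven. It assumes tensorpack's
# `PredictConfig` / `SmartInit` API and a `MODEL` object exposing
# `get_inference_tensor_names()` as in tensorpack's FasterRCNN example; both
# `MODEL` and the checkpoint path below are placeholders, not confirmed parts of
# this repo's API.
from tensorpack.predict import PredictConfig
from tensorpack.tfutils import SmartInit

predcfg = PredictConfig(
    model=MODEL,  # placeholder: the detection ModelDesc used for training
    session_init=SmartInit("/path/to/checkpoint"),  # placeholder checkpoint path
    input_names=MODEL.get_inference_tensor_names()[0],
    output_names=MODEL.get_inference_tensor_names()[1],
)
do_evaluate(predcfg, output_file="eval-output")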
def register_mot(
    base_dir: str,
    dataset_file: str = "dataset.json",
    classes_file: str = "classes.json",
    images_folder: str = "Images_md5",
):
    """Register a dataset to the registry.

    See `MotDataset` for more details on arguments.
    """
    base_dir = os.path.expanduser(base_dir)
    classes_path = os.path.join(base_dir, classes_file)
    class_names = ["BG"] + get_class_names(classes_path)
    for split in ["train", "val"]:
        name = "mot_" + split
        # forward the dataset layout arguments to MotDataset
        DatasetRegistry.register(
            name,
            lambda x=split: MotDataset(
                base_dir,
                split=x,
                dataset_file=dataset_file,
                classes_file=classes_file,
                images_folder=images_folder,
            ),
        )
        DatasetRegistry.register_metadata(name, "class_names", class_names)
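# Minimal usage sketch, assuming a hypothetical MOT dataset at "~/datasets/mot":
# register both splits, then query the registry the same way the dataflow code
# below does.
register_mot("~/datasets/mot")
train_roidbs = DatasetRegistry.get("mot_train").training_roidbs()
class_names = DatasetRegistry.get_metadata("mot_train", "class_names")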
def _eval(self):
    logdir = self._output_dir
    if cfg.TRAINER == 'replicated':
        all_results = multithread_predict_dataflow(self.dataflows, self.predictors)
    else:
        filenames = [
            os.path.join(
                logdir, 'outputs{}-part{}.json'.format(self.global_step, rank))
            for rank in range(hvd.local_size())
        ]

        if self._horovod_run_eval:
            local_results = predict_dataflow(self.dataflow, self.predictor)
            fname = filenames[hvd.local_rank()]
            with open(fname, 'w') as f:
                json.dump(local_results, f)
        self.barrier.eval()
        if hvd.rank() > 0:
            return
        all_results = []
        for fname in filenames:
            with open(fname, 'r') as f:
                obj = json.load(f)
            all_results.extend(obj)
            os.unlink(fname)

    scores = DatasetRegistry.get(
        self._eval_dataset).eval_inference_results(all_results)
    for k, v in scores.items():
        self.trainer.monitors.put_scalar(self._eval_dataset + '-' + k, v)
def print_class_histogram(roidbs):
    """
    Args:
        roidbs (list[dict]): the same format as the output of `training_roidbs`.
    """
    class_names = DatasetRegistry.get_metadata(cfg.DATA.TRAIN[0], 'class_names')
    # labels are in [1, NUM_CATEGORY], hence +2 for bins
    hist_bins = np.arange(cfg.DATA.NUM_CATEGORY + 2)

    # Histogram of ground-truth objects
    gt_hist = np.zeros((cfg.DATA.NUM_CATEGORY + 1, ), dtype=np.int64)
    for entry in roidbs:
        # filter crowd?
        gt_inds = np.where((entry["class"] > 0) & (entry["is_crowd"] == 0))[0]
        gt_classes = entry["class"][gt_inds]
        gt_hist += np.histogram(gt_classes, bins=hist_bins)[0]
    data = list(
        itertools.chain(*[[class_names[i + 1], v] for i, v in enumerate(gt_hist[1:])]))
    COL = min(6, len(data))
    total_instances = sum(data[1::2])
    data.extend([None] * ((COL - len(data) % COL) % COL))
    data.extend(["total", total_instances])
    data = itertools.zip_longest(*[data[i::COL] for i in range(COL)])
    # BG (index 0) is excluded from the table
    table = tabulate(
        data, headers=["class", "#box"] * (COL // 2),
        tablefmt="pipe", stralign="center", numalign="left")
    logger.info("Ground-Truth category distribution:\n" + colored(table, "cyan"))
def get_eval_dataflow(name, shard=0, num_shards=1):
    """
    Args:
        name (str): name of the dataset to evaluate
        shard, num_shards: to get subset of evaluation data
    """
    roidbs = DatasetRegistry.get(name).inference_roidbs()
    logger.info("Found {} images for inference.".format(len(roidbs)))

    num_imgs = len(roidbs)
    img_per_shard = num_imgs // num_shards
    img_range = (shard * img_per_shard,
                 (shard + 1) * img_per_shard if shard + 1 < num_shards else num_imgs)

    # no filter for training
    ds = DataFromListOfDict(
        roidbs[img_range[0]:img_range[1]], ["file_name", "image_id"])

    def f(fname):
        im = cv2.imread(fname, cv2.IMREAD_COLOR)
        assert im is not None, fname
        return im

    ds = MapDataComponent(ds, f, 0)
    # Evaluation itself may be multi-threaded, therefore don't add prefetch here.
    return ds
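# Sketch: iterate a single evaluation shard. `reset_state()` and plain iteration
# follow tensorpack's DataFlow protocol; "mot_val" assumes the dataset was
# registered via `register_mot` above.
df = get_eval_dataflow("mot_val", shard=0, num_shards=2)
df.reset_state()
for img, image_id in df:
    pass  # run the detector on `img` and collect results keyed by `image_id`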
def get_train_dataflow():
    """
    Return a training dataflow. Each datapoint consists of the following:

    An image: (h, w, 3),

    1 or more pairs of (anchor_labels, anchor_boxes):
        anchor_labels: (h', w', NA)
        anchor_boxes: (h', w', NA, 4)

    gt_boxes: (N, 4)
    gt_labels: (N,)

    If MODE_MASK, gt_masks: (N, h, w)
    """
    roidbs = list(
        itertools.chain.from_iterable(
            DatasetRegistry.get(x).training_roidbs() for x in cfg.DATA.TRAIN))
    print_class_histogram(roidbs)

    # Filter out images that have no gt boxes, but this filter shall not be applied for testing.
    # The model does support training with empty images, but it is not useful for COCO.
    num = len(roidbs)
    roidbs = list(
        filter(lambda img: len(img["boxes"][img["is_crowd"] == 0]) > 0, roidbs))
    logger.info(
        "Filtered {} images which contain no non-crowd groundtruth boxes. "
        "Total #images for training: {}".format(num - len(roidbs), len(roidbs)))

    ds = DataFromList(roidbs, shuffle=True)
    preprocess = TrainingDataPreprocessor(cfg)

    if cfg.DATA.NUM_WORKERS > 0:
        if cfg.TRAINER == "horovod":
            # one dataflow per process, therefore no need for a large buffer
            buffer_size = cfg.DATA.NUM_WORKERS * 10
            # MPI does not like fork(), so use threads under horovod
            ds = MultiThreadMapData(ds, cfg.DATA.NUM_WORKERS, preprocess,
                                    buffer_size=buffer_size)
        else:
            buffer_size = cfg.DATA.NUM_WORKERS * 20
            ds = MultiProcessMapData(ds, cfg.DATA.NUM_WORKERS, preprocess,
                                     buffer_size=buffer_size)
    else:
        ds = MapData(ds, preprocess)
    return ds
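# Sketch: materialize one preprocessed training datapoint to sanity-check the
# pipeline. Assumes the datasets listed in cfg.DATA.TRAIN have been registered
# beforehand.
ds = get_train_dataflow()
ds.reset_state()
first_datapoint = next(iter(ds))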
def finalize_configs(is_training):
    """
    Run some sanity checks, and populate some configs from others
    """
    _C.freeze(False)  # populate new keys now
    if isinstance(_C.DATA.VAL, six.string_types):  # support single string (the typical case) as well
        _C.DATA.VAL = (_C.DATA.VAL, )
    if isinstance(_C.DATA.TRAIN, six.string_types):  # support single string
        _C.DATA.TRAIN = (_C.DATA.TRAIN, )

    # finalize dataset definitions ...
    from mot.object_detection.dataset import DatasetRegistry
    datasets = list(_C.DATA.TRAIN) + list(_C.DATA.VAL)
    _C.DATA.CLASS_NAMES = DatasetRegistry.get_metadata(datasets[0], "class_names")
    _C.DATA.NUM_CATEGORY = len(_C.DATA.CLASS_NAMES) - 1

    assert _C.BACKBONE.NORM in ['FreezeBN', 'SyncBN', 'GN', 'None'], _C.BACKBONE.NORM
    if _C.BACKBONE.NORM != 'FreezeBN':
        assert not _C.BACKBONE.FREEZE_AFFINE
    assert _C.BACKBONE.FREEZE_AT in [0, 1, 2]

    _C.RPN.NUM_ANCHOR = len(_C.RPN.ANCHOR_SIZES) * len(_C.RPN.ANCHOR_RATIOS)
    assert len(_C.FPN.ANCHOR_STRIDES) == len(_C.RPN.ANCHOR_SIZES)
    # image size into the backbone has to be multiple of this number
    _C.FPN.RESOLUTION_REQUIREMENT = _C.FPN.ANCHOR_STRIDES[3]  # [3] because we build FPN with features r2,r3,r4,r5

    if _C.MODE_FPN:
        size_mult = _C.FPN.RESOLUTION_REQUIREMENT * 1.
        _C.PREPROC.MAX_SIZE = np.ceil(_C.PREPROC.MAX_SIZE / size_mult) * size_mult
        assert _C.FPN.PROPOSAL_MODE in ['Level', 'Joint']
        assert _C.FPN.FRCNN_HEAD_FUNC.endswith('_head')
        assert _C.FPN.MRCNN_HEAD_FUNC.endswith('_head')
        assert _C.FPN.NORM in ['None', 'GN']

        if _C.FPN.CASCADE:
            # the first threshold is the proposal sampling threshold
            assert _C.CASCADE.IOUS[0] == _C.FRCNN.FG_THRESH
            assert len(_C.CASCADE.BBOX_REG_WEIGHTS) == len(_C.CASCADE.IOUS)

    if is_training:
        train_scales = _C.PREPROC.TRAIN_SHORT_EDGE_SIZE
        if isinstance(train_scales, (list, tuple)) and train_scales[1] - train_scales[0] > 100:
            # don't autotune if augmentation is on
            os.environ['TF_CUDNN_USE_AUTOTUNE'] = '0'
        os.environ['TF_AUTOTUNE_THRESHOLD'] = '1'
        assert _C.TRAINER in ['horovod', 'replicated'], _C.TRAINER

        lr = _C.TRAIN.LR_SCHEDULE
        if isinstance(lr, six.string_types):
            if lr.endswith("x"):
                LR_SCHEDULE_KITER = {
                    "{}x".format(k): [180 * k - 120, 180 * k - 40, 180 * k]
                    for k in range(2, 10)
                }
                LR_SCHEDULE_KITER["1x"] = [120, 160, 180]
                _C.TRAIN.LR_SCHEDULE = [x * 1000 for x in LR_SCHEDULE_KITER[lr]]
            else:
                _C.TRAIN.LR_SCHEDULE = eval(lr)

        # setup NUM_GPUS
        if _C.TRAINER == 'horovod':
            import horovod.tensorflow as hvd
            ngpu = hvd.size()
            logger.info("Horovod Rank={}, Size={}, LocalRank={}".format(
                hvd.rank(), hvd.size(), hvd.local_rank()))
        else:
            assert 'OMPI_COMM_WORLD_SIZE' not in os.environ
            ngpu = get_num_gpu()
        assert ngpu > 0, "Has to train with GPU!"
        assert ngpu % 8 == 0 or 8 % ngpu == 0, \
            "Can only train with 1,2,4 or >=8 GPUs, but found {} GPUs".format(ngpu)
    else:
        # autotune is too slow for inference
        os.environ['TF_CUDNN_USE_AUTOTUNE'] = '0'
        ngpu = get_num_gpu()

    if _C.TRAIN.NUM_GPUS is None:
        _C.TRAIN.NUM_GPUS = ngpu
    else:
        if _C.TRAINER == 'horovod':
            assert _C.TRAIN.NUM_GPUS == ngpu
        else:
            assert _C.TRAIN.NUM_GPUS <= ngpu

    _C.freeze()
    logger.info("Config: ------------------------------------------\n" + str(_C))
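# Sketch of the typical call order: datasets must be registered before
# `finalize_configs`, since it resolves DATA.CLASS_NAMES / DATA.NUM_CATEGORY
# from the registry metadata of the first dataset in DATA.TRAIN + DATA.VAL.
# The base directory below is a hypothetical example.
register_mot("~/datasets/mot")
finalize_configs(is_training=True)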