import os

import cv2
import numpy as np
import scipy.io

# Project-internal imports. The exact module paths below are assumptions
# inferred from the names used in this file; adjust them to match the
# repository's actual layout.
from analysis import create_analyzer
from analysis import CountAnalyzer
from analysis import RenderCityScapesOutputAnalyzer
from analysis import RenderGroundtruthInstanceAnalyzer
from analysis import RenderInstanceAnalyzer
from evaluation import f_iou_pairwise
from runner import OneTimeEvalBase
import postprocess as pp


class EvalRunner(OneTimeEvalBase):

  def __init__(self,
               sess,
               model,
               dataset,
               opt,
               model_opt,
               output_folder,
               threshold_list,
               analyzer_names,
               foreground_folder=None,
               render_gt=False,
               render_output=False,
               output_count=False):
    outputs = ['y_out', 's_out']

    # Guard against output_folder=None; the analyzer loop below handles
    # that case explicitly.
    if output_folder is not None and not os.path.exists(output_folder):
      os.makedirs(output_folder)

    # Default to thresholds 0.0, 0.1, ..., 0.9.
    if threshold_list is None:
      threshold_list = np.arange(10) * 0.1

    # Default to the full metric suite.
    if analyzer_names is None:
      analyzer_names = [
          'sbd', 'wt_cov', 'unwt_cov', 'fg_dice', 'fg_iou', 'fg_iou_all',
          'bg_iou_all', 'avg_fp', 'avg_fn', 'avg_pr', 'avg_re', 'obj_pr',
          'obj_re', 'count_acc', 'count_mse', 'dic', 'dic_abs'
      ]

    self.output_folder = output_folder
    self.threshold_list = threshold_list
    self.analyzer_names = analyzer_names
    self.foreground_folder = foreground_folder
    self.analyzers = []
    self.render_gt = render_gt
    if render_gt:
      self.gt_render = RenderGroundtruthInstanceAnalyzer(
          os.path.join(output_folder, 'gt'), dataset)
    self.render_output = render_output
    self.output_count = output_count

    # Create a set of analyzers for each threshold.
    for tt in threshold_list:
      _analyzers = []
      thresh_suffix = ' {:.2f}'.format(tt)
      thresh_folder = '{:02d}'.format(int(tt * 100))
      for name in analyzer_names:
        if output_folder is not None:
          fname = os.path.join(output_folder, '{}.csv'.format(name))
        else:
          fname = None
        _analyzers.append(
            create_analyzer(
                name, display_name=name + thresh_suffix, fname=fname))
      if output_folder is not None:
        if render_output:
          _analyzers.append(
              RenderInstanceAnalyzer(
                  os.path.join(output_folder, thresh_folder), dataset))
        if output_count:
          _analyzers.append(
              CountAnalyzer(
                  os.path.join(output_folder, thresh_folder, 'count.csv')))
      self.analyzers.append(_analyzers)

    super(EvalRunner, self).__init__(sess, model, dataset, opt, model_opt,
                                     outputs)

  def read_foreground(self, idx, y_gt=None):
    """Reads foreground masks from disk, if a foreground folder is given."""
    if self.foreground_folder is None:
      return None
    fg = []
    for ii in idx:
      fg_fname = os.path.join(self.foreground_folder,
                              self.dataset.get_fname(ii))
      # Collapse the color channels into a single [0, 1] mask.
      fg_ = cv2.imread(fg_fname).astype('float32').max(axis=2) / 255.0
      fg.append(fg_)
    return fg

  def write_log(self, results):
    """Process results.

    Args:
      results: A dictionary with `y_out` and `s_out`.
    """
    self.log.info('----------------------- write_log -----')
    self.opt['Total_time'].append(results['step_time'])
    self.log.info(
        '----------------------- results step_time --- {:.2f}ms'.format(
            results['step_time']))
    # self.loggers['step_time'].add(self.step.get(), results['step_time'])
    inp = results['_batches'][0]
    y_gt_h = self.dataset.get_full_size_labels(
        inp['idx_map'], timespan=results['y_out'].shape[1])
    y_out = results['y_out']
    s_out = results['s_out']
    # Multi-class: take the first confidence channel.
    if len(s_out.shape) == 3:
      s_out = s_out[:, :, 0]
    y_out, s_out = pp.apply_confidence(y_out, s_out)
    fg = self.read_foreground(inp['idx_map'])
    y_out = pp.upsample(y_out, y_gt_h)
    if fg is not None:
      if not self.opt['no_morph']:
        y_out = pp.morph(y_out)
    y_out = pp.apply_one_label(y_out)

    for tt, thresh in enumerate(self.threshold_list):
      y_out_thresh = pp.apply_threshold(y_out, thresh)
      if fg is not None:
        y_out_thresh = pp.mask_foreground(y_out_thresh, fg)
      y_out_thresh, s_out = pp.remove_tiny(
          y_out_thresh, s_out, threshold=self.opt['remove_tiny'])
      iou_pairwise = [
          f_iou_pairwise(a, b) for a, b in zip(y_out_thresh, y_gt_h)
      ]
      results_thresh = {
          'y_out': y_out_thresh,
          'y_gt': y_gt_h,
          's_out': s_out,
          's_gt': inp['_s_gt'],
          'iou_pairwise': iou_pairwise,
          'indices': inp['idx_map']
      }
      # Run each analyzer.
      [aa.stage(results_thresh) for aa in self.analyzers[tt]]
      # Plot groundtruth.
      if self.render_gt:
        self.gt_render.stage(results_thresh)

  def finalize(self):
    """Finalize report."""
    for tt, thresh in enumerate(self.threshold_list):
      [aa.finalize() for aa in self.analyzers[tt]]
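
# ---------------------------------------------------------------------------
# Usage sketch (illustration only, not part of the original module). It
# assumes the caller already owns a TensorFlow session, a trained model, a
# dataset object, and the `opt`/`model_opt` dictionaries used throughout this
# codebase, and that `run` is the evaluation driver inherited from
# OneTimeEvalBase -- both of those names are assumptions here.
# ---------------------------------------------------------------------------
def _example_eval(sess, model, dataset, opt, model_opt, output_folder):
  """Runs the full evaluation suite at the default thresholds."""
  runner = EvalRunner(
      sess,
      model,
      dataset,
      opt,
      model_opt,
      output_folder,
      threshold_list=None,  # Defaults to 0.0, 0.1, ..., 0.9.
      analyzer_names=None,  # Defaults to the full metric list above.
      render_gt=True,
      render_output=True,
      output_count=True)
  runner.run()  # Assumed driver loop provided by OneTimeEvalBase.
  runner.finalize()  # Writes one CSV per metric under output_folder.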
class CityscapesEvalRunner(OneTimeEvalBase):

  def __init__(self,
               sess,
               model,
               dataset,
               opt,
               model_opt,
               output_folder,
               threshold_list,
               analyzer_names,
               split,
               foreground_folder=None):
    outputs = ['y_out']

    # Optionally evaluate only a contiguous slice of the dataset.
    if opt['split_id'] == -1:
      start_idx = -1
      end_idx = -1
    else:
      start_idx = opt['split_id'] * opt['num_split']
      end_idx = (opt['split_id'] + 1) * opt['num_split']

    if output_folder is not None:
      if not os.path.exists(output_folder):
        os.makedirs(output_folder)
    else:
      raise Exception('Output folder is required.')

    # Default to thresholds 0.0, 0.1, ..., 0.9.
    if threshold_list is None:
      threshold_list = np.arange(10) * 0.1

    # Default to the full metric suite.
    if analyzer_names is None:
      analyzer_names = [
          'sbd', 'wt_cov', 'unwt_cov', 'fg_dice', 'fg_iou', 'fg_iou_all',
          'bg_iou_all', 'avg_fp', 'avg_fn', 'avg_pr', 'avg_re', 'obj_pr',
          'obj_re', 'count_acc', 'count_mse', 'dic', 'dic_abs'
      ]

    self.output_folder = output_folder
    self.foreground_folder = foreground_folder
    self.threshold_list = threshold_list
    self.analyzer_names = analyzer_names
    self.analyzers = []
    self.split = split
    self.gt_render = RenderGroundtruthInstanceAnalyzer(
        os.path.join(output_folder, 'gt'), dataset)

    # Create a set of analyzers for each threshold.
    for tt in threshold_list:
      _analyzers = []
      thresh_suffix = ' {:.2f}'.format(tt)
      thresh_folder = '{:02d}'.format(int(tt * 100))
      for name in analyzer_names:
        fname = os.path.join(output_folder, '{}.csv'.format(name))
        _analyzers.append(
            create_analyzer(
                name, display_name=name + thresh_suffix, fname=fname))
      if output_folder is not None:
        if dataset.get_name() == 'cityscapes':
          _analyzers.append(
              RenderCityScapesOutputAnalyzer(
                  os.path.join(output_folder, 'cityscapes'), dataset))
          sem_labels = [
              'person', 'rider', 'car', 'truck', 'bus', 'train', 'moto',
              'bike'
          ]
        else:
          sem_labels = None
        _analyzers.append(
            RenderInstanceAnalyzer(
                os.path.join(output_folder, thresh_folder),
                dataset,
                semantic_labels=sem_labels))
        _analyzers.append(
            CountAnalyzer(
                os.path.join(output_folder, thresh_folder, 'count.csv')))
      self.analyzers.append(_analyzers)

    opt['batch_size'] = 1  # Set batch size to 1.

    super(CityscapesEvalRunner, self).__init__(
        sess,
        model,
        dataset,
        opt,
        model_opt,
        outputs,
        start_idx=start_idx,
        end_idx=end_idx)

  def get_input_variables(self):
    variables = [
        'x_full', 'y_gt_full', 'y_out', 'd_out', 'y_out_ins', 's_out',
        's_gt', 'idx_map'
    ]
    return set(variables)

  def _run_step(self, inp):
    # All outputs are precomputed and read from the dataset in `get_batch`,
    # so there is no graph step to run here.
    return {}

  def get_batch(self, idx):
    """Transforms a dataset get_batch into a dictionary to feed."""
    idx_new = self.all_idx[idx]
    _batch = self.dataset.get_batch(idx_new, variables=self.input_variables)
    batch = {}
    x = _batch['x_full']
    y_in = _batch['y_out_ins']
    fg_in = _batch['y_out']
    d_in = _batch['d_out']
    s_out = _batch['s_out']
    # [T, H, W, C]
    x = np.tile(np.expand_dims(x, 0), [y_in.shape[1], 1, 1, 1])
    fg_in = np.tile(fg_in, [y_in.shape[1], 1, 1, 1])
    d_in = np.tile(d_in, [y_in.shape[1], 1, 1, 1])
    # [T, H, W]
    y_in = y_in.reshape([-1, y_in.shape[2], y_in.shape[3]])
    batch['x'] = x
    batch['y_in'] = y_in
    batch['fg_in'] = fg_in
    batch['d_in'] = d_in
    batch['idx_map'] = _batch['idx_map']
    batch['_y_gt_full'] = _batch['y_gt_full']  # [T, H, W]
    batch['_s_gt'] = _batch['s_gt']
    batch['_s_out'] = _batch['s_out']
    return batch

  def write_log(self, results):
    """Process results.

    Args:
      results: A dictionary with `y_out` and `s_out`.
    """
    inp = results['_batches'][0]
    s_out = inp['_s_out']
    conf = s_out
    s_gt = inp['_s_gt']  # [T]
    y_gt_h = [inp['_y_gt_full']]  # [T, H, W]

    # Upsample the foreground semantic segmentation.
    full_size = (y_gt_h[0].shape[1], y_gt_h[0].shape[2])
    if self.opt['lrr_seg']:
      fg_h = [self.read_foreground_lrr(inp['idx_map'][0])]
      fg_mask = [1 - fg_h[0][:, :, 0]]
    else:
      fg = inp['fg_in'][0]  # [1, H, W, C]
      fg_h = np.zeros(
          [full_size[0], full_size[1], fg.shape[2]], dtype='float32')
      for cc in range(fg_h.shape[2]):
        fg_h[:, :, cc] = cv2.resize(fg[:, :, cc],
                                    (full_size[1], full_size[0]))
      FG_THRESHOLD = 0.3
      if fg.shape[2] == 1:
        fg_mask = [(np.squeeze(fg_h, 2) > FG_THRESHOLD).astype('float32')]
      else:
        # Channel 0 is background; everything else counts as foreground.
        fg_mask = [
            (fg_h[:, :, 0] <= (1 - FG_THRESHOLD)).astype('float32')
        ]
      fg_h = [fg_h]

    y_out = pp.upsample(np.expand_dims(inp['y_in'], 0), y_gt_h)
    y_out, conf_hard = pp.apply_confidence(y_out, conf)
    y_out = pp.apply_one_label(y_out)

    for tt, thresh in enumerate(self.threshold_list):
      y_out_thresh = pp.apply_threshold(y_out, thresh)
      y_out_thresh = pp.mask_foreground(y_out_thresh, fg_mask)
      # Remove tiny patches.
      y_out_thresh, conf = pp.remove_tiny(
          y_out_thresh, conf=conf, threshold=self.opt['remove_tiny'])
      results_thresh = {
          'y_out': y_out_thresh,
          'y_gt': y_gt_h,
          's_out': conf_hard,
          'conf': conf,
          'y_in': fg_h,
          's_gt': s_gt,
          'indices': inp['idx_map']
      }
      if not self.opt['no_iou']:
        results_thresh['iou_pairwise'] = [
            f_iou_pairwise(a, b) for a, b in zip(y_out_thresh, y_gt_h)
        ]
      [aa.stage(results_thresh) for aa in self.analyzers[tt]]
      if self.opt['render_gt']:
        self.gt_render.stage(results_thresh)

  def finalize(self):
    """Finalize report."""
    for tt, thresh in enumerate(self.threshold_list):
      [aa.finalize() for aa in self.analyzers[tt]]

  def read_foreground_lrr(self, idx):
    """Reads a precomputed LRR semantic segmentation as the foreground."""
    # Cityscapes semantic IDs: 12=person, 13=rider, 14=car, 15=truck,
    # 16=bus, 17=train, 18=motorcycle, 19=bicycle.
    # Example path: LRR/val/munster/munster_000051_000019_ss.mat
    sem_ids = [12, 13, 14, 15, 16, 17, 18, 19]
    if self.split.startswith('train'):
      folder = 'train'
    elif self.split.startswith('val'):
      folder = 'val'
    elif self.split.startswith('test'):
      folder = 'test'
    runname = idx.split('_')[0]
    matfn = '/ais/gobi4/mren/models/LRR/{}/{}/{}_ss.mat'.format(
        folder, runname, idx)
    fgraw = scipy.io.loadmat(matfn)['semanticPrediction']
    # One-hot encode: channel 0 is background, channels 1-8 the classes.
    fg = np.zeros(list(fgraw.shape) + [9], dtype='float32')
    for ii in range(8):
      fg[:, :, ii + 1] = (fgraw == sem_ids[ii]).astype('float32')
    fg[:, :, 0] = 1 - fg.max(axis=-1)
    return fg
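
# ---------------------------------------------------------------------------
# Reference sketch (illustration only): a plausible implementation of the
# `f_iou_pairwise` helper that both `write_log` methods rely on. The real
# helper is imported from elsewhere in the repository; this version assumes
# binary instance masks of shape [T, H, W] and returns a [T_out, T_gt] IoU
# matrix.
# ---------------------------------------------------------------------------
def _f_iou_pairwise_sketch(y_out, y_gt):
  """Computes IoU between every predicted and every groundtruth instance."""
  t_out, t_gt = y_out.shape[0], y_gt.shape[0]
  out_flat = y_out.reshape([t_out, -1]) > 0.5
  gt_flat = y_gt.reshape([t_gt, -1]) > 0.5
  iou = np.zeros([t_out, t_gt], dtype='float32')
  for ii in range(t_out):
    inter = np.logical_and(out_flat[ii:ii + 1], gt_flat).sum(axis=1)
    union = np.logical_or(out_flat[ii:ii + 1], gt_flat).sum(axis=1)
    # Avoid division by zero when both masks are empty.
    iou[ii] = inter.astype('float32') / np.maximum(union, 1)
  return iou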