def _set_data_loaders(self):
    """Build the train/val/test datasets and wrap each in a data loader."""
    split_to_id = {'train': 0, 'val': 1, 'test': 2}
    annotations = np.array(load_annotations(
        self._task, self._dataset,
        self._filter_duplicate_rels, self._filter_multiple_preds)[0])
    anno_split_ids = np.array([anno['split_id'] for anno in annotations])

    def _dataset_for(*ids):
        # Gather annotations whose split_id is in `ids` (order preserved).
        picked = []
        for split_id in ids:
            picked += annotations[anno_split_ids == split_id].tolist()
        return SGGDataset(picked, self.config, self.features)

    if self._dataset != 'VRD':
        datasets = {
            split: _dataset_for(split_id)
            for split, split_id in split_to_id.items()
        }
    else:
        # VRD: train on splits 0+1; both val and test evaluate on split 2.
        datasets = {
            'train': _dataset_for(0, 1),
            'val': _dataset_for(2),
            'test': _dataset_for(2)
        }
    self._data_loaders = {
        split: SGGDataLoader(
            datasets[split], batch_size=self._batch_size,
            shuffle=split == 'train', num_workers=self._num_workers,
            drop_last=split in {'train', 'val'},
            collate_fn=lambda data: sgg_collate_fn(data, self.features),
            use_cuda=self._use_cuda)
        for split in split_to_id
    }
def __init__(self, mode='relationship'):
    """Initialize model parameters and load annotations.

    Args:
        mode: annotation mode forwarded to ``load_annotations``
            (default 'relationship').
    """
    self._mode = mode
    self._created_images_path = CONFIG['created_images_path']
    self._orig_images_path = CONFIG['orig_images_path']
    # makedirs(exist_ok=True) replaces the racy exists()/mkdir() pair
    # (TOCTOU) and also creates any missing parent directories.
    os.makedirs(self._created_images_path, exist_ok=True)
    self._annotations = load_annotations(mode)
def test(self, batch_size=100, test_mode='relationship'):
    """Test a neural network."""
    print("Testing %s on VRD." % (self.net_name))
    self.net.eval()
    data_loader = self._set_test_data_loader(
        batch_size=batch_size, test_mode=test_mode).eval()
    if self.use_cuda:
        self.net.cuda()
        data_loader.cuda()
    # Accumulate per-file network scores, boxes and labels over all batches.
    scores, boxes, labels = {}, {}, {}
    for batch in data_loader.get_batches():
        outputs = self._net_outputs(data_loader, 0, batch)
        filenames = data_loader.get_files(0, batch)
        scores.update({
            filename: np.array(score_vec)
            for filename, score_vec in zip(
                filenames, outputs.cpu().detach().numpy().tolist())
        })
        boxes.update(data_loader.get_boxes(0, batch))
        labels.update(data_loader.get_labels(0, batch))
    # Predicted class per file: argmax over that file's score vector.
    debug_scores = {
        filename: np.argmax(scores[filename]) for filename in scores
    }
    annotations = load_annotations('test')
    # Ground-truth predicate per file, keyed by the filename with its
    # extension stripped.
    # NOTE(review): this assumes the loader's filenames (the keys of
    # `scores`) are already extensionless so the two key schemes line up —
    # the `in scores` filter below relies on that; confirm against
    # _set_test_data_loader.
    debug_labels = {
        rel['filename'][:rel['filename'].rfind('.')]: rel['predicate_id']
        for anno in annotations for rel in anno['relationships']
    }
    debug_annos = {
        rel['filename'][:rel['filename'].rfind('.')]: (
            rel,
            scores[rel['filename'][:rel['filename'].rfind('.')]].tolist()
        )
        for anno in annotations for rel in anno['relationships']
        if rel['filename'][:rel['filename'].rfind('.')] in scores
    }
    # Dump (relationship, score-vector) pairs for offline debugging.
    with open(self.net_name + '.json', 'w') as fid:
        json.dump(debug_annos, fid)
    # Print the number of files whose argmax prediction matches the
    # ground-truth predicate.
    print(sum(
        1 for name in debug_scores
        if debug_scores[name] == debug_labels[name]))
    # Also persist the names of the correctly-predicted files.
    with open(self.net_name + '.txt', 'w') as fid:
        fid.write(json.dumps([
            name for name in debug_scores
            if debug_scores[name] == debug_labels[name]]))
    # Report recall for each evaluation mode at two keep thresholds.
    for mode in ['relationship', 'unseen', 'seen']:
        for keep in [1, 70]:
            print(
                'Recall@50-100 (top-%d) %s:' % (keep, mode),
                evaluate_relationship_recall(
                    scores, boxes, labels, keep, mode
                )
            )
def __init__(self, dataset):
    """Initialize evaluator setup for this dataset."""
    self.reset()
    # Ground-truth labels and boxes, keyed by filename; keep only the
    # test split (split_id == 2).
    annos, _ = load_annotations('preddet', dataset)
    self._annos = {}
    for anno in annos:
        if anno['split_id'] == 2:
            self._annos[anno['filename']] = anno
def _reset(self, mode):
    """Reset loader."""
    self._mode['current'] = mode
    self._annotations = load_annotations(mode, self._json_path)
    # Rebuild the derived caches; the original call order is kept as-is
    # since later setters may read state produced by earlier ones.
    self._set_targets()
    self._set_boxes()
    self._set_labels()
    self._set_probabilities()
    self._set_subject_object_one_hot_encodings()
    self._set_subject_object_embeddings()
    self._files = list(self._labels.keys())
    self._epoch = -1
def gt_labels_and_bboxes(mode):
    """
    Return ground truth labels and bounding boxes.

    - gt_labels: dict of list of (N, 3) arrays (subj, pred, obj)
    - gt_bboxes: dict of list of (N, 2, 4) arrays (boxes)
    """
    # 'relationship' mode reads the 'test' annotation split.
    annotations = load_annotations('test' if mode == 'relationship'
                                   else mode)
    gt_labels, gt_bboxes = {}, {}
    for anno in annotations:
        # Key both dicts by the filename with its extension stripped.
        stem = anno['filename'][:anno['filename'].rfind('.')]
        rels = anno['relationships']
        gt_labels[stem] = np.array(
            [[rel['subject_id'], rel['predicate_id'], rel['object_id']]
             for rel in rels])
        gt_bboxes[stem] = np.array(
            [[rel['subject_box'], rel['object_box']] for rel in rels])
    return gt_labels, gt_bboxes
def __init__(self, dataset):
    """Initialize evaluator setup for this dataset."""
    self._recall_types = np.array([20, 50, 100])  # R@20, R@50, R@100
    self._max_recall = self._recall_types[-1]
    self.reset()
    # Ground-truth labels and boxes: the 'full' set keeps only the test
    # split (split_id == 2); 'zeroshot' keeps every zero-shot annotation.
    annos, zeroshot_annos = load_annotations('preddet', dataset)
    test_annos = {}
    for anno in annos:
        if anno['split_id'] == 2:
            test_annos[anno['filename']] = anno
    self._annos = {
        'full': test_annos,
        'zeroshot': {anno['filename']: anno for anno in zeroshot_annos}
    }