Example #1
import json
from os.path import join, split
from random import random

from obb_anns import OBBAnns
def main(gt_fp: str):
    gt = OBBAnns(gt_fp)
    gt.load_annotations()

    root = split(gt_fp)[0]

    # First make perfect proposals
    bboxes = gt.ann_info[['bbox', 'cat_id', 'img_id']]
    proposals = {'proposals': bboxes.to_dict('records')}
    with open(join(root, 'proposals_perfect.json'), 'w') as prop_file:
        json.dump(proposals, prop_file)

    # Now randomly "forget" certain proposals
    # Keep each annotation with probability 0.8
    selector = [random() > 0.2 for _ in range(len(gt.ann_info))]

    bboxes = (gt.ann_info[['bbox', 'cat_id', 'img_id']][selector]
              .apply(fudge_bboxes, axis=1, result_type='expand'))

    proposals = {'proposals': bboxes.to_dict('records')}

    with open(join(root, 'proposals.json'), 'w') as prop_file:
        json.dump(proposals, prop_file)
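
The example calls a fudge_bboxes helper that is not shown above. A minimal sketch of what such a helper could look like (an assumption, not the original implementation): it jitters each ground-truth box slightly so the surviving proposals are also imperfect.

from random import uniform

def fudge_bboxes(row, max_shift=5.0):
    # Hypothetical helper: perturb each box coordinate by a small random
    # offset to simulate imperfect proposals.
    noisy = [coord + uniform(-max_shift, max_shift) for coord in row['bbox']]
    return {'bbox': noisy, 'cat_id': row['cat_id'], 'img_id': row['img_id']}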
Example #2
import shutil

import json
import numpy as np
from obb_anns import OBBAnns
from pathlib import Path

ann_in_1 = OBBAnns('data/deep_scores_dense/deepscores_test.json')
ann_in_2 = OBBAnns('../ili_subset/scores.json')
SUFFIX = '_scn'
ann_out_path = Path('..', 'ds_test_ili', 'deepscores_test.json')

ann_in_1.load_annotations()
ann_in_2.load_annotations()
target_ds_root = ann_out_path.parent

assert target_ds_root.exists(), f"Directory of the target dataset ({str(target_ds_root)}) must exist!"

# Make dirs
images_path = Path('images')
segmentation_path = Path('segmentation')
instance_path = Path('instance')
for subdir in [images_path, segmentation_path, instance_path]:
    (target_ds_root / subdir).mkdir(exist_ok=True)

print("Copying image files")
def copy_images(ann: OBBAnns, suffix: str = ''):
    ds_root = Path(ann.ann_file).parent
    for img_info in ann.img_info:
        img_path = Path(img_info['filename'])
        img_path_new = img_path.with_name(
            img_path.with_suffix('').name + suffix + img_path.suffix)
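        # The snippet is truncated here; a plausible completion (assumption,
        # not the original code) copies each image into the target dataset,
        # assuming both datasets keep images under an 'images' subdirectory:
        shutil.copy(ds_root / images_path / img_path.name,
                    target_ds_root / images_path / img_path_new.name)


# Presumably (assumption) both annotation sets' images are then copied, with
# SUFFIX distinguishing the second set:
copy_images(ann_in_1)
copy_images(ann_in_2, suffix=SUFFIX)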
Example #3
    parser.add_argument('PROPOSAL',
                        type=str,
                        nargs='?',
                        help='name of the proposals json')
    return parser.parse_args()


def extract_bbox_list():
    all_bboxes = []
    return all_bboxes


if __name__ == '__main__':
    args = parse_args()

    a = OBBAnns(join(args.ROOT, args.ANNS))
    a.load_annotations()
    a.set_annotation_set_filter(['deepscores'])

    np_annotations = np.stack(a.ann_info['a_bbox'])
    height = np_annotations[:, 3] - np_annotations[:, 1]
    width = np_annotations[:, 2] - np_annotations[:, 0]
    aspect_ratio = height / width
    area = a.ann_info['area']

    d = {
        'height': height,
        'width': width,
        'aspect_ratio': aspect_ratio,
        'area': area
    }
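    # The snippet ends here; a natural continuation (assumption) wraps the
    # per-annotation statistics in a DataFrame for quick inspection:
    import pandas as pd
    stats = pd.DataFrame({k: np.asarray(v) for k, v in d.items()})
    print(stats.describe())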
Example #4
    parser = ArgumentParser(description='runs the obb_anns.py file')
    parser.add_argument('ROOT',
                        type=str,
                        help='path to the root of the dataset directory')
    return parser.parse_args()


if __name__ == '__main__':
    args = parse_args()
    root_dir = Path(args.ROOT)
    file_names_in_annotations = []
    num_ann_files = len(list(root_dir.glob("*.json")))

    for i, dataset_ann_fp in enumerate(root_dir.glob("*.json")):
        print(f'Checking file {i + 1} of {num_ann_files}...')
        a = OBBAnns(str(dataset_ann_fp))  # glob already yields the full path
        a.load_annotations()
        a.set_annotation_set_filter(['deepscores'])

        for img in tqdm(a.img_info, unit='imgs'):
            file_names_in_annotations.append(img['filename'])
            try:
                a.get_anns(img_id=img['id'])  # check that annotations load
            except Exception:
                print(f'{img["id"]} caused an exception')

    file_names_in_annotations = set(file_names_in_annotations)

    images_dir = root_dir / 'images_png'

    print("Checking if every image has its annotation in the dataset...")
Example #5
class deep_scoresV2(imdb):
  def __init__(self, image_set, year, devkit_path=None):
    imdb.__init__(self, 'DeepScoresV2' + year + '_' + image_set)
    self._year = year
    self._devkit_path = self._get_default_path() if devkit_path is None \
      else devkit_path

    self._image_set = image_set

    self._data_path = self._devkit_path + "/images"

    self.blacklist = ["staff", 'legerLine']


    self.o = OBBAnns(self._devkit_path+'/deepscores_'+image_set+'.json')
    self.o.load_annotations()
    print(self.o.annotation_sets)
    self.o.set_annotation_set_filter(['deepscores'])
    self.o.set_class_blacklist(self.blacklist)

    self._classes = [v["name"] for (k, v) in self.o.get_cats().items()]
    self._class_ids = [k for (k, v) in self.o.get_cats().items()]

    self._class_to_ind = dict(zip(self.classes, range(self.num_classes)))
    self._class_ids_to_ind = dict(zip(self._class_ids, range(self.num_classes)))
    self._ind_to_class_ids = {v: k for k, v in self._class_ids_to_ind.items()}

    self._image_index = self._load_image_set_index()

    # self.cat_ids = list(self.o.get_cats().keys())
    # self.cat2label = {
    #   cat_id: i
    #   for i, cat_id in enumerate(self.cat_ids)
    # }
    # self.label2cat = {v: k for k, v in self.cat2label.items()}
    # self.CLASSES = tuple([v["name"] for (k, v) in self.o.get_cats().items()])
    # self.img_ids = [id['id'] for id in self.o.img_info]


    self._image_ext = '.png'

    # Default to roidb handler
    self._roidb_handler = self.gt_roidb
    self._salt = str(uuid.uuid4())
    self._comp_id = 'comp4'

    # PASCAL specific config options
    self.config = {'cleanup': True,
                   'use_salt': True,
                   'use_diff': False,
                   'matlab_eval': False,
                   'rpn_file': None}


  def image_path_at(self, i):
    """
    Return the absolute path to image i in the image sequence.
    """
    return self.image_path_from_index(self._image_index[i])

  def image_path_from_index(self, index):
    """
    Construct an image path from the image's "index" identifier.
    """
    image_path = os.path.join(self._data_path, self.o.get_imgs(ids=[index])[0]["filename"])
    assert os.path.exists(image_path), \
      'Path does not exist: {}'.format(image_path)
    return image_path

  def _load_image_set_index(self):
    """
    Load the indexes listed in this dataset's image set file.
    """
    # Example path to image set file:
    image_index = [x["id"] for x in self.o.img_info]
    return image_index

  def _get_default_path(self):
    """
    Return the default path where PASCAL VOC is expected to be installed.
    """
    return os.path.join(cfg.DATA_DIR, 'DeepScores_' + self._year)

  def gt_roidb(self):
    """
    Return the database of ground-truth regions of interest.

    This function loads/saves from/to a cache file to speed up future calls.
    """
    cache_file = os.path.join(self.cache_path, self.name + '_gt_roidb.pkl')
    # if os.path.exists(cache_file):
    #   with open(cache_file, 'rb') as fid:
    #     try:
    #       roidb = pickle.load(fid)
    #     except:
    #       roidb = pickle.load(fid, encoding='bytes')
    #   print('{} gt roidb loaded from {}'.format(self.name, cache_file))
    #   return roidb

    gt_roidb = [self._load_musical_annotation(index)
                for index in self.image_index]
    with open(cache_file, 'wb') as fid:
      pickle.dump(gt_roidb, fid, pickle.HIGHEST_PROTOCOL)
    print('wrote gt roidb to {}'.format(cache_file))

    return gt_roidb

  def rpn_roidb(self):
    if int(self._year) == 2017 or self._image_set != 'debug':
      gt_roidb = self.gt_roidb()
      rpn_roidb = self._load_rpn_roidb(gt_roidb)
      roidb = imdb.merge_roidbs(gt_roidb, rpn_roidb)
    else:
      roidb = self._load_rpn_roidb(None)

    return roidb

  def _load_rpn_roidb(self, gt_roidb):
    filename = self.config['rpn_file']
    print('loading {}'.format(filename))
    assert os.path.exists(filename), \
      'rpn data not found at: {}'.format(filename)
    with open(filename, 'rb') as f:
      box_list = pickle.load(f)
    return self.create_roidb_from_box_list(box_list, gt_roidb)

  def _load_musical_annotation(self, index):
    """
    Load annotation info from obb_anns in the PASCAL VOC
    format.
    """


    anns = self.o.get_anns(img_id=index)
    boxes = anns['a_bbox']
    boxes = np.round(np.stack(boxes.to_numpy())).astype(np.uint16)

    gt_classes = np.squeeze(np.stack(anns['cat_id'].to_numpy()).astype(np.int32))
    gt_classes = np.array(list(map(self._class_ids_to_ind.get, gt_classes)))
    #blacklisted_anns = [x not in self.blacklist_index for x in gt_classes]
    #boxes = boxes[blacklisted_anns]
    #gt_classes = gt_classes[blacklisted_anns]

    num_objs = boxes.shape[0]
    overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)

    # "Seg" area for pascal is just the box area
    seg_areas = np.zeros((num_objs,), dtype=np.float32)

    for ind in range(num_objs):
      # cast to int to avoid uint16 overflow in the area product
      seg_areas[ind] = ((int(boxes[ind, 2]) - int(boxes[ind, 0]) + 1)
                        * (int(boxes[ind, 3]) - int(boxes[ind, 1]) + 1))
      overlaps[ind, gt_classes[ind]] = 1.0

    overlaps = scipy.sparse.csr_matrix(overlaps)
    return {'boxes': boxes,
            'gt_classes': gt_classes,
            'gt_overlaps': overlaps,
            'flipped': False,
            'seg_areas': seg_areas}

  def _get_comp_id(self):
    comp_id = (self._comp_id + '_' + self._salt if self.config['use_salt']
               else self._comp_id)
    return comp_id

  def _get_voc_results_file_template(self):
    filename = self._get_comp_id() + '_det_' + self._image_set + '_{:s}.txt'
    path = os.path.join(
      self._devkit_path,
      'results',
      'musical' + self._year,
      filename)
    return path

  def _write_voc_results_file(self, all_boxes):
    for cls_ind, cls in enumerate(self.classes):
      if cls == '__background__':
        continue
      print('Writing {} VOC results file'.format(cls))
      filename = self._get_voc_results_file_template().format(cls)
      with open(filename, 'wt') as f:
        for im_ind, index in enumerate(self.image_index):
          dets = all_boxes[cls_ind][im_ind]
          if len(dets) == 0:
            continue
          # the VOCdevkit expects 1-based indices
          for k in range(dets.shape[0]):
            f.write('{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\n'.
                    format(str(index), dets[k, -1],
                           dets[k, 0] + 1, dets[k, 1] + 1,
                           dets[k, 2] + 1, dets[k, 3] + 1))

  def _do_python_eval(self, output_dir='output', path=None):
    annopath = os.path.join(
      self._devkit_path,
      'segmentation_detection',
      'xml_annotations',
      '{:s}.xml')
    imagesetfile = os.path.join(
      self._devkit_path,
      'train_val_test',
      self._image_set + '.txt')
    cachedir = os.path.join(self._devkit_path, 'annotations_cache')
    aps = []
    # The PASCAL VOC metric changed in 2010
    use_07_metric = int(self._year) < 2010
    print('VOC07 metric? ' + ('Yes' if use_07_metric else 'No'))
    if not os.path.isdir(output_dir):
      os.mkdir(output_dir)
    for i, cls in enumerate(self._classes):
      if cls == '__background__':
        continue
      filename = self._get_voc_results_file_template().format(cls)
      rec, prec, ap = voc_eval(
        filename, annopath, imagesetfile, cls, cachedir, ovthresh=0.5,
        use_07_metric=use_07_metric)
      aps += [ap]
      print(('AP for {} = {:.4f}'.format(cls, ap)))
      with open(os.path.join(output_dir, cls + '_pr.pkl'), 'wb') as f:
        pickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)
    print(('Mean AP = {:.4f}'.format(np.mean(aps))))
    print('~~~~~~~~')
    print('Results:')
    # open the file where we want to save the results
    if path is not None:
      res_file = open(os.path.join('/DeepWatershedDetection' + path, 'res.txt'), 'w+')
      len_ap = len(aps)
      sum_aps = 0
      present = 0
      for i in range(len_ap):
        print(('{:.3f}'.format(aps[i])))
        if i not in [26, 32, 35, 36, 39, 45, 48, 67, 68, 74, 89, 99, 102, 118]:
          if math.isnan(aps[i]):
            res_file.write(str(0) + "\n")
          else:
            res_file.write(('{:.3f}'.format(aps[i])) + "\n")
            sum_aps += aps[i]
          present += 1
      res_file.write('\n\n\n')
      res_file.write("Mean Average Precision: " + str(sum_aps / float(present)))
      res_file.close()

    print(('{:.3f}'.format(np.mean(aps))))
    print('~~~~~~~~')
    print('')
    print('--------------------------------------------------------------')
    print('Results computed with the **unofficial** Python eval code.')
    print('Results should be very close to the official MATLAB eval code.')
    print('Recompute with `./tools/reval.py --matlab ...` for your paper.')
    print('-- Thanks, The Management')
    print('--------------------------------------------------------------')

  def _do_matlab_eval(self, output_dir='output'):
    print('-----------------------------------------------------')
    print('Computing results with the official MATLAB eval code.')
    print('-----------------------------------------------------')
    path = os.path.join(cfg.ROOT_DIR, 'lib', 'datasets',
                        'VOCdevkit-matlab-wrapper')
    cmd = 'cd {} && '.format(path)
    cmd += '{:s} -nodisplay -nodesktop '.format(cfg.MATLAB)
    cmd += '-r "dbstop if error; '
    cmd += 'voc_eval(\'{:s}\',\'{:s}\',\'{:s}\',\'{:s}\'); quit;"' \
      .format(self._devkit_path, self._get_comp_id(),
              self._image_set, output_dir)
    print(('Running:\n{}'.format(cmd)))
    status = subprocess.call(cmd, shell=True)

  def evaluate_detections(self, all_boxes, output_dir, path=None):
    self._write_voc_results_file(all_boxes)
    self._do_python_eval(output_dir, path)
    if self.config['matlab_eval']:
      self._do_matlab_eval(output_dir)
    if self.config['cleanup']:
      for cls in self._classes:
        if cls == '__background__':
          continue
        filename = self._get_voc_results_file_template().format(cls)
        os.remove(filename)

  def competition_mode(self, on):
    if on:
      self.config['use_salt'] = False
      self.config['cleanup'] = False
    else:
      self.config['use_salt'] = True
      self.config['cleanup'] = True

  def prepare_json_dict(self, results):
      json_results = {"annotation_set": "deepscores", "proposals": []}
      for idx in range(len(results)):
          img_id = self._image_index[idx]
          result = results[idx]
          for label in range(len(result)):
              bboxes = result[label]
              for i in range(bboxes.shape[0]):
                  data = dict()
                  data['img_id'] = img_id
                  data['bbox'] = [str(nr) for nr in bboxes[i][0:-1]]
                  data['score'] = str(bboxes[i][-1])
                  data['cat_id'] = self._ind_to_class_ids[label]
                  json_results["proposals"].append(data)
      return json_results

  def write_results_json(self, results, filename=None):
      if filename is None:
          filename = "deepscores_results.json"
      json_results = self.prepare_json_dict(results)

      with open(filename, "w") as fo:
          json.dump(json_results, fo)

      return filename

  def evaluate(self,
               results,
               metric='bbox',
               logger=None,
               jsonfile_prefix=None,
               classwise=True,
               proposal_nums=(100, 300, 1000),
               iou_thrs=np.arange(0.5, 0.96, 0.05),
               average_thrs=False,
               store_pickle=True):
      """Evaluation in COCO protocol.

      Args:
          results (list): Testing results of the dataset.
          metric (str | list[str]): Metrics to be evaluated.
          logger (logging.Logger | str | None): Logger used for printing
              related information during evaluation. Default: None.
          jsonfile_prefix (str | None): The prefix of json files. It includes
              the file path and the prefix of filename, e.g., "a/b/prefix".
              If not specified, a temp file will be created. Default: None.
          classwise (bool): Whether to evaluate the AP for each class.
          proposal_nums (Sequence[int]): Proposal number used for evaluating
              recalls, such as recall@100, recall@1000.
              Default: (100, 300, 1000).
          iou_thrs (Sequence[float]): IoU thresholds used for evaluating
              recalls. If set to a list, the average recall over all IoUs
              will also be computed. Default: np.arange(0.5, 0.96, 0.05).

      Returns:
          dict[str: float]
      """

      metrics = metric if isinstance(metric, list) else [metric]
      allowed_metrics = ['bbox']
      for metric in metrics:
          if metric not in allowed_metrics:
              raise KeyError(f'metric {metric} is not supported')

      filename = self.write_results_json(results)

      self.o.load_proposals(filename)
      metric_results = self.o.calculate_metrics(iou_thrs=iou_thrs, classwise=classwise, average_thrs=average_thrs)

      # import pickle
      # with open('evaluation.pickle', 'rb') as input_file:
      #     metric_results = pickle.load(input_file)

      # Replace category ids with class names as metric keys
      metric_results = {
          self._classes[self._class_ids_to_ind[key]]: value
          for (key, value) in metric_results.items()
      }

      # add occurrences
      occurences_by_class = self.o.get_class_occurences()
      for (key, value) in metric_results.items():
          value.update(no_occurences=occurences_by_class[key])

      if store_pickle:
          import pickle
          pickle.dump(metric_results, open('evaluation_renamed.pickle', 'wb'))
      return metric_results
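
For reference, prepare_json_dict and write_results_json above serialize detections into a proposals file of the following shape (reconstructed from the code; the concrete values are illustrative only):

example_proposals = {
    "annotation_set": "deepscores",
    "proposals": [
        {
            "img_id": 1,                               # id of the source image
            "bbox": ["10.0", "20.0", "50.0", "80.0"],  # coordinates as strings
            "score": "0.98",                           # confidence as a string
            "cat_id": 42,                              # dataset category id
        },
    ],
}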
Example #6
from PIL.PngImagePlugin import PngImageFile
from obb_anns import OBBAnns
from pathlib import Path

# source_images = Path('..', 'ili_subset', 'images')
# source_images = Path('..', 'scanned_deepscore_images_png')
source_images = Path('..', 'scanned_deepscore_images_clean')
# source_index = Path('..', 'clean_scans')
# source_index = Path('..', 'scanned_deepscore_images_png')
source_index = Path('..', 'scanned_deepscore_images_clean')
# target_ds = Path('..', 'ili_subset')
target_ds = Path('..', 'deepscores_scanned')
target_ds_file = target_ds / 'deepscores.json'

orig_ann_root = Path('data', 'deep_scores_dense')
orig_ann_train = OBBAnns(str(orig_ann_root / 'deepscores_train.json'))
orig_ann_train.load_annotations()
orig_ann_test = OBBAnns(str(orig_ann_root / 'deepscores_test.json'))
orig_ann_test.load_annotations()


def construct_reverse_lookup(ann: OBBAnns) -> dict:
    reverse_lookup = {}
    for entry in ann.img_info:
        reverse_lookup[entry['filename']] = entry
    return reverse_lookup


reverse_lookup_train = construct_reverse_lookup(orig_ann_train)
reverse_lookup_test = construct_reverse_lookup(orig_ann_test)
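
A minimal usage sketch for the lookups built above (an assumption about the downstream code, which is not shown): resolve a scanned page's filename to its original img_info entry, preferring the train split.

def find_img_info(filename: str) -> dict:
    # Hypothetical helper: try the train annotations first, then test.
    entry = reverse_lookup_train.get(filename) or reverse_lookup_test.get(filename)
    if entry is None:
        raise KeyError(f'{filename} not found in either annotation file')
    return entry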
Example #7
from argparse import ArgumentParser
from os.path import join

from obb_anns import OBBAnns


def parse_args():
    parser = ArgumentParser(description='runs the obb_anns.py file')
    parser.add_argument('ROOT', type=str,
                        help='path to the root of the dataset directory')
    parser.add_argument('ANNS', type=str,
                        help='name of the annotation file to use')
    parser.add_argument('PROPOSAL', type=str, nargs='?',
                        help='name of the proposals json')
    return parser.parse_args()


if __name__ == '__main__':
    args = parse_args()

    a = OBBAnns(join(args.ROOT, args.ANNS))
    a.load_annotations()
    a.set_annotation_set_filter(['deepscores'])
    if args.PROPOSAL:
        a.load_proposals(join(args.ROOT, args.PROPOSAL))
    for i in range(len(a)):
        a.visualize(img_idx=i)
        # a.visualize(img_idx=i, img_dir='images_png')
        response = input('Press q to quit or enter to continue.')
        if response == 'q':
            break
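
Given the arguments defined in parse_args above, a typical invocation would be `python visualize_anns.py data/deep_scores_dense deepscores_test.json proposals.json`, where the script name is hypothetical, ROOT and ANNS are required, and PROPOSAL is optional.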

Example #8
def main(image_set, single_scale=False):
    # (1) Convert the OBB annotations to per-image JSON files and split
    # them into train/val/test
    if image_set == 'train':
        #dir_txt = os.path.join(dir_dataset, 'labelTxt', image_set)
        out_dir_json = os.path.join(dir_dataset, 'annotations', image_set)
        os.makedirs(out_dir_json, exist_ok=True)
        #txt2json(dir_txt, out_dir_json)

        o = OBBAnns(dir_dataset + 'deepscores_train.json')
        o.load_annotations()
        #print(o)
        cats = o.get_cats()
        img_idxs = list(range(len(o.img_info)))
        imgs, anns = o.get_img_ann_pair(idxs=img_idxs,
                                        ann_set_filter="deepscores")
        filenames = []

        for img in anns:
            objs = []
            img_np = np.array(img)
            for object_instance in img_np:
                obj = dict()
                coord = np.array(object_instance[1],
                                 dtype=np.float32).reshape([4, 2])
                bbox = cv.boxPoints(cv.minAreaRect(coord)).astype(int).tolist()
                obj['name'] = cats[object_instance[2][0]]['name']
                obj['bbox'] = bbox
                objs.append(obj)
            if objs:
                filename = o.get_imgs(ids=[int(img_np[0][4])])[0]['filename']
                filenames.append(os.path.splitext(filename)[0])
                json_filename = os.path.splitext(filename)[0] + '.json'
                with open(os.path.join(out_dir_json, json_filename),
                          'wt') as json_file:
                    json.dump(objs, json_file, indent=2)

        # Split images and json annotations in train and val files
        out_dir_train = os.path.join(dir_dataset, 'images2', 'train')
        out_dir_val = os.path.join(dir_dataset, 'images2', 'val')
        out_dir_val_json = os.path.join(dir_dataset, 'annotations', 'val')
        out_dir_test = os.path.join(dir_dataset, 'images2', 'test')
        os.makedirs(out_dir_train, exist_ok=True)
        os.makedirs(out_dir_val, exist_ok=True)
        os.makedirs(out_dir_test, exist_ok=True)
        os.makedirs(out_dir_val_json, exist_ok=True)

        filenames_train, filenames_val = train_test_split(filenames,
                                                          test_size=272,
                                                          random_state=8)

        for filename in os.listdir(os.path.join(dir_dataset, 'images')):
            if os.path.splitext(filename)[0] in filenames_train:
                shutil.move(os.path.join(dir_dataset, 'images', filename),
                            os.path.join(out_dir_train, filename))
            elif os.path.splitext(filename)[0] in filenames_val:
                shutil.move(os.path.join(dir_dataset, 'images', filename),
                            os.path.join(out_dir_val, filename))
                shutil.move(
                    os.path.join(out_dir_json,
                                 os.path.splitext(filename)[0] + '.json'),
                    os.path.join(out_dir_val_json,
                                 os.path.splitext(filename)[0] + '.json'))
            else:
                shutil.move(os.path.join(dir_dataset, 'images', filename),
                            os.path.join(out_dir_test, filename))

        os.rmdir(os.path.join(dir_dataset, 'images'))
        os.rename(os.path.join(dir_dataset, 'images2'),
                  os.path.join(dir_dataset, 'images'))

    # (2) Crop the images and annotations into fixed-size tiles
    pairs = []
    for filename in os.listdir(os.path.join(dir_dataset, 'images', image_set)):
        anno = os.path.join(dir_dataset, 'annotations', image_set,
                            filename.replace('png', 'json'))
        img = os.path.join(dir_dataset, 'images', image_set, filename)
        if not os.path.exists(anno):
            anno = None
        pairs.append([img, anno])

    overlap = 0.25
    sizes = [768] if single_scale else [512, 768, 1024, 1536]
    save_empty = image_set == 'test'
    image_set = f"{image_set}-{sizes[0]}" if single_scale else image_set

    out_dir_images = os.path.join(dir_dataset, 'images', f'{image_set}-crop')
    out_dir_annos = os.path.join(dir_dataset, 'annotations',
                                 f'{image_set}-crop')

    cropper = Cropper(sizes, overlap)
    cropper.crop_batch(pairs, out_dir_images, out_dir_annos, save_empty)

    # (3) Write image-set index files pairing each crop with its annotation
    pairs = []
    for filename in os.listdir(out_dir_images):
        img = os.path.join('images', f'{image_set}-crop', filename)
        anno = None if image_set == 'test' else os.path.join(
            'annotations', f'{image_set}-crop', filename.replace(
                'jpg', 'json'))
        pairs.append([img, anno])
    out_dir = os.path.join(dir_dataset, 'image-sets')
    os.makedirs(out_dir, exist_ok=True)
    with open(os.path.join(out_dir, f'{image_set}.json'), 'wt') as pairs_file:
        json.dump(pairs, pairs_file, indent=2)
Example #9
from mmdet.apis import init_detector, inference_detector
import mmcv
import os
import cv2
from mmdet.core import rotated_box_to_poly_np
from mmcv.visualization import imshow_det_bboxes
import numpy as np
from obb_anns import OBBAnns

annotations_file = "data/deep_scores_dense/deepscores_test.json"
obb = OBBAnns(annotations_file)
obb.load_annotations()
obb.set_annotation_set_filter(['deepscores'])
CLASSES = tuple([v["name"] for (k, v) in obb.get_cats().items()])

config_file = 'DeepScoresV2_s2anet/fullrez_crop/s2anet_r50_fpn_1x_deepscoresv2_tugg_lowrez.py'
checkpoint_file = 'DeepScoresV2_s2anet/fullrez_crop/epoch_500.pth'

model_name = "s2anet_fullrez_crop"
images_folder = "/home/tugg/Documents/RealScores/Realworld_Test"

resize = 1.0

model = init_detector(config_file, checkpoint_file, device='cuda:0')

images = os.listdir(images_folder)
os.makedirs(os.path.join(images_folder, model_name), exist_ok=True)

for img in images:
    if os.path.isdir(os.path.join(images_folder, img)):
        continue
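    # The snippet is truncated here; a plausible continuation (assumption)
    # based on the imports above runs the detector on each image:
    img_path = os.path.join(images_folder, img)
    result = inference_detector(model, img_path)
    # `result` holds per-class arrays of boxes with scores; the imports
    # suggest these are then converted with rotated_box_to_poly_np and
    # rendered with imshow_det_bboxes into the model_name subfolder.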
Example #10
class DeepScoresV2Dataset(CocoDataset):
    def __init__(self,
                 ann_file,
                 pipeline,
                 classes=None,
                 data_root=None,
                 img_prefix='',
                 seg_prefix=None,
                 proposal_file=None,
                 test_mode=False,
                 filter_empty_gt=True,
                 use_oriented_bboxes=True):
        self.filter_empty_gt = filter_empty_gt
        super(DeepScoresV2Dataset,
              self).__init__(ann_file, pipeline, data_root, img_prefix,
                             seg_prefix, proposal_file, test_mode)
        #self.CLASSES = self.get_classes(classes)
        self.use_oriented_bboxes = use_oriented_bboxes

    @classmethod
    def get_classes(cls, classes=None):
        """Get class names of current dataset.
        Args:
            classes (Sequence[str] | str | None): If classes is None, use
                default CLASSES defined by builtin dataset. If classes is a
                string, take it as a file name. The file contains the name of
                classes where each line contains one class name. If classes is
                a tuple or list, override the CLASSES defined by the dataset.
        Returns:
            tuple[str] or list[str]: Names of categories of the dataset.
        """
        if classes is None:
            return cls.CLASSES

        if isinstance(classes, str):
            # take it as a file path
            class_names = mmcv.list_from_file(classes)
        elif isinstance(classes, (tuple, list)):
            class_names = classes
        else:
            raise ValueError(f'Unsupported type {type(classes)} of classes.')

        return class_names

    def load_annotations(self, ann_file):
        self.obb = OBBAnns(ann_file)
        self.obb.load_annotations()
        self.obb.set_annotation_set_filter(['deepscores'])
        # self.obb.set_class_blacklist(["staff"])
        self.cat_ids = list(self.obb.get_cats().keys())
        self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}
        self.label2cat = {v: k for k, v in self.cat2label.items()}
        self.CLASSES = tuple(
            [v["name"] for (k, v) in self.obb.get_cats().items()])
        self.img_ids = [info['id'] for info in self.obb.img_info]

        return self.obb.img_info

    def get_ann_info(self, idx):
        return self._parse_ann_info(*self.obb.get_img_ann_pair(idxs=[idx]))

    def _filter_imgs(self, min_size=32):
        valid_inds = []
        for i, img_info in enumerate(self.obb.img_info):
            if self.filter_empty_gt and len(img_info['ann_ids']) == 0:
                continue
            if min(img_info['width'], img_info['height']) >= min_size:
                valid_inds.append(i)
        return valid_inds

    def _parse_ann_info(self, img_info, ann_info):
        img_info, ann_info = img_info[0], ann_info[0]
        gt_bboxes = []
        gt_labels = []
        gt_bboxes_ignore = np.zeros((0, 8 if self.use_oriented_bboxes else 4),
                                    dtype=np.float32)

        for i, ann in ann_info.iterrows():
            # we have no ignore feature
            if ann['area'] <= 0:
                continue

            bbox = ann['o_bbox' if self.use_oriented_bboxes else 'a_bbox']
            gt_bboxes.append(bbox)
            gt_labels.append(self.cat2label[ann['cat_id'][0]])

        gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
        gt_labels = np.array(gt_labels, dtype=np.int64)

        ann = dict(bboxes=gt_bboxes,
                   labels=gt_labels,
                   bboxes_ignore=gt_bboxes_ignore,
                   masks=None,
                   seg_map=None)
        return ann

    def prepare_json_dict(self, results):
        json_results = {"annotation_set": "deepscores", "proposals": []}
        for idx in range(len(self)):
            img_id = self.img_ids[idx]
            result = results[idx]
            for label in range(len(result)):
                bboxes = result[label]
                for i in range(bboxes.shape[0]):
                    data = dict()
                    data['img_id'] = img_id

                    if len(bboxes[i]) == 8:
                        data['bbox'] = [str(nr) for nr in bboxes[i]]
                        data['score'] = 1
                    else:
                        data['bbox'] = [str(nr) for nr in bboxes[i][0:-1]]
                        data['score'] = str(bboxes[i][-1])
                    data['cat_id'] = self.label2cat[label]
                    json_results["proposals"].append(data)
        return json_results

    def write_results_json(self, results, filename=None):
        if filename is None:
            filename = "deepscores_results.json"
        json_results = self.prepare_json_dict(results)

        with open(filename, "w") as fo:
            json.dump(json_results, fo)

        return filename

    def evaluate(self,
                 results,
                 metric='bbox',
                 logger=None,
                 jsonfile_prefix=None,
                 classwise=True,
                 proposal_nums=(100, 300, 1000),
                 iou_thrs=np.arange(0.5, 0.96, 0.05),
                 average_thrs=False,
                 work_dir=None):
        """Evaluation in COCO protocol.

        Args:
            results (list): Testing results of the dataset.
            metric (str | list[str]): Metrics to be evaluated.
            logger (logging.Logger | str | None): Logger used for printing
                related information during evaluation. Default: None.
            jsonfile_prefix (str | None): The prefix of json files. It includes
                the file path and the prefix of filename, e.g., "a/b/prefix".
                If not specified, a temp file will be created. Default: None.
            classwise (bool): Whether to evaluate the AP for each class.
            proposal_nums (Sequence[int]): Proposal number used for evaluating
                recalls, such as recall@100, recall@1000.
                Default: (100, 300, 1000).
            iou_thrs (Sequence[float]): IoU thresholds used for evaluating
                recalls. If set to a list, the average recall over all IoUs
                will also be computed. Default: np.arange(0.5, 0.96, 0.05).

        Returns:
            dict[str: float]
        """

        metrics = metric if isinstance(metric, list) else [metric]
        allowed_metrics = ['bbox']
        for metric in metrics:
            if metric not in allowed_metrics:
                raise KeyError(f'metric {metric} is not supported')

        filename = self.write_results_json(results)

        self.obb.load_proposals(filename)
        metric_results = self.obb.calculate_metrics(iou_thrs=iou_thrs,
                                                    classwise=classwise,
                                                    average_thrs=average_thrs)

        categories = self.obb.get_cats()
        metric_results = {
            categories[key]['name']: value
            for (key, value) in metric_results.items()
        }

        # add occurrences
        occurences_by_class = self.obb.get_class_occurences()
        for (key, value) in metric_results.items():
            value.update(no_occurences=occurences_by_class[key])

        if work_dir is not None:
            import pickle
            import os
            out_file = os.path.join(work_dir, "dsv2_metrics.pkl")
            pickle.dump(metric_results, open(out_file, 'wb'))
        print(metric_results)
        return metric_results
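
load_annotations above builds cat2label and label2cat to translate between dataset category ids and contiguous training labels. A small self-contained illustration of the round trip, with made-up ids:

cat_ids = [3, 7, 12]                                   # ids as stored in the JSON
cat2label = {cid: i for i, cid in enumerate(cat_ids)}  # {3: 0, 7: 1, 12: 2}
label2cat = {v: k for k, v in cat2label.items()}       # {0: 3, 1: 7, 2: 12}
assert label2cat[cat2label[7]] == 7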
Example #11
class DeepScoresV2Dataset(CocoDataset):
    def load_annotations(self, ann_file):
        self.obb = OBBAnns(ann_file)
        self.obb.load_annotations()
        self.obb.set_annotation_set_filter(['deepscores'])
        self.obb.set_class_blacklist(["staff"])
        self.cat_ids = list(self.obb.get_cats().keys())
        self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}
        self.label2cat = {v: k for k, v in self.cat2label.items()}
        self.CLASSES = tuple(
            [v["name"] for (k, v) in self.obb.get_cats().items()])
        self.img_ids = [info['id'] for info in self.obb.img_info]

        return self.obb.img_info

    def get_ann_info(self, idx):
        return self._parse_ann_info(*self.obb.get_img_ann_pair(idxs=[idx]))

    def _filter_imgs(self, min_size=32):
        valid_inds = []
        for i, img_info in enumerate(self.obb.img_info):
            if self.filter_empty_gt and len(img_info['ann_ids']) == 0:
                continue
            if min(img_info['width'], img_info['height']) >= min_size:
                valid_inds.append(i)
        return valid_inds

    def _parse_ann_info(self, img_info, ann_info):
        img_info, ann_info = img_info[0], ann_info[0]
        gt_bboxes = []
        gt_labels = []
        gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)

        for i, ann in ann_info.iterrows():
            # we have no ignore feature
            if ann['area'] <= 0:
                continue

            bbox = ann['a_bbox']
            gt_bboxes.append(bbox)
            gt_labels.append(self.cat2label[ann['cat_id'][0]])

        gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
        gt_labels = np.array(gt_labels, dtype=np.int64)

        ann = dict(bboxes=gt_bboxes,
                   labels=gt_labels,
                   bboxes_ignore=gt_bboxes_ignore,
                   masks=None,
                   seg_map=None)
        return ann

    def prepare_json_dict(self, results):
        json_results = {"annotation_set": "deepscores", "proposals": []}
        for idx in range(len(self)):
            img_id = self.img_ids[idx]
            result = results[idx]
            for label in range(len(result)):
                bboxes = result[label]
                for i in range(bboxes.shape[0]):
                    data = dict()
                    data['img_id'] = img_id
                    data['bbox'] = [str(nr) for nr in bboxes[i][0:-1]]
                    data['score'] = str(bboxes[i][-1])
                    data['cat_id'] = self.label2cat[label]
                    json_results["proposals"].append(data)
        return json_results

    def write_results_json(self, results, filename=None):
        if filename is None:
            filename = "deepscores_results.json"
        json_results = self.prepare_json_dict(results)

        with open(filename, "w") as fo:
            json.dump(json_results, fo)

        return filename

    def evaluate(self,
                 results,
                 metric='bbox',
                 logger=None,
                 jsonfile_prefix=None,
                 classwise=True,
                 proposal_nums=(100, 300, 1000),
                 iou_thrs=np.arange(0.5, 0.96, 0.05),
                 average_thrs=False):
        """Evaluation in COCO protocol.

        Args:
            results (list): Testing results of the dataset.
            metric (str | list[str]): Metrics to be evaluated.
            logger (logging.Logger | str | None): Logger used for printing
                related information during evaluation. Default: None.
            jsonfile_prefix (str | None): The prefix of json files. It includes
                the file path and the prefix of filename, e.g., "a/b/prefix".
                If not specified, a temp file will be created. Default: None.
            classwise (bool): Whether to evaluate the AP for each class.
            proposal_nums (Sequence[int]): Proposal number used for evaluating
                recalls, such as recall@100, recall@1000.
                Default: (100, 300, 1000).
            iou_thrs (Sequence[float]): IoU thresholds used for evaluating
                recalls. If set to a list, the average recall over all IoUs
                will also be computed. Default: np.arange(0.5, 0.96, 0.05).

        Returns:
            dict[str: float]
        """

        metrics = metric if isinstance(metric, list) else [metric]
        allowed_metrics = ['bbox']
        for metric in metrics:
            if metric not in allowed_metrics:
                raise KeyError(f'metric {metric} is not supported')

        filename = self.write_results_json(results)

        self.obb.load_proposals(filename)
        metric_results = self.obb.calculate_metrics(iou_thrs=iou_thrs,
                                                    classwise=classwise,
                                                    average_thrs=average_thrs)

        metric_results = {
            self.CLASSES[self.cat2label[key]]: value
            for (key, value) in metric_results.items()
        }

        # add occurrences
        occurences_by_class = self.obb.get_class_occurences()
        for (key, value) in metric_results.items():
            value.update(no_occurences=occurences_by_class[key])

        # Persist the renamed per-class metrics for later inspection
        import pickle
        pickle.dump(metric_results,
                    open('evaluation_renamed_rcnn.pickle', 'wb'))
        print(metric_results)
        return metric_results
Example #12
def draw_bbox(self,
              draw,
              ann,
              color,
              oriented,
              annotation_set=None,
              print_label=False,
              print_staff_pos=False,
              print_onset=False,
              instances=False):
    """Draws the bounding box onto an image with a given color.

    :param ImageDraw.ImageDraw draw: ImageDraw object to draw with.
    :param dict ann: Annotation information dictionary of the current
        bounding box to draw.
    :param str color: Color to draw the bounding box in as a hex string,
        e.g. '#00ff00'
    :param bool oriented: Choose between drawing oriented or aligned
        bounding box.
    :param Optional[int] annotation_set: Index of the annotation set to be
        drawn. If None is given, the first one available will be drawn.
    :param Optional[bool] print_label: Determines if the class labels
        are printed on the visualization.
    :param Optional[bool] print_staff_pos: Determines if the staff
        positions are printed on the visualization.
    :param Optional[bool] print_onset: Determines if the onsets are
        printed on the visualization.
    :param bool instances: Determines if the instance ids are printed
        instead of the class labels.

    :return: The drawn object.
    :rtype: ImageDraw.ImageDraw
    """
    annotation_set = 0 if annotation_set is None else annotation_set
    cat_id = ann['cat_id']
    if isinstance(cat_id, list):
        cat_id = int(cat_id[annotation_set])

    parsed_comments = OBBAnns.parse_comments(ann['comments'])

    if oriented:
        bbox = ann['o_bbox']
        draw.line(bbox + bbox[:2], fill=color, width=3)
    else:
        bbox = ann['a_bbox']
        draw.rectangle(bbox, outline=color, width=2)

    # Now draw the label below the bbox
    x0 = min(bbox[::2])
    y0 = max(bbox[1::2])
    pos = (x0, y0)

    def print_text_label(position, text, color_text, color_box):
        x1, y1 = ImageFont.load_default().getsize(text)
        x1 += position[0] + 4
        y1 += position[1] + 4
        draw.rectangle((position[0], position[1], x1, y1), fill=color_box)
        draw.text((position[0] + 2, position[1] + 2), text, color_text)
        return x1, position[1]

    if instances:
        label = str(int(parsed_comments['instance'].lstrip('#'), 16))
        print_text_label(pos, label, '#ffffff', '#303030')

    else:
        label = self.cat_info[cat_id]['name']

        if print_label:
            pos = print_text_label(pos, label, '#ffffff', '#303030')
        if print_onset and 'onset' in parsed_comments.keys():
            pos = print_text_label(pos, parsed_comments['onset'], '#ffffff',
                                   '#091e94')
        if print_staff_pos and 'rel_position' in parsed_comments.keys():
            print_text_label(pos, parsed_comments['rel_position'], '#ffffff',
                             '#0a7313')

    return draw
Example #13

from obb_anns import OBBAnns

ann = OBBAnns('../scanned_ds/ili_scores.json')
ann.load_annotations()
data_root = '../scanned_ds'
out_dir = 'out_ili'
annotation_set = 'deepscores'
annotation_set = ann.annotation_sets.index(annotation_set)
ann.chosen_ann_set = ann.chosen_ann_set[annotation_set]
for img_info in ann.img_info:
    img_id = img_info['id']
    ann.visualize(img_id=img_id, data_root=data_root, out_dir=out_dir)
    # img_info, ann_info = ann.get_img_ann_pair(ids=[img_id])
    # img_info, ann_info = img_info[0], ann_info[0]
    #
    # # Get the data_root from the ann_file path if it doesn't exist
    # if data_root is None:
    #     data_root = osp.split(ann.ann_file)[0]