Example #1
import json

import fsoco  # project-local module providing FSOCO_IMPORT_BORDER_THICKNESS
from supervisely_lib import Annotation


def adjust_annotations(ann_paths, meta, progress):
    # Grow each annotation's stored image size and shift its labels to match
    # the watermark border that was added around the images on import.
    for ann_path in ann_paths:
        temp_json_data = None
        with open(ann_path, 'r') as annotation_file:
            temp_annotation = Annotation.from_json(json.load(annotation_file),
                                                   meta)
            # Adjust the image dimensions stored in the annotation to include
            # the added border on both sides.
            new_img_size = tuple(
                map(lambda dim: dim + fsoco.FSOCO_IMPORT_BORDER_THICKNESS * 2,
                    temp_annotation.img_size))
            temp_annotation._img_size = new_img_size
            # Transform labels according to borders added by watermarking
            #translate_label = (lambda label: [label.translate(drow=fsoco.FSOCO_IMPORT_BORDER_THICKNESS, dcol=fsoco.FSOCO_IMPORT_BORDER_THICKNESS)])
            #temp_annotation.transform_labels(translate_label)
            temp_labels = []
            for label in temp_annotation._labels:
                # Shift each label by the border thickness so its geometry
                # stays aligned with the padded image.
                temp_label = label.translate(
                    fsoco.FSOCO_IMPORT_BORDER_THICKNESS,
                    fsoco.FSOCO_IMPORT_BORDER_THICKNESS)
                temp_labels.append(temp_label)
            temp_annotation._labels = temp_labels
            # Save transformed annotation
            temp_json_data = temp_annotation.to_json()
        with open(ann_path, 'w') as annotation_file:
            annotation_file.write(json.dumps(temp_json_data))
        progress.iter_done_report()
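
A minimal usage sketch for the function above, assuming the Supervisely SDK (supervisely_lib) and the project-local fsoco module are available; the paths and progress message are illustrative:

import glob
import json

import supervisely_lib as sly

# Hypothetical project layout; adjust the paths to the actual dataset.
with open('project/meta.json') as meta_file:
    meta = sly.ProjectMeta.from_json(json.load(meta_file))
ann_paths = sorted(glob.glob('project/dataset/ann/*.json'))
progress = sly.Progress('Adjusting annotations', len(ann_paths))
adjust_annotations(ann_paths, meta, progress)
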
Example #2
import numpy as np

from supervisely_lib import Annotation
from supervisely_lib.io.json import load_json_file


def load_ann(ann_fpath, classes_mapping, project_meta):
    ann_packed = load_json_file(ann_fpath)
    ann = Annotation.from_json(ann_packed, project_meta)
    # ann.normalize_figures()  # @TODO: enable!
    (h, w) = ann.img_size

    gt_boxes, classes_text, classes = [], [], []
    for label in ann.labels:
        gt = np.zeros((h, w), dtype=np.uint8)  # all-background mask to rasterize the label into
        gt_idx = classes_mapping.get(label.obj_class.name, None)
        if gt_idx is None:
            raise RuntimeError(
                'Missing class mapping (title to index). Class {}.'.format(
                    label.obj_class.name))
        label.geometry.draw(gt, 1)
        if np.sum(gt) > 0:
            xmin, ymin, xmax, ymax = get_bbox(gt)
            gt_boxes.append([ymin / h, xmin / w, ymax / h, xmax / w])
            # One string class name per box.
            classes_text.append(label.obj_class.name.encode('utf8'))
            # One integer class id per box.
            classes.append(gt_idx)
    num_boxes = len(gt_boxes)
    gt_boxes = np.array(gt_boxes).astype(np.float32)
    classes = np.array(classes, dtype=np.int64)
    if num_boxes == 0:
        gt_boxes = np.reshape(gt_boxes, [0, 4])
    return gt_boxes, classes, np.array([num_boxes]).astype(np.int32)[0]
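
The get_bbox helper is project-specific and not shown in this excerpt. A minimal sketch of what it could look like, assuming it returns the tight pixel bounding box of the rasterized mask in (xmin, ymin, xmax, ymax) order, matching the unpacking above:

import numpy as np

def get_bbox(mask):
    # Coordinates of all foreground pixels; the caller above only invokes
    # this when the mask is non-empty.
    ys, xs = np.nonzero(mask)
    return xs.min(), ys.min(), xs.max(), ys.max()
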
Example #3
    def _do_single_img_inference(self, img, in_msg):
        in_project_meta = self._in_project_meta_from_msg(in_msg)
        ann_json = in_msg.get('annotation')
        if ann_json is not None:
            # An annotation was passed in, so the request must also carry the
            # project meta needed to deserialize it.
            if in_project_meta is None:
                raise ValueError('In order to perform inference with annotation you must specify the appropriate'
                                 ' project meta.')
            ann = Annotation.from_json(ann_json, in_project_meta)
        else:
            # No input annotation: fall back to an empty meta and an empty
            # annotation sized to the image.
            in_project_meta = in_project_meta or ProjectMeta()
            ann = Annotation(img.shape[:2])

        inference_mode = self._make_inference_mode(in_msg.get(MODE, {}), in_project_meta)
        inference_result = inference_mode.infer_annotate(img, ann)
        return inference_result.to_json()
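
All three examples revolve around the same round trip through Annotation.from_json and Annotation.to_json. A minimal standalone sketch with the Supervisely SDK (file names are illustrative):

import json

from supervisely_lib import Annotation, ProjectMeta

with open('meta.json') as meta_file:
    meta = ProjectMeta.from_json(json.load(meta_file))
with open('ann/0001.json') as ann_file:
    ann = Annotation.from_json(json.load(ann_file), meta)
print(ann.img_size, len(ann.labels))
with open('ann/0001.json', 'w') as ann_file:
    json.dump(ann.to_json(), ann_file)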