def mask_to_class_histogram(results: dict,
                            classes: dict,
                            config: Config = None,
                            count_zeros=True):
    """
    Gathers all per-mask histograms into a global one, losing the information of which base mask contains which masks
    :param results: the results dict containing the per-mask histograms ('histos')
    :param classes: dict that links previous classes to the current classes to count
    :param config: the config
    :param count_zeros: if True, base masks containing none of the counted masks are counted as zero occurrences
    :return: the global histogram
    """
    if config is None:
        return
    selectedClasses = {}
    if "all" in classes.values() or any(
        ["all" in classes[c] for c in classes if type(classes[c]) is list]):
        selectedClasses.update(
            {c['display_name']: c['id']
             for c in config.get_classes_info()})
    else:
        tempClasses = []
        for aClass in classes.values():
            if type(aClass) is list:
                tempClasses.extend(aClass)
            else:
                tempClasses.append(aClass)
        selectedClasses.update({
            c['display_name']: c['id']
            for c in config.get_classes_info() if c['name'] in tempClasses
        })

    histogram = {c: {} for c in selectedClasses.keys()}
    for mask_histogram in results['histos']:
        if mask_histogram is None:
            continue
        else:
            for eltClass in histogram:
                if eltClass in selectedClasses:
                    class_id = selectedClasses[eltClass]
                    if class_id not in mask_histogram and count_zeros:
                        if 0 not in histogram[eltClass]:
                            histogram[eltClass][0] = 0
                        histogram[eltClass][0] += 1
                    elif class_id in mask_histogram:
                        nb = mask_histogram[class_id]
                        if nb not in histogram[eltClass]:
                            histogram[eltClass][nb] = 0
                        histogram[eltClass][nb] += 1
    return histogram
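
# Usage sketch (illustrative, not from the original project): assuming `cfg` is a
# loaded Config instance and `results` already contains a 'histos' list filled by the
# inference step, the aggregated histogram could be built as below. The class names
# used in the `classes` mapping are assumptions.
def _example_mask_histogram(results, cfg):
    # Count how many "nsg" and "vessel" masks each "cortex" base mask contains,
    # also counting cortex masks that contain none of them (count_zeros=True)
    histogram = mask_to_class_histogram(results,
                                        classes={"cortex": ["nsg", "vessel"]},
                                        config=cfg,
                                        count_zeros=True)
    # Result shape: {display_name: {nb_of_included_masks: nb_of_base_masks}}
    return histogram
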
def create_multiclass_mask(image_shape, results: dict, config: Config = None):
    """
    Creates an image containing all the masks, where each pixel value is the corresponding mask's class ID
    :param image_shape: the shape of the initial image
    :param results: the results dictionary containing all the masks
    :param config: the config object used to expand mini_masks if enabled
    :return: the multi-mask image
    """
    res = np.zeros((image_shape[0], image_shape[1]), np.uint8)

    masks = results['masks']
    class_ids = results['class_ids']
    rois = results['rois']
    indices = np.arange(len(class_ids))

    classes_hierarchy = config.get_classes_hierarchy()
    if classes_hierarchy is None:
        levels = [[i + 1 for i in range(len(config.get_classes_info()))]]
    else:
        levels = utils.remove_redundant_classes(
            utils.classes_level(classes_hierarchy), keepFirst=False)

    for lvl in levels:
        current_indices = indices[np.isin(class_ids, lvl)]
        for idx in current_indices:
            mask = masks[:, :, idx].astype(bool).astype(np.uint8) * 255
            roi = rois[idx]
            classID = int(class_ids[idx])
            if config is not None and config.is_using_mini_mask():
                shifted_bbox = utils.shift_bbox(roi)
                mask = utils.expand_mask(shifted_bbox, mask, shifted_bbox[2:])
            res = apply_mask(res, mask, classID, 1, roi)
    return res
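
# Usage sketch (illustrative): fusing all predicted masks of an image into a single
# label image in which each pixel stores the class ID of the mask covering it.
# `image` and `results` are assumed to come from the inference pipeline.
def _example_multiclass_mask(image, results, cfg):
    # The returned uint8 image has 0 for background and class IDs elsewhere
    multiclass = create_multiclass_mask(image.shape, results, config=cfg)
    return multiclass
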
@classmethod
def fromLabelMe(cls,
                filepath: str,
                config: Config,
                group_id_2_class: dict = None):
    """
    Constructs an ASAPAdapter from an exported LabelMe annotations file
    :param filepath: path to the LabelMe annotations file
    :param config: the config
    :param group_id_2_class: dict that links group ids to class names (if None, the shape label is used)
    :return: the adapter instance
    """
    res = cls({})
    for c in config.get_classes_info():
        res.addAnnotationClass(c)
    with open(filepath, 'r') as labelMeFile:
        data = json.load(labelMeFile)
    for mask in data['shapes']:
        points = mask['points']
        if group_id_2_class is not None and mask['group_id'] in group_id_2_class:
            name = group_id_2_class[mask['group_id']]
        else:
            name = mask['label'].split(' ')[0]
        res.addAnnotation({'name': name}, points)
    return res
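
# Usage sketch (illustrative): building an adapter from a LabelMe export. The enclosing
# class is assumed to be ASAPAdapter (as the docstring suggests); the file path and the
# group-id-to-class mapping below are assumptions.
def _example_from_labelme(cfg):
    adapter = ASAPAdapter.fromLabelMe("annotations/image_01.json",
                                      config=cfg,
                                      group_id_2_class={1: "cortex"})
    return adapter
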
def __init__(self, dataset_id, image_info, config: Config, previous_mode=False, enable_occlusion=False):
    super().__init__()
    self.__ID = dataset_id
    self.__CONFIG = config
    self.__CUSTOM_CLASS_NAMES = [c['name'] for c in config.get_classes_info("previous" if previous_mode else None)]
    self.__CLASS_ASSOCIATION = {format_text(c): c for c in self.__CUSTOM_CLASS_NAMES}
    self.__IMAGE_INFO = image_info
    self.__ENABLE_OCCLUSION = enable_occlusion
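
# Usage sketch (illustrative): the constructor above belongs to a dataset wrapper class
# whose name is not shown in this excerpt; `CustomDataset` below is a placeholder, and
# the dataset id and image_info values are assumptions.
def _example_dataset_wrapper(cfg, image_info):
    dataset = CustomDataset("dataset_val", image_info, cfg,
                            previous_mode=False, enable_occlusion=False)
    return dataset
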
def export_annotations(image_info: dict,
                       results: dict,
                       adapterClass: AnnotationAdapter.__class__,
                       save_path="predicted",
                       config: Config = None,
                       verbose=0):
    """
    Exports predicted results to an annotation file using the given annotation adapter
    :param image_info: Dict with at least {"NAME": str, "HEIGHT": int, "WIDTH": int} about the inferred image
    :param results: inference results of the image
    :param adapterClass: class inheriting AnnotationAdapter
    :param save_path: path to the directory where the annotation file will be saved
    :param config: the config to get mini_mask information
    :param verbose: verbose level of the method (0 = nothing, 1 = information)
    :return: None
    """
    if config is None:
        print("Cannot export annotations as config is not given.")
        return

    rois = results['rois']
    masks = results['masks']
    class_ids = results['class_ids']
    height = masks.shape[0]
    width = masks.shape[1]
    adapter_instance = adapterClass(
        {
            "name": image_info['NAME'],
            "height": image_info['HEIGHT'],
            'width': image_info['WIDTH'],
            'format': image_info['IMAGE_FORMAT']
        },
        verbose=verbose)
    if verbose > 0:
        print(
            f"Exporting to {adapter_instance.getName()} annotation file format."
        )
    # For each prediction
    for i in range(masks.shape[2]):
        if config is not None and config.is_using_mini_mask():
            shifted_roi = shift_bbox(rois[i])
            shifted_roi += [5, 5, 5, 5]
            image_size = shifted_roi[2:] + [5, 5]
            mask = expand_mask(shifted_roi, masks[:, :, i], image_size)
            yStart, xStart = rois[i][:2] - [5, 5]
        else:
            # Getting the RoI coordinates and the corresponding area
            # y1, x1, y2, x2
            yStart, xStart, yEnd, xEnd = rois[i]
            yStart = max(yStart - 10, 0)
            xStart = max(xStart - 10, 0)
            yEnd = min(yEnd + 10, height)
            xEnd = min(xEnd + 10, width)
            mask = masks[yStart:yEnd, xStart:xEnd, i]

        # Getting list of points coordinates and adding the prediction to XML
        points = getPoints(np.uint8(mask),
                           xOffset=xStart,
                           yOffset=yStart,
                           show=False,
                           waitSeconds=0,
                           info=False)
        if points is None:
            continue
        adapter_instance.addAnnotation(
            config.get_classes_info()[class_ids[i] - 1], points)

    for classInfo in config.get_classes_info():
        adapter_instance.addAnnotationClass(classInfo)

    os.makedirs(save_path, exist_ok=True)
    if verbose > 0:
        print('  - ', end='')
    adapter_instance.saveToFile(save_path, image_info['NAME'])
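
# Usage sketch (illustrative): exporting the predicted masks of one image to an
# annotation file. The adapter class and the image_info values below are assumptions,
# not taken from the original project.
def _example_export(results, cfg, adapter_class):
    image_info = {"NAME": "image_01", "HEIGHT": 2048, "WIDTH": 2048,
                  "IMAGE_FORMAT": "jpg"}
    export_annotations(image_info, results, adapter_class,
                       save_path="predicted", config=cfg, verbose=1)
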
def createMasksOfImage(rawDatasetPath: str,
                       imgName: str,
                       datasetName: str = 'dataset_train',
                       adapter: AnnotationAdapter = None,
                       classesInfo: dict = None,
                       imageFormat="jpg",
                       resize=None,
                       config: Config = None):
    """
    Creates all the masks of a given image by parsing its annotations file
    :param rawDatasetPath: path to the folder containing images and associated annotations
    :param imgName: name of the image, without extension
    :param datasetName: name of the output dataset
    :param adapter: the annotation adapter to use to create masks; if None, the readable adapter with the highest priority is used
    :param classesInfo: information about all the classes that are used; defaults to the nephrology classes info
    :param imageFormat: output format of the image and masks
    :param resize: target size to resize the image and masks to, or None to keep the original size
    :param config: config object
    :return: None
    """
    # Getting the shape of the original image (same for all its masks)
    if classesInfo is None:
        classesInfo = NEPHRO_CLASSES if config is None else config.get_classes_info()

    img = cv2.imread(os.path.join(rawDatasetPath, f"{imgName}.{imageFormat}"))
    if img is None:
        print(f'Problem with {imgName} image')
        return
    shape = img.shape
    if resize is not None:
        yRatio = resize[0] / shape[0]
        xRatio = resize[1] / shape[1]
        assert yRatio > 0 and xRatio > 0, f"Error resize ratio not correct ({yRatio:3.2f}, {xRatio:3.2f})"
        img = cv2.resize(img, resize, interpolation=cv2.INTER_CUBIC)
        shape = img.shape

    # Copying the original image in the dataset
    targetDirectoryPath = os.path.join(datasetName, imgName, 'images')
    if not os.path.exists(targetDirectoryPath):
        os.makedirs(targetDirectoryPath)
        # TODO use file copy if unchanged else cv2
        cv2.imwrite(
            os.path.join(targetDirectoryPath, f"{imgName}.{imageFormat}"), img,
            CV2_IMWRITE_PARAM)

    # Finding annotation files
    formats = adapt.ANNOTATION_FORMAT
    fileList = os.listdir(rawDatasetPath)
    imageFiles = []
    for file in fileList:
        if imgName in file:
            if file.split('.')[-1] in formats:
                imageFiles.append(file)

    # Choosing the adapter to use (parameters to force it ?)
    file = None
    assert len(imageFiles) > 0, f"No annotation file found for {imgName} image"
    if adapter is None:
        # No adapter given, we are looking for the adapter with highest priority level that can read an/the annotation
        # file
        adapters = list(adapt.ANNOTATION_ADAPTERS.values())
        adapterPriority = -1
        for f in imageFiles:
            for a in adapters:
                if a.canRead(os.path.join(rawDatasetPath, f)):
                    if a.getPriorityLevel() > adapterPriority:
                        adapterPriority = a.getPriorityLevel()
                        adapter = a
                        file = f
    else:
        # Using given adapter, we are looking for a file that can be read
        file = None
        for f in imageFiles:
            if adapter.canRead(os.path.join(rawDatasetPath,
                                            f)) and file is None:
                file = f

    # Getting the masks data
    if adapter is None or file is None:
        print(f'No compatible annotation file found for {imgName} image')
        return
    masks = adapter.readFile(os.path.join(rawDatasetPath, file))

    # Creating masks
    for noMask, (datasetClass, maskPoints) in enumerate(masks):
        # Converting class id to class name if needed
        if type(datasetClass) is int:
            if datasetClass < len(classesInfo) and classesInfo[datasetClass]["id"] == datasetClass:
                maskClass = classesInfo[datasetClass]["name"]
            else:
                for classInfo in classesInfo:
                    if classInfo["id"] == datasetClass:
                        maskClass = classInfo["name"]
                        break
        else:
            maskClass = datasetClass
            if maskClass == "None":
                print(f" /!\\ {imgName} : None class present /!\\ ")
        if resize is not None:
            resizedMasks = resizeMasks(maskPoints, xRatio, yRatio)
        createMask(imgName,
                   shape,
                   noMask,
                   maskPoints if resize is None else resizedMasks,
                   datasetName,
                   maskClass,
                   imageFormat,
                   config=config)
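
# Usage sketch (illustrative): generating the mask images of one raw image so it can be
# added to a training dataset. The paths, dataset name and resize value are assumptions.
def _example_create_masks(cfg):
    createMasksOfImage("raw_dataset", "image_01",
                       datasetName="dataset_train",
                       imageFormat="jpg",
                       resize=(1024, 1024),
                       config=cfg)
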
def get_count_and_area(results: dict,
                       image_info: dict,
                       selected_classes: [str],
                       save=None,
                       display=True,
                       config: Config = None,
                       verbose=0):
    """
    Computes count and area of classes from the results
    :param results: the results
    :param image_info: Dict containing information about the image
    :param selected_classes: list of class names to get statistics on
    :param save: if given, path to the directory where the statistics json file will be saved
    :param display: if True, will print the statistics
    :param config: the config to get mini_mask information
    :param verbose: 0 : nothing, 1+ : errors/problems, 2 : general information, ...
    :return: Dict of "className": {"count": int, "area": int} elements for each class
    """
    if config is None or (save is None and not display):
        return

    print(" - Computing statistics on predictions")

    rois = results['rois']
    masks = results['masks']
    class_ids = results['class_ids']
    indices = np.arange(len(class_ids))
    mini_mask_used = config.is_using_mini_mask()

    resize = config.get_param().get('resize', None)
    ratio = 1
    if resize is not None:
        ratio = image_info['HEIGHT'] / resize[0]
        ratio *= (image_info['WIDTH'] / resize[1])

    if type(selected_classes) is str:
        selected_classes_ = [selected_classes]
    else:
        selected_classes_ = selected_classes

    # Getting the inferenceIDs of the wanted classes
    if "all" in selected_classes_:
        selectedClassesID = {
            aClass['id']: aClass['name']
            for aClass in config.get_classes_info()
        }
    else:
        selectedClassesID = {
            config.get_class_id(name): name
            for name in selected_classes_
        }
        indices = indices[np.isin(class_ids, list(selectedClassesID.keys()))]
    res = {
        c_name: {
            "display_name": config.get_class_name(c_id, display=True),
            "count": 0,
            "area": 0
        }
        for c_id, c_name in selectedClassesID.items()
    }

    # For each prediction whose class ID matches one of the wanted classes
    for index in indices:
        # Getting current values of count and area
        className = selectedClassesID[class_ids[index]]
        res[className]["count"] += 1
        # Getting the area of current mask
        if mini_mask_used:
            shifted_roi = utils.shift_bbox(rois[index])
            mask = utils.expand_mask(shifted_roi, masks[:, :, index],
                                     shifted_roi[2:])
        else:
            yStart, xStart, yEnd, xEnd = rois[index]
            mask = masks[yStart:yEnd, xStart:xEnd, index]
        mask = mask.astype(np.uint8)
        if "mask_areas" in results and results['mask_areas'][index] != -1:
            area = int(results['mask_areas'][index])
        else:
            area, _ = utils.get_mask_area(mask)
        if resize is None:
            # Cast to int to avoid "json 'int64' not serializable"
            res[className]["area"] += int(area)
        else:
            res[className]["area"] += int(round(area * ratio))

    if 'BASE_CLASS' in image_info:
        mode = config.get_class_mode(image_info['BASE_CLASS'], only_in_previous="current")[0]
        res[image_info['BASE_CLASS']] = {
            "display_name": config.get_class_name(
                config.get_class_id(image_info['BASE_CLASS'], mode), mode, display=True),
            "count": image_info['BASE_COUNT'],
            "area": image_info["BASE_AREA"]
        }
    if save is not None:
        with open(os.path.join(save, f"{image_info['NAME']}_stats.json"),
                  "w") as saveFile:
            try:
                json.dump(res, saveFile, indent='\t')
            except TypeError:
                if verbose > 0:
                    print("    Failed to save statistics", flush=True)
    if display:
        for className in res:
            mode = config.get_class_mode(className, only_in_previous="current")[0]
            displayName = config.get_class_name(config.get_class_id(className, mode),
                                                mode, display=True)
            stat = res[className]
            print(
                f"    - {displayName} : count = {stat['count']}, area = {stat['area']} px"
            )

    return res
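
# Usage sketch (illustrative): computing per-class count and total area for one
# inferred image and saving the statistics as json. The selected class names and the
# save directory are assumptions.
def _example_statistics(results, image_info, cfg):
    stats = get_count_and_area(results, image_info,
                               selected_classes=["nsg", "vessel"],
                               save="predicted",
                               display=True,
                               config=cfg,
                               verbose=1)
    # stats -> {"nsg": {"display_name": ..., "count": ..., "area": ...}, ...}
    return stats
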