def normalize_annotations(self, muscima_pp_directory: str,
                              output_directory: str) -> None:

        destination_annotation_file = os.path.join(output_directory,
                                                   "annotations.csv")

        muscima_pp_image_generator = MuscimaPlusPlusImageGenerator()
        xml_file_paths = muscima_pp_image_generator.get_all_xml_file_paths(
            muscima_pp_directory)
        all_crop_objects = muscima_pp_image_generator.load_crop_objects_from_xml_files(
            xml_file_paths)

        data = []
        for crop_object in tqdm(all_crop_objects, "Converting annotations"):
            writer = re.search("W-..", crop_object.doc).group().lower()
            page = int(re.search("N-..", crop_object.doc).group()[2:])
            filename = "images/{0}_p{1:03d}.png".format(writer, page)
            class_name = crop_object.clsname
            top = crop_object.top
            left = crop_object.left
            bottom = crop_object.bottom
            right = crop_object.right
            data.append((filename, top, left, bottom, right, class_name))

        all_annotations = pd.DataFrame(data=data,
                                       columns=[
                                           "path_to_image", "top", "left",
                                           "bottom", "right", "class_name"
                                       ])
        all_annotations.to_csv(destination_annotation_file,
                               index=False,
                               float_format="%.0f")
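
The CSV written above can be inspected with pandas to verify the export; a minimal sketch (the output directory below is a placeholder):

# Hypothetical check of the exported file -- adjust the path to your output_directory.
import pandas as pd
annotations = pd.read_csv("output/annotations.csv")
# Columns as written above: path_to_image, top, left, bottom, right, class_name
print(annotations.head())
print(annotations["class_name"].value_counts().head(10))
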
Example #2
def create_statistics_for_full_images(glob_pattern_for_retrieving_muscima_images: str,
                                      muscima_pp_raw_data_directory: str,
                                      exported_absolute_dimensions_file_path: str,
                                      exported_relative_dimensions_file_path: str):
    image_paths = glob(glob_pattern_for_retrieving_muscima_images)

    if os.path.exists(exported_absolute_dimensions_file_path):
        os.remove(exported_absolute_dimensions_file_path)

    if os.path.exists(exported_relative_dimensions_file_path):
        os.remove(exported_relative_dimensions_file_path)

    image_generator = MuscimaPlusPlusImageGenerator()
    all_xml_files = image_generator.get_all_xml_file_paths(muscima_pp_raw_data_directory)

    absolute_dimensions = []
    relative_dimensions = []
    for xml_file in tqdm(all_xml_files, desc='Parsing annotation files'):
        crop_objects = image_generator.load_crop_objects_from_xml_file(xml_file)

        doc = crop_objects[0].doc
        result = re.match(r"CVC-MUSCIMA_W-(?P<writer>\d+)_N-(?P<page>\d+)_D-ideal", doc)
        writer = result.group("writer")
        page = "0" + result.group("page")

        for image_path in image_paths:
            image_path_result = re.match(r".*w-(?P<writer>\d+).*p(?P<page>\d+).png", image_path)
            image_path_writer = image_path_result.group("writer")
            image_path_page = image_path_result.group("page")
            if image_path_writer == writer and image_path_page == page:
                path = image_path
                break
        
        else:
            # No matching CVC-MUSCIMA image was found for this document; skip it
            continue
        image = Image.open(path, "r")  # type: Image.Image
        image_width = image.width
        image_height = image.height

        for crop_object in crop_objects:
            class_name = crop_object.clsname
            top, left, bottom, right = crop_object.bounding_box
            width = right - left
            height = bottom - top
            x_center = width / 2.0 + left
            y_center = height / 2.0 + top

            absolute_dimensions.append([class_name, left, right, top, bottom, x_center, y_center, width, height])
            relative_dimensions.append([class_name, left / image_width, right / image_width,
                                        top / image_height, bottom / image_height,
                                        x_center / image_width, y_center / image_height,
                                        width / image_width, height / image_height])

    absolute_statistics = pandas.DataFrame(absolute_dimensions, columns=["class","xmin","xmax","ymin","ymax","x_c","y_c","width","height"])
    absolute_statistics.to_csv(exported_absolute_dimensions_file_path, float_format="%.5f", index=False)
    relative_statistics = pandas.DataFrame(relative_dimensions, columns=["class","xmin","xmax","ymin","ymax","x_c","y_c","width","height"])
    relative_statistics.to_csv(exported_relative_dimensions_file_path, float_format="%.5f", index=False)
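
A possible invocation of create_statistics_for_full_images; all paths and the glob pattern are placeholders and assume the CVC-MUSCIMA images and MUSCIMA++ annotations have already been downloaded and extracted:

# Hypothetical call -- adjust paths and pattern to your local layout.
create_statistics_for_full_images(
    glob_pattern_for_retrieving_muscima_images="data/cvcmuscima/CvcMuscima-Distortions/ideal/w-*/image/p*.png",
    muscima_pp_raw_data_directory="data/muscima_pp_raw",
    exported_absolute_dimensions_file_path="statistics/absolute_dimensions.csv",
    exported_relative_dimensions_file_path="statistics/relative_dimensions.csv")
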
Example #3
def load_all_muscima_annotations(muscima_pp_dataset_directory) -> Dict[str, List[CropObject]]:
    """
    :param muscima_pp_dataset_directory:
    :return: Returns a dictionary of annotations with the filename as key
    """
    image_generator = MuscimaPlusPlusImageGenerator()
    raw_data_directory = os.path.join(muscima_pp_dataset_directory, "v1.0", "data", "cropobjects_withstaff")
    all_xml_files = [y for x in os.walk(raw_data_directory) for y in glob(os.path.join(x[0], '*.xml'))]

    crop_object_annotations = {}
    for xml_file in tqdm(all_xml_files, desc='Parsing annotation files'):
        crop_objects = image_generator.load_crop_objects_from_xml_file(xml_file)
        doc = crop_objects[0].doc
        crop_object_annotations[doc] = crop_objects
    return crop_object_annotations
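
A short usage sketch for load_all_muscima_annotations; the dataset directory is a placeholder and the key shown is only an example of the document-name format matched elsewhere in this listing:

# Hypothetical usage
annotations_by_document = load_all_muscima_annotations("data/muscima_pp")
crop_objects = annotations_by_document["CVC-MUSCIMA_W-01_N-10_D-ideal"]  # example key
print(len(crop_objects), "annotated objects on this page")
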
def prepare_annotations(muscima_image_directory: str, output_path: str,
                        muscima_pp_raw_dataset_directory: str,
                        exported_annotations_file_path: str,
                        annotations_path: str):
    image_paths = glob(muscima_image_directory)
    os.makedirs(output_path, exist_ok=True)

    image_generator = MuscimaPlusPlusImageGenerator()
    raw_data_directory = os.path.join(muscima_pp_raw_dataset_directory, "v1.0",
                                      "data", "cropobjects_manual")
    all_xml_files = [
        y for x in os.walk(raw_data_directory)
        for y in glob(os.path.join(x[0], '*.xml'))
    ]

    if os.path.exists(exported_annotations_file_path):
        os.remove(exported_annotations_file_path)

    shutil.rmtree(annotations_path, ignore_errors=True)

    for xml_file in tqdm(all_xml_files, desc='Parsing annotation files'):
        crop_objects = image_generator.load_crop_objects_from_xml_file(
            xml_file)
        doc = crop_objects[0].doc
        result = re.match(
            r"CVC-MUSCIMA_W-(?P<writer>\d+)_N-(?P<page>\d+)_D-ideal", doc)
        writer = result.group("writer")
        page = result.group("page")

        image_path = None
        for path in image_paths:
            result = re.match(r".*(?P<writer>w-\d+).*(?P<page>p\d+).png", path)
            if ("w-" + writer) == result.group("writer") and (
                    'p' + page.zfill(3)) == result.group("page"):
                image_path = path
                break

        image = Image.open(image_path, "r")  # type: Image.Image
        image_width = image.width
        image_height = image.height
        output_file_path = os.path.join(
            output_path, "w-{0}_p{1}.jpg".format(writer, page.zfill(3)))
        image.save(output_file_path, "JPEG", quality=95)
        create_annotations_in_pascal_voc_format(
            annotations_path, os.path.basename(output_file_path), crop_objects,
            image_width, image_height, 3)
Example #5
    def test_download_extract_and_render_all_symbols(self):
        # Arrange
        datasetDownloader = MuscimaPlusPlusDatasetDownloader()

        # Act
        datasetDownloader.download_and_extract_dataset("temp/muscima_pp_raw")
        image_generator = MuscimaPlusPlusImageGenerator()
        image_generator.extract_and_render_all_symbol_masks(
            "temp/muscima_pp_raw", "temp/muscima_img")
        all_image_files = [
            y for x in os.walk("temp/muscima_img")
            for y in glob(os.path.join(x[0], '*.png'))
        ]
        expected_number_of_symbols = 91254
        actual_number_of_symbols = len(all_image_files)

        # Assert
        self.assertEqual(expected_number_of_symbols, actual_number_of_symbols)

        # Cleanup
        os.remove(datasetDownloader.get_dataset_filename())
        shutil.rmtree("temp")
Example #6
def prepare_annotations(muscima_pp_dataset_directory: str,
                        exported_annotations_file_path: str,
                        annotations_path: str):
    muscima_image_directory = os.path.join(muscima_pp_dataset_directory,
                                           "v1.0", "data", "images", "*.png")
    image_paths = glob(muscima_image_directory)

    image_generator = MuscimaPlusPlusImageGenerator()
    xml_annotations_directory = os.path.join(muscima_pp_dataset_directory,
                                             "v1.0", "data",
                                             "cropobjects_manual")
    all_xml_files = [
        y for x in os.walk(xml_annotations_directory)
        for y in glob(os.path.join(x[0], '*.xml'))
    ]

    if os.path.exists(exported_annotations_file_path):
        os.remove(exported_annotations_file_path)

    shutil.rmtree(annotations_path, ignore_errors=True)

    for xml_file in tqdm(all_xml_files, desc='Parsing annotation files'):
        crop_objects = image_generator.load_crop_objects_from_xml_file(
            xml_file)
        doc = crop_objects[0].doc

        image_path = None
        for path in image_paths:
            if doc in path:
                image_path = path
                break

        image = Image.open(image_path, "r")  # type: Image.Image
        image_width = image.width
        image_height = image.height
        create_annotations_in_pascal_voc_format_from_crop_objects(
            annotations_path, os.path.basename(image_path), crop_objects,
            image_width, image_height, 3)
Example #7
def get_data(muscima_image_directory,
             muscima_pp_raw_data_directory: str,
             visualize=False) -> Tuple[List[Dict], Dict, Dict]:
    all_imgs = {}
    classes_count = {}
    class_mapping = {}

    image_generator = MuscimaPlusPlusImageGenerator()
    all_xml_files = image_generator.get_all_xml_file_paths(
        muscima_pp_raw_data_directory)

    for xml_file in tqdm(all_xml_files, desc='Parsing annotation files'):
        crop_objects = image_generator.load_crop_objects_from_xml_file(
            xml_file)
        doc = crop_objects[0].doc
        result = re.match(
            r"CVC-MUSCIMA_W-(?P<writer>\d+)_N-(?P<page>\d+)_D-ideal", doc)
        writer = result.group("writer")
        page = result.group("page")

        # image_path = os.path.join(image_directory, "w-{0}".format(writer), "p0{0}.png".format(page))
        image_path = os.path.join(muscima_image_directory,
                                  "CvcMuscima-Distortions", "ideal",
                                  "w-{0}".format(writer), "image",
                                  "p0{0}.png".format(page))

        img = cv2.imread(image_path)
        for crop_object in crop_objects:
            class_name = crop_object.clsname
            (top, left, bottom, right) = crop_object.bounding_box
            x1, y1, x2, y2 = left, top, right, bottom

            if class_name not in classes_count:
                classes_count[class_name] = 1
            else:
                classes_count[class_name] += 1

            if class_name not in class_mapping:
                class_mapping[class_name] = len(class_mapping)

            if image_path not in all_imgs:
                all_imgs[image_path] = {}

                (rows, cols) = img.shape[:2]
                all_imgs[image_path]['filepath'] = image_path
                all_imgs[image_path]['width'] = cols
                all_imgs[image_path]['height'] = rows
                all_imgs[image_path]['bboxes'] = []
                if np.random.randint(0, 6) > 0:
                    all_imgs[image_path]['imageset'] = 'trainval'
                else:
                    all_imgs[image_path]['imageset'] = 'test'

            all_imgs[image_path]['bboxes'].append({
                'class': class_name,
                'x1': x1,
                'x2': x2,
                'y1': y1,
                'y2': y2
            })
            if visualize:
                cv2.rectangle(img, (x1, y1), (x2, y2), (0, 0, 255))

        if visualize:
            cv2.imshow('img', img)
            cv2.waitKey(0)

    all_data = list(all_imgs.values())

    return all_data, classes_count, class_mapping
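
A usage sketch for get_data; both directories are placeholders:

# Hypothetical call -- adjust paths to your local layout.
all_data, classes_count, class_mapping = get_data(
    "data/cvcmuscima",        # root containing CvcMuscima-Distortions
    "data/muscima_pp_raw",    # MUSCIMA++ annotation directory
    visualize=False)
print(len(all_data), "images,", sum(classes_count.values()), "annotated objects")
print(class_mapping)  # class name -> integer id, in order of first appearance
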
def cut_images(muscima_image_directory: str,
               staff_vertical_positions_file: str, output_path: str,
               muscima_pp_raw_data_directory: str,
               exported_annotations_file_path: str, annotations_path: str):
    image_paths = [
        y for x in os.walk(muscima_image_directory)
        for y in glob(os.path.join(x[0], '*.png'))
    ]
    os.makedirs(output_path, exist_ok=True)

    image_generator = MuscimaPlusPlusImageGenerator()
    all_xml_files = image_generator.get_all_xml_file_paths(
        muscima_pp_raw_data_directory)

    if os.path.exists(exported_annotations_file_path):
        os.remove(exported_annotations_file_path)

    shutil.rmtree(annotations_path, ignore_errors=True)

    crop_object_annotations: List[Tuple[str, str, List[CropObject]]] = []

    for xml_file in tqdm(all_xml_files, desc='Parsing annotation files'):
        crop_objects = image_generator.load_crop_objects_from_xml_file(
            xml_file)
        doc = crop_objects[0].doc
        result = re.match(
            r"CVC-MUSCIMA_W-(?P<writer>\d+)_N-(?P<page>\d+)_D-ideal", doc)
        writer = result.group("writer")
        page = result.group("page")
        crop_object_annotations.append(
            ('w-' + writer, 'p' + page.zfill(3), crop_objects))

    with open(staff_vertical_positions_file) as f:
        content = f.readlines()
    # Strip trailing whitespace (e.g. '\n') and split each line on ';'
    annotations = [x.strip().split(';') for x in content]

    images_to_cut: List[Tuple[str, str, str, str]] = []

    for image_path in image_paths:
        result = re.match(r".*(?P<writer>w-\d+).*(?P<page>p\d+).png",
                          image_path)
        writer = result.group("writer")
        page = result.group("page")
        coordinates = None
        for annotation in annotations:
            if annotation[0] == writer and annotation[1] == page:
                coordinates = annotation[2]
                break

        if coordinates is not None:
            images_to_cut.append((image_path, writer, page, coordinates))

    for image_to_cut in tqdm(images_to_cut, desc="Cutting images"):
        path, writer, page, coordinates = image_to_cut
        staff_line_pairs = coordinates.split(',')
        image = Image.open(path, "r")  # type: Image.Image
        width = image.width
        objects_appearing_in_image: Optional[List[CropObject]] = None
        for crop_object_annotation in crop_object_annotations:
            if writer == crop_object_annotation[0] and page == crop_object_annotation[1]:
                objects_appearing_in_image = crop_object_annotation[2]
                break

        if objects_appearing_in_image is None:
            # Image has annotated staff-lines, but does not have corresponding crop-object annotations, so skip it
            continue

        i = 1
        for pair in staff_line_pairs:
            y_top, y_bottom = map(int, pair.split(':'))
            previous_width = 0
            overlap = 100
            for crop_width in range(500, 3501, 500):

                if crop_width > width:
                    crop_width = width
                image_crop_bounding_box = (previous_width, y_top, crop_width,
                                           y_bottom)
                image_crop_bounding_box_top_left_bottom_right = (
                    y_top, previous_width, y_bottom, crop_width)
                previous_width = crop_width - overlap

                file_name = "{0}_{1}_{2}.jpg".format(writer, page, i)
                i += 1

                objects_appearing_in_cropped_image = \
                    compute_objects_appearing_in_cropped_image(file_name,
                                                               image_crop_bounding_box_top_left_bottom_right,
                                                               objects_appearing_in_image)

                cropped_image = image.crop(image_crop_bounding_box).convert(
                    'RGB')

                create_annotations_in_plain_format(
                    exported_annotations_file_path,
                    objects_appearing_in_cropped_image)
                create_annotations_in_pascal_voc_format(
                    annotations_path, file_name,
                    objects_appearing_in_cropped_image, cropped_image.width,
                    cropped_image.height, 3)

                # draw_bounding_boxes(cropped_image, objects_appearing_in_cropped_image)
                output_file = os.path.join(output_path, file_name)
                cropped_image.save(output_file, "JPEG", quality=95)
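
compute_objects_appearing_in_cropped_image, create_annotations_in_plain_format and create_annotations_in_pascal_voc_format are not included in this listing. A rough sketch of the first helper, assuming it keeps only objects fully contained in the crop window and translates their coordinates into the crop's frame (mirroring the inline filtering in the second cut_images variant further down); the exact return format is an assumption:

def compute_objects_appearing_in_cropped_image(file_name, crop_bounding_box_top_left_bottom_right, all_objects):
    # Hypothetical sketch -- the real helper is not part of this listing.
    crop_top, crop_left, crop_bottom, crop_right = crop_bounding_box_top_left_bottom_right
    objects_in_crop = []
    for crop_object in all_objects:
        top, left, bottom, right = crop_object.bounding_box
        # Keep only objects fully contained in the crop window
        if top >= crop_top and left >= crop_left and bottom <= crop_bottom and right <= crop_right:
            translated_box = (top - crop_top, left - crop_left,
                              bottom - crop_top, right - crop_left)
            objects_in_crop.append((file_name, crop_object.clsname, translated_box))
    return objects_in_crop
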
Example #9
#%% Open OMR
from omrdatasettools.downloaders.OpenOmrDatasetDownloader import OpenOmrDatasetDownloader

dataset_downloader = OpenOmrDatasetDownloader("./data/openomr")
dataset_downloader.download_and_extract_dataset()

#%% Capitan
'''
from omrdatasettools.downloaders.CapitanDatasetDownloader import CapitanDatasetDownloader
dataset_downloader = CapitanDatasetDownloader("./data/capitan")
dataset_downloader.download_and_extract_dataset()
'''

#%% MUSCIMA
from omrdatasettools.downloaders.MuscimaPlusPlusDatasetDownloader import MuscimaPlusPlusDatasetDownloader

dataset_downloader = MuscimaPlusPlusDatasetDownloader("./data/muscima")
dataset_downloader.download_and_extract_dataset()

from omrdatasettools.image_generators.MuscimaPlusPlusImageGenerator import MuscimaPlusPlusImageGenerator
imgen = MuscimaPlusPlusImageGenerator()
imgen.extract_and_render_all_symbol_masks(
    raw_data_directory='./data/muscima',
    destination_directory='./data/muscima/images')

#%% Rebelo
from omrdatasettools.downloaders.RebeloMusicSymbolDataset1Downloader import RebeloMusicSymbolDataset1Downloader
dataset_downloader = RebeloMusicSymbolDataset1Downloader("./data/robelo")
dataset_downloader.download_and_extract_dataset()
def prepare_annotations(muscima_image_directory: str, output_path: str,
                        muscima_pp_raw_dataset_directory: str,
                        exported_annotations_file_path: str,
                        exported_class_mapping_path: str):
    image_paths = glob(muscima_image_directory)
    os.makedirs(output_path, exist_ok=True)

    image_generator = MuscimaPlusPlusImageGenerator()
    raw_data_directory = os.path.join(muscima_pp_raw_dataset_directory, "v1.0",
                                      "data", "cropobjects_manual")
    all_xml_files = [
        y for x in os.walk(raw_data_directory)
        for y in glob(os.path.join(x[0], '*.xml'))
    ]

    if os.path.exists(exported_annotations_file_path):
        os.remove(exported_annotations_file_path)

    if os.path.exists(exported_class_mapping_path):
        os.remove(exported_class_mapping_path)

    classes = []

    with open(exported_annotations_file_path, "w") as annotations_csv:
        for xml_file in tqdm(all_xml_files, desc='Parsing annotation files'):
            crop_objects = image_generator.load_crop_objects_from_xml_file(
                xml_file)
            doc = crop_objects[0].doc
            result = re.match(
                r"CVC-MUSCIMA_W-(?P<writer>\d+)_N-(?P<page>\d+)_D-ideal", doc)
            writer = result.group("writer")
            page = result.group("page")

            image_path = None
            for path in image_paths:
                result = re.match(r".*(?P<writer>w-\d+).*(?P<page>p\d+).png",
                                  path)
                if ("w-" + writer) == result.group("writer") and (
                        'p' + page.zfill(3)) == result.group("page"):
                    image_path = path
                    break

            image = Image.open(image_path, "r")  # type: Image.Image
            output_file_path = os.path.join(
                output_path, "w-{0}_p{1}.png".format(writer, page.zfill(3)))
            image.save(output_file_path)
            for crop_object in crop_objects:
                class_name = crop_object.clsname
                classes.append(class_name)
                ymin, xmin, ymax, xmax = crop_object.bounding_box

                annotations_csv.write(
                    "muscima_pp_images/{0},{1},{2},{3},{4},{5}\n".format(
                        os.path.basename(output_file_path), xmin, ymin, xmax,
                        ymax, class_name))

    unique_classes = list(set(classes))
    unique_classes.sort()

    with open(exported_class_mapping_path, "w") as mapping_csv:
        for index, class_name in enumerate(unique_classes):
            mapping_csv.write("{0},{1}\n".format(class_name, index))
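
The exported class mapping is a plain CSV with one "class_name,index" pair per line; a minimal sketch for reading it back (the file path is a placeholder):

# Hypothetical read-back of the mapping written above.
class_mapping = {}
with open("mapping.csv") as mapping_csv:
    for line in mapping_csv:
        class_name, index = line.strip().split(",")
        class_mapping[class_name] = int(index)
print(class_mapping)
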
def cut_images(muscima_image_directory: str, staff_vertical_positions_file: str, output_path: str,
               muscima_pp_raw_data_directory: str):
    image_paths = [y for x in os.walk(muscima_image_directory) for y in glob(os.path.join(x[0], '*.png'))]
    os.makedirs(output_path, exist_ok=True)

    with open(os.path.join(output_path, "Annotations.txt"), "w") as annotations_file:

        image_generator = MuscimaPlusPlusImageGenerator()
        all_xml_files = image_generator.get_all_xml_file_paths(muscima_pp_raw_data_directory)

        crop_object_annotations: List[Tuple[str, str, List[CropObject]]] = []

        for xml_file in tqdm(all_xml_files, desc='Parsing annotation files'):
            crop_objects = image_generator.load_crop_objects_from_xml_file(xml_file)
            doc = crop_objects[0].doc
            result = re.match(r"CVC-MUSCIMA_W-(?P<writer>\d+)_N-(?P<page>\d+)_D-ideal", doc)
            writer = result.group("writer")
            page = result.group("page")
            crop_object_annotations.append(('w-' + writer, 'p' + page.zfill(3), crop_objects))

        with open(staff_vertical_positions_file) as f:
            content = f.readlines()
        # Strip trailing whitespace (e.g. '\n') and split each line on ';'
        annotations = [x.strip().split(';') for x in content]

        images_to_cut: List[Tuple[str, str, str, str]] = []

        for image_path in image_paths:
            result = re.match(r".*(?P<writer>w-\d+).*(?P<page>p\d+).png", image_path)
            writer = result.group("writer")
            page = result.group("page")
            coordinates = None
            for annotation in annotations:
                if annotation[0] == writer and annotation[1] == page:
                    coordinates = annotation[2]
                    break

            if coordinates is not None:
                images_to_cut.append((image_path, writer, page, coordinates))

        for image_to_cut in tqdm(images_to_cut, desc="Cutting images"):
            path, writer, page, coordinates = image_to_cut
            staff_line_pairs = coordinates.split(',')
            image = Image.open(path, "r")
            width = image.width
            crop_objects_of_image: Optional[List[CropObject]] = None
            for crop_object_annotation in crop_object_annotations:
                if writer == crop_object_annotation[0] and page == crop_object_annotation[1]:
                    crop_objects_of_image = crop_object_annotation[2]
                    break

            if crop_objects_of_image is None:
                # Image has annotated staff-lines, but does not have corresponding crop-object annotations, so skip it
                continue

            i = 1
            for pair in staff_line_pairs:
                y_top, y_bottom = map(int, pair.split(':'))
                previous_width = 0
                overlap = 100
                for crop_width in range(500, 3501, 500):
                    objects_of_cropped_image: List[Tuple[str, Tuple[int, int, int, int]]] = []
                    if crop_width > width:
                        crop_width = width
                    image_crop_bounding_box = (previous_width, y_top, crop_width, y_bottom)

                    file_name = "{0}_{1}_{2}.png".format(writer, page, i)

                    for crop_object in crop_objects_of_image:
                        if bounding_box_in(image_crop_bounding_box, crop_object.bounding_box):
                            top, left, bottom, right = crop_object.bounding_box
                            translated_bounding_box = (
                                top - y_top, left - previous_width, bottom - y_top, right - previous_width)
                            trans_top, trans_left, trans_bottom, trans_right = translated_bounding_box
                            objects_of_cropped_image.append((crop_object.clsname, translated_bounding_box))
                            annotations_file.write("{0},{1},{2},{3},{4},{5}\n".format(file_name,
                                                                                      trans_left,
                                                                                      trans_top,
                                                                                      trans_right,
                                                                                      trans_bottom,
                                                                                      crop_object.clsname))

                    cropped_image = image.crop(image_crop_bounding_box).convert('RGB')

                    # draw_bounding_boxes(cropped_image, objects_of_cropped_image)

                    output_file = os.path.join(output_path, file_name)
                    cropped_image.save(output_file)
                    previous_width = crop_width - overlap
                    i += 1
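
bounding_box_in is referenced above but not defined in this listing; a minimal containment check, assuming the argument orders implied by the call site (PIL-style crop box left/top/right/bottom, MUSCIMA++-style object box top/left/bottom/right), could be:

def bounding_box_in(image_crop_bounding_box, crop_object_bounding_box):
    # Hypothetical sketch -- the real helper is not part of this listing.
    crop_left, crop_top, crop_right, crop_bottom = image_crop_bounding_box
    top, left, bottom, right = crop_object_bounding_box
    return (top >= crop_top and bottom <= crop_bottom and
            left >= crop_left and right <= crop_right)
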
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--dataset_dir",
        default="data/muscima_pp/v1.0/data/cropobjects_withstaff/")
    parser.add_argument("--mapping_output_path",
                        default="mapping_all_classes.txt")
    parser.add_argument("--remove_line_shaped_or_construct",
                        type=bool,
                        default=False)
    args = parser.parse_args()

    names = glob.glob(os.path.join(args.dataset_dir, "*.xml"))
    data = {}
    image_generator = MuscimaPlusPlusImageGenerator()

    for name in tqdm(names,
                     desc="Reading all objects from MUSCIMA++ annotations"):
        data[name] = image_generator.load_crop_objects_from_xml_file(name)
    datas = []

    for value in data.values():
        for val in value:
            datas.append(val)

    c = Counter([x.clsname for x in datas])

    ignored_classes = [
    ]  # ["double_sharp", "numeral_2", "numeral_5", "numeral_6", "numeral_7", "numeral_8"]
    line_shaped_or_construct = [