Example #1
    def _samples_from_book(self, root, img, page_id) -> Iterable[Dict[str, Any]]:
        ns = {"ns": root.nsmap[root.prefix]}
        page = root.find(".//ns:Page", namespaces=ns)
        imgfile = page.attrib.get("imageFilename")
        if not split_all_ext(img)[0].endswith(split_all_ext(imgfile)[0]):
            logger.warning(
                "Mapping of image file to xml file invalid: {} vs {} (comparing basename {} vs {})".format(
                    img, imgfile, split_all_ext(img)[0], split_all_ext(imgfile)[0]
                )
            )

        img_w = int(page.attrib.get("imageWidth"))
        for textline in root.findall(".//ns:TextLine", namespaces=ns):
            if self.skip_commented and len(textline.attrib.get("comments", "")):
                continue
            orientation = float(textline.getparent().attrib.get("orientation", default=0))

            yield {
                "page_id": page_id,
                "ns": ns,
                "rtype": textline.getparent().attrib.get("type", default=""),
                "xml_element": textline,
                "image_path": img,
                "id": "{}/{}".format(page_id, textline.attrib.get("id")),
                "base_name": textline.attrib.get("id"),
                "coords": textline.find("./ns:Coords", namespaces=ns).attrib.get("points"),
                "orientation": orientation,
                "img_width": img_w,
                "text": None,
            }
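Every example on this page relies on split_all_ext. Judging from its usage (pairing line_0001.nrm.png with line_0001.gt.txt, or writing page_0001.pred.xml next to page_0001.xml), it splits off the complete extension chain rather than only the last suffix. Below is a minimal sketch with that assumed behaviour; the real helper ships with calamari_ocr and may handle more edge cases.

import os
from typing import Tuple


def split_all_ext(path: str) -> Tuple[str, str]:
    # Hedged sketch only: split at the first dot of the basename so that
    # chained suffixes such as ".gt.txt" or ".pred.abbyy.xml" count as one
    # extension. The packaged implementation may differ in edge cases.
    dirname, filename = os.path.split(path)
    base, dot, ext = filename.partition(".")
    return os.path.join(dirname, base), dot + ext


# Behaviour implied by the examples on this page:
# split_all_ext("book/line_0001.nrm.png") -> ("book/line_0001", ".nrm.png")
# split_all_ext("book/line_0001.gt.txt")  -> ("book/line_0001", ".gt.txt")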
Example #2
def create_train_dataset(args, dataset_args=None):
    gt_extension = args.gt_extension if args.gt_extension is not None else DataSetType.gt_extension(args.dataset)

    # Training dataset
    print("Resolving input files")
    input_image_files = sorted(glob_all(args.files))
    if not args.text_files:
        if gt_extension:
            gt_txt_files = [split_all_ext(f)[0] + gt_extension for f in input_image_files]
        else:
            gt_txt_files = [None] * len(input_image_files)
    else:
        gt_txt_files = sorted(glob_all(args.text_files))
        input_image_files, gt_txt_files = keep_files_with_same_file_name(input_image_files, gt_txt_files)
        for img, gt in zip(input_image_files, gt_txt_files):
            if split_all_ext(os.path.basename(img))[0] != split_all_ext(os.path.basename(gt))[0]:
                raise Exception("Expected identical basenames of file: {} and {}".format(img, gt))

    if len(set(gt_txt_files)) != len(gt_txt_files):
        raise Exception("Some image are occurring more than once in the data set.")

    dataset = create_dataset(
        args.dataset,
        DataSetMode.TRAIN,
        images=input_image_files,
        texts=gt_txt_files,
        skip_invalid=not args.no_skip_invalid_gt,
        args=dataset_args if dataset_args else {},
    )
    print("Found {} files in the dataset".format(len(dataset)))
    return dataset
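Several of these examples call keep_files_with_same_file_name(input_image_files, gt_txt_files) before the pairwise basename check. Its implementation is not shown on this page; as a purely hypothetical stand-in (not the calamari_ocr code), it can be pictured as an intersection on the extension-stripped basenames:

import os


def keep_files_with_same_file_name(files_a, files_b):
    # Hypothetical sketch: keep only entries whose extension-less basename
    # occurs in both lists, so the two results can be zipped pairwise.
    # Assumes split_all_ext (as sketched after Example #1) is in scope.
    def key(path):
        return split_all_ext(os.path.basename(path))[0]

    common = {key(p) for p in files_a} & {key(p) for p in files_b}
    return (
        sorted(p for p in files_a if key(p) in common),
        sorted(p for p in files_b if key(p) in common),
    )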
Example #3
    def prepare_for_mode(self, mode: PipelineMode):
        logger.info("Resolving input files")
        input_image_files = sorted(glob_all(self.images))

        if not self.texts:
            gt_txt_files = [split_all_ext(f)[0] + self.gt_extension for f in input_image_files]
        else:
            gt_txt_files = sorted(glob_all(self.texts))
            if mode in INPUT_PROCESSOR:
                input_image_files, gt_txt_files = keep_files_with_same_file_name(input_image_files, gt_txt_files)
                for img, gt in zip(input_image_files, gt_txt_files):
                    if split_all_ext(os.path.basename(img))[0] != split_all_ext(os.path.basename(gt))[0]:
                        raise Exception(f"Expected identical basenames of file: {img} and {gt}")
            else:
                input_image_files = None

        if mode in {PipelineMode.TRAINING, PipelineMode.EVALUATION}:
            if len(set(gt_txt_files)) != len(gt_txt_files):
                logger.warning(
                    "Some ground truth text files occur more than once in the data set "
                    "(ignore this warning, if this was intended)."
                )
            if len(set(input_image_files)) != len(input_image_files):
                logger.warning(
                    "Some images occur more than once in the data set. " "This warning should usually not be ignored."
                )

        self.images = input_image_files
        self.texts = gt_txt_files
Example #4
    def _samples_from_book(self, root, img):
        ns = {"ns": root.nsmap[None]}
        imgfile = root.xpath('//ns:Page',
                             namespaces=ns)[0].attrib["imageFilename"]
        if not split_all_ext(img)[0].endswith(split_all_ext(imgfile)[0]):
            raise Exception("Mapping of image file to xml file invalid: {} vs {} (comparing basename {} vs {})".format(
                img, imgfile, split_all_ext(img)[0], split_all_ext(imgfile)[0]))

        img_w = int(root.xpath('//ns:Page',
                               namespaces=ns)[0].attrib["imageWidth"])
        for l in root.xpath('//ns:TextLine', namespaces=ns):
            try:
                orientation = float(l.xpath('../@orientation', namespaces=ns).pop())
            except (ValueError, IndexError):
                orientation = 0

            yield {
                'ns': ns,
                "rtype": l.xpath('../@type', namespaces=ns).pop(),
                'xml_element': l,
                "image_path": img,
                "id": l.xpath('./@id', namespaces=ns).pop(),
                "coords": l.xpath('./ns:Coords/@points',
                                  namespaces=ns).pop(),
                "orientation": orientation,
                "img_width": img_w,
                "text": None,
            }
Example #5
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--files", nargs="+", required=True,
                        help="The image files to predict with its gt and pred")
    parser.add_argument("--html_output", type=str, required=True,
                        help="Where to write the html file")
    parser.add_argument("--open", action="store_true",
                        help="Automatically open the file")

    args = parser.parse_args()
    img_files = sorted(glob_all(args.files))
    gt_files = [split_all_ext(f)[0] + ".gt.txt" for f in img_files]
    pred_files = [split_all_ext(f)[0] + ".pred.txt" for f in img_files]

    with open(args.html_output, 'w') as html:
        html.write("""
                   <!DOCTYPE html>
                   <html lang="en">
                   <head>
                       <meta charset="utf-8"/>
                   </head>
                   <body>
                   <ul>""")

        for img, gt, pred in zip(img_files, gt_files, pred_files):
            html.write("<li><p><img src=\"file://{}\"></p><p>{}</p><p>{}</p>\n".format(
                img.replace('\\', '/').replace('/', '\\\\'), open(gt).read(), open(pred).read()
            ))

        html.write("</ul></body></html>")

    if args.open:
        webbrowser.open(args.html_output)
Example #6
    def prepare_for_mode(self, mode: PipelineMode) -> 'PipelineParams':
        from calamari_ocr.ocr.dataset.datareader.factory import DataReaderFactory
        assert (self.type is not None)
        params_out = deepcopy(self)
        # Training dataset
        logger.info("Resolving input files")
        if isinstance(self.type, str):
            try:
                self.type = DataSetType.from_string(self.type)
            except ValueError:
                # Not a valid type, must be custom
                if self.type not in DataReaderFactory.CUSTOM_READERS:
                    raise KeyError(
                        f"DataSetType {self.type} is neither a standard DataSetType or preset as custom "
                        f"reader ({list(DataReaderFactory.CUSTOM_READERS.keys())})"
                    )
        if not isinstance(self.type, str) and self.type not in {
                DataSetType.RAW, DataSetType.GENERATED_LINE
        }:
            input_image_files = sorted(glob_all(
                self.files)) if self.files else None

            if not self.text_files:
                if self.gt_extension:
                    gt_txt_files = [
                        split_all_ext(f)[0] + self.gt_extension
                        for f in input_image_files
                    ]
                else:
                    gt_txt_files = None
            else:
                gt_txt_files = sorted(glob_all(self.text_files))
                if mode in INPUT_PROCESSOR:
                    input_image_files, gt_txt_files = keep_files_with_same_file_name(
                        input_image_files, gt_txt_files)
                    for img, gt in zip(input_image_files, gt_txt_files):
                        if split_all_ext(
                                os.path.basename(img))[0] != split_all_ext(
                                    os.path.basename(gt))[0]:
                            raise Exception(
                                "Expected identical basenames of file: {} and {}"
                                .format(img, gt))
                else:
                    input_image_files = None

            if mode in {PipelineMode.Training, PipelineMode.Evaluation}:
                if len(set(gt_txt_files)) != len(gt_txt_files):
                    logger.warning(
                        "Some ground truth text files occur more than once in the data set "
                        "(ignore this warning, if this was intended).")
                if len(set(input_image_files)) != len(input_image_files):
                    logger.warning(
                        "Some images occur more than once in the data set. "
                        "This warning should usually not be ignored.")

            params_out.files = input_image_files
            params_out.text_files = gt_txt_files
        return params_out
Example #7
def data_reader_from_params(mode: PipelineMode,
                            params: PipelineParams) -> DataReader:
    assert (params.type is not None)
    from calamari_ocr.ocr.dataset.dataset_factory import create_data_reader
    # Training dataset
    logger.info("Resolving input files")
    if params.type not in {DataSetType.RAW, DataSetType.GENERATED_LINE}:
        input_image_files = sorted(glob_all(
            params.files)) if params.files else None

        if not params.text_files:
            if params.gt_extension:
                gt_txt_files = [
                    split_all_ext(f)[0] + params.gt_extension
                    for f in input_image_files
                ]
            else:
                gt_txt_files = None
        else:
            gt_txt_files = sorted(glob_all(params.text_files))
            if mode in INPUT_PROCESSOR:
                input_image_files, gt_txt_files = keep_files_with_same_file_name(
                    input_image_files, gt_txt_files)
                for img, gt in zip(input_image_files, gt_txt_files):
                    if split_all_ext(
                            os.path.basename(img))[0] != split_all_ext(
                                os.path.basename(gt))[0]:
                        raise Exception(
                            "Expected identical basenames of file: {} and {}".
                            format(img, gt))
            else:
                input_image_files = None

        if mode in {PipelineMode.Training, PipelineMode.Evaluation}:
            if len(set(gt_txt_files)) != len(gt_txt_files):
                logger.warning(
                    "Some ground truth text files occur more than once in the data set "
                    "(ignore this warning, if this was intended).")
            if len(set(input_image_files)) != len(input_image_files):
                logger.warning(
                    "Some images occur more than once in the data set. "
                    "This warning should usually not be ignored.")
    else:
        input_image_files = params.files
        gt_txt_files = params.text_files

    dataset = create_data_reader(
        params.type,
        mode,
        images=input_image_files,
        texts=gt_txt_files,
        skip_invalid=params.skip_invalid,
        args=params.data_reader_args
        if params.data_reader_args else FileDataReaderArgs(),
    )
    logger.info(f"Found {len(dataset)} files in the dataset")
    return dataset
Example #8
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--files", nargs="+", type=str, required=True,
                        help="The image files to copy")
    parser.add_argument("--target_dir", type=str, required=True,
                        help="")
    parser.add_argument("--index_files", action="store_true")
    parser.add_argument("--convert_images", type=str,
                        help="Convert the image to a given type (by default use original format). E. g. jpg, png, tif, ...")
    parser.add_argument("--gt_ext", type=str, default=".gt.txt")
    parser.add_argument("--index_ext", type=str, default=".index")

    args = parser.parse_args()

    if args.convert_images and not args.convert_images.startswith("."):
        args.convert_images = "." + args.convert_images

    args.target_dir = os.path.expanduser(args.target_dir)

    print("Resolving files")
    image_files = glob_all(args.files)
    gt_files = [split_all_ext(p)[0] + ".gt.txt" for p in image_files]

    if len(image_files) == 0:
        raise Exception("No files found")

    if not os.path.isdir(args.target_dir):
        os.makedirs(args.target_dir)

    for i, (img, gt) in tqdm(enumerate(zip(image_files, gt_files)), total=len(gt_files), desc="Copying"):
        if not os.path.exists(img) or not os.path.exists(gt):
            # skip non existing examples
            continue

        # img with optional convert
        try:
            ext = split_all_ext(img)[1]
            target_ext = args.convert_images if args.convert_images else ext
            target_name = os.path.join(args.target_dir, "{:08}{}".format(i, target_ext))
            if ext == target_ext:
                shutil.copyfile(img, target_name)
            else:
                data = skimage_io.imread(img)
                skimage_io.imsave(target_name, data)

        except Exception:
            # skip images that cannot be read or converted
            continue

        # gt txt
        target_name = os.path.join(args.target_dir, "{:08}{}".format(i, args.gt_ext))
        shutil.copyfile(gt, target_name)

        if args.index_files:
            target_name = os.path.join(args.target_dir, "{:08}{}".format(i, args.index_ext))
            with open(target_name, "w") as f:
                f.write(str(i))
Example #9
def create_test_dataset(
    cfg: CfgNode,
    dataset_args=None
) -> Union[List[Union[RawDataSet, FileDataSet, AbbyyDataSet, PageXMLDataset,
                      Hdf5DataSet, ExtendedPredictionDataSet,
                      GeneratedLineDataset]], None]:
    if cfg.DATASET.VALID.TEXT_FILES:
        assert len(cfg.DATASET.VALID.PATH) == len(cfg.DATASET.VALID.TEXT_FILES)

    if cfg.DATASET.VALID.PATH:
        validation_dataset_list = []
        print("Resolving validation files")
        for i, valid_path in enumerate(cfg.DATASET.VALID.PATH):
            validation_image_files = glob_all(valid_path)
            dataregistry.register(
                i, os.path.basename(os.path.dirname(valid_path)),
                len(validation_image_files))

            if not cfg.DATASET.VALID.TEXT_FILES:
                val_txt_files = [
                    split_all_ext(f)[0] + cfg.DATASET.VALID.GT_EXTENSION
                    for f in validation_image_files
                ]
            else:
                val_txt_files = sorted(
                    glob_all(cfg.DATASET.VALID.TEXT_FILES[i]))
                validation_image_files, val_txt_files = keep_files_with_same_file_name(
                    validation_image_files, val_txt_files)
                for img, gt in zip(validation_image_files, val_txt_files):
                    if split_all_ext(
                            os.path.basename(img))[0] != split_all_ext(
                                os.path.basename(gt))[0]:
                        raise Exception(
                            "Expected identical basenames of validation file: {} and {}"
                            .format(img, gt))

            if len(set(val_txt_files)) != len(val_txt_files):
                raise Exception(
                    "Some validation images are occurring more than once in the data set."
                )

            validation_dataset = create_dataset(
                cfg.DATASET.VALID.TYPE,
                DataSetMode.TRAIN,
                images=validation_image_files,
                texts=val_txt_files,
                skip_invalid=not cfg.DATALOADER.NO_SKIP_INVALID_GT,
                args=dataset_args,
            )
            print("Found {} files in the validation dataset".format(
                len(validation_dataset)))
            validation_dataset_list.append(validation_dataset)
    else:
        validation_dataset_list = None

    return validation_dataset_list
Example #10
    def _samples_gt_from_book(self, root, img,
                              skipcommented=True):
        ns = {"ns": root.nsmap[None]}
        imgfile = root.xpath('//ns:Page',
                             namespaces=ns)[0].attrib["imageFilename"]
        if (self.mode == DataSetMode.TRAIN or self.mode == DataSetMode.PRED_AND_EVAL) and not split_all_ext(img)[0].endswith(split_all_ext(imgfile)[0]):
            raise Exception("Mapping of image file to xml file invalid: {} vs {} (comparing basename {} vs {})".format(
                img, imgfile, split_all_ext(img)[0], split_all_ext(imgfile)[0]))

        img_w = int(root.xpath('//ns:Page',
                               namespaces=ns)[0].attrib["imageWidth"])
        textlines = root.xpath('//ns:TextLine', namespaces=ns)

        for textline in textlines:
            tequivs = textline.xpath('./ns:TextEquiv[@index="{}"]'.format(self.text_index),
                                namespaces=ns)
            if len(tequivs) > 1:
                logger.warning("PageXML is invalid: TextLine includes TextEquivs with non unique ids")

            parat = textline.attrib
            if skipcommented and "comments" in parat and parat["comments"]:
                continue

            if tequivs is not None and len(tequivs) > 0:
                l = tequivs[0]
                text = l.xpath('./ns:Unicode', namespaces=ns).pop().text
            else:
                l = None
                text = None

            if not text:
                if self.skip_invalid:
                    continue
                elif self._non_existing_as_empty:
                    text = ""
                else:
                    raise Exception("Empty text field")

            try:
                orientation = float(textline.xpath('../@orientation', namespaces=ns).pop())
            except (ValueError, IndexError):
                orientation = 0

            yield {
                'ns': ns,
                "rtype": textline.xpath('../@type', namespaces=ns).pop(),
                'xml_element': l,
                "image_path": img,
                "id": textline.xpath('./@id', namespaces=ns).pop(),
                "text": text,
                "coords": textline.xpath('./ns:Coords/@points',
                                  namespaces=ns).pop(),
                "orientation": orientation,
                "img_width": img_w
            }
Example #11
def main():
    parser = ArgumentParser()
    parser.add_argument("--checkpoint",
                        type=str,
                        required=True,
                        help="The checkpoint used to resume")
    parser.add_argument("--validation",
                        type=str,
                        nargs="+",
                        help="Validation line files used for early stopping")
    parser.add_argument("files",
                        type=str,
                        nargs="+",
                        help="The files to use for training")

    args = parser.parse_args()

    # Train dataset
    input_image_files = glob_all(args.files)
    gt_txt_files = [split_all_ext(f)[0] + ".gt.txt" for f in input_image_files]

    if len(set(gt_txt_files)) != len(gt_txt_files):
        raise Exception(
            "Some image are occurring more than once in the data set.")

    dataset = FileDataSet(input_image_files, gt_txt_files)

    print("Found {} files in the dataset".format(len(dataset)))

    # Validation dataset
    if args.validation:
        validation_image_files = glob_all(args.validation)
        val_txt_files = [
            split_all_ext(f)[0] + ".gt.txt" for f in validation_image_files
        ]

        if len(set(val_txt_files)) != len(val_txt_files):
            raise Exception(
                "Some validation images are occurring more than once in the data set."
            )

        validation_dataset = FileDataSet(validation_image_files, val_txt_files)
        print("Found {} files in the validation dataset".format(
            len(validation_dataset)))
    else:
        validation_dataset = None

    with open(args.checkpoint + '.json', 'r') as f:
        checkpoint_params = json_format.Parse(f.read(), CheckpointParams())

        trainer = Trainer(checkpoint_params,
                          dataset,
                          validation_dataset=validation_dataset,
                          restore=args.checkpoint)
        trainer.train(progress_bar=True)
Example #12
 def store(self):
     extension = self.params.pred_extension
     if self._last_page_id:
         self._store_page(extension, self._last_page_id)
         self._last_page_id = None
     else:
         for xml in tqdm(
             self.params.xmlfiles,
             desc="Writing PageXML files",
             total=len(self.params.xmlfiles),
         ):
             page = self.pages(split_all_ext(xml)[0])
             with open(split_all_ext(xml)[0] + extension, "w", encoding="utf-8") as f:
                 f.write(etree.tounicode(page.getroottree(), pretty_print=True))
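The target path in the store() above is derived only from the XML name: assuming pred_extension is ".pred.xml", a page loaded from "book/page_0001.xml" is written next to it (illustration only, not library code):

# Assumes split_all_ext (as sketched after Example #1) is in scope.
xml = "book/page_0001.xml"
extension = ".pred.xml"                  # assumed value of params.pred_extension
out_path = split_all_ext(xml)[0] + extension
# -> "book/page_0001.pred.xml"; the original ground-truth XML stays untouched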
Example #13
 def to_prediction(self):
     self.files = sorted(glob_all(self.files))
     pred = deepcopy(self)
     pred.files = [
         split_all_ext(f)[0] + self.pred_extension for f in self.files
     ]
     return pred
Example #14
    def _generate_epoch(self, text_only) -> Generator[InputSample, None, None]:
        filenames = list(self.params.files)
        if self.mode == PipelineMode.TRAINING:
            shuffle(filenames)

        for filename in filenames:
            basename = split_all_ext(filename)[0]
            with h5py.File(filename, 'r') as f:
                codec = list(map(chr, f['codec']))
                if text_only:
                    for i, (text, idx) in enumerate(
                            zip(f['transcripts'],
                                range(len(f['transcripts'])))):
                        text = "".join([codec[c] for c in text])
                        fold_id = idx % self.params.n_folds if self.params.n_folds > 0 else -1
                        yield InputSample(
                            None, text,
                            SampleMeta(id=f"{basename}/{i}", fold_id=fold_id))
                else:
                    gen = zip(f['images'], f['images_dims'], f['transcripts'],
                              range(len(f['images'])))
                    if self.mode == PipelineMode.TRAINING:
                        gen = list(gen)
                        shuffle(gen)

                    for i, (image, shape, text, idx) in enumerate(gen):
                        image = np.reshape(image, shape)
                        text = "".join([codec[c] for c in text])
                        fold_id = idx % self.params.n_folds if self.params.n_folds > 0 else -1
                        yield InputSample(
                            image, text,
                            SampleMeta(id=f"{basename}/{i}", fold_id=fold_id))
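Example #14 expects an HDF5 file with the datasets "codec", "transcripts", "images" and "images_dims" (the writers in Examples #25 and #32 produce the first two). A minimal sketch of a file with that assumed layout, using flattened uint8 line images and codec-index transcripts; dtypes and attributes of real calamari HDF5 files may differ:

import h5py
import numpy as np

codec = [" ", "a", "b", "c"]                                    # characters, stored as code points
images = [np.zeros((48, 200), np.uint8), np.zeros((48, 120), np.uint8)]
transcripts = [[1, 2], [3, 1, 2]]                               # indices into the codec

with h5py.File("lines.h5", "w") as f:
    f.create_dataset("codec", data=list(map(ord, codec)))
    f.create_dataset("images_dims", data=[img.shape for img in images])
    f.create_dataset("transcripts", (len(transcripts),),
                     dtype=h5py.special_dtype(vlen=np.dtype("int32")))
    f.create_dataset("images", (len(images),),
                     dtype=h5py.special_dtype(vlen=np.dtype("uint8")))
    for i, (img, txt) in enumerate(zip(images, transcripts)):
        f["images"][i] = img.ravel()                            # flattened; shape kept in images_dims
        f["transcripts"][i] = np.array(txt, dtype=np.int32)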
Example #15
    def __init__(
        self,
        mode: DataSetMode,
        files: List[str] = None,
        xmlfiles: List[str] = None,
        skip_invalid=False,
        remove_invalid=True,
        binary=False,
        non_existing_as_empty=False,
    ):
        """ Create a dataset from a Path as String

        Parameters
        ----------
        files : [], required
            image files
        skip_invalid : bool, optional
            skip invalid files
        remove_invalid : bool, optional
            remove invalid files
        """

        super().__init__(mode, skip_invalid, remove_invalid)

        self.xmlfiles = xmlfiles if xmlfiles else []
        self.files = files if files else []

        self._non_existing_as_empty = non_existing_as_empty
        if len(self.xmlfiles) == 0:
            from calamari_ocr.ocr.datasets import DataSetType
            self.xmlfiles = [
                split_all_ext(p)[0] +
                DataSetType.gt_extension(DataSetType.ABBYY) for p in files
            ]

        if len(self.files) == 0:
            self.files = [None] * len(self.xmlfiles)

        self.book = XMLReader(self.files, self.xmlfiles, skip_invalid,
                              remove_invalid).read()
        self.binary = binary

        for p, page in enumerate(self.book.pages):
            for l, line in enumerate(page.getLines()):
                for f, fo in enumerate(line.formats):
                    self.add_sample({
                        "image_path":
                        page.imgFile,
                        "xml_path":
                        page.xmlFile,
                        "id":
                        "{}_{}_{}_{}".format(
                            os.path.splitext(page.xmlFile if page.
                                             xmlFile else page.imgFile)[0], p,
                            l, f),
                        "line":
                        line,
                        "format":
                        fo,
                    })
Example #16
    def __init__(
        self,
        mode: PipelineMode,
        params: PageXML,
    ):
        super().__init__(mode, params)
        self.pages = {}
        for img, xml in zip(params.images, params.xml_files):
            loader = PageXMLDatasetLoader(
                self.mode,
                params.non_existing_as_empty,
                params.text_index,
                params.skip_invalid,
                params.skip_commented,
            )
            for sample in loader.load(img, xml):
                self.add_sample(sample)

            self.pages[split_all_ext(xml)[0]] = loader.root

        # store which pagexml was stored last, to check when a file is ready to be written during sequential prediction
        self._last_page_id = None

        # counter for word tag ids
        self._next_word_id = 0
Example #17
    def __init__(self, json_path: str, auto_update=True, dry_run=False):
        self.json_path = json_path if json_path.endswith(
            '.json') else json_path + '.json'
        self.json_path = os.path.abspath(
            os.path.expanduser(os.path.expandvars(self.json_path)))
        self.ckpt_path = os.path.splitext(self.json_path)[0]
        self.dry_run = dry_run
        self.dirname = os.path.dirname(self.ckpt_path)
        self.basename = os.path.basename(split_all_ext(self.ckpt_path)[0])

        # do not parse as proto, since some parameters might have changed
        with open(self.json_path, 'r') as f:
            self.dict = json.load(f)

            self.version = self.dict['version'] if 'version' in self.dict else 0

        if self.version != SavedCalamariModel.VERSION:
            if auto_update:
                self.update_checkpoint()
            else:
                raise Exception(
                    "Version of checkpoint is {} but {} is required. Please upgrade the model or "
                    "set the auto update flag.".format(
                        self.version, SavedCalamariModel.VERSION))

        else:
            logger.info(f"Checkpoint version {self.version} is up-to-date.")

        from calamari_ocr.ocr.training.params import TrainerParams
        if 'scenario' in self.dict:
            self.trainer_params = TrainerParams.from_dict(self.dict)
        else:
            self.trainer_params = TrainerParams.from_dict(
                {'scenario': self.dict})
        self.scenario_params = self.trainer_params.scenario
Example #18
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--files",
                        type=str,
                        default=[],
                        nargs="+",
                        required=True,
                        help="Protobuf files to convert")
    parser.add_argument("--logits",
                        action="store_true",
                        help="Do write logits")
    args = parser.parse_args()

    files = glob_all(args.files)
    for file in tqdm(files, desc="Converting"):
        predictions = Predictions()
        with open(file, 'rb') as f:
            predictions.ParseFromString(f.read())

        if not args.logits:
            for prediction in predictions.predictions:
                prediction.logits.rows = 0
                prediction.logits.cols = 0
                prediction.logits.data[:] = []

        out_json_path = split_all_ext(file)[0] + ".json"
        with open(out_json_path, 'w') as f:
            f.write(
                MessageToJson(predictions,
                              including_default_value_fields=True))
Example #19
 def store(self):
     for page in tqdm(self.book.pages,
                      desc="Writing Abbyy files",
                      total=len(self.book.pages)):
         XMLWriter.write(
             page,
             split_all_ext(page.xmlFile)[0] + self.params.pred_extension)
Example #20
    def __init__(
        self,
        mode: PipelineMode,
        params: Abbyy,
    ):
        super().__init__(mode, params)

        self.book = XMLReader(self.params.images, self.params.xml_files,
                              self.params.skip_invalid).read()

        for p, page in enumerate(self.book.pages):
            for l, line in enumerate(page.getLines()):
                for f, fo in enumerate(line.formats):
                    self.add_sample({
                        "image_path":
                        page.imgFile,
                        "xml_path":
                        page.xmlFile,
                        "id":
                        "{}_{}_{}_{}".format(
                            split_all_ext(page.xmlFile or page.imgFile)[0], p,
                            l, f),
                        "line":
                        line,
                        "format":
                        fo,
                    })
Example #21
    def __init__(
        self,
        mode: DataSetMode,
        files,
        xmlfiles: List[str] = None,
        skip_invalid=False,
        remove_invalid=True,
        non_existing_as_empty=False,
        args: dict = None,
    ):
        """ Create a dataset from a Path as String

        Parameters
        ----------
        files : [], required
            image files
        skip_invalid : bool, optional
            skip invalid files
        remove_invalid : bool, optional
            remove invalid files
        """

        super().__init__(
            mode,
            skip_invalid,
            remove_invalid,
        )

        if xmlfiles is None:
            xmlfiles = []

        if args is None:
            args = {}

        self.args = args

        self.text_index = args.get('text_index', 0)

        self._non_existing_as_empty = non_existing_as_empty
        if len(xmlfiles) == 0:
            xmlfiles = [split_all_ext(p)[0] + ".xml" for p in files]

        if len(files) == 0:
            files = [None] * len(xmlfiles)

        self.files = files
        self.xmlfiles = xmlfiles
        self.pages = []
        for img, xml in zip(files, xmlfiles):
            loader = PageXMLDatasetLoader(self.mode,
                                          self._non_existing_as_empty,
                                          self.text_index, self.skip_invalid)
            for sample in loader.load(img, xml):
                self.add_sample(sample)

            self.pages.append(loader.root)

        # store which pagexml was stored last, to check when a file is ready to be written during sequential prediction
        self._last_page_id = None
Example #22
 def store(self, extension):
     if self._last_page_id:
         self._store_page(extension, self._last_page_id)
         self._last_page_id = None
     else:
         for xml, page in tqdm(zip(self.xmlfiles, self.pages), desc="Writing PageXML files", total=len(self.xmlfiles)):
             with open(split_all_ext(xml)[0] + extension, 'w') as f:
                 f.write(etree.tounicode(page.getroottree()))
Example #23
    def prepare_for_mode(self, mode: PipelineMode):
        self.images = sorted(glob_all(self.images))
        self.xml_files = sorted(glob_all(self.xml_files))
        if not self.xml_files:
            self.xml_files = [split_all_ext(f)[0] + self.gt_extension for f in self.images]
        if not self.images:
            self.images = [None] * len(self.xml_files)

        if len(self.images) != len(self.xml_files):
            raise ValueError(f"Different number of image and xml files, {len(self.images)} != {len(self.xml_files)}")
        for img_path, xml_path in zip(self.images, self.xml_files):
            if img_path and xml_path:
                img_bn, xml_bn = split_all_ext(img_path)[0], split_all_ext(xml_path)[0]
                if img_bn != xml_bn:
                    logger.warning(
                        f"Filenames do not match; got base names\n  image: {img_bn}\n  xml:   {xml_bn}"
                    )
Example #24
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--files", nargs="+", required=True,
                        help="List of all image files with corresponding gt.txt files")
    parser.add_argument("--dataset", type=DataSetType.from_string, choices=list(DataSetType), default=DataSetType.FILE)
    parser.add_argument("--line_height", type=int, default=48,
                        help="The line height")
    parser.add_argument("--pad", type=int, default=16,
                        help="Padding (left right) of the line")

    args = parser.parse_args()

    print("Resolving files")
    image_files = glob_all(args.files)
    gt_files = [split_all_ext(p)[0] + ".gt.txt" for p in image_files]

    ds = create_dataset(
        args.dataset,
        DataSetMode.TRAIN,
        images=image_files, texts=gt_files, non_existing_as_empty=True)

    print("Loading {} files".format(len(image_files)))
    ds.load_samples(processes=1, progress_bar=True)
    images, texts = ds.train_samples(skip_empty=True)
    statistics = {
        "n_lines": len(images),
        "chars": [len(c) for c in texts],
        "widths": [img.shape[1] / img.shape[0] * args.line_height + 2 * args.pad for img in images
                   if img is not None and img.shape[0] > 0 and img.shape[1] > 0],
        "total_line_width": 0,
        "char_counts": {},
    }

    for image, text in zip(images, texts):
        for c in text:
            if c in statistics["char_counts"]:
                statistics["char_counts"][c] += 1
            else:
                statistics["char_counts"][c] = 1

    statistics["av_line_width"] = np.average(statistics["widths"])
    statistics["max_line_width"] = np.max(statistics["widths"])
    statistics["min_line_width"] = np.min(statistics["widths"])
    statistics["total_line_width"] = np.sum(statistics["widths"])

    statistics["av_chars"] = np.average(statistics["chars"])
    statistics["max_chars"] = np.max(statistics["chars"])
    statistics["min_chars"] = np.min(statistics["chars"])
    statistics["total_chars"] = np.sum(statistics["chars"])

    statistics["av_px_per_char"] = statistics["av_line_width"] / statistics["av_chars"]
    statistics["codec_size"] = len(statistics["char_counts"])

    del statistics["chars"]
    del statistics["widths"]


    print(statistics)
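The "widths" entry above rescales each line image to the target line height before adding the horizontal padding, i.e. width / height * line_height + 2 * pad. For a 64 px high, 512 px wide line with the default arguments:

line_height, pad = 48, 16   # defaults of --line_height and --pad above
h, w = 64, 512              # assumed shape of one line image (img.shape)
estimated_width = w / h * line_height + 2 * pad
# 512 / 64 * 48 + 2 * 16 = 416.0 padded pixels at the normalized line height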
Example #25
 def store(self, extension):
     for filename, data in self.prediction.items():
         texts = data['transcripts']
         codec = data['codec']
         basename, ext = split_all_ext(filename)
         with h5py.File(basename + extension, 'w') as file:
             dt = h5py.special_dtype(vlen=np.dtype('int32'))
             file.create_dataset('transcripts', (len(texts), ), dtype=dt)
             file['transcripts'][...] = texts
             file.create_dataset('codec', data=list(map(ord, codec)))
Example #26
def main():
    parser = argparse.ArgumentParser(
        description="Write split of folds to separate directories"
    )
    parser.add_argument("--files", nargs="+",
                        help="List all image files that shall be processed. Ground truth fils with the same "
                             "base name but with '.gt.txt' as extension are required at the same location")
    parser.add_argument("--n_folds", type=int, required=True,
                        help="The number of fold, that is the number of models to train")
    parser.add_argument("--output_dir", type=str, required=True,
                        help="Where to write the folds")
    parser.add_argument("--keep_original_filename", action="store_true",
                        help="By default the copied new files get a new 8 digit name. Use this flag to keep the "
                             "original name but be aware, that this might override lines with the same name")

    args = parser.parse_args()

    logger.info("Creating folds")
    images = glob_all(args.files)
    texts = [split_all_ext(p)[0] + '.gt.txt' for p in images]
    data_reader = FileDataReader(PipelineMode.Training, images=images, texts=texts, skip_invalid=True)
    cross_fold = CrossFold(n_folds=args.n_folds, data_reader=data_reader, output_dir=args.output_dir)

    logger.info("Copying files")
    for fold_id, fold_files in enumerate(cross_fold.folds):
        fold_out_dir = os.path.join(args.output_dir, str(fold_id))
        if not os.path.exists(fold_out_dir):
            os.makedirs(fold_out_dir)

        for file_id, file in tqdm(enumerate(fold_files), total=len(fold_files), desc="Fold {}".format(fold_id)):
            img_file = file
            base, ext = split_all_ext(file)
            txt_file = base + ".gt.txt"
            output_basename = os.path.basename(base) if args.keep_original_filename else "{:08d}".format(file_id)

            if os.path.exists(img_file) and os.path.exists(txt_file):
                output_file = os.path.join(fold_out_dir, "{}{}".format(output_basename, ext))
                shutil.copyfile(img_file, output_file)

                output_file = os.path.join(fold_out_dir, "{}{}".format(output_basename, ".gt.txt"))
                shutil.copyfile(txt_file, output_file)
            else:
                logger.info("Warning: Does not exist {} or {}".format(img_file, txt_file))
Example #27
    def __init__(self, texts=list()):
        super().__init__(DataSetMode.EVAL)

        for text in texts:
            text_bn, text_ext = split_all_ext(text)
            self.add_sample({
                "image_path": None,
                "pred_path": text,
                "id": text_bn,
            })
Example #28
 def prepare_for_mode(self, mode: PipelineMode):
     self.images = sorted(glob_all(self.images))
     self.xml_files = sorted(self.xml_files)
     if not self.xml_files:
         self.xml_files = [
             split_all_ext(f)[0] + self.gt_extension for f in self.images
         ]
     if not self.images:
         self.xml_files = sorted(glob_all(self.xml_files))
         self.images = [None] * len(self.xml_files)
Example #29
 def __init__(self, mode, params: ExtendedPredictionDataParams):
     super().__init__(mode, params)
     for text in params.files:
         text_bn, text_ext = split_all_ext(text)
         sample = {
             "image_path": None,
             "pred_path": text,
             "id": text_bn,
         }
         self._load_sample(sample, False)
         self.add_sample(sample)
Example #30
    def _samples_gt_from_book(self, root, img, skipcommented=True):
        ns = {"ns": root.nsmap[None]}
        imgfile = root.xpath('//ns:Page',
                             namespaces=ns)[0].attrib["imageFilename"]
        if self.mode == DataSetMode.TRAIN and not split_all_ext(
                img)[0].endswith(split_all_ext(imgfile)[0]):
            raise Exception(
                "Mapping of image file to xml file invalid: {} vs {} (comparing basename {} vs {})"
                .format(img, imgfile,
                        split_all_ext(img)[0],
                        split_all_ext(imgfile)[0]))

        img_w = int(
            root.xpath('//ns:Page', namespaces=ns)[0].attrib["imageWidth"])
        tequivs = root.xpath('//ns:TextEquiv[@index="{}"]'.format(
            self.text_index),
                             namespaces=ns)
        for l in tequivs:
            parat = l.getparent().attrib
            if skipcommented and "comments" in parat and parat["comments"]:
                continue

            text = l.xpath('./ns:Unicode', namespaces=ns).pop().text
            if not text:
                if self.skip_invalid:
                    continue
                elif self._non_existing_as_empty:
                    text = ""
                else:
                    raise Exception("Empty text field")

            yield {
                'ns': ns,
                "rtype": l.xpath('../../@type', namespaces=ns).pop(),
                'xml_element': l,
                "image_path": img,
                "id": l.xpath('../@id', namespaces=ns).pop(),
                "text": text,
                "coords": l.xpath('../ns:Coords/@points', namespaces=ns).pop(),
                "img_width": img_w
            }
Example #31
    def __init__(
        self,
        mode: DataSetMode,
        files,
        xmlfiles=list(),
        skip_invalid=False,
        remove_invalid=True,
        binary=False,
        non_existing_as_empty=False,
    ):
        """ Create a dataset from a Path as String

        Parameters
        ----------
        files : [], required
            image files
        skip_invalid : bool, optional
            skip invalid files
        remove_invalid : bool, optional
            remove invalid files
        """

        super().__init__(mode, skip_invalid, remove_invalid)

        self._non_existing_as_empty = non_existing_as_empty
        if not xmlfiles or len(xmlfiles) == 0:
            xmlfiles = [split_all_ext(p)[0] + ".xml" for p in files]

        if not files or len(files) == 0:
            files = [None] * len(xmlfiles)

        self.book = XMLReader(files, xmlfiles, skip_invalid,
                              remove_invalid).read()
        self.binary = binary

        for p, page in enumerate(self.book.pages):
            for l, line in enumerate(page.getLines()):
                for f, fo in enumerate(line.formats):
                    self.add_sample({
                        "image_path":
                        page.imgFile,
                        "xml_path":
                        page.xmlFile,
                        "id":
                        "{}_{}_{}_{}".format(
                            os.path.splitext(page.xmlFile if page.
                                             xmlFile else page.imgFile)[0], p,
                            l, f),
                        "line":
                        line,
                        "format":
                        fo,
                    })
Example #32
    def store(self):
        extension = self.params.pred_extension

        for filename, data in self.prediction.items():
            texts = data["transcripts"]
            codec = data["codec"]
            basename, ext = split_all_ext(filename)
            with h5py.File(basename + extension, "w") as file:
                dt = h5py.special_dtype(vlen=np.dtype("int32"))
                file.create_dataset("transcripts", (len(texts), ), dtype=dt)
                file["transcripts"][...] = texts
                file.create_dataset("codec", data=list(map(ord, codec)))
Example #33
    def __init__(self,
                 mode: DataSetMode,
                 files,
                 xmlfiles=list(),
                 skip_invalid=False,
                 remove_invalid=True,
                 binary=False,
                 non_existing_as_empty=False,
                 ):

        """ Create a dataset from a Path as String

        Parameters
        ----------
        files : [], required
            image files
        skip_invalid : bool, optional
            skip invalid files
        remove_invalid : bool, optional
            remove invalid files
        """

        super().__init__(
            mode,
            skip_invalid, remove_invalid)

        self._non_existing_as_empty = non_existing_as_empty
        if not xmlfiles or len(xmlfiles) == 0:
            xmlfiles = [split_all_ext(p)[0] + ".xml" for p in files]

        if not files or len(files) == 0:
            files = [None] * len(xmlfiles)

        self.book = XMLReader(files, xmlfiles, skip_invalid, remove_invalid).read()
        self.binary = binary

        for p, page in enumerate(self.book.pages):
            for l, line in enumerate(page.getLines()):
                for f, fo in enumerate(line.formats):
                    self.add_sample({
                        "image_path": page.imgFile,
                        "xml_path": page.xmlFile,
                        "id": "{}_{}_{}_{}".format(os.path.splitext(page.xmlFile if page.xmlFile else page.imgFile)[0], p, l, f),
                        "line": line,
                        "format": fo,
                    })
Example #34
def main():
    parser = argparse.ArgumentParser(
        description="Write split of folds to separate directories"
    )
    parser.add_argument("--files", nargs="+",
                        help="List all image files that shall be processed. Ground truth fils with the same "
                             "base name but with '.gt.txt' as extension are required at the same location")
    parser.add_argument("--n_folds", type=int, required=True,
                        help="The number of fold, that is the number of models to train")
    parser.add_argument("--output_dir", type=str, required=True,
                        help="Where to write the folds")
    parser.add_argument("--keep_original_filename", action="store_true",
                        help="By default the copied new files get a new 8 digit name. Use this flag to keep the "
                             "original name but be aware, that this might override lines with the same name")

    args = parser.parse_args()

    print("Creating folds")
    cross_fold = CrossFold(n_folds=args.n_folds, source_files=args.files, output_dir=args.output_dir)

    print("Copying files")
    for fold_id, fold_files in enumerate(cross_fold.folds):
        fold_out_dir = os.path.join(args.output_dir, str(fold_id))
        if not os.path.exists(fold_out_dir):
            os.makedirs(fold_out_dir)

        for file_id, file in tqdm(enumerate(fold_files), total=len(fold_files), desc="Fold {}".format(fold_id)):
            img_file = file
            base, ext = split_all_ext(file)
            txt_file = base + ".gt.txt"
            output_basename = os.path.basename(base) if args.keep_original_filename else "{:08d}".format(file_id)

            if os.path.exists(img_file) and os.path.exists(txt_file):
                output_file = os.path.join(fold_out_dir, "{}{}".format(output_basename, ext))
                shutil.copyfile(img_file, output_file)

                output_file = os.path.join(fold_out_dir, "{}{}".format(output_basename, ".gt.txt"))
                shutil.copyfile(txt_file, output_file)
            else:
                print("Waring: Does not exist {} or {}".format(img_file, txt_file))
Example #35
    def __init__(self,
                 mode: DataSetMode,
                 files,
                 xmlfiles=list(),
                 skip_invalid=False,
                 remove_invalid=True,
                 non_existing_as_empty=False,
                 args=dict(),
                 ):

        """ Create a dataset from a Path as String

        Parameters
        ----------
        files : [], required
            image files
        skip_invalid : bool, optional
            skip invalid files
        remove_invalid : bool, optional
            remove invalid files
        """

        super().__init__(
            mode,
            skip_invalid, remove_invalid,
        )

        self.text_index = args.get('text_index', 0)

        self._non_existing_as_empty = non_existing_as_empty
        if not xmlfiles or len(xmlfiles) == 0:
            xmlfiles = [split_all_ext(p)[0] + ".xml" for p in files]

        if not files or len(files) == 0:
            files = [None] * len(xmlfiles)

        self.files = files
        self.xmlfiles = xmlfiles

        self.pages = [self.read_page_xml(img, xml) for img, xml in zip(files, xmlfiles)]
Example #36
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--files", type=str, default=[], nargs="+", required=True,
                        help="Protobuf files to convert")
    parser.add_argument("--logits", action="store_true",
                        help="Do write logits")
    args = parser.parse_args()

    files = glob_all(args.files)
    for file in tqdm(files, desc="Converting"):
        predictions = Predictions()
        with open(file, 'rb') as f:
            predictions.ParseFromString(f.read())

        if not args.logits:
            for prediction in predictions.predictions:
                prediction.logits.rows = 0
                prediction.logits.cols = 0
                prediction.logits.data[:] = []

        out_json_path = split_all_ext(file)[0] + ".json"
        with open(out_json_path, 'w') as f:
            f.write(MessageToJson(predictions, including_default_value_fields=True))
Example #37
 def store(self):
     for page in tqdm(self.book.pages, desc="Writing Abbyy files", total=len(self.book.pages)):
         XMLWriter.write(page, split_all_ext(page.xmlFile)[0] + ".pred.abbyy.xml")
Example #38
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--files", nargs="+", required=True,
                        help="All img files, an appropriate .gt.txt must exist")
    parser.add_argument("--n_eval", type=float, required=True,
                        help="The (relative or absolute) count of training files (or -1 to use the remaining)")
    parser.add_argument("--n_train", type=float, required=True,
                        help="The (relative or absolute) count of training files (or -1 to use the remaining)")
    parser.add_argument("--output_dir", type=str, required=True,
                        help="Where to write the splits")
    parser.add_argument("--eval_sub_dir", type=str, default="eval")
    parser.add_argument("--train_sub_dir", type=str, default="train")

    args = parser.parse_args()

    img_files = sorted(glob_all(args.files))
    if len(img_files) == 0:
        raise Exception("No files were found")

    gt_txt_files = [split_all_ext(p)[0] + ".gt.txt" for p in img_files]

    if args.n_eval < 0:
        pass
    elif args.n_eval < 1:
        args.n_eval = int(args.n_eval * len(img_files))
    else:
        args.n_eval = int(args.n_eval)

    if args.n_train < 0:
        pass
    elif args.n_train < 1:
        args.n_train = int(args.n_train * len(img_files))
    else:
        args.n_train = int(args.n_train)

    if args.n_eval < 0 and args.n_train < 0:
        raise Exception("Either n_eval or n_train may be < 0")

    if args.n_eval < 0:
        args.n_eval = len(img_files) - args.n_train
    elif args.n_train < 0:
        args.n_train = len(img_files) - args.n_eval

    if args.n_eval + args.n_train > len(img_files):
        raise Exception("Got {} eval and {} train files = {} in total, but only {} files are in the dataset".format(
            args.n_eval, args.n_train, args.n_eval + args.n_train, len(img_files)
        ))

    def copy_files(imgs, txts, out_dir):
        assert(len(imgs) == len(txts))

        if not os.path.exists(out_dir):
            os.makedirs(out_dir)

        for img, txt in tqdm(zip(imgs, txts), total=len(imgs), desc="Writing to {}".format(out_dir)):
            if not os.path.exists(img):
                print("Image file at {} not found".format(img))
                continue

            if not os.path.exists(txt):
                print("Ground truth file at {} not found".format(txt))
                continue

            shutil.copyfile(img, os.path.join(out_dir, os.path.basename(img)))
            shutil.copyfile(txt, os.path.join(out_dir, os.path.basename(txt)))

    copy_files(img_files[:args.n_eval], gt_txt_files[:args.n_eval], os.path.join(args.output_dir, args.eval_sub_dir))
    copy_files(img_files[args.n_eval:], gt_txt_files[args.n_eval:], os.path.join(args.output_dir, args.train_sub_dir))
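The --n_eval / --n_train values in Example #38 are interpreted three ways: negative means "use the remainder", a value below 1 is a fraction of the resolved file list, and anything else is an absolute count. A small worked illustration, assuming 1000 resolved image files:

n_files = 1000                      # assumed len(img_files)
n_eval, n_train = 0.2, -1           # e.g. --n_eval 0.2 --n_train -1

if 0 <= n_eval < 1:
    n_eval = int(n_eval * n_files)  # fraction: 0.2 -> 200 files
if n_train < 0:
    n_train = n_files - n_eval      # remainder: 800 files

# The first 200 sorted image/gt pairs are copied to <output_dir>/eval,
# the remaining 800 to <output_dir>/train.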
Example #39
    def __init__(self, mode: DataSetMode,
                 images=None, texts=None,
                 skip_invalid=False, remove_invalid=True,
                 non_existing_as_empty=False):
        """ Create a dataset from a list of files

        Images or texts may be empty to create a dataset for prediction or evaluation only.

        Parameters
        ----------
        images : list of str, optional
            image files
        texts : list of str, optional
            text files
        skip_invalid : bool, optional
            skip invalid files
        remove_invalid : bool, optional
            remove invalid files
        non_existing_as_empty : bool, optional
            treat non-existing files as empty. This is relevant for evaluating a dataset
        """
        super().__init__(mode,
                         skip_invalid=skip_invalid,
                         remove_invalid=remove_invalid)
        self._non_existing_as_empty = non_existing_as_empty

        images = [] if images is None else images
        texts = [] if texts is None else texts

        if mode == DataSetMode.PREDICT:
            texts = [None] * len(images)

        if mode == DataSetMode.EVAL:
            images = [None] * len(texts)

        for image, text in zip(images, texts):
            try:
                if image is None and text is None:
                    raise Exception("An empty data point is not allowed. Both image and text file are None")

                img_bn, text_bn = None, None
                if image:
                    img_path, img_fn = os.path.split(image)
                    img_bn, img_ext = split_all_ext(img_fn)

                    if not self._non_existing_as_empty and not os.path.exists(image):
                        raise Exception("Image at '{}' must exist".format(image))

                if text:
                    if not self._non_existing_as_empty and not os.path.exists(text):
                        raise Exception("Text file at '{}' must exist".format(text))

                    text_path, text_fn = os.path.split(text)
                    text_bn, text_ext = split_all_ext(text_fn)

                if image and text and img_bn != text_bn:
                    raise Exception("Expected image base name equals text base name but got '{}' != '{}'".format(
                        img_bn, text_bn
                    ))
            except Exception as e:
                if self.skip_invalid:
                    print("Invalid data: {}".format(e))
                    continue
                else:
                    raise e

            self.add_sample({
                "image_path": image,
                "text_path": text,
                "id": img_bn if image else text_bn,
            })
Example #40
def main():
    parser = ArgumentParser()
    parser.add_argument("--checkpoint", type=str, required=True,
                        help="The checkpoint used to resume")

    # validation files
    parser.add_argument("--validation", type=str, nargs="+",
                        help="Validation line files used for early stopping")
    parser.add_argument("--validation_text_files", nargs="+", default=None,
                        help="Optional list of validation GT files if they are in other directory")
    parser.add_argument("--validation_extension", default=None,
                        help="Default extension of the gt files (expected to exist in same dir)")
    parser.add_argument("--validation_dataset", type=DataSetType.from_string, choices=list(DataSetType), default=DataSetType.FILE)

    # input files
    parser.add_argument("--files", nargs="+",
                        help="List all image files that shall be processed. Ground truth fils with the same "
                             "base name but with '.gt.txt' as extension are required at the same location")
    parser.add_argument("--text_files", nargs="+", default=None,
                        help="Optional list of GT files if they are in other directory")
    parser.add_argument("--gt_extension", default=None,
                        help="Default extension of the gt files (expected to exist in same dir)")
    parser.add_argument("--dataset", type=DataSetType.from_string, choices=list(DataSetType), default=DataSetType.FILE)
    parser.add_argument("--no_skip_invalid_gt", action="store_true",
                        help="Do no skip invalid gt, instead raise an exception.")

    args = parser.parse_args()

    if args.gt_extension is None:
        args.gt_extension = DataSetType.gt_extension(args.dataset)

    if args.validation_extension is None:
        args.validation_extension = DataSetType.gt_extension(args.validation_dataset)

    # Training dataset
    print("Resolving input files")
    input_image_files = sorted(glob_all(args.files))
    if not args.text_files:
        gt_txt_files = [split_all_ext(f)[0] + args.gt_extension for f in input_image_files]
    else:
        gt_txt_files = sorted(glob_all(args.text_files))
        input_image_files, gt_txt_files = keep_files_with_same_file_name(input_image_files, gt_txt_files)
        for img, gt in zip(input_image_files, gt_txt_files):
            if split_all_ext(os.path.basename(img))[0] != split_all_ext(os.path.basename(gt))[0]:
                raise Exception("Expected identical basenames of file: {} and {}".format(img, gt))

    if len(set(gt_txt_files)) != len(gt_txt_files):
        raise Exception("Some image are occurring more than once in the data set.")

    dataset = create_dataset(
        args.dataset,
        DataSetMode.TRAIN,
        images=input_image_files,
        texts=gt_txt_files,
        skip_invalid=not args.no_skip_invalid_gt
    )
    print("Found {} files in the dataset".format(len(dataset)))

    # Validation dataset
    if args.validation:
        print("Resolving validation files")
        validation_image_files = glob_all(args.validation)
        if not args.validation_text_files:
            val_txt_files = [split_all_ext(f)[0] + args.validation_extension for f in validation_image_files]
        else:
            val_txt_files = sorted(glob_all(args.validation_text_files))
            validation_image_files, val_txt_files = keep_files_with_same_file_name(validation_image_files, val_txt_files)
            for img, gt in zip(validation_image_files, val_txt_files):
                if split_all_ext(os.path.basename(img))[0] != split_all_ext(os.path.basename(gt))[0]:
                    raise Exception("Expected identical basenames of validation file: {} and {}".format(img, gt))

        if len(set(val_txt_files)) != len(val_txt_files):
            raise Exception("Some validation images are occurring more than once in the data set.")

        validation_dataset = create_dataset(
            args.validation_dataset,
            DataSetMode.TRAIN,
            images=validation_image_files,
            texts=val_txt_files,
            skip_invalid=not args.no_skip_invalid_gt)
        print("Found {} files in the validation dataset".format(len(validation_dataset)))
    else:
        validation_dataset = None

    print("Resuming training")
    with open(args.checkpoint + '.json', 'r') as f:
        checkpoint_params = json_format.Parse(f.read(), CheckpointParams())

        trainer = Trainer(checkpoint_params, dataset,
                          validation_dataset=validation_dataset,
                          weights=args.checkpoint)
        trainer.train(progress_bar=True)
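
The resolver above derives ground-truth paths from image paths when no explicit text files are given. A small sketch of that convention, assuming split_all_ext strips every extension rather than only the last one:

# Sketch of the pairing convention used by the resolver above. Assumption:
# split_all_ext strips all extensions, so a binarized image such as
# "data/0001.nrm.png" still maps to "data/0001.gt.txt".
img = "data/0001.nrm.png"
gt = split_all_ext(img)[0] + ".gt.txt"   # -> "data/0001.gt.txt" (assumed)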
Exemple #41
0
    def store(self):
        for xml, page in tqdm(zip(self.xmlfiles, self.pages), desc="Writing PageXML files", total=len(self.xmlfiles)):
            with open(split_all_ext(xml)[0] + ".pred.xml", 'w') as f:
                f.write(etree.tounicode(page.getroottree()))
Exemple #42
0
def main():
    parser = ArgumentParser()
    parser.add_argument("--dataset", type=DataSetType.from_string, choices=list(DataSetType), default=DataSetType.FILE)
    parser.add_argument("--gt", nargs="+", required=True,
                        help="Ground truth files (.gt.txt extension)")
    parser.add_argument("--pred", nargs="+", default=None,
                        help="Prediction files if provided. Else files with .pred.txt are expected at the same "
                             "location as the gt.")
    parser.add_argument("--pred_dataset", type=DataSetType.from_string, choices=list(DataSetType), default=DataSetType.FILE)
    parser.add_argument("--pred_ext", type=str, default=".pred.txt",
                        help="Extension of the predicted text files")
    parser.add_argument("--n_confusions", type=int, default=10,
                        help="Only print n most common confusions. Defaults to 10, use -1 for all.")
    parser.add_argument("--n_worst_lines", type=int, default=0,
                        help="Print the n worst recognized text lines with its error")
    parser.add_argument("--xlsx_output", type=str,
                        help="Optionally write a xlsx file with the evaluation results")
    parser.add_argument("--num_threads", type=int, default=1,
                        help="Number of threads to use for evaluation")
    parser.add_argument("--non_existing_file_handling_mode", type=str, default="error",
                        help="How to handle non existing .pred.txt files. Possible modes: skip, empty, error. "
                             "'Skip' will simply skip the evaluation of that file (not counting it to errors). "
                             "'Empty' will handle this file as would it be empty (fully checking for errors)."
                             "'Error' will throw an exception if a file is not existing. This is the default behaviour.")
    parser.add_argument("--no_progress_bars", action="store_true",
                        help="Do not show any progress bars")
    parser.add_argument("--checkpoint", type=str, default=None,
                        help="Specify an optional checkpoint to parse the text preprocessor (for the gt txt files)")

    # page xml specific args
    parser.add_argument("--pagexml_gt_text_index", default=0)
    parser.add_argument("--pagexml_pred_text_index", default=1)


    args = parser.parse_args()

    print("Resolving files")
    gt_files = sorted(glob_all(args.gt))

    if args.pred:
        pred_files = sorted(glob_all(args.pred))
    else:
        pred_files = [split_all_ext(gt)[0] + args.pred_ext for gt in gt_files]
        args.pred_dataset = args.dataset

    if args.non_existing_file_handling_mode.lower() == "skip":
        non_existing_pred = [p for p in pred_files if not os.path.exists(p)]
        for f in non_existing_pred:
            idx = pred_files.index(f)
            del pred_files[idx]
            del gt_files[idx]

    text_preproc = None
    if args.checkpoint:
        with open(args.checkpoint if args.checkpoint.endswith(".json") else args.checkpoint + '.json', 'r') as f:
            checkpoint_params = json_format.Parse(f.read(), CheckpointParams())
            text_preproc = text_processor_from_proto(checkpoint_params.model.text_preprocessor)

    non_existing_as_empty = args.non_existing_file_handling_mode.lower() != "error"
    gt_data_set = create_dataset(
        args.dataset,
        DataSetMode.EVAL,
        texts=gt_files,
        non_existing_as_empty=non_existing_as_empty,
        args={'text_index': args.pagexml_gt_text_index},
    )
    pred_data_set = create_dataset(
        args.pred_dataset,
        DataSetMode.EVAL,
        texts=pred_files,
        non_existing_as_empty=non_existing_as_empty,
        args={'text_index': args.pagexml_pred_text_index},
    )

    evaluator = Evaluator(text_preprocessor=text_preproc)
    r = evaluator.run(gt_dataset=gt_data_set, pred_dataset=pred_data_set, processes=args.num_threads,
                      progress_bar=not args.no_progress_bars)

    # TODO: More output
    print("Evaluation result")
    print("=================")
    print("")
    print("Got mean normalized label error rate of {:.2%} ({} errs, {} total chars, {} sync errs)".format(
        r["avg_ler"], r["total_char_errs"], r["total_chars"], r["total_sync_errs"]))

    # sort descending
    print_confusions(r, args.n_confusions)

    print_worst_lines(r, gt_data_set.samples(), pred_data_set.text_samples(), args.n_worst_lines)

    if args.xlsx_output:
        write_xlsx(args.xlsx_output,
                   [{
                       "prefix": "evaluation",
                       "results": r,
                       "gt_files": gt_files,
                       "gts": gt_data_set.text_samples(),
                       "preds": pred_data_set.text_samples()
                   }])
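
The "skip" branch above removes missing predictions by repeatedly calling list.index, which silently picks the first match if the same path occurs twice. A hedged alternative that filters both parallel lists in a single pass:

# Alternative sketch for the "skip" mode above: drop gt/pred pairs whose
# prediction file is missing, without relying on list.index.
kept = [(g, p) for g, p in zip(gt_files, pred_files) if os.path.exists(p)]
gt_files = [g for g, _ in kept]
pred_files = [p for _, p in kept]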
Exemple #43
0
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--eval_imgs", type=str, nargs="+", required=True,
                        help="The evaluation files")
    parser.add_argument("--eval_dataset", type=DataSetType.from_string, choices=list(DataSetType), default=DataSetType.FILE)
    parser.add_argument("--checkpoint", type=str, nargs="+", default=[],
                        help="Path to the checkpoint without file extension")
    parser.add_argument("-j", "--processes", type=int, default=1,
                        help="Number of processes to use")
    parser.add_argument("--verbose", action="store_true",
                        help="Print additional information")
    parser.add_argument("--voter", type=str, nargs="+", default=["sequence_voter", "confidence_voter_default_ctc", "confidence_voter_fuzzy_ctc"],
                        help="The voting algorithm to use. Possible values: confidence_voter_default_ctc (default), "
                             "confidence_voter_fuzzy_ctc, sequence_voter")
    parser.add_argument("--batch_size", type=int, default=10,
                        help="The batch size for prediction")
    parser.add_argument("--dump", type=str,
                        help="Dump the output as serialized pickle object")
    parser.add_argument("--no_skip_invalid_gt", action="store_true",
                        help="Do no skip invalid gt, instead raise an exception.")

    args = parser.parse_args()

    # allow user to specify json file for model definition, but remove the file extension
    # for further processing
    args.checkpoint = [(cp[:-5] if cp.endswith(".json") else cp) for cp in args.checkpoint]

    # load files
    gt_images = sorted(glob_all(args.eval_imgs))
    gt_txts = [split_all_ext(path)[0] + ".gt.txt" for path in sorted(glob_all(args.eval_imgs))]

    dataset = create_dataset(
        args.eval_dataset,
        DataSetMode.TRAIN,
        images=gt_images,
        texts=gt_txts,
        skip_invalid=not args.no_skip_invalid_gt
    )

    print("Found {} files in the dataset".format(len(dataset)))
    if len(dataset) == 0:
        raise Exception("Empty dataset provided. Check your files argument (got {})!".format(args.files))

    # predict for all models
    n_models = len(args.checkpoint)
    predictor = MultiPredictor(checkpoints=args.checkpoint, batch_size=args.batch_size, processes=args.processes)
    do_prediction = predictor.predict_dataset(dataset, progress_bar=True)

    voters = []
    all_voter_sentences = []
    all_prediction_sentences = [[] for _ in range(n_models)]

    for voter in args.voter:
        # create voter
        voter_params = VoterParams()
        voter_params.type = VoterParams.Type.Value(voter.upper())
        voters.append(voter_from_proto(voter_params))
        all_voter_sentences.append([])

    for prediction, sample in do_prediction:
        for sent, p in zip(all_prediction_sentences, prediction):
            sent.append(p.sentence)

        # vote results
        for voter, voter_sentences in zip(voters, all_voter_sentences):
            voter_sentences.append(voter.vote_prediction_result(prediction).sentence)

    # evaluation
    text_preproc = text_processor_from_proto(predictor.predictors[0].model_params.text_preprocessor)
    evaluator = Evaluator(text_preprocessor=text_preproc)
    evaluator.preload_gt(gt_dataset=dataset, progress_bar=True)

    def single_evaluation(predicted_sentences):
        if len(predicted_sentences) != len(dataset):
            raise Exception("Mismatch in number of gt and pred files: {} != {}. Probably, the prediction did "
                            "not succeed".format(len(dataset), len(predicted_sentences)))

        pred_data_set = create_dataset(
            DataSetType.RAW,
            DataSetMode.EVAL,
            texts=predicted_sentences)

        r = evaluator.run(pred_dataset=pred_data_set, progress_bar=True, processes=args.processes)

        return r

    full_evaluation = {}
    for id, data in [(str(i), sent) for i, sent in enumerate(all_prediction_sentences)] + list(zip(args.voter, all_voter_sentences)):
        full_evaluation[id] = {"eval": single_evaluation(data), "data": data}

    if args.verbose:
        print(full_evaluation)

    if args.dump:
        import pickle
        with open(args.dump, 'wb') as f:
            pickle.dump({"full": full_evaluation, "gt_txts": gt_txts, "gt": dataset.text_samples()}, f)
Exemple #44
0
def run(args):

    # check if loading a json file
    if len(args.files) == 1 and args.files[0].endswith("json"):
        import json
        with open(args.files[0], 'r') as f:
            json_args = json.load(f)
            for key, value in json_args.items():
                setattr(args, key, value)

    # parse whitelist
    whitelist = args.whitelist
    if len(whitelist) == 1:
        whitelist = list(whitelist[0])

    whitelist_files = glob_all(args.whitelist_files)
    for f in whitelist_files:
        with open(f) as txt:
            whitelist += list(txt.read())

    if args.gt_extension is None:
        args.gt_extension = DataSetType.gt_extension(args.dataset)

    if args.validation_extension is None:
        args.validation_extension = DataSetType.gt_extension(args.validation_dataset)

    # Training dataset
    print("Resolving input files")
    input_image_files = sorted(glob_all(args.files))
    if not args.text_files:
        gt_txt_files = [split_all_ext(f)[0] + args.gt_extension for f in input_image_files]
    else:
        gt_txt_files = sorted(glob_all(args.text_files))
        input_image_files, gt_txt_files = keep_files_with_same_file_name(input_image_files, gt_txt_files)
        for img, gt in zip(input_image_files, gt_txt_files):
            if split_all_ext(os.path.basename(img))[0] != split_all_ext(os.path.basename(gt))[0]:
                raise Exception("Expected identical basenames of file: {} and {}".format(img, gt))

    if len(set(gt_txt_files)) != len(gt_txt_files):
        raise Exception("Some image are occurring more than once in the data set.")

    dataset = create_dataset(
        args.dataset,
        DataSetMode.TRAIN,
        images=input_image_files,
        texts=gt_txt_files,
        skip_invalid=not args.no_skip_invalid_gt
    )
    print("Found {} files in the dataset".format(len(dataset)))

    # Validation dataset
    if args.validation:
        print("Resolving validation files")
        validation_image_files = glob_all(args.validation)
        if not args.validation_text_files:
            val_txt_files = [split_all_ext(f)[0] + args.validation_extension for f in validation_image_files]
        else:
            val_txt_files = sorted(glob_all(args.validation_text_files))
            validation_image_files, val_txt_files = keep_files_with_same_file_name(validation_image_files, val_txt_files)
            for img, gt in zip(validation_image_files, val_txt_files):
                if split_all_ext(os.path.basename(img))[0] != split_all_ext(os.path.basename(gt))[0]:
                    raise Exception("Expected identical basenames of validation file: {} and {}".format(img, gt))

        if len(set(val_txt_files)) != len(val_txt_files):
            raise Exception("Some validation images are occurring more than once in the data set.")

        validation_dataset = create_dataset(
            args.validation_dataset,
            DataSetMode.TRAIN,
            images=validation_image_files,
            texts=val_txt_files,
            skip_invalid=not args.no_skip_invalid_gt)
        print("Found {} files in the validation dataset".format(len(validation_dataset)))
    else:
        validation_dataset = None

    params = CheckpointParams()

    params.max_iters = args.max_iters
    params.stats_size = args.stats_size
    params.batch_size = args.batch_size
    params.checkpoint_frequency = args.checkpoint_frequency if args.checkpoint_frequency >= 0 else args.early_stopping_frequency
    params.output_dir = args.output_dir
    params.output_model_prefix = args.output_model_prefix
    params.display = args.display
    params.skip_invalid_gt = not args.no_skip_invalid_gt
    params.processes = args.num_threads
    params.data_aug_retrain_on_original = not args.only_train_on_augmented

    params.early_stopping_frequency = args.early_stopping_frequency
    params.early_stopping_nbest = args.early_stopping_nbest
    params.early_stopping_best_model_prefix = args.early_stopping_best_model_prefix
    params.early_stopping_best_model_output_dir = \
        args.early_stopping_best_model_output_dir if args.early_stopping_best_model_output_dir else args.output_dir

    params.model.data_preprocessor.type = DataPreprocessorParams.DEFAULT_NORMALIZER
    params.model.data_preprocessor.line_height = args.line_height
    params.model.data_preprocessor.pad = args.pad

    # Text pre processing (reading)
    params.model.text_preprocessor.type = TextProcessorParams.MULTI_NORMALIZER
    default_text_normalizer_params(params.model.text_preprocessor.children.add(), default=args.text_normalization)
    default_text_regularizer_params(params.model.text_preprocessor.children.add(), groups=args.text_regularization)
    strip_processor_params = params.model.text_preprocessor.children.add()
    strip_processor_params.type = TextProcessorParams.STRIP_NORMALIZER

    # Text post processing (prediction)
    params.model.text_postprocessor.type = TextProcessorParams.MULTI_NORMALIZER
    default_text_normalizer_params(params.model.text_postprocessor.children.add(), default=args.text_normalization)
    default_text_regularizer_params(params.model.text_postprocessor.children.add(), groups=args.text_regularization)
    strip_processor_params = params.model.text_postprocessor.children.add()
    strip_processor_params.type = TextProcessorParams.STRIP_NORMALIZER

    if args.seed > 0:
        params.model.network.backend.random_seed = args.seed

    if args.bidi_dir:
        # change bidirectional text direction if desired
        bidi_dir_to_enum = {"rtl": TextProcessorParams.BIDI_RTL, "ltr": TextProcessorParams.BIDI_LTR,
                            "auto": TextProcessorParams.BIDI_AUTO}

        bidi_processor_params = params.model.text_preprocessor.children.add()
        bidi_processor_params.type = TextProcessorParams.BIDI_NORMALIZER
        bidi_processor_params.bidi_direction = bidi_dir_to_enum[args.bidi_dir]

        bidi_processor_params = params.model.text_postprocessor.children.add()
        bidi_processor_params.type = TextProcessorParams.BIDI_NORMALIZER
        bidi_processor_params.bidi_direction = TextProcessorParams.BIDI_AUTO

    params.model.line_height = args.line_height

    network_params_from_definition_string(args.network, params.model.network)
    params.model.network.clipping_mode = NetworkParams.ClippingMode.Value("CLIP_" + args.gradient_clipping_mode.upper())
    params.model.network.clipping_constant = args.gradient_clipping_const
    params.model.network.backend.fuzzy_ctc_library_path = args.fuzzy_ctc_library_path
    params.model.network.backend.num_inter_threads = args.num_inter_threads
    params.model.network.backend.num_intra_threads = args.num_intra_threads

    # create the actual trainer
    trainer = Trainer(params,
                      dataset,
                      validation_dataset=validation_dataset,
                      data_augmenter=SimpleDataAugmenter(),
                      n_augmentations=args.n_augmentations,
                      weights=args.weights,
                      codec_whitelist=whitelist,
                      preload_training=not args.train_data_on_the_fly,
                      preload_validation=not args.validation_data_on_the_fly,
                      )
    trainer.train(
        auto_compute_codec=not args.no_auto_compute_codec,
        progress_bar=not args.no_progress_bars
    )
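
The whitelist handling at the top of run() expands a single argument into its individual characters but leaves multiple arguments untouched. A small illustration of that behaviour (the helper name and values are made up):

# Illustration of the whitelist parsing above; parse_whitelist is a
# hypothetical helper and the values are made up.
def parse_whitelist(whitelist):
    # a single entry is split into its individual characters
    return list(whitelist[0]) if len(whitelist) == 1 else whitelist

print(parse_whitelist(["ABC123"]))       # ['A', 'B', 'C', '1', '2', '3']
print(parse_whitelist(["A", "B", "C"]))  # ['A', 'B', 'C']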
Exemple #45
0
def run_for_single_line(args):
    # lines/network/pretraining as base dir
    args.base_dir = os.path.join(args.base_dir, "all" if args.n_lines < 0 else str(args.n_lines))
    pretrain_prefix = "scratch"
    if args.weights and len(args.weights) > 0:
        pretrain_prefix = ",".join([split_all_ext(os.path.basename(path))[0] for path in args.weights])

    args.base_dir = os.path.join(args.base_dir, args.network, pretrain_prefix)

    if not os.path.exists(args.base_dir):
        os.makedirs(args.base_dir)

    tmp_dir = os.path.join(args.base_dir, "tmp")
    if not os.path.exists(tmp_dir):
        os.makedirs(tmp_dir)

    best_models_dir = os.path.join(args.base_dir, "models")
    if not os.path.exists(best_models_dir):
        os.makedirs(best_models_dir)

    prediction_dir = os.path.join(args.base_dir, "predictions")
    if not os.path.exists(prediction_dir):
        os.makedirs(prediction_dir)

    # select number of files
    files = args.train_files
    if args.n_lines > 0:
        all_files = glob_all(args.train_files)
        files = random.sample(all_files, args.n_lines)

    # run the cross-fold-training
    setattr(args, "max_parallel_models", args.max_parallel_models)
    setattr(args, "best_models_dir", best_models_dir)
    setattr(args, "temporary_dir", tmp_dir)
    setattr(args, "keep_temporary_files", False)
    setattr(args, "files", files)
    setattr(args, "best_model_label", "{id}")
    if not args.skip_train:
        cross_fold_train.main(args)

    dump_file = os.path.join(tmp_dir, "prediction.pkl")

    # run the prediction
    if not args.skip_eval:
        # locate the eval script (must be in the same dir as "this")
        predict_script_path = os.path.join(this_absdir, "experiment_eval.py")

        if len(args.single_fold) > 0:
            models = [os.path.join(best_models_dir, "{}.ckpt.json".format(sf)) for sf in args.single_fold]
            for m in models:
                if not os.path.exists(m):
                    raise Exception("Expected model at '{}', but file does not exist".format(m))
        else:
            models = [os.path.join(best_models_dir, d) for d in sorted(os.listdir(best_models_dir)) if d.endswith("json")]
            if len(models) != args.n_folds:
                raise Exception("Expected {} models, one for each fold respectively, but only {} models were found".format(
                    args.n_folds, len(models)
                ))

        for line in run(prefix_run_command([
                "python3", "-u",
                predict_script_path,
                "-j", str(args.num_threads),
                "--batch_size", str(args.batch_size),
                "--dump", dump_file,
                "--eval_imgs"] + args.eval_files + [
                ] + (["--verbose"] if args.verbose else []) + [
                "--checkpoint"] + models + [
                ], args.run, {"threads": args.num_threads}), verbose=args.verbose):
            # Print the output of the thread
            if args.verbose:
                print(line)

    import pickle
    with open(dump_file, 'rb') as f:
        prediction = pickle.load(f)

    return prediction
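
A hypothetical driver for the experiment function above: repeat the run for several training-set sizes and collect the prediction dumps. The attribute names mirror those accessed in run_for_single_line; base_args is assumed to be a namespace filled by an argument parser elsewhere:

# Hypothetical driver: sweep over several training-set sizes. base_args is an
# assumed, pre-parsed argparse.Namespace with every field run_for_single_line reads.
import copy

results = {}
for n_lines in [100, 250, 500, 1000, -1]:   # -1 selects all available lines
    args = copy.deepcopy(base_args)
    args.n_lines = n_lines
    results[n_lines] = run_for_single_line(args)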