Example #1
def _load_data():
    """load an image and its associated lines file"""

    input_filename = "data/20170929_1.png.sample.pkl.1"
    image_sample = util.load(input_filename)

    image = image_sample.data
    lines = [x.data for x in image_sample.result]

    return image, lines
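A minimal usage sketch for the loader above. The pickle path and the `util` module come from the example itself; the display step assumes the loaded image is an OpenCV-compatible NumPy array.

# hypothetical usage: load a page image and its line data, then inspect them
import cv2

image, lines = _load_data()
print("loaded", len(lines), "lines")
cv2.imshow("page", image)
cv2.waitKey(0)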
Example #2
def vis_char(filename, char_to_vis):
    """find and visualize images of a specific character
    and the words that contain them"""

    image_sample = util.load(filename)

    for line_idx, line_pos in enumerate(image_sample.result):
        for word_pos in line_pos.result.result:
            for char_pos in word_pos.result.result:
                if char_pos.result.result == char_to_vis:
                    print(filename, line_idx + 1)
                    cv2.imshow("char", char_pos.result.data)
                    cv2.imshow("word", word_pos.result.data)
                    cv2.waitKey(0)
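A quick call sketch for `vis_char`, assuming it lives in the same module as the loader in Example #1; the sample filename follows that example's pattern and the character to inspect is arbitrary.

# hypothetical usage: step through every detected occurrence of the letter "a"
vis_char("data/20170929_1.png.sample.pkl.1", "a")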
Example #3
def read_text(filename):
    """get the text in a file of predictions / annotations"""

    image_sample = util.load(filename)

    def join_words(words):
        """join each word's characters, then join the words with spaces"""
        return " ".join(["".join(x) for x in words])

    line_results = [join_words(
                        [[char_pos.result.result
                          for char_pos in word_pos.result.result]
                         for word_pos in line_pos.result.result])
                    for line_pos in image_sample.result]
    line_results = [x.translate({ord(c): None for c in CHARS_REMOVE})
                    for x in line_results]

    return "\n".join(line_results)
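A short usage sketch for `read_text`. `CHARS_REMOVE` is referenced but not defined in the example; it is assumed to be a module-level collection of characters to strip.

# hypothetical usage: dump the text recovered from one annotated sample file
text = read_text("data/20170929_1.png.sample.pkl.1")
print(text)
print(len(text.splitlines()), "lines of text")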
Example #4
def _load_samples(filenames):
    """load verified character label samples from multiple files"""

    # load multiple files
    line_poss = [y for x in filenames for y in util.load(x).result]

    # get word images and verified character positions
    word_ims = [
        word_pos.result for line_pos in line_poss
        for word_pos in line_pos.result.result if word_pos.result.verified
    ]

    # only keep characters whose labels have been verified
    char_poss = [
        char_pos for word_im in word_ims for char_pos in word_im.result
        if char_pos.result.verified
    ]

    return [x.result.data
            for x in char_poss], [x.result.result for x in char_poss]
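A usage sketch for this loader, with hypothetical sample filenames following the pattern from Example #1; it simply reports how many verified character images were gathered and which labels occur.

# hypothetical usage: gather verified character images and their labels
char_ims, char_labels = _load_samples(
    ["data/20170929_1.png.sample.pkl.1",
     "data/20170929_2.png.sample.pkl.1"])
print(len(char_ims), "verified character samples")
print("distinct labels:", sorted(set(char_labels)))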
Example #5
def _load_words(filenames):
    """load verified word image samples from multiple files"""

    images = []
    positions = []

    for filename in filenames:
        line_poss = util.load(filename).result

        # get word images and verified character positions
        word_ims = [
            word_pos.result for line_pos in line_poss
            for word_pos in line_pos.result.result if word_pos.result.verified
        ]

        # only keep if word contains other letters besides "`"
        # and all of the character labels have been verified
        word_ims = [
            word_im for word_im in word_ims if np.sum(
                [char_pos.result.result != "`"
                 for char_pos in word_im.result]) > 0 and np.sum(
                     [char_pos.result.verified
                      for char_pos in word_im.result]) == len(word_im.result)
        ]
        # print the words
        for word_im in word_ims:
            print("".join(
                [char_pos.result.result for char_pos in word_im.result]),
                  end=" ")
        print()

        # save memory by ditching extracted character images etc
        for word_im in word_ims:
            images.append(word_im.data)
            positions.append([char_pos.data for char_pos in word_im.result])

    return images, positions
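A matching sketch for the word-level loader (hypothetical filename, same assumptions as above).

# hypothetical usage: collect verified word images and character positions
word_ims, char_positions = _load_words(["data/20170929_1.png.sample.pkl.1"])
print(len(word_ims), "verified word images")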
Example #6
def _load_samples(filenames, half_width, offset):
    """load word image slices from multiple files"""

    images = []
    combined_labels = []

    for filename in filenames:
        line_poss = util.load(filename).result

        # get word images and verified character positions
        word_ims = [
            word_pos.result for line_pos in line_poss
            for word_pos in line_pos.result.result if word_pos.result.verified
        ]

        # helper functions for extracting images
        # extract_char = lambda cpos, im: im[:, np.maximum(cpos[0], 0):cpos[1]]

        def extract_char_half_width(x, im):
            return improc.extract_pos(
                (x + offset - half_width, x + offset + half_width), im)

        area_true = 1  # 2

        for word_im in word_ims:

            for char_pos in word_im.result:
                if ((char_pos.result.result not in IGNORE_CHARS)
                        and char_pos.result.verified
                        and (char_pos.data[1] - char_pos.data[0]) > 1):

                    char_im = char_pos.result.data

                    # print(char_pos.result.result, end="")

                    for x_pos in range(0, char_im.shape[1]):

                        extract_im = extract_char_half_width(
                            char_pos.data[0] + x_pos, word_im.data)

                        # choose gap samples from start and end of each
                        # character position
                        label = ((x_pos < area_true)
                                 or (x_pos > char_im.shape[1] - area_true - 1))

                        # choose gap samples only from start
                        # label = True if x_pos < area_true else False

                        # choose gap samples only from end
                        # label = x_pos > (char_im.shape[1] - area_true)

                        images.append(extract_im)
                        combined_labels.append((label, char_pos.result.result))

                        # cv2.namedWindow("word", cv2.WINDOW_NORMAL)
                        # cv2.namedWindow("extract", cv2.WINDOW_NORMAL)
                        # disp_word_im = np.copy(word_im.data)
                        # disp_word_im[:, char_pos.data[0] + x_pos] = (0, 0, 255)
                        # print(char_pos.data[0] + x_pos, label, char_pos.result.result)
                        # cv2.imshow("word", disp_word_im)
                        # cv2.imshow("extract", extract_im)
                        # cv2.waitKey(200)

    return images, combined_labels
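A sketch of how the slice loader might feed a gap / no-gap classifier. The filename, `half_width`, and `offset` values here are illustrative, not taken from the original.

# hypothetical usage: build training slices for a character-gap classifier
images, combined_labels = _load_samples(
    ["data/20170929_1.png.sample.pkl.1"], half_width=8, offset=0)
gap_labels = [label for label, _ in combined_labels]
print(len(images), "slices,", sum(gap_labels), "labeled as gaps")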
Example #7
def _load_samples_old(filenames, half_width, offset):
    """old method of loading word image slices"""

    images = []
    combined_labels = []

    for filename in filenames:
        line_poss = util.load(filename).result

        # get word images and verified character positions
        word_ims = [
            word_pos.result for line_pos in line_poss
            for word_pos in line_pos.result.result if word_pos.result.verified
        ]

        # helper functions for extracting images
        # extract_char = lambda cpos, im: im[:, np.maximum(cpos[0], 0):cpos[1]]

        def extract_char_half_width(x, im):
            return improc.extract_pos(
                (x + offset - half_width, x + offset + half_width), im)

        def half(start, end):
            """point halfway between two points"""
            return int((start + end) * 0.5)

        def extract(extract_func):
            """extract images from valid char positions using extract_func"""
            res = [
                (extract_func(char_pos.data, word_im.data),
                 char_pos.result.result, char_pos.data, word_im.data)
                for word_im in word_ims for char_pos in word_im.result
                if (char_pos.result.result not in IGNORE_CHARS)
                and char_pos.result.verified
                and (char_pos.data[1] - char_pos.data[0]) > 1
            ]
            return res

        # extract images from the ends of all positions in each word
        char_end_ims = (
            extract(lambda x, y: extract_char_half_width(x[1], y)) +
            extract(lambda x, y: extract_char_half_width(x[1] + 1, y)) +
            extract(lambda x, y: extract_char_half_width(x[1] - 1, y)))

        # extract images from half, one fourth, and three fourths of the way
        # between starts and ends of each position
        char_middle_ims = (
            extract(lambda x, y: extract_char_half_width(half(x[0], x[1]), y))
            + extract(lambda x, y: extract_char_half_width(
                half(x[0], half(x[0], x[1])), y)) +
            extract(lambda x, y: extract_char_half_width(
                half(half(x[0], x[1]), x[1]), y)))

        # filter out images that are too small
        char_end_ims = [
            x for x in char_end_ims if x[0].shape[1] > 1.5 * half_width
        ]
        char_middle_ims = [
            x for x in char_middle_ims if x[0].shape[1] > 1.5 * half_width
        ]

        data_with_src = char_end_ims + char_middle_ims
        labels = [True] * len(char_end_ims) + [False] * len(char_middle_ims)

        combined_labels.append([(x, y[1])
                                for x, y in zip(labels, data_with_src)])
        images.append([x[0] for x in data_with_src])

    return images, combined_labels
Example #8
def main(argv):
    """main program"""

    if len(argv) < 3:
        print("Usage: verify input_file <line | multi | view>")
        sys.exit()

    input_filename = argv[1]
    verify_type = argv[2]

    new_char_annotation_mode = False

    # filename has a number version suffix
    sample_filename = input_filename + ".sample.pkl"
    sample_dirname, sample_basename = os.path.split(sample_filename)

    possible_files = [
        x for x in os.listdir(sample_dirname) if x.startswith(sample_basename)
    ]
    versions = [int_safe(x.split(".")[-1]) for x in possible_files]
    latest_idx = np.argmax(versions)

    latest_version = versions[latest_idx]
    latest_filename = possible_files[latest_idx]

    sample_filename_full = os.path.join(sample_dirname, latest_filename)
    print("loading sample file:", sample_filename_full)
    image_sample = util.load(sample_filename_full)
    # with open(sample_filename_full, "rb") as sample_file:
    #     image_sample = pickle.load(sample_file)

    status = _verification_status_recursive(image_sample)
    print(status[0], "/", status[1], "samples verified", "-",
          np.round(status[0] / status[1] * 100, 2), "%")

    (process_image, process_line_position, process_word_position,
     process_char_position) = driver.current_best_process()

    if verify_type == "line":
        _mutate_verify_line_poss(image_sample, process_line_position)
    elif verify_type == "view":
        for line_pos in image_sample.result:
            img = analysisimage.LineAnalysisImage(line_pos.result).image
            cv2.namedWindow("line analysis", cv2.WINDOW_NORMAL)
            cv2.imshow("line analysis", img)
            cv2.waitKey()
    else:
        if len(argv) > 3:
            start_idx = int(argv[3]) - 1
        else:
            start_idx = 0
        for idx in range(start_idx, len(image_sample.result)):
            line_pos = image_sample.result[idx]
            print("editing line " + str(idx + 1) + " / " +
                  str(len(image_sample.result)))
            _mutate_verify_multi(line_pos.result, process_word_position,
                                 process_char_position,
                                 new_char_annotation_mode)

    if verify_type != "view":
        status = _verification_status_recursive(image_sample)
        print(status[0], "/", status[1], "samples verified", "-",
              np.round(status[0] / status[1] * 100, 2), "%")

        sample_filename_full = sample_filename + "." + str(latest_version + 1)
        print("writing sample file:", sample_filename_full)
        util.save(image_sample, sample_filename_full)
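A hedged sketch of calling the entry point directly; the script name in `argv[0]` is a placeholder, and the corresponding `.sample.pkl.N` files must already exist on disk for loading to succeed.

# hypothetical usage: verify line positions for one page image, then review it
main(["verify.py", "data/20170929_1.png", "line"])
main(["verify.py", "data/20170929_1.png", "view"])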