Example #1
def render_generated_image(char_obj, select_strokes_dict, size=400):
    image = createBlankGrayscaleImageWithSize((size, size))
    offset_base = int(abs(size - 256) / 2)

    for key in select_strokes_dict.keys():

        print(key, " ", len(select_strokes_dict[key]))
        print(type(key))

        # get real position of stroke
        real_post = char_obj.strokes[int(key)].position

        cent_x0 = int(real_post[0] + real_post[2] / 2)
        cent_y0 = int(real_post[1] + real_post[3] / 2)

        # get position of similar stroke
        stroke_path = select_strokes_dict[key]
        print(stroke_path)
        stroke_img = cv2.imread(stroke_path, 0)
        stroke_rect = getSingleMaxBoundingBoxOfImage(stroke_img)
        if stroke_rect is None:
            continue

        for x in range(stroke_rect[2]):
            for y in range(stroke_rect[3]):
                if stroke_img[stroke_rect[1] + y][stroke_rect[0] + x] == 0:
                    image[cent_y0 - int(stroke_rect[3] / 2) + offset_base + y][
                        cent_x0 - int(stroke_rect[2] / 2) + offset_base + x] = \
                        stroke_img[stroke_rect[1] + y][stroke_rect[0] + x]

    return image
Example #2
def sorted_strokes_based_on_similarity(position, paths, alpha=0.8):
    x0, y0, w0, h0 = position

    # similar distance:  ssim = alpha * size_distance + (1-alpha) * location_distance
    ssim_distance_dict = {}

    for i in range(len(paths)):
        p_ = paths[i]
        if p_ == "":
            continue

        img_ = cv2.imread(p_, 0)
        x, y, w, h = getSingleMaxBoundingBoxOfImage(img_)

        # calculate the size similar distance
        size_dist = abs(w - w0) + abs(h - h0)

        # calculate the location similar distance
        location_dist = abs(x - x0) + abs(y - y0)

        similar_dist = alpha * size_dist + (1 - alpha) * location_dist
        ssim_distance_dict[i] = similar_dist

    # sort the size and location similar distance
    ssim_distance_dict_sorted = [
        (k, ssim_distance_dict[k])
        for k in sorted(ssim_distance_dict, key=ssim_distance_dict.get)
    ]

    new_paths = []
    for item in ssim_distance_dict_sorted:
        new_paths.append(paths[item[0]])

    return new_paths
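
A minimal usage sketch for the ranking above; the candidate stroke paths and the target (x, y, w, h) box are hypothetical, and cv2 plus getSingleMaxBoundingBoxOfImage are assumed to be importable just as in the function itself.

# Hypothetical candidate stroke images and a target (x, y, w, h) box.
candidate_paths = ["strokes/heng_0.png", "strokes/heng_1.png", "strokes/heng_2.png"]
target_position = (20, 30, 180, 40)

ranked_paths = sorted_strokes_based_on_similarity(target_position, candidate_paths, alpha=0.8)
print("closest stroke by size/location:", ranked_paths[0])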
Example #3
def check_heng_pattern(img_path):
    img_ = cv2.imread(img_path, 0)
    if img_ is not None:
        x, y, w, h = getSingleMaxBoundingBoxOfImage(img_)

        black_area = np.sum((255 - np.array(img_, dtype=np.uint8)) / 255)
        total_area = w * h

        # long heng
        if w / h >= 6:
            return True
        if w / h >= 4.5 and black_area / total_area > 0.4:
            return True

        if w / h >= 3.2 and black_area / total_area > 0.4:
            return True

        if w / h >= 3.8 and black_area / total_area > 0.37:
            return True

        if w / h >= 4.9 and black_area / total_area > 0.31:
            return True
        if w / h >= 4.8 and black_area / total_area > 0.34:
            return True

    return False
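
For reference, a short sketch of the two quantities the heng heuristic above combines, computed on a hypothetical binarized stroke image (white background 255, black ink 0); getSingleMaxBoundingBoxOfImage is the same project helper used above.

img = cv2.imread("strokes/some_stroke.png", 0)    # hypothetical stroke image path
x, y, w, h = getSingleMaxBoundingBoxOfImage(img)  # tight bounding box around the ink
aspect_ratio = w / h                              # wide, flat boxes suggest a heng (horizontal stroke)
ink_density = np.sum((255 - np.array(img, dtype=np.uint8)) / 255) / (w * h)
print(aspect_ratio, ink_density)                  # the rules above combine these two values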
Example #4
def getStrokesListFromTemplates(templates_path):
    """
        Obtain strokes from templates files.
    :param templates_path:
    :return:
    """
    stroke_windows = []
    if templates_path == "" or not os.path.exists(templates_path):
        return stroke_windows

    templates_files = [f for f in os.listdir(templates_path) if '.png' in f]
    print(templates_files)

    for temp_file in templates_files:
        temp_path = templates_path + "/" + temp_file
        temp_img = cv2.imread(temp_path, 0)
        # scale image
        # temp_img = cv2.resize(temp_img, (int(temp_img.shape[0]), int(temp_img.shape[1])))

        _, temp_img = cv2.threshold(temp_img, 127, 255, cv2.THRESH_BINARY)
        print(temp_img.shape)

        # obtain the stroke window
        x, y, w, h = getSingleMaxBoundingBoxOfImage(temp_img)

        stroke_window = temp_img[y:y + h, x:x + w]
        print(stroke_window.shape)
        #
        # cv2.imshow(temp_path, stroke_window)
        stroke_windows.append(stroke_window)

    return stroke_windows
Example #5
def stroke_recompose(char_info_list, char_target_strokes_list):
    generated_result = []
    generated_strokes_result = []
    generated_result_index_list = []

    for i in range(len(char_info_list)):
        ch_obj = char_info_list[i]
        ch_stroke_imgs = char_target_strokes_list[i]
        bk = createBlankGrayscaleImageWithSize((400, 400))

        # strokes template
        strokes_temp_imgs = []
        stroke_img_index = []

        # merge all strokes with center alignment
        if len(ch_stroke_imgs) == len(ch_obj.stroke_orders):
            print("imgs of stroke are same length")

        for j in range(len(ch_stroke_imgs)):
            if len(ch_stroke_imgs[j]) > 0:
                img_path = ch_stroke_imgs[j][0]
                stroke_img_index.append(0)
                img_ = cv2.imread(img_path, 0)
                img_ = cv2.resize(img_, (256, 256))
                rect_ = getSingleMaxBoundingBoxOfImage(img_)

                # resize stroke template image
                s_temp_img = createBlankGrayscaleImageWithSize((400, 400))
                # s_temp_img[72: 72+256, 72: 72+256] = img_

                cent_x0 = int(ch_obj.stroke_position[j][0] +
                              ch_obj.stroke_position[j][2] / 2)
                cent_y0 = int(ch_obj.stroke_position[j][1] +
                              ch_obj.stroke_position[j][3] / 2)

                # only copy the valid pixels
                for x_ in range(rect_[2]):
                    for y_ in range(rect_[3]):
                        if img_[rect_[1] + y_][rect_[0] + x_] == 0:
                            bk[cent_y0 - int(rect_[3] / 2) + 72 + y_][cent_x0 - int(rect_[2] / 2) + 72 + x_] = \
                            img_[rect_[1] + y_][rect_[0] + x_]
                            s_temp_img[cent_y0 - int(rect_[3] / 2) + 72 + y_][cent_x0 - int(rect_[2] / 2) + 72 + x_] = \
                                img_[rect_[1] + y_][rect_[0] + x_]
                strokes_temp_imgs.append(s_temp_img)
        generated_result.append(bk)
        generated_strokes_result.append(strokes_temp_imgs)
        generated_result_index_list.append(stroke_img_index)

    return generated_result, generated_strokes_result, generated_result_index_list
Example #6
def find_similar_basic_radicals_img_names_with_same_position_and_size(
        post, bs_type_path, threshold=5):
    """
    Find the most similar bs with same position and size
    :param post:
    :param bs_type_path:
    :param threshold:
    :return:
    """
    if post is None or bs_type_path == "" or not os.path.exists(bs_type_path):
        return []

    temp_bs_names = [f for f in os.listdir(bs_type_path) if ".png" in f]
    print("temp bs image num: ", len(temp_bs_names))

    similar_bs_names = []
    print("post: ", post)

    cent_x0 = int(post[0] + post[2] / 2)
    cent_y0 = int(post[1] + post[3] / 2)

    similar_image_ssim_dict = {}
    for t_name in temp_bs_names:
        t_img_path = os.path.join(bs_type_path, t_name)
        t_img = cv2.imread(t_img_path, 0)
        _, t_img = cv2.threshold(t_img, 127, 255, cv2.THRESH_BINARY)
        x, y, w, h = getSingleMaxBoundingBoxOfImage(t_img)

        cent_x = int(x + w / 2)
        cent_y = int(y + h / 2)

        loc_ssim = abs(cent_x0 - cent_x) + abs(cent_y0 - cent_y)
        size_ssim = abs(w - post[2]) + abs(h - post[3])

        total_ssim = 0.5 * loc_ssim + 0.5 * size_ssim

        if total_ssim <= threshold:
            similar_image_ssim_dict[t_name] = total_ssim

    # sort the found images by the total ssim distance
    if len(similar_image_ssim_dict) > 0:
        dict_sorted_list = [(k, similar_image_ssim_dict[k]) for k in sorted(similar_image_ssim_dict,\
                            key=similar_image_ssim_dict.get)]

        for k, _ in dict_sorted_list:
            similar_bs_names.append(k)

    return similar_bs_names
Example #7
def merge_radical_up_down(strokes_path):
    if not os.path.exists(strokes_path):
        print("stroke path not exist")
        return

    # get all stroke images
    stroke_names_ = [f for f in os.listdir(strokes_path) if ".png" in f]
    print("stroke : ", stroke_names_)

    # sorted image names
    stroke_names = []
    for i in range(len(stroke_names_)):
        for sn in stroke_names_:
            if "_{}.png".format(i) in sn:
                stroke_names.append(sn)
                break
    print(stroke_names)

    # up and down list
    up_strokes_list = []
    down_strokes_list = []

    for sn in stroke_names:
        sk_img_path = os.path.join(strokes_path, sn)
        sk_img = cv2.imread(sk_img_path, 0)
        x, y, w, h = getSingleMaxBoundingBoxOfImage(sk_img)
        cent_x = x + int(w / 2)
        cent_y = y + int(h / 2)

        if cent_y > 120:
            down_strokes_list.append(sk_img_path)
        else:
            up_strokes_list.append(sk_img_path)

    # check images
    bk_up = createBlankGrayscaleImageWithSize((256, 256))
    bk_down = createBlankGrayscaleImageWithSize((256, 256))

    for img_path in up_strokes_list:
        img = cv2.imread(img_path, 0)
        bk_up = merge_two_images(bk_up, img)

    for img_path in down_strokes_list:
        img = cv2.imread(img_path, 0)
        bk_down = merge_two_images(bk_down, img)

    return (bk_up, bk_down)
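
A usage sketch for the up/down split above; the strokes folder is hypothetical, and merge_two_images and createBlankGrayscaleImageWithSize are assumed from the project utilities.

up_img, down_img = merge_radical_up_down("chars/some_char/strokes")  # hypothetical folder of stroke PNGs
cv2.imwrite("radical_up.png", up_img)
cv2.imwrite("radical_down.png", down_img)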
Example #8
def main():
    base_path = "../templates/templates_comparison"

    temp_roots = [f for f in os.listdir(base_path) if "." not in f]

    for temp_rt in temp_roots:
        temp_path = base_path + "/" + temp_rt + "/char/" + temp_rt + ".png"
        print(temp_path)

        if not os.path.exists(temp_path):
            continue

        temp_img = cv2.imread(temp_path, 0)
        _, temp_img = cv2.threshold(temp_img, 127, 255, cv2.THRESH_BINARY)

        x, y, w, h = getSingleMaxBoundingBoxOfImage(temp_img)

        crop_img = temp_img[y:y + h, x:x + w]

        crop_path = base_path + "/" + temp_rt + "/char/" + temp_rt + "_crop.png"
        cv2.imwrite(crop_path, crop_img)

        c0_x = int(crop_img.shape[1] / 2)
        c0_y = int(crop_img.shape[0] / 2)

        new_w = max(w, h) + int(0.1 * max(w, h))
        new_h = new_w

        c1_x = int(new_w / 2)
        c1_y = int(new_h / 2)

        # offset
        offset_x = c1_x - c0_x
        offset_y = c1_y - c0_y

        new_img = np.ones((new_w, new_h)) * 255
        new_img = np.array(new_img, dtype=np.uint8)

        for y in range(crop_img.shape[0]):
            for x in range(crop_img.shape[1]):
                new_img[y + offset_y][x + offset_x] = crop_img[y][x]

        resize_path = base_path + "/" + temp_rt + "/char/" + temp_rt + "_resize.png"

        cv2.imwrite(resize_path, new_img)
Example #9
def main():
    img_path = "0001ding.jpg"
    template_path = "0001ding_stroke.jpg"

    img = cv2.imread(img_path, 0)
    temp_img = cv2.imread(template_path, 0)

    _, img = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)
    _, temp_img = cv2.threshold(temp_img, 127, 255, cv2.THRESH_BINARY)

    img_rgb = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)

    temp_x, temp_y, temp_w, temp_h = getSingleMaxBoundingBoxOfImage(temp_img)

    temp_img = temp_img[temp_y:temp_y + temp_h, temp_x:temp_x + temp_w]  # crop with the row (y) index first, then the column (x)

    # All the 6 methods for comparison in a list
    methods = ['cv2.TM_CCOEFF', 'cv2.TM_CCOEFF_NORMED', 'cv2.TM_CCORR',
               'cv2.TM_CCORR_NORMED', 'cv2.TM_SQDIFF', 'cv2.TM_SQDIFF_NORMED']

    for meth in methods:
        img_ = img.copy()
        method = eval(meth)

        res = cv2.matchTemplate(img_, temp_img, method)

        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)

        # If the method is TM_SQDIFF or TM_SQDIFF_NORMED, take minimum
        if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
            top_left = min_loc
        else:
            top_left = max_loc
        bottom_right = (top_left[0] + temp_w, top_left[1] + temp_h)

        cv2.rectangle(img_rgb, top_left, bottom_right, (0,0,255), 2)

        plt.subplot(121), plt.imshow(res, cmap='gray')
        plt.title('Matching Result'), plt.xticks([]), plt.yticks([])
        plt.subplot(122), plt.imshow(img_rgb)
        plt.title('Detected Point'), plt.xticks([]), plt.yticks([])
        plt.suptitle(meth)

        plt.show()
Example #10
def check_shu_pattern(img_path):
    img_ = cv2.imread(img_path, 0)
    if img_ is not None:
        x, y, w, h = getSingleMaxBoundingBoxOfImage(img_)

        black_area = np.sum((255 - np.array(img_, dtype=np.uint8)) / 255)
        total_area = w * h

        if h / w > 7.1:
            return True
        if h / w > 6.6 and black_area / total_area > 0.4:
            return True

        if h / w > 5.45 and black_area / total_area > 0.45:
            return True
        if h / w > 4.9 and black_area / total_area > 0.51:
            return True

    return False
Example #11
def img_align_center(img):
    if img is None:
        print("img is none!")
        return

    x, y, w, h = getSingleMaxBoundingBoxOfImage(img)
    cent_x = x + int(w / 2)
    cent_y = y + int(h / 2)

    bk = createBlankGrayscaleImageWithSize(img.shape)

    offset_x = 128 - cent_x
    offset_y = 128 - cent_y

    for y in range(img.shape[0]):
        for x in range(img.shape[1]):
            if img[y][x] == 0:
                bk[y+offset_y][x+offset_x] = img[y][x]

    return bk
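
A usage sketch for the centering helper above, assuming a 256 x 256 grayscale stroke image (the hard-coded 128 offsets imply that size); the path is hypothetical.

stroke_img = cv2.imread("strokes/some_stroke.png", 0)  # hypothetical 256 x 256 stroke image
centered_img = img_align_center(stroke_img)            # ink re-centered around (128, 128)
cv2.imwrite("some_stroke_centered.png", centered_img)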
Example #12
def merge_radical_left_right(img):
    if img is None:
        print("img is none!")
        return

    rects = getAllMiniBoundingBoxesOfImage(img)

    rect_imgs = getConnectedComponentsOfGrayScale(img)
    print("rect img num: ", len(rect_imgs))
    print(rects)

    left_side_list = []
    right_side_list = []
    for i in range(len(rects)):
        x, y, w, h = rects[i]

        cent_x = x + int(w / 2)
        cent_y = y + int(h / 2)

        if cent_x > 127:
            right_side_list.append(rects[i])
        else:
            left_side_list.append(rects[i])

        print(cent_x, cent_y)

    print(left_side_list)
    print(right_side_list)

    # check images
    bk_left = createBlankGrayscaleImageWithSize(img.shape)
    bk_right = createBlankGrayscaleImageWithSize(img.shape)

    for rimg in rect_imgs:
        rect = getSingleMaxBoundingBoxOfImage(rimg)
        if rect in left_side_list:
            bk_left = merge_two_images(bk_left, rimg)
        elif rect in right_side_list:
            bk_right = merge_two_images(bk_right, rimg)

    return (bk_left, bk_right)
Example #13
def merge_stroke_images(bk, post, similar_stroke_path):
    if bk is None or post is None or similar_stroke_path == "":
        return

    img = cv2.imread(similar_stroke_path, 0)
    if img is None:
        print("not open image {}".format(similar_stroke_path))
        return

    stroke_bk = createBlankGrayscaleImageWithSize(bk.shape)

    offset_base = int((bk.shape[0] - img.shape[0]) / 2)

    _, img = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)

    rect = getSingleMaxBoundingBoxOfImage(img)
    if rect is None:
        print("not get rect of stroke image")
        return

    # merge bk and stroke image with center align
    cent_x0 = int(post[0] + post[2] / 2)
    cent_y0 = int(post[1] + post[3] / 2)

    cent_x = int(rect[0] + rect[2] / 2)
    cent_y = int(rect[1] + rect[3] / 2)

    offset_x = cent_x - cent_x0
    offset_y = cent_y - cent_y0

    new_img = createBlankGrayscaleImageWithSize(bk.shape)
    new_img[rect[1]-offset_y+offset_base: rect[1]-offset_y+offset_base+rect[3], rect[0]-offset_x+offset_base: \
                        rect[0]-offset_x+offset_base+rect[2]] = img[rect[1]:rect[1]+rect[3], rect[0]:rect[0]+rect[2]]

    for x in range(new_img.shape[0]):
        for y in range(new_img.shape[1]):
            if new_img[x][y] == 0:
                bk[x][y] = 0
                stroke_bk[x][y] = 0
    return bk, stroke_bk.copy()
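
A usage sketch for merging a similar stroke into a larger canvas with the function above; the canvas size, target box, and stroke path are hypothetical, and createBlankGrayscaleImageWithSize is the same project helper used above.

bk = createBlankGrayscaleImageWithSize((400, 400))            # blank 400 x 400 canvas
post = (20, 30, 180, 40)                                      # hypothetical target (x, y, w, h) box
result = merge_stroke_images(bk, post, "strokes/heng_0.png")  # hypothetical stroke path
if result is not None:
    bk, stroke_layer = result                                 # merged canvas and the stroke-only layer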
Example #14
def find_most_similar_strokes(post, stroke_type_path):
    if post is None or stroke_type_path == "" or not os.path.exists(
            stroke_type_path):
        return []

    temp_sk_names = [f for f in os.listdir(stroke_type_path) if ".png" in f]
    print("temp sk image num:", len(temp_sk_names))

    similar_sk_names = []
    similar_sk_id = 0
    min_dist = 10000000
    for t_name in temp_sk_names:
        t_img_path = os.path.join(stroke_type_path, t_name)
        t_img = cv2.imread(t_img_path, 0)
        x, y, w, h = getSingleMaxBoundingBoxOfImage(t_img)

        dist = abs(w - post[2]) + abs(h - post[3])
        if dist < min_dist:
            min_dist = dist
            similar_sk_id = temp_sk_names.index(t_name)

    similar_sk_names.append(temp_sk_names[similar_sk_id])
    return similar_sk_names
Example #15
def find_most_match_stroke(position, paths, alpha=0.8):
    """
    Find the most match stroke
    :param position:
    :param paths:
    :return:
    """
    x0, y0, w0, h0 = position

    # similar distance:  ssim = alpha * size_distance + (1-alpha) * location_distance
    ssim_distance_dict = {}

    for i in range(len(paths)):
        p_ = paths[i]
        if p_ == "":
            continue

        img_ = cv2.imread(p_, 0)
        x, y, w, h = getSingleMaxBoundingBoxOfImage(img_)

        # calculate the size similar distance
        size_dist = abs(w - w0) + abs(h - h0)

        # calculate the location similar distance
        location_dist = abs(x - x0) + abs(y - y0)

        similar_dist = alpha * size_dist + (1 - alpha) * location_dist
        ssim_distance_dict[i] = similar_dist

    # sort the size and location similar distance
    ssim_distance_dict_sorted = [
        (k, ssim_distance_dict[k])
        for k in sorted(ssim_distance_dict, key=ssim_distance_dict.get)
    ]

    most_similar_stroke_id = ssim_distance_dict_sorted[0][0]
    print("most ssim distance: ", ssim_distance_dict_sorted[0][1])

    return paths[most_similar_stroke_id]
Example #16
def stroke_img_align_center():
    img_path = '/Users/liupeng/Documents/Data/Strokes_png'
    save_path = '/Users/liupeng/Documents/Data/stroke_classification_dataset/images'

    filenames = [f for f in os.listdir(img_path) if '.png' in f]
    print('Files count: ', len(filenames))

    for i in range(len(filenames)):
        print("procsss: ", i)
        path_ = os.path.join(img_path, filenames[i])
        img_ = cv2.imread(path_, 0)
        if img_ is None:
            continue
        x, y, w, h = getSingleMaxBoundingBoxOfImage(img_)
        blank_ = createBlankGrayscaleImage(img_)

        # blank_[]

        blank_[128 - int(h / 2):128 - int(h / 2) + h,
               128 - int(w / 2):128 - int(w / 2) + w] = img_[y:y + h, x:x + w]

        cv2.imwrite(os.path.join(save_path, filenames[i]), blank_)
Example #17
def find_similar_basic_radicals_img_names_with_same_size(
        post, bs_type_path, threshold=5):
    """
        Find the most similar bs with same size
        :param post:
        :param bs_type_path:
        :param threshold:
        :return:
        """
    if post is None or bs_type_path == "" or not os.path.exists(bs_type_path):
        return []

    temp_bs_names = [f for f in os.listdir(bs_type_path) if ".png" in f]
    print("temp bs image num: ", len(temp_bs_names))

    similar_bs_names = []

    similar_image_ssim_dict = {}
    for t_name in temp_bs_names:
        t_img_path = os.path.join(bs_type_path, t_name)
        t_img = cv2.imread(t_img_path, 0)
        _, t_img = cv2.threshold(t_img, 127, 255, cv2.THRESH_BINARY)
        x, y, w, h = getSingleMaxBoundingBoxOfImage(t_img)

        size_ssim = abs(w - post[2]) + abs(h - post[3])
        if size_ssim <= threshold:
            similar_image_ssim_dict[t_name] = size_ssim

    # sorted
    if len(similar_image_ssim_dict) > 0:
        dict_sorted_list = [(k, similar_image_ssim_dict[k]) for k in sorted(similar_image_ssim_dict, \
                                                                            key=similar_image_ssim_dict.get)]
        for k, _ in dict_sorted_list:
            similar_bs_names.append(k)

    return similar_bs_names
Example #18
def find_similar_strokes_img_names_with_same_size(post,
                                                  stroke_type_path,
                                                  threshold=5):
    if post is None or stroke_type_path == "" or not os.path.exists(
            stroke_type_path):
        return []

    temp_sk_names = [f for f in os.listdir(stroke_type_path) if ".png" in f]
    print("temp sk image num:", len(temp_sk_names))

    similar_sk_names = []

    similar_image_ssim_dict = {}
    for t_name in temp_sk_names:
        t_img_path = os.path.join(stroke_type_path, t_name)
        t_img = cv2.imread(t_img_path, 0)
        _, t_img = cv2.threshold(t_img, 127, 255, cv2.THRESH_BINARY)
        x, y, w, h = getSingleMaxBoundingBoxOfImage(t_img)

        size_ssim = abs(w - post[2]) + abs(h - post[3])
        if size_ssim <= threshold:
            similar_image_ssim_dict[t_name] = size_ssim

    # sorted
    if len(similar_image_ssim_dict) > 0:
        dict_sorted_list = [(k, similar_image_ssim_dict[k]) for k in sorted(similar_image_ssim_dict, \
                                                                key=similar_image_ssim_dict.get)]

        for k, _ in dict_sorted_list:
            similar_sk_names.append(k)

    # if no stroke with the same size is found, fall back to the most similar stroke
    if len(similar_sk_names) == 0:
        similar_sk_names = find_most_similar_strokes(post, stroke_type_path)

    return similar_sk_names
Example #19
def query_similar_basic_radicals_and_strokes(basic_radicals_dataset,
                                             strokes_dataset, char_info_list):
    """
    Find similar basic radicals and strokes.

    :param basic_radicals_dataset:
    :param strokes_dataset:
    :param char_info_list:
    :return:
    """
    if basic_radicals_dataset is None or strokes_dataset is None:
        print("Basic radical dataset or stroke dataset should not be None!")
        return
    if char_info_list is None or len(char_info_list) == 0:
        print("Char info list should not be None!")
        return

    # iterate over the char info list
    similar_chars = []
    for ch_id in range(len(char_info_list)):
        ch_obj = char_info_list[ch_id]

        similar_basic_radicals = {}
        similar_strokes = {}

        found_stroke_id = []  # record the stroke ids found during the basic radical search

        # search similar basic radicals.
        ch_radicals = ch_obj.basic_radicals

        if len(ch_radicals) > 0:
            # this char has basic radical
            for bs_id in range(len(ch_radicals)):
                bs_obj = ch_radicals[bs_id]
                bs_obj_post = bs_obj.position.replace("[", "").replace(
                    "]", "").replace(" ", "").split(",")
                bs_obj_post = [int(p) for p in bs_obj_post]
                bs_obj_tag = bs_obj.tag
                bs_obj_id = bs_obj.id

                similar_bss = []

                # find all library basic radical objects
                if bs_obj_tag not in basic_radicals_dataset:
                    print(bs_obj_tag, " not in 775 basic radicals")
                else:
                    basic_radicals_lib = basic_radicals_dataset[bs_obj_tag]
                    print("Basic radicals lib len: ", len(basic_radicals_lib))

                    for bsl in basic_radicals_lib:

                        # get bsl obj path
                        path_ = bsl.image_path
                        bsl_tag = path_.split("/")[-1].replace(
                            ".png", "").split("_")[0]
                        bsl_id = path_.split("/")[-1].replace(".png",
                                                              "").split("_")[2]

                        x, y, w, h = getSingleMaxBoundingBoxOfImage(
                            bsl.image_bytes)

                        sim_bs_dict = {}

                        # rule1
                        if abs(x - bs_obj_post[0]) <= 5 and abs(
                                y - bs_obj_post[1]) <= 5 and abs(
                                    w - bs_obj_post[2]) <= 5 and abs(
                                        h - bs_obj_post[3]) <= 5:
                            sim_bs_dict["path"] = bsl.image_path

                            # target bs obj strokes id
                            print("bsl tag: ", bsl_tag)
                            targ_bs_obj_list = query_char_info_from_chars_list(
                                [bsl_tag])
                            if len(targ_bs_obj_list) == 0:
                                print("template bs objs not found!")
                            else:
                                targ_bs_obj = targ_bs_obj_list[0]

                                for bs_ in targ_bs_obj.basic_radicals:
                                    if bs_.id == bsl_id:
                                        sim_bs_dict[
                                            "strokes_id"] = bs_.strokes_id

                                print("sim strokes id: ",
                                      sim_bs_dict["strokes_id"])

                            sim_bs_dict["position"] = (x, y, w, h)

                            similar_bss.append(sim_bs_dict)

                similar_basic_radicals[bs_obj_id] = similar_bss

        print(similar_basic_radicals)

        # identify the found strokes in found basic radical
        for bs_id in similar_basic_radicals.keys():
            if len(similar_basic_radicals[bs_id]) > 0:
                # similar basic radicals were found for this radical; add its stroke ids to the found strokes list

                for bs in ch_obj.basic_radicals:
                    if bs_id == bs.id:
                        found_stroke_id += bs.strokes_id
        found_stroke_id = [int(f) for f in found_stroke_id]
        print(found_stroke_id)

        # find similar strokes for those not found strokes
        for sk in ch_obj.strokes:
            if sk.id in found_stroke_id:
                continue
            # find similar stroke
            post_ = sk.position
            id_ = sk.id
            type_ = sk.tag

            sm_strokes = find_similar_strokes(type_, post_, strokes_dataset)

            similar_strokes[id_] = sm_strokes

        similar_chars.append((similar_basic_radicals, similar_strokes))

    return similar_chars
Example #20
def add_bs_stroke_position_to_xml():

    chars_in_svg = []
    with open(chars_txt_path, "r") as f:
        for ch in f.readlines():
            chars_in_svg.append(ch.strip())

    strokes_names = [f for f in os.listdir(strokes_png_path) if ".png" in f]
    print("stroke names: ", len(strokes_names))

    tree = ET.parse(xml_path)
    root = tree.getroot()

    count = 0
    for child in root:

        count += 1
        print(count)

        tag = child.attrib["TAG"].strip()
        if len(tag) > 1:
            continue
        print(tag)

        if tag not in chars_in_svg:
            continue

        # find all stroke names of this char
        sk_names = []
        for skn in strokes_names:
            if tag in skn:
                sk_names.append(skn)

        # find basic radicals
        basic_radicals_root_elems = child.findall("BASIC_RADICALS")
        if basic_radicals_root_elems:
            bs_elems = basic_radicals_root_elems[0].findall("BASIC_RADICAL")
            if bs_elems:
                for bs_item in bs_elems:
                    min_x1 = min_y1 = 1000000000
                    max_w = max_h = -1

                    max_x2 = max_y2 = -1

                    sk_root_elems = bs_item.findall("STROKES")
                    if sk_root_elems:
                        sk_elems = sk_root_elems[0].findall("STROKE")
                        for sk_item in sk_elems:
                            sk_id = sk_item.attrib["ID"].strip()
                            sk_nm = ""
                            for s in sk_names:
                                if "_{}.png".format(sk_id) in s:
                                    sk_nm = s
                                    break
                            # print(sk_nm)
                            sk_img_path = os.path.join(strokes_png_path, sk_nm)
                            # print(sk_img_path)
                            sk_img = cv2.imread(sk_img_path, 0)
                            rect = getSingleMaxBoundingBoxOfImage(sk_img)

                            sk_item.set("POSITION", str(rect))

                            min_x1 = min(min_x1, rect[0])
                            min_y1 = min(min_y1, rect[1])

                            max_x2 = max(max_x2, rect[0] + rect[2])
                            max_y2 = max(max_y2, rect[1] + rect[3])

                    bs_item.set(
                        "POSITION",
                        str((min_x1, min_y1, max_x2 - min_x1,
                             max_y2 - min_y1)))

    # pretty xml
    prettyXml(root, '\t', '\n')
    tree.write(save_path, encoding='utf-8')
Example #21
def find_similar_strokes(type, position, strokes_dataset):
    """
    Find similar strokes based on the type, position and strokes dataset.
    :param type:
    :param position:
    :param strokes_dataset:
    :return:
    """
    similar_strokes = []

    THRESHOLD_POSITION = 10
    THRESHOLD_SIZE = 15
    THRESHOLD_CONDITION = 1.88

    x = position[0]
    y = position[1]
    w = position[2]
    h = position[3]

    center_x = int(x + w / 2)
    center_y = int(y + h / 2)

    strokes_same_post_and_size = []
    strokes_same_size = []

    sorted_condition = {}
    for i in range(len(strokes_dataset[type])):
        stroke_obj = strokes_dataset[type][i]
        img_ = stroke_obj.image_bytes
        x0, y0, w0, h0 = getSingleMaxBoundingBoxOfImage(img_)

        center_x0 = int(x0 + w0 / 2)
        center_y0 = int(y0 + h0 / 2)

        # calculate the sorted condition
        val = abs((w - w0) / w0 * 1.) + abs((h - h0) / h0 * 1.) + abs(
            (w * h - w0 * h0) / (w0 * h0) * 1.)
        sorted_condition[i] = val

        # Rule 1: almost the same position and size
        if abs(center_x - center_x0) <= THRESHOLD_POSITION and abs(center_y - center_y0) <= THRESHOLD_POSITION and \
            abs(w - w0) <= THRESHOLD_SIZE and abs(h - h0) <= THRESHOLD_SIZE:
            strokes_same_post_and_size.append(stroke_obj.image_path)

    if len(strokes_same_post_and_size) > 0:
        # find the strokes with same position and size
        similar_strokes += strokes_same_post_and_size
        return similar_strokes
    else:
        print("Not find strokes with same position and size!")

    # Rule 2: Find strokes with same size
    if len(sorted_condition) == 0:
        print("Sorted condition is null!")

    sorted_condition_sorted = sorted(sorted_condition.items(),
                                     key=lambda x: x[1])
    print(sorted_condition_sorted)
    for s in sorted_condition_sorted:
        if s[1] > THRESHOLD_CONDITION:
            break
        strokes_same_size.append(strokes_dataset[type][s[0]].image_path)

    if len(strokes_same_size) > 0:
        similar_strokes += strokes_same_size
    else:
        print("Not find same size strokes")

    return similar_strokes
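
A worked sketch of the Rule 2 score used above, with made-up box sizes; candidates whose score stays at or below THRESHOLD_CONDITION (1.88) are kept as having roughly the same size.

w, h = 120, 20     # query stroke box (hypothetical)
w0, h0 = 100, 24   # candidate stroke box (hypothetical)
score = abs((w - w0) / w0) + abs((h - h0) / h0) + abs((w * h - w0 * h0) / (w0 * h0))
print(score)       # 0.2 + 0.1667 + 0.0, roughly 0.37, well under the 1.88 threshold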
Example #22
img = cv2.imread(path)

img_bit = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
_, img_bit = cv2.threshold(img_bit, 100, 255, cv2.THRESH_BINARY)

# remove small noise regions
boxes = getAllMiniBoundingBoxesOfImage(img_bit)
for box in boxes:
    if box[2] < 50 or box[3] < 50:
        img_bit[box[1]:box[1] + box[3], box[0]:box[0] + box[2]] = 255

# size and aspect ratio
img_rbg = cv2.cvtColor(img_bit, cv2.COLOR_GRAY2RGB)
img_layout = img_rbg.copy()

x, y, w, h = getSingleMaxBoundingBoxOfImage(img_bit)
img_rbg = cv2.rectangle(img_rbg, (x, y), (x + w, y + h), (0, 255, 0), 2)
img_cog = img_rbg.copy()

cv2.line(img_rbg, (x, y), (x + w, y + h), (0, 0, 255), 2)

# cog
cog_x, cog_y = getCenterOfGravity(img_bit)
print(cog_x, cog_y)

img_cog = cv2.circle(img_cog, (cog_x, cog_y), 2, (0, 0, 255), 10)

# skeleton
img_skel = cv2.imread("skel.png", 0)
_, img_skel = cv2.threshold(img_skel, 220, 255, cv2.THRESH_BINARY)
Example #23
def recompose_chars(
        chars_info_list,
        similar_chars,
        char_root_path="/Users/liupeng/Documents/Data/Calligraphy_database/Chars_775",
        size=400):
    """
    Recompose chars onto a 400 x 400 image (instead of 256 x 256) so that strokes do not fall outside a (256, 256) image
    :param chars_info_list:
    :param similar_chars:
    :param char_root_path:
    :return:
    """
    generated_images = []

    if len(similar_chars) == 0:
        return generated_images

    for sc in similar_chars:
        similar_basic_radicals, similar_strokes = sc

        ch_id = similar_chars.index(sc)
        ch_obj = chars_info_list[ch_id]
        ch_strokes_list = ch_obj.strokes

        print("process: ", ch_obj.tag)

        # get basic radicals info and his strokes images
        similar_bs_dict = {}
        for bs_id in similar_basic_radicals.keys():

            bs_obj = []
            for bs_ in similar_basic_radicals[bs_id]:

                bs_obj_dict = {}

                path_ = bs_["path"]
                strokes_id_ = bs_["strokes_id"]
                postion_ = bs_["position"]

                print("path: ", path_)
                print("strokes id: ", strokes_id_)

                char_tag = path_.split('/')[-1].replace(".png",
                                                        "").split("_")[0]
                char_path_ = os.path.join(char_root_path, char_tag, "strokes")
                stroke_img_names = [
                    f for f in os.listdir(char_path_) if ".png" in f
                ]

                stroke_img_dict = {}
                for s_id in strokes_id_:
                    for nm in stroke_img_names:
                        if "_" + str(s_id) + "." in nm:
                            stroke_img_dict[s_id] = os.path.join(
                                char_path_, nm)
                            break

                bs_obj_dict["path"] = path_
                bs_obj_dict["strokes"] = stroke_img_dict
                bs_obj_dict["position"] = postion_
                bs_obj.append(bs_obj_dict)

            if bs_obj != []:
                similar_bs_dict[bs_id] = bs_obj

        print(similar_bs_dict)

        # recompose basic radicals and strokes
        bk = createBlankGrayscaleImageWithSize((size, size))
        offset_base = int(abs(size - 256) / 2)

        # load basic radicals stroke images and center alignment
        for bs_id in similar_bs_dict.keys():
            for bs_obj in similar_bs_dict[bs_id]:

                bk_bs = createBlankGrayscaleImageWithSize(
                    (size, size)
                )  # merge strokes of this basic radical together to get single connected component

                stroke_objs = bs_obj["strokes"]
                post_ = bs_obj["position"]

                cent_x0 = int(post_[0] + post_[2] / 2)
                cent_y0 = int(post_[1] + post_[3] / 2)

                for s_id in stroke_objs.keys():
                    path_ = stroke_objs[s_id]

                    img_ = cv2.imread(path_, 0)

                    for x in range(img_.shape[0]):
                        for y in range(img_.shape[1]):
                            if img_[x][y] == 0:
                                bk_bs[x + offset_base][y + offset_base] = 0

                x, y, w, h = getSingleMaxBoundingBoxOfImage(bk_bs)

                cent_x = int(x + w / 2)
                cent_y = int(y + h / 2)

                offset_x = cent_x0 - cent_x + offset_base
                offset_y = cent_y0 - cent_y + offset_base

                for x in range(bk_bs.shape[0]):
                    for y in range(bk_bs.shape[1]):
                        if bk_bs[x][y] == 0:
                            if x+offset_x < 0 or x+offset_x >= 400 or y+offset_y < 0 or y+offset_y >= 400 or \
                                    x < 0 or x >= 400 or y < 0 or y >= 400:
                                continue
                            bk[x + offset_x][y + offset_y] = bk_bs[x][y]

                break

        # load stroke images
        for s_id in similar_strokes.keys():

            # get template stroke position
            print(s_id)

            real_post = None
            for stk_obj in ch_strokes_list:
                if s_id == stk_obj.id:
                    real_post = stk_obj.position
                    break

            if real_post is None:
                print("Not find jianti_temp position!")
                continue

            cent_x0 = int(real_post[0] + real_post[2] / 2)
            cent_y0 = int(real_post[1] + real_post[3] / 2)

            # path_ = similar_strokes[s_id][0]   # use the most match stroke
            path_ = find_most_match_stroke(real_post, similar_strokes[s_id])

            img_ = cv2.imread(path_, 0)
            rect_ = getSingleMaxBoundingBoxOfImage(img_)

            for x_ in range(rect_[2]):
                for y_ in range(rect_[3]):
                    if img_[rect_[1] + y_][rect_[0] + x_] == 0:
                        bk[cent_y0 - int(rect_[3] / 2) + 72 + y_][cent_x0 - int(rect_[2] / 2) + 72 + x_] = \
                            img_[rect_[1] + y_][rect_[0] + x_]

        generated_images.append(bk)
    return generated_images
Example #24
def main():
    temp_path = "/Users/liupeng/Documents/PythonProjects/templates/templates/ben/char/ben.png"
    targ_path = "/Users/liupeng/Documents/PythonProjects/templates/templates_comparison/ben/char/ben.png"

    temp_img = cv2.imread(temp_path, 0)
    targ_img = cv2.imread(targ_path, 0)

    _, temp_img = cv2.threshold(temp_img, 127, 255, cv2.THRESH_BINARY)
    _, targ_img = cv2.threshold(targ_img, 127, 255, cv2.THRESH_BINARY)

    # resize two images of template and target.
    temp_img, targ_img = resizeImages(temp_img, targ_img)

    temp_img = np.array(temp_img, dtype=np.uint8)
    targ_img = np.array(targ_img, dtype=np.uint8)

    # bounding box of template and target images
    temp_x, temp_y, temp_w, temp_h = getSingleMaxBoundingBoxOfImage(temp_img)
    targ_x, targ_y, targ_w, targ_h = getSingleMaxBoundingBoxOfImage(targ_img)

    temp_ct_x = temp_x + int(temp_w / 2.)
    temp_ct_y = temp_y + int(temp_h / 2.)

    targ_ct_x = targ_x + int(targ_w / 2.)
    targ_ct_y = targ_y + int(targ_h / 2.)

    # new square width
    square_width = max(temp_w, temp_h, targ_w, targ_h)

    # using new square to crop all effective area in template and target images
    # template image
    if temp_ct_x - int(square_width / 2.) <= 0:
        temp_x = 0
    else:
        temp_x = temp_ct_x - int(square_width / 2.)

    if temp_ct_y - int(square_width / 2.) <= 0:
        temp_y = 0
    else:
        temp_y = temp_ct_y - int(square_width / 2.)

    if temp_ct_x + int(square_width / 2.) >= temp_img.shape[1]:
        temp_w = temp_img.shape[1] - temp_x
    else:
        temp_w = square_width

    if temp_ct_y + int(square_width / 2.) >= temp_img.shape[0]:
        temp_h = temp_img.shape[0] - temp_y
    else:
        temp_h = square_width
    # target image
    if targ_ct_x - int(square_width / 2.) <= 0:
        targ_x = 0
    else:
        targ_x = targ_ct_x - int(square_width / 2.)

    if targ_ct_y - int(square_width / 2.) <= 0:
        targ_y = 0
    else:
        targ_y = targ_ct_y - int(square_width / 2.)

    if targ_ct_x + int(square_width / 2.) >= targ_img.shape[1]:
        targ_w = targ_img.shape[1] - targ_x
    else:
        targ_w = square_width

    if targ_ct_y + int(square_width / 2.) >= targ_img.shape[0]:
        targ_h = targ_img.shape[0] - targ_y
    else:
        targ_h = square_width

    # crop effective areas of the template and target images
    temp_reg = temp_img[temp_y:temp_y + temp_h, temp_x:temp_x + temp_w]
    targ_reg = targ_img[targ_y:targ_y + targ_h, targ_x:targ_x + targ_w]

    shape_similarity = calculateShapeSimilarity(temp_reg, targ_reg)
    simi = calculateShapeSimilarity(temp_reg, temp_reg)

    print("Shape similarity: %f" % shape_similarity)
    print("Same image similarity: %f" % simi)

    cv2.imshow("jianti_temp", temp_reg)
    cv2.imshow("targ", targ_reg)

    cv2.waitKey(0)
    cv2.destroyAllWindows()
Example #25
def add_basic_radical_info_to_xml(root, path):
    if path == '':
        return
    # list all folders
    filenames = [f for f in os.listdir(path) if '.' not in f]
    print('file names len: ', len(filenames))

    for i in range(len(root)):
        element = root[i]
        tag = element.attrib["TAG"].strip()
        if len(tag) > 1:
            continue
        if tag not in filenames:
            continue

        if element.findall("BASIC_RADICALS"):
            continue

        # add basic radicals element
        basic_radicals_elem = ET.Element('BASIC_RADICALS')
        br_names = [
            f for f in os.listdir(os.path.join(path, tag, 'basic radicals'))
            if '.png' in f
        ]

        for bn in br_names:

            b_radical_elem = ET.Element('BASIC_RADICAL')

            bn_folder = bn.replace('.png', '')
            stroke_names = [
                f for f in os.listdir(
                    os.path.join(path, tag, 'basic radicals', bn_folder))
                if '.png' in f
            ]
            stroke_names = sorted(stroke_names)

            # id
            id = bn_folder.split('_')[-2]
            b_tag = bn_folder.split('_')[-1]
            # position
            r_img = cv2.imread(os.path.join(path, tag, 'basic radicals', bn),
                               0)
            r_post = getSingleMaxBoundingBoxOfImage(r_img)
            r_post = str([r_post[0], r_post[1], r_post[2], r_post[3]])

            b_radical_elem.set('ID', id)
            b_radical_elem.set('TAG', b_tag)
            b_radical_elem.set('POSITION', r_post)

            # strokes elements
            strokes_elemt = ET.Element('STROKES')
            for sn in stroke_names:
                stroke_elemt = ET.Element('STROKE')

                s_id = sn.replace('.png', '').split('_')[-1]

                stroke_elemt.set('TAG', s_id)

                s_img = cv2.imread(
                    os.path.join(path, tag, 'basic radicals', bn_folder, sn),
                    0)
                s_post = getSingleMaxBoundingBoxOfImage(s_img)
                stroke_elemt.text = str(
                    [s_post[0], s_post[1], s_post[2], s_post[3]])

                strokes_elemt.append(stroke_elemt)

            b_radical_elem.append(strokes_elemt)

            basic_radicals_elem.append(b_radical_elem)
        element.append(basic_radicals_elem)

    return root
Example #26
# load template strokes
temp_strokes = []
for i in range(1, 10):
    temp_img_path = os.path.join(temp_path, ("stroke_%d.png" % i))
    temp_img = cv2.imread(temp_img_path, 0)
    temp_strokes.append(temp_img)
print("temp stroke num: ", len(temp_strokes))

# template char image
temp_char_img = cv2.imread(temp_char_path, 0)

# load gong copy image
src_img = cv2.imread(path, 0)

# resize src image and template image
temp_minx, temp_miny, temp_minw, temp_minh = getSingleMaxBoundingBoxOfImage(
    temp_char_img)
src_minx, src_miny, src_minw, src_minh = getSingleMaxBoundingBoxOfImage(
    src_img)

temp_box_width = max(temp_minw, temp_minh)
src_box_width = max(src_minw, src_minh)

print("tempbox w:", temp_box_width, "src box w:", src_box_width)

temp_img_box = temp_char_img[temp_miny:temp_miny + temp_minh,
                             temp_minx:temp_minx + temp_minw]
src_img_box = src_img[src_miny:src_miny + src_minh,
                      src_minx:src_minx + src_minw]
print(temp_img_box.shape)
print(src_img_box.shape)
Example #27
if __name__ == '__main__':  # guard assumed; this snippet is the body of a script's main block
    # main()
    src_path = "../templates/ben.png"
    temp_base_path = "../templates/stroke_"

    src_img = cv2.imread(src_path, 0)
    # temp_img = cv2.imread(temp_path, 0)
    # w, h = temp_img.shape

    _, src_img = cv2.threshold(src_img, 127, 255, cv2.THRESH_BINARY)
    # _, temp_img = cv2.threshold(temp_img, 127, 255, cv2.THRESH_BINARY)

    cv2.imshow("src", src_img)
    # cv2.imshow("jianti_temp", temp_img)

    # bounding box
    src_x, src_y, src_w, src_h = getSingleMaxBoundingBoxOfImage(src_img)
    # temp_x, temp_y, temp_w, temp_h = calculateBoundingBox(temp_img)

    src_window = src_img[src_y:src_y + src_h, src_x:src_x + src_w]
    # temp_window = temp_img[temp_y:temp_y+temp_h, temp_x:temp_x+temp_w]

    # temp_path = temp_base_path + "2_2.png"
    # temp_img = cv2.imread(temp_path, 0)
    # _, temp_img = cv2.threshold(temp_img, 127, 255, cv2.THRESH_BINARY)
    #
    # temp_x, temp_y, temp_w, temp_h = calculateBoundingBox(temp_img)
    # temp_window = temp_img[temp_y:temp_y + temp_h, temp_x:temp_x + temp_w]
    #
    # src_part = src_window[185:185+temp_h, 306:306+temp_w]
    #
    # src_part1 = src_window[455:455+temp_h, 165:165+temp_w]
Example #28
def stroke_recompose(input):
    # remove invalid characters in input
    input = input.replace(' ', '').replace('\n', '').replace('\t', '')

    # retrieve character info from the xml
    xml_path = "../../../Data/Characters/radical_add_stroke_position_similar_structure_add_stroke_order.xml"
    # load radical data
    tree = ET.parse(xml_path)
    if tree is None:
        print("tree is none!")
        return

    root = tree.getroot()
    print("root len:", len(root))

    char_info_list = []
    for ins in input:
        tag = ""
        u_code = ""
        stroke_orders = []
        stroke_position = []

        for child in root:
            ch = child.attrib["TAG"]
            if ch == ins:
                tag = ch
                u_code = child.attrib["ID"]

                # stroke order
                stroke_order_elems = child.findall('STROKE_ORDER')
                if stroke_order_elems:
                    s_order = stroke_order_elems[0].text
                    stroke_orders = s_order.split("|")
                else:
                    print("not find stroke order of ", tag)

                # stroke position
                s_post_elems = child.findall('STROKES_POSITION')
                if s_post_elems:
                    ss_post_elems = s_post_elems[0].findall('STROKE_POSITION')
                    if ss_post_elems:
                        for elem in ss_post_elems:
                            stroke_position.append(ast.literal_eval(elem.text))
                else:
                    print("Not find storke position of ", tag)

                break

        if tag == "" and u_code == "":
            print("Not find this char: ", ins)
            continue
        if len(stroke_orders) != len(stroke_position):
            print(tag, "Stroke order and position are not same length!")
            continue

        # create ChineseCharacter object
        cc_obj = ChineseCharacter(tag, u_code, stroke_orders, stroke_position)
        char_info_list.append(cc_obj)

    # search for the template stroke from the library
    char_target_strokes_list = []
    for cc in char_info_list:
        print(cc.tag, cc.u_code, cc.stroke_orders, cc.stroke_position)
        target_strokes = []
        for i in range(len(cc.stroke_orders)):
            targ_strokes_ = query_taget_strokes(cc.stroke_orders[i],
                                                cc.stroke_position[i])
            target_strokes.append(targ_strokes_)
            print('target stroke num: ', len(targ_strokes_))
            if len(targ_strokes_) == 0:
                print(cc.tag, "stroke ", i, "not found target strokes")

        char_target_strokes_list.append(target_strokes)
    print(char_target_strokes_list)

    # recompose strokes
    if len(char_target_strokes_list) == len(char_info_list):
        print("img and chars are same length")
    else:
        print("img and chars are not same length")

    save_path = "../../../Data/Stroke_recomposed_tool/generated_results"
    for i in range(len(char_info_list)):
        ch_obj = char_info_list[i]
        ch_stroke_imgs = char_target_strokes_list[i]
        bk = createBlankGrayscaleImageWithSize((400, 400))

        # merge all strokes with center alignment
        if len(ch_stroke_imgs) == len(ch_obj.stroke_orders):
            print("imgs of stroke are same length")

        for j in range(len(ch_stroke_imgs)):
            if len(ch_stroke_imgs[j]) > 0:
                img_path = ch_stroke_imgs[j][0]
                img_ = cv2.imread(img_path, 0)
                img_ = cv2.resize(img_, (256, 256))
                rect_ = getSingleMaxBoundingBoxOfImage(img_)

                cent_x0 = int(ch_obj.stroke_position[j][0] +
                              ch_obj.stroke_position[j][2] / 2)
                cent_y0 = int(ch_obj.stroke_position[j][1] +
                              ch_obj.stroke_position[j][3] / 2)

                # only copy the valid pixels
                for x_ in range(rect_[2]):
                    for y_ in range(rect_[3]):
                        if img_[rect_[1] + y_][rect_[0] + x_] == 0:
                            bk[cent_y0 - int(rect_[3] / 2) + 72 +
                               y_][cent_x0 - int(rect_[2] / 2) + 72 +
                                   x_] = img_[rect_[1] + y_][rect_[0] + x_]

        cv2.imwrite(
            os.path.join(save_path, ch_obj.tag + "_" + ch_obj.u_code + ".png"),
            bk)
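
A usage sketch for the recompose pipeline above; it assumes the XML file and the generated_results directory exist at the relative paths hard-coded in the function, and the input string is arbitrary.

stroke_recompose("本 体\n")  # whitespace is stripped; one recomposed image is written per character found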
Example #29
def query_target_strokes_by_postion_size(position, stroke_obj_list):
    if position is None or stroke_obj_list is None or len(
            stroke_obj_list) == 0:
        print("Position or stroke object list should not be None!")
        return

    target_strokes_path = []

    # find target strokes with almost same position and size
    center_x0 = int(position[0] + position[2] / 2)
    center_y0 = int(position[1] + position[3] / 2)

    w0 = position[2]
    h0 = position[3]

    strokes_same_post_and_size = []  # almost same position and size
    strokes_same_size = []  # almost same only size

    THRESHOLD_POSITION = 10
    THRESHOLD_SIZE = 15
    THRESHOLD_CONDITION = 1.88

    sorted_condition = {}
    for i in range(len(stroke_obj_list)):
        stroke_obj = stroke_obj_list[i]
        img_ = stroke_obj.image_bytes

        if img_ is None:
            print("stroke obj is None")
            continue

        rect_ = getSingleMaxBoundingBoxOfImage(img_)

        center_x = int(rect_[0] + rect_[2] / 2)
        center_y = int(rect_[1] + rect_[3] / 2)

        w = rect_[2]
        h = rect_[3]

        # calculate the sorted condition
        val = abs((w - w0) / w0 * 1.) + abs((h - h0) / h0 * 1.) + abs(
            (w * h - w0 * h0) / (w0 * h0) * 1.)
        sorted_condition[i] = val

        # Rule 1: almost the same position and size
        if abs(center_x - center_x0) <= THRESHOLD_POSITION and abs(center_y - center_y0) <= THRESHOLD_POSITION and \
                abs(w - w0) <= THRESHOLD_SIZE and abs(h - h0) <= THRESHOLD_SIZE:
            strokes_same_post_and_size.append(stroke_obj.image_path)
            continue

    if len(strokes_same_post_and_size) > 0:
        target_strokes_path += strokes_same_post_and_size
        return target_strokes_path
    else:
        print("Not find same postion and size strokes")

    # Rule 2: almost the same size (rank remaining candidates by the sorted condition)
    if len(sorted_condition) == 0:
        print("Sorted condition is null!")

    sorted_condition_sorted = sorted(sorted_condition.items(),
                                     key=lambda x: x[1])

    for s in sorted_condition_sorted:
        if s[1] > THRESHOLD_CONDITION:
            break
        strokes_same_size.append(stroke_obj_list[s[0]].image_path)

    # return target strokes
    if len(strokes_same_size) > 0:
        target_strokes_path += strokes_same_size
        # return target_strokes_path
    else:
        print("Not find same size strokes")
    return target_strokes_path
Example #30
def query_taget_strokes(
        type,
        position,
        library_path="../../../Data/Stroke_recomposed_tool/strokes dataset"):
    """
    Query target strokes from library based on the stroke type and position(x, y, w, h).
    :param type:
    :param position:
    :param library_path:
    :return:
    """
    if type == "":
        print("type should not be None!")
        return

    s_time = timeit.default_timer()
    type_path = os.path.join(library_path, type)
    file_names = [f for f in os.listdir(type_path) if ".png" in f]
    print("lib file num: ", len(file_names))
    print('List stroke image file name time: ',
          timeit.default_timer() - s_time)

    # search target strokes with position info (x, y, w, h) and return the target image paths
    target_strokes_path = []
    s_time = timeit.default_timer()
    # stroke images
    all_stroke_imgs = []
    for fn in file_names:
        img_path = os.path.join(type_path, fn)
        img = cv2.imread(img_path, 0)
        img = cv2.resize(img, (256, 256))
        all_stroke_imgs.append(img)
    print('all stroke images len: ', len(all_stroke_imgs))
    print('list all stroke image of one target-type time: ',
          timeit.default_timer() - s_time)

    # find target strokes with almost same position and size
    center_x0 = int(position[0] + position[2] / 2)
    center_y0 = int(position[1] + position[3] / 2)

    s_time = timeit.default_timer()

    w0 = position[2]
    h0 = position[3]

    strokes_same_post_and_size = []  # almost same position and size
    strokes_same_size = []  # almost same only size

    THRESHOLD_POSITION = 10
    THRESHOLD_SIZE = 10

    for i in range(len(all_stroke_imgs)):
        img_ = all_stroke_imgs[i]
        rect_ = getSingleMaxBoundingBoxOfImage(img_)
        center_x = int(rect_[0] + rect_[2] / 2)
        center_y = int(rect_[1] + rect_[3] / 2)

        w = rect_[2]
        h = rect_[3]

        # Rule 1: almost the same position and size
        if abs(center_x - center_x0) <= THRESHOLD_POSITION and abs(center_y - center_y0) <= THRESHOLD_POSITION and \
            abs(w - w0) <= THRESHOLD_SIZE and abs(h - h0) <= THRESHOLD_SIZE:
            strokes_same_post_and_size.append(
                os.path.join(type_path, file_names[i]))
            continue

        # Rule 2: almost the same size
        if abs(w - w0) <= THRESHOLD_SIZE and abs(h - h0) <= THRESHOLD_SIZE:
            strokes_same_size.append(os.path.join(type_path, file_names[i]))
            continue

    print('find target strokes time: ', timeit.default_timer() - s_time)

    # return target strokes
    if len(strokes_same_post_and_size) > 0:
        target_strokes_path += strokes_same_post_and_size
        return target_strokes_path
    else:
        print("Not find same postion and size strokes")

    if len(strokes_same_size) > 0:
        target_strokes_path += strokes_same_size
        return target_strokes_path
    else:
        print("Not find same size strokes")

    print('Not find target stroke')
    return target_strokes_path
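
A usage sketch for the stroke query above; the stroke type and (x, y, w, h) position are hypothetical, and the default library_path must point at an existing "strokes dataset" folder laid out as one sub-folder per stroke type.

matches = query_taget_strokes("heng", (30, 40, 150, 25))  # hypothetical type and position
print(len(matches), "candidate stroke image paths")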