Example #1
def merge_all_generation_images():
    img_path = "../../../Data/1000 generated results"

    filenames = [f for f in os.listdir(img_path) if ".png" in f]
    print("image num: ", len(filenames))

    bk = createBlankGrayscaleImageWithSize((4000, 4000))

    for i in range(len(filenames[:1000])):
        page_id = math.floor(i / 100)
        print(page_id)

        id = i - page_id * 100
        img_ = cv2.imread(os.path.join(img_path, filenames[i]), 0)

        row = math.floor(id / 10)
        col = id % 10
        bk[row * 400:(row + 1) * 400, col * 400:(col + 1) * 400] = img_

        if (i + 1) % 100 == 0:
            cv2.imwrite(
                "../../../Data/1000 merged result/all_merged_%d.png" % page_id,
                bk)

            bk = createBlankGrayscaleImageWithSize((4000, 4000))

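All of the examples on this page rely on the helper createBlankGrayscaleImageWithSize, which is not shown in the snippets. A minimal sketch of what it presumably does, assuming a white (255) background in a single-channel uint8 array, which matches the examples writing stroke pixels as 0:

import numpy as np

def createBlankGrayscaleImageWithSize(size):
    # Assumed behavior: blank, all-white grayscale image of the given
    # (height, width) size; stroke pixels are written as 0 (black) later.
    return np.full(size, 255, dtype=np.uint8)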
Example #2
def stroke_recompose(char_info_list, char_target_strokes_list):
    generated_result = []
    generated_strokes_result = []
    generated_result_index_list = []

    for i in range(len(char_info_list)):
        ch_obj = char_info_list[i]
        ch_stroke_imgs = char_target_strokes_list[i]
        bk = createBlankGrayscaleImageWithSize((400, 400))

        # strokes template
        strokes_temp_imgs = []
        stroke_img_index = []

        # merge all strokes with center alignment
        if len(ch_stroke_imgs) == len(ch_obj.stroke_orders):
            print("imgs of stroke are same length")

        for j in range(len(ch_stroke_imgs)):
            if len(ch_stroke_imgs[j]) > 0:
                img_path = ch_stroke_imgs[j][0]
                stroke_img_index.append(0)
                img_ = cv2.imread(img_path, 0)
                img_ = cv2.resize(img_, (256, 256))
                rect_ = getSingleMaxBoundingBoxOfImage(img_)

                # resize stroke template image
                s_temp_img = createBlankGrayscaleImageWithSize((400, 400))
                # s_temp_img[72: 72+256, 72: 72+256] = img_

                cent_x0 = int(ch_obj.stroke_position[j][0] +
                              ch_obj.stroke_position[j][2] / 2)
                cent_y0 = int(ch_obj.stroke_position[j][1] +
                              ch_obj.stroke_position[j][3] / 2)

                # only copy the valid pixels
                for x_ in range(rect_[2]):
                    for y_ in range(rect_[3]):
                        if img_[rect_[1] + y_][rect_[0] + x_] == 0:
                            bk[cent_y0 - int(rect_[3] / 2) + 72 + y_][cent_x0 - int(rect_[2] / 2) + 72 + x_] = \
                            img_[rect_[1] + y_][rect_[0] + x_]
                            s_temp_img[cent_y0 - int(rect_[3] / 2) + 72 + y_][cent_x0 - int(rect_[2] / 2) + 72 + x_] = \
                                img_[rect_[1] + y_][rect_[0] + x_]
                strokes_temp_imgs.append(s_temp_img)
        generated_result.append(bk)
        generated_strokes_result.append(strokes_temp_imgs)
        generated_result_index_list.append(stroke_img_index)

    return generated_result, generated_strokes_result, generated_result_index_list
Example #3
def render_generated_image(char_obj, select_strokes_dict, size=400):
    image = createBlankGrayscaleImageWithSize((size, size))
    offset_base = int(abs(size - 256) / 2)

    for key in select_strokes_dict.keys():

        print(key, " ", len(select_strokes_dict[key]))
        print(type(key))

        # get real position of stroke
        real_post = char_obj.strokes[int(key)].position

        cent_x0 = int(real_post[0] + real_post[2] / 2)
        cent_y0 = int(real_post[1] + real_post[3] / 2)

        # get position of similar stroke
        stroke_path = select_strokes_dict[key]
        print(stroke_path)
        stroke_img = cv2.imread(stroke_path, 0)
        stroke_rect = getSingleMaxBoundingBoxOfImage(stroke_img)
        if stroke_rect is None:
            continue

        for x in range(stroke_rect[2]):
            for y in range(stroke_rect[3]):
                if stroke_img[stroke_rect[1] + y][stroke_rect[0] + x] == 0:
                    image[cent_y0 - int(stroke_rect[3] / 2) + offset_base + y][
                        cent_x0 - int(stroke_rect[2] / 2) + offset_base + x] = \
                        stroke_img[stroke_rect[1] + y][stroke_rect[0] + x]

    return image
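As Examples 16 and 17 below show, render_generated_image is called with a character object and a dict mapping stroke indices to template stroke image paths. A minimal, hypothetical call (the paths and char_obj are placeholders, not real data):

# hypothetical selection: stroke index -> template stroke image path
select_strokes = {0: "strokes/xin_0.png", 1: "strokes/xin_1.png"}
generated = render_generated_image(char_obj, select_strokes, size=400)
cv2.imwrite("generated.png", generated)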
Example #4
def merge_radical_up_down(strokes_path):
    if not os.path.exists(strokes_path):
        print("stroke path not exist")
        return

    # get all stroke images
    stroke_names_ = [f for f in os.listdir(strokes_path) if ".png" in f]
    print("stroke : ", stroke_names_)

    # sort stroke image names by their index in the file name
    stroke_names = []
    for i in range(len(stroke_names_)):
        for sn in stroke_names_:
            if "_{}.png".format(i) in sn:
                stroke_names.append(sn)
                break
    print(stroke_names)

    # up and down list
    up_strokes_list = []
    down_strokes_list = []

    for sn in stroke_names:
        sk_img_path = os.path.join(strokes_path, sn)
        sk_img = cv2.imread(sk_img_path, 0)
        x, y, w, h = getSingleMaxBoundingBoxOfImage(sk_img)
        cent_x = x + int(w / 2)
        cent_y = y + int(h / 2)

        if cent_y > 120:
            down_strokes_list.append(sk_img_path)
        else:
            up_strokes_list.append(sk_img_path)

    # check images
    bk_up = createBlankGrayscaleImageWithSize((256, 256))
    bk_down = createBlankGrayscaleImageWithSize((256, 256))

    for img_path in up_strokes_list:
        img = cv2.imread(img_path, 0)
        bk_up = merge_two_images(bk_up, img)

    for img_path in down_strokes_list:
        img = cv2.imread(img_path, 0)
        bk_down = merge_two_images(bk_down, img)

    return (bk_up, bk_down)
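Examples 4, 6 and 9 also depend on a merge_two_images helper that is not included here. A plausible sketch, assuming black-on-white grayscale images, where the element-wise minimum keeps the stroke (0) pixels of both inputs:

import numpy as np

def merge_two_images(img1, img2):
    # Assumed behavior, inferred from the calls above: overlay the black
    # stroke pixels of img2 onto img1 (both same-sized grayscale images).
    return np.minimum(img1, img2)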
Example #5
    def handle_generated_listwidget_item_click(self, item):
        print(self.generated_results_listWidget.currentRow())

        self.char_id = self.generated_results_listWidget.currentRow()
        self.char = self.input_chars[self.char_id]

        self.ch_stroke_post_dict = get_strokes_position_dict(self.xml_root, self.char)

        self.bk = createBlankGrayscaleImageWithSize(self.size)
        self.ch_similar_sk_id_img_name_dict = self.chars_sk_id_to_name_dict_list[self.char_id]
        self.bk, self.sk_bks = merge_select_stroke_images(self.bk, self.ch_stroke_post_dict, self.ch_similar_sk_id_img_name_dict,
                                                self.stroke_image_path)

        qimg = QImage(self.bk.data, self.bk.shape[1], self.bk.shape[0], self.bk.shape[1], QImage.Format_Indexed8)
        qimg_pix = QPixmap.fromImage(qimg)

        self.grayscale_scene.addPixmap(qimg_pix)
        self.grayscale_scene.update()

        # update the similar basic radicals tree widget
        if len(self.chars_similar_basic_radicals_list) > self.char_id:
            ch_similar_bs_dict = self.chars_similar_basic_radicals_list[self.char_id]
            print(ch_similar_bs_dict)
            self.handle_char_similar_bs_treeview_update(self.similar_bs_treeView, ch_similar_bs_dict)
            self.similar_bs_treeView.expandAll()

        # update the similar strokes tree widget
        if len(self.chars_similar_strokes_list) > self.char_id:
            ch_similar_sk_dict = self.chars_similar_strokes_list[self.char_id]
            print(ch_similar_sk_dict)
            self.handle_char_similar_sk_treeview_update(self.similar_stroke_treeView, ch_similar_sk_dict)
            self.similar_stroke_treeView.expandAll()


        real_img = self.find_char_png(self.char)
        print("select char {}: ".format(self.char))

        real_qimg = QImage(real_img.data, real_img.shape[1], real_img.shape[0], real_img.shape[1], QImage.Format_Indexed8)
        real_qimg_pix = QPixmap.fromImage(real_qimg)
        self.src_scene.addPixmap(real_qimg_pix)
        self.src_scene.update()

        # update similar chars with same bs tag of this char
        ch_bs_tags_dict, similar_chars_with_same_bs_tags_dict = get_similar_chars_with_same_struct_and_bs_tags(self.xml_root, self.char)
        print(similar_chars_with_same_bs_tags_dict)

        model = QStandardItemModel(self.similar_chars_bs_treeView)
        model.setColumnCount(1)

        for bs_id in similar_chars_with_same_bs_tags_dict.keys():
            bs_root_item = QStandardItem("Basic radical {}: {}".format(bs_id, ch_bs_tags_dict[bs_id]))
            bs_item = QStandardItem(str(similar_chars_with_same_bs_tags_dict[bs_id]))
            bs_root_item.appendRow(bs_item)
            model.appendRow(bs_root_item)

        self.similar_chars_bs_treeView.setModel(model)
        self.similar_chars_bs_treeView.expandAll()
Example #6
def merge_radical_left_right(img):
    if img is None:
        print("img is none!")
        return

    rects = getAllMiniBoundingBoxesOfImage(img)

    rect_imgs = getConnectedComponentsOfGrayScale(img)
    print("rect img num: ", len(rect_imgs))
    print(rects)

    left_side_list = []
    right_side_list = []
    for i in range(len(rects)):
        x, y, w, h = rects[i]

        cent_x = x + int(w / 2)
        cent_y = y + int(h / 2)

        if cent_x > 127:
            right_side_list.append(rects[i])
        else:
            left_side_list.append(rects[i])

        print(cent_x, cent_y)

    print(left_side_list)
    print(right_side_list)

    # check images
    bk_left = createBlankGrayscaleImageWithSize(img.shape)
    bk_right = createBlankGrayscaleImageWithSize(img.shape)

    for rimg in rect_imgs:
        rect = getSingleMaxBoundingBoxOfImage(rimg)
        if rect in left_side_list:
            bk_left = merge_two_images(bk_left, rimg)
        elif rect in right_side_list:
            bk_right = merge_two_images(bk_right, rimg)

    return (bk_left, bk_right)
Example #7
def merge_stroke_images(bk, post, similar_stroke_path):
    if bk is None or post is None or similar_stroke_path == "":
        return

    img = cv2.imread(similar_stroke_path, 0)
    if img is None:
        print("not open image {}".format(similar_stroke_path))
        return

    stroke_bk = createBlankGrayscaleImageWithSize(bk.shape)

    offset_base = int((bk.shape[0] - img.shape[0]) / 2)

    _, img = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)

    rect = getSingleMaxBoundingBoxOfImage(img)
    if rect is None:
        print("not get rect of stroke image")
        return

    # merge bk and stroke image with center align
    cent_x0 = int(post[0] + post[2] / 2)
    cent_y0 = int(post[1] + post[3] / 2)

    cent_x = int(rect[0] + rect[2] / 2)
    cent_y = int(rect[1] + rect[3] / 2)

    offset_x = cent_x - cent_x0
    offset_y = cent_y - cent_y0

    new_img = createBlankGrayscaleImageWithSize(bk.shape)
    new_img[rect[1]-offset_y+offset_base: rect[1]-offset_y+offset_base+rect[3], rect[0]-offset_x+offset_base: \
                        rect[0]-offset_x+offset_base+rect[2]] = img[rect[1]:rect[1]+rect[3], rect[0]:rect[0]+rect[2]]

    for x in range(new_img.shape[0]):
        for y in range(new_img.shape[1]):
            if new_img[x][y] == 0:
                bk[x][y] = 0
                stroke_bk[x][y] = 0
    return bk, stroke_bk.copy()
Example #8
    def render_generated_result_image(self):

        self.ch_stroke_position_dict = get_strokes_position_dict(self.xml_root, self.char)

        self.bk = createBlankGrayscaleImageWithSize(self.size)
        self.bk, self.sk_bks = merge_select_stroke_images(self.bk, self.ch_stroke_position_dict, self.ch_similar_sk_id_img_name_dict,
                                        self.stroke_image_path)

        qimg = QImage(self.bk.data, self.bk.shape[1], self.bk.shape[0], self.bk.shape[1], QImage.Format_Indexed8)
        qimg_pix = QPixmap.fromImage(qimg)

        self.grayscale_scene.addPixmap(qimg_pix)
        self.grayscale_scene.update()
Example #9
def merge_strokes_to_basic_radical_img(ch,
                                       char_bs_tag_stroke_ids_dict_list,
                                       stroke_img_path=""):
    if char_bs_tag_stroke_ids_dict_list is None or ch == "":
        print("char_bs_tag_stroke_ids_dict_list is none")
        return

    ch_bs_images = []

    # get all stroke images of this char
    all_stroke_img_names = [
        f for f in os.listdir(stroke_img_path) if ".png" in f
    ]
    ch_stroke_img_names = []
    for name in all_stroke_img_names:
        if ch in name:
            ch_stroke_img_names.append(name)

    # merge stroke images based on the dict list : [{'心': ['9', '10', '11', '12']}, {'相': ['0', '1', '2', '3', '4', '5', '6', '7', '8']}, {'目': ['4', '5', '6', '7', '8']}, {'木': ['0', '1', '2', '3']}]
    for bs_sk_ids_dict in char_bs_tag_stroke_ids_dict_list:
        if len(bs_sk_ids_dict) == 0:
            continue
        bs_tag = list(bs_sk_ids_dict.keys())[0]
        bs_sk_ids = bs_sk_ids_dict[bs_tag]
        bs_sk_ids = [int(ids) for ids in bs_sk_ids]

        bs_sk_img_names = []
        for sk_id in bs_sk_ids:
            for name in ch_stroke_img_names:
                if "_{}.png".format(sk_id) in name:
                    bs_sk_img_names.append(name)

        # merge stroke images together
        new_bs = createBlankGrayscaleImageWithSize((256, 256))
        for name in bs_sk_img_names:
            img_path = os.path.join(stroke_img_path, name)
            img = cv2.imread(img_path, 0)

            new_bs = merge_two_images(new_bs, img)

            del img
        ch_bs_images.append(new_bs.copy())

    return ch_bs_images
Example #10
def img_align_center(img):
    if img is None:
        print("img is none!")
        return

    x, y, w, h = getSingleMaxBoundingBoxOfImage(img)
    cent_x = x + int(w / 2)
    cent_y = y + int(h / 2)

    bk = createBlankGrayscaleImageWithSize(img.shape)

    offset_x = 128 - cent_x
    offset_y = 128 - cent_y

    for y in range(img.shape[0]):
        for x in range(img.shape[1]):
            if img[y][x] == 0:
                bk[y+offset_y][x+offset_x] = img[y][x]

    return bk
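For reference, the same centering can be written with array slicing instead of the per-pixel loop; a sketch assuming the 256 x 256 canvas implied by the hard-coded center of 128 and a thresholded black-on-white image (copying the whole bounding box is equivalent here because its background is white):

import numpy as np

def img_align_center_sliced(img):
    # Shift the stroke's bounding box so that its center lands at (128, 128).
    if img is None:
        return None
    x, y, w, h = getSingleMaxBoundingBoxOfImage(img)
    offset_x = 128 - (x + w // 2)
    offset_y = 128 - (y + h // 2)
    bk = np.full(img.shape, 255, dtype=np.uint8)
    bk[y + offset_y: y + offset_y + h, x + offset_x: x + offset_x + w] = \
        img[y: y + h, x: x + w]
    return bk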
Example #11
def recompose_strokes_to_basic_radical_png():
    char_names = [f for f in os.listdir(char_1000_path) if '.' not in f]
    print(len(char_names))

    for cn in char_names:
        print(cn)
        radical_names = [
            f for f in os.listdir(
                os.path.join(char_1000_path, cn, 'basic radicals'))
            if '.' not in f
        ]
        print(radical_names)

        for rn in radical_names:

            bk_ = createBlankGrayscaleImageWithSize((256, 256))

            stroke_names = [
                f for f in os.listdir(
                    os.path.join(char_1000_path, cn, 'basic radicals', rn))
                if '.png' in f
            ]
            if len(stroke_names) == 0:
                print(cn, ' ', rn, ' not found!')
                continue

            for sn in stroke_names:
                img_ = cv2.imread(
                    os.path.join(char_1000_path, cn, 'basic radicals', rn, sn),
                    0)
                for x in range(img_.shape[0]):
                    for y in range(img_.shape[1]):
                        if img_[x][y] == 0:
                            bk_[x][y] = 0

            cv2.imwrite(
                os.path.join(char_1000_path, cn, 'basic radicals',
                             rn + '.png'), bk_)
Example #12
    def handle_SVG_extraction_btn(self):
        """
        SVG file extraction button.
        :return:
        """
        print("SVG extraction button clicked")

        temp_path = './jianti_temp'
        if not os.path.exists(temp_path):
            os.makedirs(temp_path)

        filename = str(
            QFileDialog.getExistingDirectory(self, "Select Directory"))

        svg_content = '<?xml version="1.0" standalone="no"?> <!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 20010904//EN" ' \
                      '"http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/svg10.dtd"> <svg version="1.0" xmlns="http://www.w3.org/2000/svg" ' \
                      'width="400.000000pt" height="400.000000pt" viewBox="0 0 400.000000 400.000000" preserveAspectRatio="xMidYMid meet"> ' \
                      '<g transform="translate(0.000000,400.000000) scale(0.100000,-0.100000)" fill="#000000" stroke="none"> \n'
        print("select strokes len: ", len(self.__select_strokes_dict))
        for key in self.__select_strokes_dict.keys():
            stroke_img_path = self.__select_strokes_dict[key]

            stroke_img = cv2.imread(stroke_img_path, 0)
            stroke_rect = getSingleMaxBoundingBoxOfImage(stroke_img)

            real_post = self.current_char_obj.strokes[int(key)].position
            cent_x0 = int(real_post[0] + real_post[2] / 2)
            cent_y0 = int(real_post[1] + real_post[3] / 2)

            # merge (256, 256) to (400, 400)
            bk_ = createBlankGrayscaleImageWithSize((SIZE, SIZE))
            offset = int((SIZE - stroke_img.shape[0]) / 2)

            for x in range(stroke_rect[2]):
                for y in range(stroke_rect[3]):
                    if stroke_img[stroke_rect[1] + y][stroke_rect[0] + x] == 0:
                        bk_[cent_y0-int(stroke_rect[3]/2)+offset+y][cent_x0-int(stroke_rect[2]/2)+offset+x] = \
                        stroke_img[stroke_rect[1]+y][stroke_rect[0]+x]

            # bk_[offset: offset+stroke_img.shape[0], offset: offset+stroke_img.shape[1]] = stroke_img

            # save narray to png
            png_img_path = os.path.join(temp_path, "stroke_{}.png".format(key))
            cv2.imwrite(png_img_path, bk_)

            # convert png to bmp
            bmp_img_path = os.path.join(temp_path, "stroke_{}.bmp".format(key))
            img_ = Image.open(png_img_path)
            img_.save(bmp_img_path)

            # convert bmp to svg
            svg_img_path = os.path.join(temp_path, "stroke_{}.svg".format(key))
            os.system("potrace  --svg {} -o {}".format(bmp_img_path,
                                                       svg_img_path))

            # parse svg file to extract path
            # open svg file
            dom = minidom.parse(svg_img_path)

            # find path element in original svg file
            root = dom.documentElement
            path_elems = root.getElementsByTagName("path")
            if path_elems is None or len(path_elems) == 0:
                print("no path elements found")
                return
            print("path elements len: ", len(path_elems))

            for i in range(len(path_elems)):
                d = path_elems[i].getAttribute('d')
                svg_content += '<path d="' + d + '"></path> \n'

            # del jianti_temp files
            os.system('rm {}'.format(png_img_path))
            os.system('rm {}'.format(bmp_img_path))
            os.system('rm {}'.format(svg_img_path))

            del stroke_img, bk_, img_, dom, root

        svg_content += '</g></svg>'

        # save to svg file
        with open(os.path.join(filename, self.current_char_obj.tag + ".svg"),
                  'w') as f:
            f.write(svg_content)
Example #13
def character_synthesis(chars, root, template_folder_path, stroke_image_path):
    if root is None or chars == "" or not os.path.exists(template_folder_path):
        print("char or root or template folder path is none")
        return

    bs_threshold = 40
    sk_threshold = 20

    char_obj_list = []
    for ch in chars:
        ch_obj = char_from_xml_to_obj(ch, root)
        char_obj_list.append(copy.deepcopy(ch_obj))

    print("ch obj num: ", len(char_obj_list))

    # all stroke image names
    all_stroke_image_names = [
        f for f in os.listdir(stroke_image_path) if ".png" in f
    ]
    print("all stroke num: ", len(all_stroke_image_names))

    # result
    generated_results = []

    for ch_obj in char_obj_list:

        found_similar_stroke_ids = []  # use to identify sk ids in bs

        # find similar basic radical
        similar_basic_radical_dict = {
        }  # {'0': {'肫_月_0.png': 31.400000000000002, '肪_月_0.png': 32.8}}
        for bs_obj in ch_obj.basic_radicals:
            bs_type = bs_obj.tag.strip()
            print("bs type: {}".format(bs_type))
            bs_post = bs_obj.position
            bs_type_path = os.path.join(template_folder_path, "BasicRadicals",
                                        bs_type)
            if not os.path.exists(bs_type_path):
                print("bs type: {} not exist".format(bs_type))
                continue

            # find bs with same position and size
            similar_bs_same_post_and_size_list = find_similar_basic_radicals_img_names_with_same_position_and_size(
                bs_post, bs_type_path, threshold=10)
            print(similar_bs_same_post_and_size_list)
            print("same post and size bs num:",
                  len(similar_bs_same_post_and_size_list))

            if len(similar_bs_same_post_and_size_list) > 0:
                print("find same post and size bs")

                for sk_id_ in bs_obj.stroke_ids:
                    if sk_id_ not in found_similar_stroke_ids:
                        found_similar_stroke_ids.append(sk_id_)
                similar_basic_radical_dict[int(
                    bs_obj.id)] = similar_bs_same_post_and_size_list.copy()
                continue

            # no bs with the same position and size found; fall back to bs with the same size
            similar_bs_same_size_list = find_similar_basic_radicals_img_names_with_same_size(
                bs_post, bs_type_path, threshold=10)
            if len(similar_bs_same_size_list) > 0:
                print("find same size bs")

                for sk_id_ in bs_obj.stroke_ids:
                    if sk_id_ not in found_similar_stroke_ids:
                        found_similar_stroke_ids.append(sk_id_)

                similar_basic_radical_dict[int(
                    bs_obj.id)] = similar_bs_same_size_list.copy()
                continue
        print(similar_basic_radical_dict)

        print("find stroke ids: ", found_similar_stroke_ids)

        # find the stroke names of these found bs
        similar_bs_strokes_list_dict_result = {
        }  # {'0': [[肫_storke_0.png, 肫_stroke_1.png ..], [肪_stroke_0.png, 肪_stroke_1.png...]]}
        if len(similar_basic_radical_dict) > 0:
            for bs_id in similar_basic_radical_dict.keys():

                bs_stroke_names_list = []
                bs_similar_bs_name_list = similar_basic_radical_dict[bs_id]
                for bs_img_name in bs_similar_bs_name_list:

                    bs_sk_names_list = []

                    similar_bs_char_tag = bs_img_name.split("_")[0].strip()
                    similar_bs_bs_tag = bs_img_name.split("_")[1].strip()

                    similar_bs_char_all_bs_type = get_all_basic_radical_type_list(
                        root, similar_bs_char_tag)
                    print(similar_bs_char_all_bs_type)

                    similar_bs_sk_ids_list = get_char_bs_tag_and_stroke_ids_dict_list(
                        root, similar_bs_char_tag)
                    print(similar_bs_sk_ids_list)

                    similar_bs_bs_sk_ids = []

                    for d in similar_bs_sk_ids_list:
                        key = list(d.keys())[0]
                        if similar_bs_bs_tag == key:
                            similar_bs_bs_sk_ids = d[key]

                    print("similar_bs_bs_sk_ids: ", similar_bs_bs_sk_ids)

                    # find these stroke images
                    for sk_id in similar_bs_bs_sk_ids:
                        for name in all_stroke_image_names:
                            if "{}_".format(similar_bs_char_tag
                                            ) in name and "_{}.png".format(
                                                sk_id) in name:
                                bs_sk_names_list.append(name)
                                print("name:", name)
                                break
                    if len(bs_sk_names_list) > 0:
                        bs_stroke_names_list.append(bs_sk_names_list.copy())
                similar_bs_strokes_list_dict_result[
                    bs_id] = bs_stroke_names_list.copy()
        print(similar_bs_strokes_list_dict_result)

        # find similar strokes
        unfound_stroke_ids = []
        for sk_obj in ch_obj.strokes:
            if int(sk_obj.id) not in found_similar_stroke_ids:
                unfound_stroke_ids.append(int(sk_obj.id))

        print("not found ids:", unfound_stroke_ids)

        similar_strokes_dict = {
        }  # {4: {'丐_4E10_2.png': 13.2, '乍_4E4D_3.png': 17.599999999999998, '上_4E0A_1.png': 15.199999999999996, '乾_4E7E_9.png': 10.799999999999997, '临_4E34_3.png': 19.6}, 5: {

        ch_similar_sk_id_img_name_dict = {}
        # init this dict
        for i in range(len(ch_obj.strokes)):
            ch_similar_sk_id_img_name_dict[i] = ""

        if len(similar_bs_strokes_list_dict_result) > 0:
            # this char can be synthesized by similar bs
            for bs_id in similar_bs_strokes_list_dict_result.keys():
                for bs_obj in ch_obj.basic_radicals:
                    if bs_obj.id == str(bs_id):
                        bs_stroke_ids = bs_obj.stroke_ids

                        for i in range(len(bs_stroke_ids)):
                            sk_id = bs_stroke_ids[i]
                            if sk_id in ch_similar_sk_id_img_name_dict:
                                ch_similar_sk_id_img_name_dict[
                                    sk_id] = similar_bs_strokes_list_dict_result[
                                        bs_id][0][i]
                        break
            print(ch_similar_sk_id_img_name_dict)

        for sk_id in unfound_stroke_ids:
            sk_obj = None
            for obj in ch_obj.strokes:
                if sk_id == int(obj.id):
                    sk_obj = copy.deepcopy(obj)
                    break
            if sk_obj is None:
                continue

            # find similar strokes
            sk_post = sk_obj.position
            sk_type = sk_obj.tag
            sk_type_temp_path = os.path.join(template_folder_path, "Strokes",
                                             sk_type)
            if not os.path.exists(sk_type_temp_path):
                print("no this type stroke in templates folder")
                continue

            similar_sk_same_post_and_size_list = find_similar_strokes_img_names_with_same_position_and_size(
                sk_post, sk_type_temp_path, threshold=10)
            if len(similar_sk_same_post_and_size_list) > 0:
                print("find same post and size strokes")
                similar_strokes_dict[
                    sk_id] = similar_sk_same_post_and_size_list.copy()
                ch_similar_sk_id_img_name_dict[
                    sk_id] = similar_sk_same_post_and_size_list[0]
                continue

            # no stroke with the same position and size found; fall back to same size
            similar_sk_same_size_list = find_similar_strokes_img_names_with_same_size(
                sk_post, sk_type_temp_path, threshold=10)
            if len(similar_sk_same_size_list) > 0:
                print("find same size strokes")
                similar_strokes_dict[sk_id] = similar_sk_same_size_list.copy()
                ch_similar_sk_id_img_name_dict[
                    sk_id] = similar_sk_same_size_list[0]
                continue

        # merge image
        ch_stroke_post_dict = {}
        for i in range(len(ch_obj.strokes)):
            ch_stroke_post_dict[i] = ch_obj.strokes[i].position.copy()

        # add similar bs strokes to dict
        for bs_id in similar_bs_strokes_list_dict_result.keys():
            for bs_obj in ch_obj.basic_radicals:
                if bs_obj.id == str(bs_id):
                    bs_stroke_ids = bs_obj.stroke_ids

                    for i in range(len(bs_stroke_ids)):
                        sk_id = bs_stroke_ids[i]
                        if sk_id in ch_similar_sk_id_img_name_dict:
                            ch_similar_sk_id_img_name_dict[
                                sk_id] = similar_bs_strokes_list_dict_result[
                                    bs_id][0][i]
        print(ch_similar_sk_id_img_name_dict)

        # merge image
        ch_stroke_position_dict = get_strokes_position_dict(root, ch)
        print("{} stroke post dict: ".format(ch), ch_stroke_position_dict)

        bk = createBlankGrayscaleImageWithSize((500, 500))
        bk, _ = merge_select_stroke_images(bk, ch_stroke_position_dict,
                                           ch_similar_sk_id_img_name_dict,
                                           stroke_image_path)

        generated_results.append(bk.copy())

    return generated_results
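A hypothetical call, only to illustrate the expected inputs (the paths are placeholders; root is an ElementTree root such as the one loaded in Example 20):

results = character_synthesis("心相", root,
                              template_folder_path="../templates",
                              stroke_image_path="../strokes")
for idx, img in enumerate(results):
    cv2.imwrite("synthesized_{}.png".format(idx), img)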
Example #14
def merge_diff_generation_real_images():
    img_path = "../../../Data/1000 generated results"

    kai_path = "../../../Data/Calligraphy_database/kai"

    filenames = [f for f in os.listdir(img_path) if ".png" in f]
    print("filenames len: ", len(filenames))

    kainames = [f for f in os.listdir(kai_path) if ".png" in f]

    bk = createBlankGrayscaleImageWithSize((2400, 1600))

    for i in range(len(filenames[:1000])):
        page_id = math.floor(i / 12)
        print(page_id)

        fname = filenames[i]
        tag = fname.split("_")[0]
        print(tag)

        kai_img_path = ""
        # find real kai image
        for kn in kainames:
            kai_tag = kn.split("_")[0]
            if tag == kai_tag:
                kai_img_path = os.path.join(kai_path, kn)
                break
        if kai_img_path == "":
            print(tag, "not find real kai image")
            continue

        # open kai img
        kai_img = cv2.imread(kai_img_path, 0)

        new_kai_img = createBlankGrayscaleImageWithSize((400, 400))
        new_kai_img[72:328, 72:328] = kai_img

        # kai_img = cv2.resize(kai_img, (400, 400))

        img = cv2.imread(os.path.join(img_path, filenames[i]), 0)

        id = i - page_id * 12

        row = math.floor(id / 2)
        col = id % 2

        print(row, col)

        if col == 0:
            bk[row * 400:(row + 1) * 400, col * 400:(col + 1) * 400] = img
            bk[row * 400:(row + 1) * 400,
               (col + 1) * 400:(col + 2) * 400] = new_kai_img
        else:
            bk[row * 400:(row + 1) * 400,
               (col + 1) * 400:(col + 2) * 400] = img
            bk[row * 400:(row + 1) * 400,
               (col + 2) * 400:(col + 3) * 400] = new_kai_img

        if (i + 1) % 12 == 0:
            cv2.imwrite(
                "../../../Data/1000 merged result/all_merged_diff_%d.png" %
                page_id, bk)

            bk = createBlankGrayscaleImageWithSize((2400, 1600))

Example #15
    def handle_chars_tag_listview_item_clicked(self, qModelIndex):
        """
        Char list view item clicked!
        :param qModelIndex:
        :return:
        """
        id_ = qModelIndex.row()
        self.select_char_id = id_

        # get char object
        current_char_obj_ = self.__chars_info_list[id_]
        self.current_char_obj = current_char_obj_

        # get similar basic radicals and strokes
        similar_basic_radicals_dict_, similar_strokes_dict_ = self.__similar_radicals_and_strokes_list[
            id_]
        self.similar_basic_radicals_dict = similar_basic_radicals_dict_
        self.similar_strokes_dict = similar_strokes_dict_

        # update the basic radicals tree view
        self.handle_char_basic_radicals_treeview_update(
            self.target_basic_radicals_treeView, similar_basic_radicals_dict_)
        self.target_basic_radicals_treeView.expandAll()

        # update the strokes tree view
        self.handle_char_strokes_treeview_update(self.target_strokes_treeView,
                                                 similar_strokes_dict_)
        self.target_strokes_treeView.expandAll()

        # select strokes to selected strokes
        self.__select_strokes_dict = {}  # should clean the cache first!!!!
        for sk in current_char_obj_.strokes:
            self.__select_strokes_dict[int(sk.id)] = ""

        # get all default basic radicals strokes
        for k in similar_basic_radicals_dict_.keys():

            # char bs strokes id
            ch_bs_obj = None
            for ch_bs in current_char_obj_.basic_radicals:
                if ch_bs.id == k:
                    ch_bs_obj = ch_bs
            if ch_bs_obj is None:
                continue

            ch_strokes_id = ch_bs_obj.strokes_id

            if len(similar_basic_radicals_dict_[k]) == 0:
                continue

            bs_obj = similar_basic_radicals_dict_[k][0]  # first element of bs

            bs_obj_path = bs_obj["path"]
            bs_obj_tag = bs_obj_path.split("/")[-1].split("_")[0]
            bs_obj_strokes_id = bs_obj["strokes_id"]

            bs_char_path_ = os.path.join(self.__char_root_path, bs_obj_tag,
                                         "strokes")
            bs_stroke_img_names_ = [
                f for f in os.listdir(bs_char_path_) if ".png" in f
            ]

            if len(ch_strokes_id) != len(bs_obj_strokes_id):
                print("char strokes id not same similar bs!")
                continue

            # find stroke path of similar bs
            for i in range(len(ch_strokes_id)):
                for bn in bs_stroke_img_names_:
                    if "_" + str(bs_obj_strokes_id[i]) + "." in bn:
                        self.__select_strokes_dict[int(
                            ch_strokes_id[i])] = os.path.join(
                                bs_char_path_, bn)
                        break

        # get all default stroke
        for k in similar_strokes_dict_.keys():
            strokes_path_ = similar_strokes_dict_[k]
            self.__select_strokes_dict[int(k)] = strokes_path_[0]

        # recompose default basic radicals and strokes
        image = createBlankGrayscaleImageWithSize((SIZE, SIZE))
        offset_base = int(abs(SIZE - 256) / 2)

        for key in self.__select_strokes_dict.keys():

            # get real position of stroke
            real_post = current_char_obj_.strokes[int(key)].position

            cent_x0 = int(real_post[0] + real_post[2] / 2)
            cent_y0 = int(real_post[1] + real_post[3] / 2)

            # get position of similar stroke
            stroke_path = self.__select_strokes_dict[key]
            stroke_img = cv2.imread(stroke_path, 0)
            stroke_rect = getSingleMaxBoundingBoxOfImage(stroke_img)
            if stroke_rect is None:
                continue

            for x in range(stroke_rect[2]):
                for y in range(stroke_rect[3]):
                    if stroke_img[stroke_rect[1] + y][stroke_rect[0] + x] == 0:
                        image[cent_y0-int(stroke_rect[3]/2)+offset_base+y][cent_x0-int(stroke_rect[2]/2)+offset_base+x] = \
                        stroke_img[stroke_rect[1] + y][stroke_rect[0] + x]

        self.current_char_gray = image.copy()

        # add grid lines
        bk_with_grids_rgb_img = self.grid_bk_image.copy()
        bk_with_grids_rgb_img = merge_gray_to_rgb_image(
            self.current_char_gray, bk_with_grids_rgb_img)

        # display generated image
        qimg_ = rgb2qimage(bk_with_grids_rgb_img)
        qimg_pix_ = QPixmap.fromImage(qimg_)

        # render image display of generated results
        self.render_image_display(qimg_pix_, self.result_graphicsView,
                                  self.result_scene)

        self.basic_radical_scene.clear()
        self.stroke_scene.clear()
        del current_char_obj_, similar_basic_radicals_dict_, similar_strokes_dict_, image, stroke_img, \
            bk_with_grids_rgb_img
Example #16
    def handle_char_basic_radicals_treeview_item_clicked(self, qModelIndex):
        """
        Basic radical tree view item clicked!
        :param qModelIndex:
        :return:
        """
        print(qModelIndex.row())

        bs_id = self.target_basic_radicals_treeView.currentIndex().parent(
        ).row()
        bs_img_id = self.target_basic_radicals_treeView.currentIndex().row()
        print(bs_id, " ", bs_img_id)

        if bs_id == -1:
            print("Clicked invalid content, not update the image!")
            return

        # update the stroke list
        select_bs_dict = self.similar_basic_radicals_dict[str(
            bs_id)][bs_img_id]
        select_bs_strokes_id = select_bs_dict["strokes_id"]
        select_bs_path = select_bs_dict["path"]

        # real char strokes id
        real_strokes_id = []
        for bs in self.current_char_obj.basic_radicals:
            if bs.id == str(bs_id):
                real_strokes_id = bs.strokes_id

        if len(select_bs_strokes_id) != len(real_strokes_id):
            print("select and real strokes id not same size!")
            return

        # find stroke names of select bs char
        select_bs_char_tag = select_bs_path.split("/")[-1].split("_")[0]
        select_bs_strokes_name = [
            f for f in os.listdir(
                os.path.join(self.__char_root_path, select_bs_char_tag,
                             "strokes")) if ".png" in f
        ]

        for i in range(len(real_strokes_id)):
            for sn in select_bs_strokes_name:
                if "_" + str(select_bs_strokes_id[i]) + "." in sn:
                    self.__select_strokes_dict[
                        real_strokes_id[i]] = os.path.join(
                            self.__char_root_path, select_bs_char_tag,
                            "strokes", sn)

        image = render_generated_image(self.current_char_obj,
                                       self.__select_strokes_dict,
                                       size=SIZE)
        self.current_char_gray = image.copy()

        # add grid lines to generated results
        bk_with_grids_rgb_img = self.grid_bk_image.copy()
        bk_with_grids_rgb_img = merge_gray_to_rgb_image(
            self.current_char_gray, bk_with_grids_rgb_img)

        # display generated image
        qimg_ = rgb2qimage(bk_with_grids_rgb_img)
        qimg_pix_ = QPixmap.fromImage(qimg_)
        self.render_image_display(qimg_pix_, self.result_graphicsView,
                                  self.result_scene)

        # display select bs
        bs_bk = createBlankGrayscaleImageWithSize((SIZE, SIZE))
        bs_img = cv2.imread(select_bs_path, 0)
        offset = int((SIZE - bs_img.shape[0]) / 2)
        bs_bk[offset:offset + bs_img.shape[0],
              offset:offset + bs_img.shape[1]] = bs_img

        self.current_basic_radical_gray = bs_bk.copy()

        # add grid lines to basic radical
        bk_with_grids_rgb_img = self.grid_bk_image.copy()
        bk_with_grids_rgb_img = merge_gray_to_rgb_image(
            self.current_basic_radical_gray, bk_with_grids_rgb_img)

        qimg_ = rgb2qimage(bk_with_grids_rgb_img)
        qimg_pix_ = QPixmap.fromImage(qimg_)
        self.render_image_display(qimg_pix_, self.basic_radical_graphicsView,
                                  self.basic_radical_scene)

        del select_bs_dict, image, bk_with_grids_rgb_img, qimg_, qimg_pix_, bs_bk, bs_img
Example #17
    def handle_char_strokes_tree_view_item_clicked(self, qModelIndex):
        """
        Stroke tree view item clicked!
        :param qModelIndex:
        :return:
        """
        print(qModelIndex.row())

        # update the generated image
        stroke_id = self.target_strokes_treeView.currentIndex().parent().row()
        stroke_img_id = self.target_strokes_treeView.currentIndex().row()

        # clicked invalid content for stroke_id, do not update the image
        if stroke_id == -1:
            print("Clicked invalid content, not updating the image!")
            return
        # get the select stroke path
        self.current_stroke_id = stroke_id
        self.current_stroke_img_id = stroke_img_id

        stroke_img_name = self.target_strokes_treeView.currentIndex().data()
        stroke_type = stroke_img_name.split("_")[2]
        stroke_img_path = os.path.join(self.library_stroke_root_path,
                                       stroke_type, stroke_img_name)
        self.select_stroke_image_path = stroke_img_path
        self.__select_strokes_dict[stroke_id] = stroke_img_path

        image = render_generated_image(self.current_char_obj,
                                       self.__select_strokes_dict,
                                       size=SIZE)

        self.current_char_gray = image.copy()

        # add grid lines
        bk_with_grids_rgb_img = self.grid_bk_image.copy()
        bk_with_grids_rgb_img = merge_gray_to_rgb_image(
            self.current_char_gray, bk_with_grids_rgb_img)

        # display generated image
        qimg_ = rgb2qimage(bk_with_grids_rgb_img)
        qimg_pix_ = QPixmap.fromImage(qimg_)
        self.render_image_display(qimg_pix_, self.result_graphicsView,
                                  self.result_scene)

        # display select stroke image
        stroke_bk = createBlankGrayscaleImageWithSize((SIZE, SIZE))
        stroke_img = cv2.imread(self.select_stroke_image_path, 0)
        offset = int((SIZE - stroke_img.shape[0]) / 2)
        stroke_bk[offset:offset + stroke_img.shape[0],
                  offset:offset + stroke_img.shape[1]] = stroke_img

        self.current_stroke_gray = stroke_bk.copy()
        # add grid lines
        bk_with_grids_rgb_img = self.grid_bk_image.copy()
        bk_with_grids_rgb_img = merge_gray_to_rgb_image(
            self.current_stroke_gray, bk_with_grids_rgb_img)

        qimg_ = rgb2qimage(bk_with_grids_rgb_img)
        qimg_pix_ = QPixmap.fromImage(qimg_)
        self.render_image_display(qimg_pix_, self.stroke_graphicsView,
                                  self.stroke_scene)

        del image, bk_with_grids_rgb_img, qimg_, qimg_pix_, stroke_bk, stroke_img
Example #18
def recompose_chars(
        chars_info_list,
        similar_chars,
        char_root_path="/Users/liupeng/Documents/Data/Calligraphy_database/Chars_775",
        size=400):
    """
    Recompose chars on a size x size canvas (default 400 x 400) instead of 256 x 256,
    so that shifted strokes do not fall outside the original (256, 256) image.
    :param chars_info_list:
    :param similar_chars:
    :param char_root_path:
    :param size:
    :return:
    """
    generated_images = []

    if len(similar_chars) == 0:
        return generated_images

    for sc in similar_chars:
        similar_basic_radicals, similar_strokes = sc

        ch_id = similar_chars.index(sc)
        ch_obj = chars_info_list[ch_id]
        ch_strokes_list = ch_obj.strokes

        print("process: ", ch_obj.tag)

        # get basic radicals info and his strokes images
        similar_bs_dict = {}
        for bs_id in similar_basic_radicals.keys():

            bs_obj = []
            for bs_ in similar_basic_radicals[bs_id]:

                bs_obj_dict = {}

                path_ = bs_["path"]
                strokes_id_ = bs_["strokes_id"]
                postion_ = bs_["position"]

                print("path: ", path_)
                print("strokes id: ", strokes_id_)

                char_tag = path_.split('/')[-1].replace(".png",
                                                        "").split("_")[0]
                char_path_ = os.path.join(char_root_path, char_tag, "strokes")
                stroke_img_names = [
                    f for f in os.listdir(char_path_) if ".png" in f
                ]

                stroke_img_dict = {}
                for s_id in strokes_id_:
                    for nm in stroke_img_names:
                        if "_" + str(s_id) + "." in nm:
                            stroke_img_dict[s_id] = os.path.join(
                                char_path_, nm)
                            break

                bs_obj_dict["path"] = path_
                bs_obj_dict["strokes"] = stroke_img_dict
                bs_obj_dict["position"] = postion_
                bs_obj.append(bs_obj_dict)

            if bs_obj != []:
                similar_bs_dict[bs_id] = bs_obj

        print(similar_bs_dict)

        # recompose basic radicals and strokes
        bk = createBlankGrayscaleImageWithSize((size, size))
        offset_base = int(abs(size - 256) / 2)

        # load basic radicals stroke images and center alignment
        for bs_id in similar_bs_dict.keys():
            for bs_obj in similar_bs_dict[bs_id]:

                bk_bs = createBlankGrayscaleImageWithSize(
                    (size, size)
                )  # merge strokes of this basic radical together to get single connected component

                stroke_objs = bs_obj["strokes"]
                post_ = bs_obj["position"]

                cent_x0 = int(post_[0] + post_[2] / 2)
                cent_y0 = int(post_[1] + post_[3] / 2)

                for s_id in stroke_objs.keys():
                    path_ = stroke_objs[s_id]

                    img_ = cv2.imread(path_, 0)

                    for x in range(img_.shape[0]):
                        for y in range(img_.shape[1]):
                            if img_[x][y] == 0:
                                bk_bs[x + offset_base][y + offset_base] = 0

                x, y, w, h = getSingleMaxBoundingBoxOfImage(bk_bs)

                cent_x = int(x + w / 2)
                cent_y = int(y + h / 2)

                offset_x = cent_x0 - cent_x + offset_base
                offset_y = cent_y0 - cent_y + offset_base

                for x in range(bk_bs.shape[0]):
                    for y in range(bk_bs.shape[1]):
                        if bk_bs[x][y] == 0:
                            if x+offset_x < 0 or x+offset_x >= size or y+offset_y < 0 or y+offset_y >= size or \
                                    x < 0 or x >= size or y < 0 or y >= size:
                                continue
                            bk[x + offset_x][y + offset_y] = bk_bs[x][y]

                break

        # load stroke images
        for s_id in similar_strokes.keys():

            # get template stroke position
            print(s_id)

            real_post = None
            for stk_obj in ch_strokes_list:
                if s_id == stk_obj.id:
                    real_post = stk_obj.position
                    break

            if real_post is None:
                print("Template stroke position not found!")
                continue

            cent_x0 = int(real_post[0] + real_post[2] / 2)
            cent_y0 = int(real_post[1] + real_post[3] / 2)

            # path_ = similar_strokes[s_id][0]   # use the most match stroke
            path_ = find_most_match_stroke(real_post, similar_strokes[s_id])

            img_ = cv2.imread(path_, 0)
            rect_ = getSingleMaxBoundingBoxOfImage(img_)

            for x_ in range(rect_[2]):
                for y_ in range(rect_[3]):
                    if img_[rect_[1] + y_][rect_[0] + x_] == 0:
                        bk[cent_y0 - int(rect_[3] / 2) + offset_base + y_][cent_x0 - int(rect_[2] / 2) + offset_base + x_] = \
                            img_[rect_[1] + y_][rect_[0] + x_]

        generated_images.append(bk)
    return generated_images
Example #19
def create_pdf_files():
    chars = []
    with open(chars_path, "r") as f:
        chars = f.readline()
    print("chars num: ", len(chars))

    codes = chars_to_unicode_list(chars)

    font = ImageFont.truetype(font_path, size=char_size)
    label_font = ImageFont.truetype(font_path, size=100)

    bk = creatBlankRGBImageWithSize((4200, 3000))
    for char_id in range(len(chars)):

        if char_id == len(chars) - 1:
            cv2.imwrite(os.path.join(save_path, "page_%d.png" % (math.floor(char_id / 35) - 1)), bk)

        if char_id % 35 == 0 and char_id != 0:
            cv2.imwrite(os.path.join(save_path, "page_%d.png" % (math.floor(char_id / 35)-1)), bk)
            # create new page of PDF
            bk = creatBlankRGBImageWithSize((4200, 3000))

        page_id = math.floor((char_id / 35))
        print("page id: ", page_id)

        # merge 35 chars to one page.
        col_id = math.floor((char_id - page_id * 35) / 5)
        row_id = (char_id - page_id * 35) % 5

        # create char image
        image = Image.new("L", (char_size, char_size), 255)
        draw = ImageDraw.Draw(image)
        draw.text((0, 0), chars[char_id], 0, font=font)

        # draw label
        label_img = Image.new("L", (500, 100), 255)
        label_draw = ImageDraw.Draw(label_img)
        label_draw.text((0, 0), (chars[char_id].replace("\n", "") + " " + str(codes[char_id])), 0, font=label_font)

        # print(codes[char_id], type(codes[char_id]), chars[char_id].replace("\n", "") + str(codes[char_id]))

        ch_bk = createBlankGrayscaleImageWithSize((600, 600))
        ch_bk[:char_size, :char_size] = image
        ch_bk[char_size: 600, : char_size] = label_img

        ch_bk_3 = cv2.cvtColor(ch_bk, cv2.COLOR_GRAY2RGB)

        # write char and unicode to image
        # cv2.putText(ch_bk, chars[char_id] + " " + codes[char_id], (10,230), cv2.FONT_HERSHEY_SIMPLEX, 0.5, 0, 2)

        ch_bk_rgb = creatBlankRGBImageWithSize(ch_bk.shape)

        # add boundary
        cv2.line(ch_bk_rgb, (1, 1), (1, char_size-1), (0, 0, 255), 4)
        cv2.line(ch_bk_rgb, (1, 1), (char_size - 1, 1), (0, 0, 255), 4)
        cv2.line(ch_bk_rgb, (char_size-1, 1), (char_size-1, char_size - 1), (0, 0, 255), 4)
        cv2.line(ch_bk_rgb, (1, char_size-1), (char_size-1, char_size - 1), (0, 0, 255), 4)

        # dash line
        drawline(ch_bk_rgb, (int(char_size/2), 1), (int(char_size/2), char_size-1), (0, 0, 255), 2)
        drawline(ch_bk_rgb, (1, int(char_size / 2)), (char_size - 1, int(char_size / 2)), (0,0,  255), 2)

        for y in range(ch_bk.shape[0]):
            for x in range(ch_bk.shape[1]):
                if ch_bk[y][x] != 255 :
                    ch_bk_rgb[y][x] = (0, 0, 0)

        print("col id: ", col_id, " row id:", row_id)
        print(bk[col_id*char_size: (col_id+1)*char_size, row_id*char_size: (row_id+1)*char_size].shape)
        bk[col_id*600: (col_id+1)*600, row_id*600: (row_id+1)*600] = ch_bk_rgb

        if char_id == 36:
            break
Example #20
def stroke_recompose(input):
    # remove invalid characters in input
    input = input.replace(' ', '').replace('\n', '').replace('\t', '')

    # retrieve the xml to find character info
    xml_path = "../../../Data/Characters/radical_add_stroke_position_similar_structure_add_stroke_order.xml"
    # load radical data
    tree = ET.parse(xml_path)
    if tree is None:
        print("tree is none!")
        return

    root = tree.getroot()
    print("root len:", len(root))

    char_info_list = []
    for ins in input:
        tag = ""
        u_code = ""
        stroke_orders = []
        stroke_position = []

        for child in root:
            ch = child.attrib["TAG"]
            if ch == ins:
                tag = ch
                u_code = child.attrib["ID"]

                # stroke order
                stroke_order_elems = child.findall('STROKE_ORDER')
                if stroke_order_elems:
                    s_order = stroke_order_elems[0].text
                    stroke_orders = s_order.split("|")
                else:
                    print("not find stroke order of ", tag)

                # stroke position
                s_post_elems = child.findall('STROKES_POSITION')
                if s_post_elems:
                    ss_post_elems = s_post_elems[0].findall('STROKE_POSITION')
                    if ss_post_elems:
                        for elem in ss_post_elems:
                            stroke_position.append(ast.literal_eval(elem.text))
                else:
                    print("Not find storke position of ", tag)

                break

        if tag == "" and u_code == "":
            print("Not find this char: ", ins)
            continue
        if len(stroke_orders) != len(stroke_position):
            print(tag, "Stroke order and position are not same length!")
            continue

        # create ChineseCharacter object
        cc_obj = ChineseCharacter(tag, u_code, stroke_orders, stroke_position)
        char_info_list.append(cc_obj)

    # search for the template stroke from the library
    char_target_strokes_list = []
    for cc in char_info_list:
        print(cc.tag, cc.u_code, cc.stroke_orders, cc.stroke_position)
        target_strokes = []
        for i in range(len(cc.stroke_orders)):
            targ_strokes_ = query_taget_strokes(cc.stroke_orders[i],
                                                cc.stroke_position[i])
            target_strokes.append(targ_strokes_)
            print('target stroken num: ', len(targ_strokes_))
            if len(targ_strokes_) == 0:
                print(cc.tag, "stroke ", i, "no target strokes found")

        char_target_strokes_list.append(target_strokes)
    print(char_target_strokes_list)

    # recompose strokes
    if len(char_target_strokes_list) == len(char_info_list):
        print("img and chars are same length")
    else:
        print("img and chars are not same length")

    save_path = "../../../Data/Stroke_recomposed_tool/generated_results"
    for i in range(len(char_info_list)):
        ch_obj = char_info_list[i]
        ch_stroke_imgs = char_target_strokes_list[i]
        bk = createBlankGrayscaleImageWithSize((400, 400))

        # merge all strokes with center alignment
        if len(ch_stroke_imgs) == len(ch_obj.stroke_orders):
            print("imgs of stroke are same length")

        for j in range(len(ch_stroke_imgs)):
            if len(ch_stroke_imgs[j]) > 0:
                img_path = ch_stroke_imgs[j][0]
                img_ = cv2.imread(img_path, 0)
                img_ = cv2.resize(img_, (256, 256))
                rect_ = getSingleMaxBoundingBoxOfImage(img_)

                cent_x0 = int(ch_obj.stroke_position[j][0] +
                              ch_obj.stroke_position[j][2] / 2)
                cent_y0 = int(ch_obj.stroke_position[j][1] +
                              ch_obj.stroke_position[j][3] / 2)

                # only copy the valid pixels
                for x_ in range(rect_[2]):
                    for y_ in range(rect_[3]):
                        if img_[rect_[1] + y_][rect_[0] + x_] == 0:
                            bk[cent_y0 - int(rect_[3] / 2) + 72 +
                               y_][cent_x0 - int(rect_[2] / 2) + 72 +
                                   x_] = img_[rect_[1] + y_][rect_[0] + x_]

        cv2.imwrite(
            os.path.join(save_path, ch_obj.tag + "_" + ch_obj.u_code + ".png"),
            bk)