# Example 1
# 0
def get_index_from_region(coorRegion, imageRegion, coorCCRegion, suffix):
    """Recognize text in a region and split lines into in-graph / out-of-graph.

    Args:
        coorRegion: Coordinate of the region (unused here; kept for interface
            compatibility with callers).
        imageRegion: Image data of the region, sent to the OCR service.
        coorCCRegion: Coordinates of connected components in the region,
            as (x, y, w, h) tuples.
        suffix: Image file-name suffix (format hint for the OCR call).

    Returns:
        A tuple (index1, index2): recognized text lines outside any graph
        box, and lines that fall inside a graph box.
    """
    index1 = []
    index2 = []
    # OCR the region; `client` and `heightOfLine` are module-level globals.
    bdResult = bd_rec(imageRegion, client, imgForm=suffix, api='accurate')
    if "words_result" not in bdResult:
        print(bdResult)
        print("BaiDu recognized error, exit...")
        exit(0)
    if not bdResult["words_result"]:
        return index1, index2

    # Pair each recognized line with its (x, y, w, h) box.
    words = [(j, dic2length(w["location"]))
             for j, w in enumerate(bdResult["words_result"])]

    # Connected components large enough to be treated as graphs.
    graphs = [c for c in coorCCRegion
              if c[2] > heightOfLine * 3 or c[3] > heightOfLine * 1.5]

    for w in words:
        inside = False
        for g in graphs:
            if is_within(w[1], expand_box(g)):
                index2.append(bdResult["words_result"][w[0]]["words"])
                inside = True
                break
        if not inside:
            index1.append(bdResult["words_result"][w[0]]["words"])
    return index1, index2
# Example 2
# 0
def get_index_from_region(coorRegion, imageRegion, coorCCRegion, suffix):
    """Gets index from region.

    Args:
        coorRegion: Coordinate of region (unused here; kept for interface
            compatibility with callers).
        imageRegion: The image data of region.
        coorCCRegion: The coordinate of connected components in region,
            as (x, y, w, h) tuples.
        suffix: Image name's suffix.

    Returns:
        Index out graph and Index in graph (two lists of recognized words).
    """
    index1 = []
    index2 = []
    # OCR the region; `client` and `heightOfLine` are module-level globals.
    bdResult = bd_rec(imageRegion, client, imgForm=suffix, api='accurate')
    if "words_result" not in bdResult:
        print(bdResult)
        print("BaiDu recognized error, exit...")
        exit(0)
    if not bdResult["words_result"]:
        return index1, index2

    # Pair each recognized line with its (x, y, w, h) box.
    words = [(j, dic2length(w["location"]))
             for j, w in enumerate(bdResult["words_result"])]

    # Connected components large enough to be treated as graphs.
    graphs = [c for c in coorCCRegion
              if c[2] > heightOfLine * 3 or c[3] > heightOfLine * 1.5]

    for w in words:
        inside = False
        for g in graphs:
            if is_within(w[1], expand_box(g)):
                index2.append(bdResult["words_result"][w[0]]["words"])
                inside = True
                break
        if not inside:
            index1.append(bdResult["words_result"][w[0]]["words"])
    return index1, index2
# Example 3
# 0
def deal_one_page(srcPath,
                  desPath='',
                  stanPath='',
                  saveImage=True,
                  charsOnly=False,
                  rectification=False):
    """Process one paper page image end to end.

    Args:
        srcPath: Path of the source image.
        desPath: Destination path; when set, the final result is also written
            to a JSON file derived from it.
        stanPath: Path to a standard (ground-truth) file for verification.
        saveImage: Kept for interface compatibility (currently unused).
        charsOnly: When True, return only the per-line character coordinates.
        rectification: Whether to rectify (deskew) the image first.

    Returns:
        The final structured result dict, the character coordinates when
        ``charsOnly`` is True, or None when the image cannot be read.
    """
    global heightOfLine
    coor5 = []
    mistakes = 0
    bdResult = {}
    name = os.path.basename(srcPath)
    if not os.path.exists(srcPath):
        print("Image path not exists!")
        return None
    try:
        imgBgr = cv2.imread(srcPath)
        if rectification:
            imgBgr = rectify(imgBgr.copy())
        imgData = cv2.cvtColor(imgBgr, cv2.COLOR_BGR2GRAY)
    except Exception as imageError:
        print(imageError, 'Could not read the image file, skip...')
        # Bail out: without imgData every step below would raise NameError.
        return None

    # Remove long vertical lines from the original image.
    imgEliVer = eli_ver(imgData.copy())

    # Vertical (column) division of the page.
    coor1 = divide_ver(imgEliVer)

    # Line height = median of all line heights; cached in the module global.
    if heightOfLine == 0:
        coor2 = divide_hor(imgEliVer, coor1)
        heightOfLines = [c[3] - c[1] for c in coor2]
        heightOfLine = median(heightOfLines)

    # Extract connected components, then erase the large ones before OCR.
    imgInv = imu.preprocess_bw_inv(imgData.copy())
    cv2.imwrite('imgInv.jpg', imgInv)
    coorCC = get_no_intersect_boxes('imgInv.jpg')
    imgEliCC = eli_large_cc(coorCC, imgData.copy(), heightOfLine)
    os.remove('imgInv.jpg')

    # Remove vertical lines again on the CC-erased image.
    imgEli = eli_ver(imgEliCC)

    # Horizontal (line) division.
    coor2 = divide_hor(imgEli, coor1)

    # First few characters of each line.
    coor3 = find_char(imgEli, coor2)

    if charsOnly:
        return coor3

    # SVM classification of the character images.
    coor4 = svm_classify(imgEli, coor3)

    # Redo char detection + SVM on a binarized image to recover misses.
    imgBw = imu.preprocess_bw(imgEli, boxSize=(4, 4), morph=False)
    coor3Bw = find_char(imgBw, coor2)
    coor4Bw = svm_classify(imgBw, coor3Bw)
    coor5 = update_result(coor4, coor4Bw)

    try:
        bdResult = bd_rec(imgEliCC,
                          client,
                          imgForm=srcPath[-4:],
                          api='general')
    except Exception as e:
        print(
            'Baidu recognition error, check your internet connection. exit...')
        exit(0)

    # Extra-credit ("additional") questions are appended as a type-2 line.
    addResult = additional_questions(bdResult)
    if addResult:
        coor5.append(([addResult], 2))

    if stanPath:
        stanResult = output_stan(coor5, os.path.basename(srcPath))
        mistakes = verification(stanResult, stanPath)

    # Self-assessment section: blank it out before re-dividing lines.
    sa = detect_self_assessment(bdResult)
    if sa:
        imgData[sa:, coor1[1][0]:coor1[1][1] + 10] = 255

    coor22 = divide_hor(imgData, coor1)
    region = integrate_lines(coor5, coor22, imgData.shape[1], imgData.shape[0])

    # Remove binding / center split lines (does not touch question content).
    imgData1 = detect_vertical_line3(imgData.copy())
    resultFinal = get_items_v02(region, imgData1.copy(), coorCC, name)

    if desPath:
        with open(desPath[:-4] + '.json', 'w') as f:
            json.dump(resultFinal, f)

    return resultFinal
# Example 4
# 0
def get_items_from_region(coorRegion, imageRegion, coorCCRegion, itemDir,
                          suffix, idGraph):
    """Extract text and graph items from one question region.

    OCRs the region image, then iteratively separates, fuses and sorts word
    boxes and connected-component (graph) boxes using spatial indexes, and
    writes each graph crop to disk.

    Args:
        coorRegion: The coordinate of region.
        imageRegion: The image of region.
        coorCCRegion: The coordinate of CC in region.
        itemDir: Directory where cropped graph images are written.
        suffix: Image form suffix.
        idGraph: The id of graph in paper.

    Returns:
        A dic of question result.

    """
    # "xiaoti" is Chinese for "sub-question": the result dict for this region.
    xiaoti = {}
    bdResult = {}
    bdResult = bd_rec(imageRegion, client, imgForm=suffix, api='accurate')
    title = ''
    if "words_result" in bdResult and len(
            bdResult["words_result"]) > 0:  # guard against a bad OCR response
        title = bdResult["words_result"][0]["words"]
    # i = 0
    # if "words_result" in bdResult:
    #     while i < len(bdResult["words_result"]) and \
    #         not title.endswith(('。', '?', ')')):
    #         title += bdResult["words_result"][i]["words"]
    #         i += 1

    xiaoti["title"] = title
    xiaoti["location"] = coor2dic(coorRegion)
    xiaoti["content"] = []

    # Special titles: treat everything under the title line as one big graph.
    if is_special(title):
        locationOfTitle = bdResult["words_result"][0]["location"]
        boxOfGraph = (10, dic2coor(locationOfTitle)[3] + 10,
                      coorRegion[2] - coorRegion[0] - 10,
                      coorRegion[3] - coorRegion[1] - 10)
        itemText = {}
        itemText["id"] = 0
        itemText["location"] = locationOfTitle
        itemText["type"] = "text"
        itemText["value"] = title
        xiaoti["content"].append(itemText)

        itemGraph = {}
        itemPath = os.path.join(itemDir, str(idGraph) + '.png')
        cv2.imwrite(
            itemPath, imageRegion[boxOfGraph[1]:boxOfGraph[3],
                                  boxOfGraph[0]:boxOfGraph[2]])
        itemGraph["id"] = 1
        itemGraph["location"] = coor2dic(boxOfGraph)
        itemGraph["type"] = "graph"
        itemGraph["value"] = itemPath
        xiaoti["content"].append(itemGraph)
        return xiaoti

    # Collect (index, (x, y, w, h)) boxes for every recognized word line.
    wordBoxes = []
    if "words_result" in bdResult:
        for j, w in enumerate(bdResult["words_result"]):
            wordBoxes.append((j, dic2length(w["location"])))

    graphBoxes = []

    # Drop connected components fully contained in a (padded) word box.
    for j, b in wordBoxes:
        # print(j,b)
        b = expand_box(b)
        coorCCRegion = [c for c in coorCCRegion if not is_within(c, b)]

    # Keep sufficiently large CCs as graph candidates; drop word boxes that
    # fall inside a (padded) graph box. `n` numbers graph/mixed boxes.
    n = 0
    for j, c in enumerate(coorCCRegion):
        # if c[2] > heightOfLine*3 or c[3] > heightOfLine*1.5:
        if c[2] > heightOfLine * 0.5 or c[3] > heightOfLine * 0.5:
            graphBoxes.append((n, c))
            n += 1
            c = expand_box(c)
            wordBoxes = [it for it in wordBoxes if not is_within(it[1], c)]

    # Word boxes much wider than their text length are text+graph mixtures;
    # the deletion index compensates for items already removed.
    mixBoxes = []
    for j, w in enumerate(wordBoxes.copy()):
        if w[1][2] > len(
                bdResult["words_result"][w[0]]["words"]) * heightOfLine * 1.5:
            del (wordBoxes[j - len(mixBoxes)])
            mixBoxes.append((n, w[1]))
            n += 1

    # Fuses image
    graphBoxes += mixBoxes
    # Spatial index over the remaining (narrow) word boxes.
    idxWords = index.Index()
    for j, b in enumerate(wordBoxes):
        if b[1][2] < heightOfLine * 5:
            idxWords.insert(b[0], length2coor(b[1]))
    wordBoxes = [b for b in wordBoxes if b[1][2] >= heightOfLine * 5]

    # Merge each graph box with any word boxes it (padded) intersects.
    count = 0
    idDeleted = []
    for j, b in graphBoxes.copy():
        bEx = expand_box(b)
        items = list(idxWords.intersection(length2coor(bEx), objects=True))
        if len(items) > 0:
            boxes = [num2int(i.bbox) for i in items] + [length2coor(b)]
            box = fuse_box(boxes)
            for i, it in enumerate(items):
                # del(wordBoxes[it.id-i-len(idDeleted)])
                idxWords.delete(it.id, it.bbox)
                idDeleted.append(it.id)
            del (graphBoxes[j - count])
            count += 1
            graphBoxes.append((n, box))
            n += 1
    # Pull the surviving word boxes back out of the spatial index.
    if len(idxWords.leaves()[0][1]) > 0:
        wordItems = list(
            idxWords.intersection(idxWords.get_bounds(), objects=True))
        wordBoxes += [(i.id, coor2length(num2int(i.bbox))) for i in wordItems]

    # Index the graph boxes, then fuse mutually-overlapping graphs.
    idxGraphs = index.Index()
    for j, b in enumerate(graphBoxes):
        idxGraphs.insert(j, length2coor(b[1]))

    count = 0
    for j, b in enumerate(graphBoxes.copy()):
        bEx = expand_box(b[1])
        items = list(idxGraphs.intersection(length2coor(bEx), objects=True))
        if len(items) > 0:
            boxes = [num2int(i.bbox) for i in items] + [length2coor(b[1])]
            box = fuse_box(boxes)
            for i, it in enumerate(items):
                # if it.id-i not in range(0, len(graphBoxes)):
                #     print(it.id-i)
                #     print(graphBoxes)
                #     continue
                idxGraphs.delete(it.id, it.bbox)
            idxGraphs.insert(len(graphBoxes) + count, length2coor(box))
            count += 1
            # graphBoxes.append((n, box))
            # n += 1
    # print(idxGraphs.get_bounds())
    # print(idxGraphs.leaves())
    # exit(0)
    # Read the fused graph boxes back out of the index.
    if len(graphBoxes) > 0:
        graphItems = list(
            idxGraphs.intersection(idxGraphs.get_bounds(), objects=True))
        graphBoxes = [(i.id, coor2length(num2int(i.bbox))) for i in graphItems]

    # Drop word boxes swallowed by a graph, and very tall/narrow ones.
    for j, b in graphBoxes:
        bEx = expand_box(b)
        wordBoxes = [
            it for it in wordBoxes
            if not is_within(it[1], bEx) and it[1][3] < it[1][2] * 1.5
        ]

    # Combine words (type 0) and graphs (type 1) into one spatial index and
    # repeatedly fuse any overlapping items until a fixed point is reached.
    items = [(it, 0) for it in wordBoxes] + [(it, 1) for it in graphBoxes]
    idxItems = index.Index()
    for i, it in enumerate(items):
        idxItems.insert(it[0][0], length2coor(it[0][1]), obj=it[1])

    count = 0
    # flag = False
    while True:
        tempItems = []
        for it in items:
            itemInter = list(
                idxItems.intersection(length2coor(it[0][1]), objects=True))
            if len(itemInter) > 1:
                boxes = [num2int(i.bbox) for i in itemInter]
                box = fuse_box(boxes)
                for it1 in itemInter:
                    idxItems.delete(it1.id, it1.bbox)
                idxItems.insert(len(items) + count, length2coor(box), obj=1)
                count += 1
                itemExs = list(
                    idxItems.intersection(idxItems.get_bounds(), objects=True))
                tempItems = [((i.id, coor2length(num2int(i.bbox))), i.object)
                             for i in itemExs]
                break
        if len(tempItems) > 0:
            items = tempItems.copy()
        else:
            itemExs = list(
                idxItems.intersection(idxItems.get_bounds(), objects=True))
            items = [((i.id, coor2length(num2int(i.bbox))), i.object)
                     for i in itemExs]
            break

    # Sort
    items.sort(key=take_key)

    # Emit the final content list; graph crops are written under itemDir.
    g = 0
    for j, it in enumerate(items):
        c = length2coor(it[0][1])
        item = {}
        item["id"] = j
        item["location"] = coor2dic(c)
        if it[1] == 0:
            item["type"] = "text"
            words = bdResult["words_result"][it[0][0]]["words"]
            item["value"] = words
        elif it[1] == 1:
            item["type"] = "graph"
            itemPath = os.path.join(itemDir, str(idGraph + g) + '.png')
            cv2.imwrite(itemPath, imageRegion[c[1]:c[3], c[0]:c[2]])
            item["value"] = itemPath
            g += 1
        xiaoti["content"].append(item)
    return xiaoti
# Example 5
# 0
def deal_one_page(srcPath, desPath='', charsOnly=False, rectification=False):
    """Process a paper image.

    Args:
        srcPath: File path of image.
        desPath: Destination path; when set the result is dumped to JSON.
        charsOnly: Whether only return chars coordinate result.
        rectification: Whether rectify image.

    Returns:
        Final result dict, the character coordinates when ``charsOnly`` is
        True, or None when the image cannot be read.
    """
    global heightOfLine
    coor5 = []
    bdResult = {}
    ratio = 0  # 0 means "no resize needed" (image already at target width)

    name = os.path.basename(srcPath)
    if not os.path.exists(srcPath):
        print("Image path not exists!")
        return None
    try:
        imgBgr = cv2.imread(srcPath)
        if rectification:
            imgBgr = rectify(imgBgr.copy())
        imgData = cv2.cvtColor(imgBgr, cv2.COLOR_BGR2GRAY)
    except Exception as imageError:
        print(imageError, 'Could not read the image file, skip...')
        # Bail out: without imgData every step below would raise NameError.
        return None

    # A page taller than it is wide is treated as single-column.
    ori_size = imgData.shape[0:2]
    single_column = ori_size[0] > ori_size[1]

    # Scale to a fixed width: 4000 px for two columns, 2000 px for one.
    if single_column and ori_size[1] != 2000:
        ratio = 2000 / ori_size[1]
    elif not single_column and ori_size[1] != 4000:
        ratio = 4000 / ori_size[1]

    if ratio:
        imgData = resize(imgData, x=ratio)
    new_size = imgData.shape[0:2]

    # Remove vertical lines longer than ~100 px (may remove some content).
    imgEliVer = eli_ver(imgData.copy())

    # Vertical (column) division of the page.
    coor1 = divide_ver(imgEliVer)

    # Line height = median of all line heights; cached in the module global.
    if heightOfLine == 0:
        coor2 = divide_hor(imgEliVer, coor1)
        heightOfLines = [c[3] - c[1] for c in coor2]
        heightOfLine = median(heightOfLines)

    # Extract connected components, then erase the large ones before OCR.
    imgInv = imu.preprocess_bw_inv(imgData.copy())
    cv2.imwrite('imgInv.jpg', imgInv)
    coorCC = get_no_intersect_boxes('imgInv.jpg')
    imgEliCC = eli_large_cc(coorCC, imgData.copy(), heightOfLine)
    os.remove('imgInv.jpg')

    # Remove vertical lines again on the CC-erased image.
    imgEli = eli_ver(imgEliCC)

    # Horizontal (line) division.
    coor2 = divide_hor(imgEli, coor1)

    # First few characters of each line.
    coor3 = find_char(imgEli, coor2)

    if charsOnly:
        # Map character boxes back to the original image scale.
        # ratio == 0 means the image was never resized, i.e. scale 1
        # (the original divided by ratio and crashed in that case).
        scale = ratio or 1
        coor_chars = []
        for l in coor3:
            coor_chars.append([
                length2coor(list(map(lambda x: int(x / scale),
                                     coor2length(c)))) for c in l
            ])
        return coor_chars

    # SVM classification of the character images.
    coor4 = svm_classify(imgEli, coor3)

    # Redo char detection + SVM on a binarized image to recover misses.
    imgBw = imu.preprocess_bw(imgEli, boxSize=(4, 4), morph=False)
    coor3Bw = find_char(imgBw, coor2)
    coor4Bw = svm_classify(imgBw, coor3Bw)
    coor5 = update_result(coor4, coor4Bw)

    # Baidu OCR.
    try:
        bdResult = bd_rec(imgEliCC,
                          client,
                          imgForm=srcPath[-4:],
                          api='general')
    except Exception as e:
        print(
            'Baidu recognition error, check your internet connection. exit...')
        exit(0)

    # Extra-credit ("additional") questions are appended as a type-2 line.
    addResult = additional_questions(bdResult)
    if addResult:
        coor5.append(([addResult], 2))

    # Self-assessment section: blank it out before re-dividing lines.
    sa = detect_self_assessment(bdResult)
    if sa:
        imgData[sa:, coor1[1][0]:coor1[1][1] + 10] = 255
    coor22 = divide_hor(imgData, coor1)

    # Build question regions.
    region = integrate_lines(coor5, coor22, new_size)

    # Remove binding / center split lines (does not touch question content).
    imgData1 = detect_vertical_line3(imgData.copy())

    # Export the final result.
    resultFinal = get_items_v02(region, imgData1.copy(), coorCC, name)
    if desPath:
        with open(desPath[:-4] + '.json', 'w') as f:
            json.dump(resultFinal, f)

    return resultFinal
# Example 6
# 0
def testpaper(file_path, save_path):
    """Process a paper image and export its question-region positions.

    Args:
        file_path: File path of image.
        save_path: Destination directory for the JSON result and the marked
            image.

    Returns:
        The list of question-region coordinates (original scale), or None
        when the image cannot be read or division fails.
    """
    name = os.path.basename(file_path)
    image = cv2.imread(file_path)
    if image is None:
        print('ImageError: Could not read the image file, exit...')
        return None

    image_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    # A page taller than it is wide is treated as single-column.
    ori_size = image_gray.shape[0:2]
    single_column = ori_size[0] > ori_size[1]

    # Scale to a fixed width: 4000 px for two columns, 2000 px for one.
    if single_column and ori_size[1] != 2000:
        ratio = 2000 / ori_size[1]
    elif not single_column and ori_size[1] != 4000:
        ratio = 4000 / ori_size[1]
    else:
        ratio = 0

    if ratio:
        image_gray_scale = resize(image_gray, x=ratio)
    else:
        # Already at the target width: use the image as-is (the original
        # left image_gray_scale undefined here, raising NameError).
        image_gray_scale = image_gray
    new_size = image_gray_scale.shape[0:2]

    img_bw_inv = imu.preprocess_bw_inv(image_gray_scale)
    img_bw = imu.preprocess_bw(image_gray_scale)
    img_bw2 = imu.preprocess_bw(image_gray_scale, boxSize=(5, 5))

    # Remove vertical lines longer than ~100 px (may remove some content).
    imgEliVer = eli_ver(img_bw2)

    # Vertical (column) division of the page.
    coor1 = divide_ver(imgEliVer)
    if not coor1:
        print('coor1: {}'.format(coor1))
        print("Can't divide vertically, skip...")
        return None

    # Line height = median of all line heights.
    coor2 = divide_hor(imgEliVer, coor1)
    if coor2:
        heightOfLines = [c[3] - c[1] for c in coor2]
        heightOfLine = median(heightOfLines)
    else:
        print("Can't divide by line, skip...")
        return None

    # Extract connected components, then erase the large ones before OCR.
    imgInv = imu.preprocess_bw_inv(img_bw2.copy())
    cv2.imwrite('imgInv.jpg', imgInv)
    coorCC = get_no_intersect_boxes('imgInv.jpg')
    imgEliCC = eli_large_cc(coorCC, img_bw2.copy(), heightOfLine)
    os.remove('imgInv.jpg')

    # Remove vertical lines again on the CC-erased image.
    imgEli = eli_ver(imgEliCC)

    # Horizontal (line) division.
    coor2 = divide_hor(imgEli, coor1)

    # First few characters of each line.
    coor3 = find_char(img_bw, coor2)

    # SVM classification of the character images.
    coor4 = svm_classify(img_bw, coor3)

    # Redo char detection + SVM on a binarized image to recover misses.
    imgBw = imu.preprocess_bw(imgEli, boxSize=(4, 4), morph=False)
    coor3Bw = find_char(imgBw, coor2)
    coor4Bw = svm_classify(imgBw, coor3Bw)
    coor5 = update_result(coor4, coor4Bw)

    # Baidu OCR API client.
    client = bd_access()
    try:
        bdResult = bd_rec(imgEliCC, client, imgForm=name[-4:], api='general')
    except Exception as e:
        print(e, 'Baidu recognition error, exit...', sep='\n')
        return None

    # Extra-credit ("additional") questions are appended as a type-2 line.
    addResult = additional_questions(bdResult)
    if addResult:
        coor5.append(([addResult], 2))

    # Self-assessment section: blank it out before re-dividing lines.
    sa = detect_self_assessment(bdResult)
    if sa:
        image_gray_scale[sa:, coor1[1][0]:coor1[1][1] + 10] = 255
    coor22 = divide_hor(image_gray_scale, coor1)

    # Build question regions, then map them back to the original scale.
    # ratio == 0 means the image was never resized, i.e. scale 1 (the
    # original divided by ratio and crashed in that case).
    region = integrate_lines(coor5, coor22, new_size)
    scale = ratio or 1
    region = [
        length2coor(list(map(lambda x: int(x / scale), coor2length(r))))
        for r in region
    ]

    # Tighten each region to its content and tint its background on the
    # output image.
    region_close = []
    for r in region:
        if r[2] <= r[0] or r[3] <= r[1]:
            continue
        border = imu.strip_white_boder2(image_gray[r[1]:r[3], r[0]:r[2]])
        r = num2int([
            border[0] + r[0], border[1] + r[1], border[2] + r[0],
            border[3] + r[1]
        ])
        region_close.append(r)
        background = np.zeros((r[3] - r[1], r[2] - r[0], 3), np.uint8)
        background[:] = (200, 170, 150)
        image[r[1]:r[3],
              r[0]:r[2]] = cv2.addWeighted(image[r[1]:r[3], r[0]:r[2]], 0.7,
                                           background, 0.3, 0)

    dic_result = {"positions": region_close}
    print(f'region: {region_close}')
    print('output to directory {}'.format(save_path))
    with open(save_path + '/' + name.replace('.jpg', '.json'), 'w') as f:
        json.dump(dic_result, f)
    cv2.imwrite(os.path.join(save_path, name), image)
    return region