Example #1
def view_detect_result_json(result_file_root,
                            img_file_root,
                            classifier=None,
                            show=True):
    '''
    Load detection-result JSON files and draw their bounding boxes on the
    matching screenshots; without a classifier every box is labelled 'ImageView'.
    (Assumes glob, pjoin, cv2, json, np and the project's seg / draw_bounding_box
    helpers are imported, and Windows-style '\\' path separators.)
    '''
    result_files = glob(pjoin(result_file_root, '*.json'))
    # result files are named '<index>_*.json'; sort them by their numeric index
    result_files = sorted(result_files,
                          key=lambda x: int(x.split('\\')[-1].split('_')[0]))
    print('Loading %d detection results' % len(result_files))
    for result_file in result_files:
        start_index = 0
        end_index = 100000
        index = result_file.split('\\')[-1].split('_')[0]

        if int(index) < start_index:
            continue
        if int(index) > end_index:
            break

        org = cv2.imread(pjoin(img_file_root, index + '.jpg'))
        print(index)
        compos = json.load(open(reslut_file, 'r'))['compos']
        bboxes = []
        for compo in compos:
            bboxes.append([(compo['column_min'], compo['row_min']),
                           (compo['column_max'], compo['row_max'])])

        if classifier is not None:
            classes = classifier.predict(seg.clipping(org, bboxes))
        else:
            classes = np.full(len(bboxes), 'ImageView')

        if show:
            draw_bounding_box(org, bboxes, classes, show=True)
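A minimal sketch of how the viewer above might be called; the two directory paths and the optional classifier (any object exposing a predict method, as used in the body) are placeholders rather than values from the original snippet.

# Hypothetical invocation -- folder names are placeholders.
# Detection results are expected as '<index>_*.json' files whose boxes are
# drawn on the matching '<index>.jpg' screenshot.
view_detect_result_json(result_file_root='data/output/ip',
                        img_file_root='data/input',
                        classifier=None,   # falls back to labelling every box 'ImageView'
                        show=True)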
Example #2
def processing_block(org, binary, blocks_corner, classifier):
    '''
    :param org: original image
    :param binary: binary map of original image
    :param blocks_corner: list of corners of blocks
                        [(top_left, bottom_right)]
                        -> top_left: (column_min, row_min)
                        -> bottom_right: (column_max, row_max)
    :param classifier: CNN classification model
    :return: boundaries of detected components in blocks;
                        [up, bottom, left, right]
                        -> up, bottom: list of [(column_index, min/max row border)]
                        -> left, right: list of [(row_index, min/max column border)]
             corners of detected components in blocks;
             corresponding classes of components;
    '''
    blocks_clip_org = seg.clipping(org, blocks_corner, shrink=3)
    blocks_clip_bin = seg.clipping(binary, blocks_corner, shrink=3)

    all_compos_boundary = []
    all_compos_corner = []
    all_compos_class = []
    for i in range(len(blocks_corner)):
        # *** Substep 1.1 *** pre-processing: get block information -> binarization
        block_corner = blocks_corner[i]
        # skip blocks that are just the screen's top or bottom bar
        if blk.block_is_top_or_bottom_bar(block_corner, org.shape):
            continue
        block_clip_org = blocks_clip_org[i]
        block_clip_bin = blocks_clip_bin[i]

        # *** Substep 1.2 *** object extraction: extract components boundary -> get bounding box corner
        compos_boundary = det.boundary_detection(block_clip_bin)
        compos_corner = det.get_corner(compos_boundary)

        # *** Substep 1.3 *** classification: clip components -> classify components
        compos_clip = seg.clipping(block_clip_org, compos_corner)
        compos_class = classifier.predict(compos_clip)

        # *** Substep 1.4 *** refining: merge overlapping components -> convert the corners to holistic value in entire image
        compos_corner, compos_class = det.merge_corner(compos_corner, compos_class)
        compos_corner = util.corner_cvt_relative_position(compos_corner, block_corner[0][0], block_corner[0][1])

        if len(compos_boundary) > 0:
            all_compos_boundary += compos_boundary
            all_compos_corner += compos_corner
            all_compos_class += compos_class
    return all_compos_boundary, all_compos_corner, all_compos_class
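Block and component corners throughout this example use the [(top_left, bottom_right)] format spelled out in the docstring, with top_left = (column_min, row_min). The sketch below is an illustrative, standalone rendering of what the final util.corner_cvt_relative_position step is expected to do (shift block-local corners back into full-image coordinates); it is not the project's own helper.

# Illustrative stand-in for util.corner_cvt_relative_position: offset corners
# detected inside a clipped block by the block's top-left position so they are
# expressed in full-image coordinates.
def shift_corners_to_image(compos_corner, col_offset, row_offset):
    shifted = []
    for (col_min, row_min), (col_max, row_max) in compos_corner:
        shifted.append(((col_min + col_offset, row_min + row_offset),
                        (col_max + col_offset, row_max + row_offset)))
    return shifted

# A block whose top-left corner sits at column 120, row 300:
print(shift_corners_to_image([((10, 5), (60, 40))], 120, 300))
# [((130, 305), (180, 340))]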
Example #3
def processing(org, binary, classifier, inspect_img=False):
    # *** Substep 2.1 *** object detection: get connected areas -> get boundary -> get corners
    compos_boundary = det.boundary_detection(binary)
    compos_corner = det.get_corner(compos_boundary)

    # *** Substep 2.2 *** classification: clip components -> classify components
    compos_clip = seg.clipping(org, compos_corner)
    compos_class = classifier.predict(compos_clip)

    # *** Substep 2.3 *** refining: merge overlapping components -> search components on background image
    compos_corner, compos_class = det.merge_corner(compos_corner, compos_class)
    if inspect_img:
        # 'processing' itself is passed in so that components can also be
        # searched for on the background image
        compos_corner, compos_class = det.compo_on_img(processing, org, binary, classifier,
                                                       compos_corner, compos_class)
    return compos_boundary, compos_corner, compos_class
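A sketch of driving this whole-screen variant; pre.read_img and pre.binarization are taken from the compo_detection examples below, while the input path and the classifier object are placeholders.

# Hypothetical driver -- 'screen.jpg' and classifier are placeholders, and the
# pre.* calls mirror the pre-processing used by compo_detection below.
org, grey = pre.read_img('screen.jpg', resize_by_height=600)
binary = pre.binarization(org)
boundaries, corners, classes = processing(org, binary, classifier, inspect_img=False)
print('%d components detected' % len(corners))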
Example #4
def compo_detection(input_img_path, output_root, uied_params=None,
                    resize_by_height=600, block_pad=4,
                    classifier=None, show=False):

    if uied_params is None:
        uied_params = {'param-grad':5, 'param-block':5, 'param-minarea':150}
    else:
        uied_params = json.loads(uied_params)
        print(uied_params)
    start = time.perf_counter()  # time.clock() was removed in Python 3.8
    name = input_img_path.split('/')[-1][:-4]
    ip_root = file.build_directory(pjoin(output_root, "ip"))

    # *** Step 1 *** pre-processing: read img -> get binary map
    org, grey = pre.read_img(input_img_path, resize_by_height)
    binary = pre.binarization(org, grad_min=int(uied_params['param-grad']))

    # *** Step 2 *** element detection
    det.rm_line(binary, show=show)
    # det.rm_line_v_h(binary, show=show)
    uicompos = det.component_detection(binary)

    # *** Step 3 *** results refinement
    # uicompos = det.rm_top_or_bottom_corners(uicompos, org.shape)
    file.save_corners_json(pjoin(ip_root, name + '_all.json'), uicompos)
    # uicompos = det.merge_text(uicompos, org.shape)
    draw.draw_bounding_box(org, uicompos, show=show, name='no-merge')
    uicompos = det.merge_intersected_corner(uicompos, org)
    Compo.compos_update(uicompos, org.shape)
    Compo.compos_containment(uicompos)
    draw.draw_bounding_box(org, uicompos, show=show, name='no-nesting')

    # *** Step 4 *** nesting inspection
    uicompos += nesting_inspection(org, grey, uicompos)
    uicompos = det.compo_filter(uicompos, min_area=int(uied_params['param-minarea']))
    Compo.compos_update(uicompos, org.shape)
    draw.draw_bounding_box(org, uicompos, show=show, name='ip-nesting', write_path=pjoin(ip_root, 'result.jpg'))

    # *** Step 5 *** Image Inspection: recognize image -> remove noise in image -> binarize with larger threshold and reverse -> rectangular compo detection
    # if classifier is not None:
    #     classifier['Image'].predict(seg.clipping(org, uicompos), uicompos)
    #     draw.draw_bounding_box_class(org, uicompos, show=show)
    #     uicompos = det.rm_noise_in_large_img(uicompos, org)
    #     draw.draw_bounding_box_class(org, uicompos, show=show)
    #     det.detect_compos_in_img(uicompos, binary_org, org)
    #     draw.draw_bounding_box(org, uicompos, show=show)
    # if classifier is not None:
    #     classifier['Noise'].predict(seg.clipping(org, uicompos), uicompos)
    #     draw.draw_bounding_box_class(org, uicompos, show=show)
    #     uicompos = det.rm_noise_compos(uicompos)

    # *** Step 6 *** element classification: all category classification
    if classifier is not None:
        classifier['Elements'].predict(seg.clipping(org, uicompos), uicompos)
        draw.draw_bounding_box_class(org, uicompos, show=show, name='cls', write_path=pjoin(ip_root, 'result.jpg'))

    Compo.compos_update(uicompos, org.shape)
    draw.draw_bounding_box(org, uicompos, show=show, name='final', write_path=pjoin(output_root, 'result.jpg'))
    file.save_corners_json(pjoin(ip_root, name + '.json'), uicompos)
    file.save_corners_json(pjoin(output_root, 'compo.json'), uicompos)
    seg.dissemble_clip_img_fill(pjoin(output_root, 'clips'), org, uicompos)

    print("[Compo Detection Completed in %.3f s] %s" % (time.clock() - start, input_img_path))
    if show:
        cv2.destroyAllWindows()
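A hypothetical call to this pipeline; the paths are placeholders, uied_params is passed as the JSON string the function decodes, and a classifier, when supplied, is expected to be a dict holding an 'Elements' model (as used in Step 6).

# Placeholder paths; omit uied_params to fall back to the built-in defaults.
compo_detection('data/input/screen.jpg', 'data/output',
                uied_params='{"param-grad": 5, "param-block": 5, "param-minarea": 150}',
                resize_by_height=600,
                classifier=None,
                show=False)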
Example #5
def compo_detection(input_img_path,
                    output_root,
                    num=0,
                    resize_by_height=600,
                    block_pad=4,
                    classifier=None,
                    show=False,
                    write_img=True):
    start = time.perf_counter()  # time.clock() was removed in Python 3.8
    name = input_img_path.split('\\')[-1][:-4]
    ip_root = file.build_directory(pjoin(output_root, "ip"))

    # *** Step 1 *** pre-processing: read img -> get binary map
    org, grey = pre.read_img(input_img_path, resize_by_height)
    binary = pre.binarization(
        org,
        show=show,
        write_path=pjoin(ip_root, name + '_binary.png') if write_img else None)
    binary_org = binary.copy()

    # *** Step 2 *** block processing: detect block -> calculate hierarchy -> detect components in block
    blocks = blk.block_division(
        grey,
        org,
        show=show,
        write_path=pjoin(ip_root, name + '_block.png') if write_img else None)
    blk.block_hierarchy(blocks)
    uicompos_in_blk = processing_block(org, binary, blocks, block_pad)

    # *** Step 3 *** non-block part processing: remove lines -> erase blocks from binary -> detect left components
    det.rm_line(binary, show=show)
    blk.block_bin_erase_all_blk(binary, blocks, block_pad)
    uicompos_not_in_blk = det.component_detection(binary)
    uicompos = uicompos_in_blk + uicompos_not_in_blk

    # *** Step 4 *** results refinement: remove top and bottom compos -> merge words into line
    uicompos = det.rm_top_or_bottom_corners(uicompos, org.shape)
    file.save_corners_json(pjoin(ip_root, name + '_all.json'), uicompos)
    uicompos = det.merge_text(uicompos, org.shape)
    draw.draw_bounding_box(org, uicompos, show=show)
    # uicompos = det.merge_intersected_corner(uicompos, org.shape)
    Compo.compos_containment(uicompos)
    # draw.draw_bounding_box(org, uicompos, show=show, write_path=pjoin(ip_root, name + '_ip.png') if write_img else None)

    # # *** Step 5 *** Image Inspection: recognize image -> remove noise in image -> binarize with larger threshold and reverse -> rectangular compo detection
    # if classifier is not None:
    #     classifier['Image'].predict(seg.clipping(org, uicompos), uicompos)
    #     draw.draw_bounding_box_class(org, uicompos, show=show)
    #     uicompos = det.rm_noise_in_large_img(uicompos, org)
    #     draw.draw_bounding_box_class(org, uicompos, show=show)
    #     det.detect_compos_in_img(uicompos, binary_org, org)
    #     draw.draw_bounding_box(org, uicompos, show=show)

    # if classifier is not None:
    #     classifier['Noise'].predict(seg.clipping(org, uicompos), uicompos)
    #     draw.draw_bounding_box_class(org, uicompos, show=show)
    #     uicompos = det.rm_noise_compos(uicompos)

    # *** Step 6 *** element classification: all category classification
    if classifier is not None:
        classifier['Elements'].predict(seg.clipping(org, uicompos), uicompos)
        draw.draw_bounding_box_class(org,
                                     uicompos,
                                     show=show,
                                     write_path=pjoin(ip_root,
                                                      name + '_cls.png'))

    # uicompos = det.compo_filter(uicompos, org)
    draw.draw_bounding_box(org, uicompos, show=show)
    file.save_corners_json(pjoin(ip_root, name + '.json'), uicompos)

    print("[Compo Detection Completed in %.3f s] %d %s" %
          (time.clock() - start, num, input_img_path))
    # Record run time
    open('time.txt', 'a').write(str(round(time.clock() - start, 3)) + '\n')
    if show:
        cv2.destroyAllWindows()
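This variant differs from Example #4 in that it runs the block-division branch (processing_block), takes a running index num used only for progress printing, and appends each run time to time.txt. Below is a sketch of batch usage; the glob pattern and output folder are placeholders, and Windows-style paths are used to match the '\\' split in the function body.

# Hypothetical batch driver -- paths are placeholders.
for i, img_path in enumerate(sorted(glob('data\\input\\*.jpg'))):
    compo_detection(img_path, 'data\\output', num=i,
                    classifier=None, show=False, write_img=True)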