def main():
    """
    Batch driver: for each RICO screenshot index, load the detected blocks and
    the filtered layout tree, segment the tree into block-aligned subtrees,
    optionally visualize, and save the segments as JSON.
    """
    save = True
    show = False
    bad_num = 0    # count of tree json files that failed to parse
    none_tree = 0  # count of trees that loaded as None
    num = 0        # count of successfully processed screens
    start = 19558  # start point
    end = 100000
    img_root = 'E:\\Mulong\\Datasets\\gui\\rico\\combined\\all\\'
    block_root = 'E:\\Mulong\\Datasets\\gui\\rico\\subtree\\rico-block\\json\\'
    tree_root = 'E:\\Mulong\\Datasets\\gui\\rico\\subtree\\rico-tree-filtered\\widget-layout\\'
    subtree_root = 'E:\\Mulong\\Datasets\\gui\\rico\\subtree\\rico-subtree\\widget-layout\\'

    for index in range(start, end):
        img_path = img_root + str(index) + '.jpg'
        block_path = block_root + str(index) + '.json'
        tree_path = tree_root + str(index) + '.json'
        subtree_path = subtree_root + str(index) + '.json'
        # skip indices for which either input artifact is missing
        if not os.path.exists(block_path) or not os.path.exists(tree_path):
            continue
        # Fix: time.clock() was removed in Python 3.8; perf_counter() is the replacement
        start_time = time.perf_counter()

        img, _ = pre.read_img(img_path, resize_height=2560)
        blocks = Block.load_blocks(block_path)
        # blocks were detected at height 800; scale them up to the 2560-high image
        resize_block(blocks, det_height=800, tgt_height=2560, bias=0)
        board_block = draw.draw_bounding_box(img, blocks, line=5)
        try:
            tree = Tree.load_tree(tree_path)
        # Fix: was a bare `except:`; keep the best-effort skip but do not swallow
        # SystemExit/KeyboardInterrupt
        except Exception:
            bad_num += 1
            print('*** F**k Json: %d %s ***' % (bad_num, tree_path))
            continue
        if tree is not None:
            segments = segment_subtree(blocks, tree, img)
        else:
            none_tree += 1
            print('*** Tree is None: %d %s ***' % (none_tree, tree_path))
            continue

        if show:
            board_tree = np.full((2560, 1440, 3), 255, dtype=np.uint8)  # used for draw new labels
            Tree.draw_tree(tree, board_tree, 0)
            cv2.imshow('block', cv2.resize(board_block, (300, 500)))
            cv2.imshow('tree', cv2.resize(board_tree, (300, 500)))
            cv2.waitKey()
            cv2.destroyAllWindows()
            Tree.view_segments(segments, img)

        if save:
            # Fix: use a with-statement so the file handle is closed even if
            # json.dump raises (was an unclosed open())
            with open(subtree_path, 'w') as jfile:
                json.dump(segments, jfile, indent=4)

        print('[%.3fs]: %d %s' % (time.perf_counter() - start_time, num, img_path))
        num += 1
def check_subtree(block, tree, org, test=False):
    '''
    Recursively collect the largest subtrees of `tree` that are contained in `block`.

    relation:
        -1 : block in tree
        0 : block, tree are not intersected
        1 : tree in block
        2 : block, tree are intersected
        3 : block and tree are same
    '''
    relation = block.relation(tree['bounds'])
    if test:
        # debug visualization: show the block and the tree side by side
        print(relation, block.put_bbox(), tree['bounds'])
        img = org.copy()
        board_test_blk = draw.draw_bounding_box(img, [block], line=5)
        board_test_tree = cv2.resize(img, (1440, 2560))
        Tree.draw_tree(tree, board_test_tree)
        cv2.imshow('tree-test', cv2.resize(board_test_tree, (300, 500)))
        cv2.imshow('blk-test', cv2.resize(board_test_blk, (300, 500)))
        cv2.waitKey()

    # This whole node fits inside the block (or matches it exactly): keep it whole.
    if relation in (1, 3):
        return [tree]
    # Disjoint: nothing on this branch can belong to the block.
    if relation == 0:
        return None
    # Intersected, or the block is nested inside this node: descend into children.
    if 'children' not in tree:
        return None
    collected = []
    for child in tree['children']:
        found = check_subtree(block, child, org)
        if isinstance(found, list):
            collected += found
    return collected if collected else None
def block_add_bkg(blocks, org, img_shape, show=False):
    """
    Detect background blocks in the regions not covered by `blocks` and return
    them prepended to the given block list.
    """
    # White board; paint every known block bbox black so the remaining white
    # area is the candidate background.
    board = np.full(img_shape[:2], 255, dtype=np.uint8)
    for blk_item in blocks:
        blk_item.block_fill_color(board, 0, flag='bbox')
    blocks_bkg = block_division(board, is_background=True)

    if show:
        ratio = 2

        def _shrink(image):
            # scale an image down by `ratio` for on-screen preview
            return cv2.resize(
                image, (int(image.shape[1] / ratio), int(image.shape[0] / ratio)))

        board_bkg = draw.draw_bounding_box(org, blocks_bkg, name='block_bkg')
        cv2.imshow('bkg_board', _shrink(board_bkg))
        cv2.imshow('filled_board', _shrink(board))
        cv2.waitKey()
    return blocks_bkg + blocks
def check_subtree(block, tree, test=False, org=None):
    '''
    Recursively collect the largest subtrees of `tree` contained in `block`.

    relation (from block.relation):
        -1 : a in b
        0 : a, b are not intersected
        1 : b in a
        2 : a, b are identical or intersected

    :param block: detected Block exposing relation(bounds) and put_bbox()
    :param tree: layout tree node: a dict with 'bounds' and optional 'children'
    :param test: if True, visualize the block and tree for debugging
    :param org: image to draw the debug visualization on (required when test=True).
                Added because the previous code referenced an undefined name `img`,
                which raised NameError whenever test=True.
    :return: list of contained subtree nodes, or None if nothing is contained
    '''
    relation = block.relation(tree['bounds'])
    if test:
        print(relation, block.put_bbox(), tree['bounds'])
        # Fix: draw on a copy of the caller-supplied image instead of the
        # undefined global `img`
        img = org.copy()
        board_test_blk = draw.draw_bounding_box(img, [block], line=5)
        board_test_tree = cv2.resize(img, (1440, 2560))
        Tree.draw_tree(tree, board_test_tree, 0)
        cv2.imshow('tree-test', cv2.resize(board_test_tree, (300, 500)))
        cv2.imshow('blk-test', cv2.resize(board_test_blk, (300, 500)))
        cv2.waitKey()

    # block contains tree or block and tree are same
    if relation == 1 or relation == 3:
        return [tree]
    # non-intersected
    elif relation == 0:
        return None
    # else search children
    else:
        if 'children' not in tree:
            return None
        subtrees = []
        for child in tree['children']:
            # recursion keeps test off for children (matches original behavior)
            subtree = check_subtree(block, child, org=org)
            if subtree is not None:
                subtrees += subtree
        if len(subtrees) > 0:
            return subtrees
        else:
            return None
# Driver loop (older variant): iterate RICO indices, load block detections and
# the layout tree, segment into subtrees, and optionally preview the boards.
# NOTE(review): relies on start/end/*_root/bad_num/show/segment_subtree being
# defined earlier in the file — presumably in an enclosing main; verify. The
# `if show:` block may continue beyond this chunk (no waitKey visible here).
for index in range(start, end):
    img_path = img_root + str(index) + '.jpg'
    block_path = block_root + str(index) + '.json'
    tree_path = tree_root + str(index) + '.json'
    subtree_path = subtree_root + str(index) + '.json'
    # skip indices with missing inputs
    if not os.path.exists(block_path) or not os.path.exists(tree_path):
        continue
    # NOTE(review): time.clock() was removed in Python 3.8 — should be perf_counter()
    start_time = time.clock()
    img, _ = pre.read_img(img_path, resize_height=2560)
    block_img = cv2.imread(block_root + str(index) + '_blk.png')
    blocks = Block.load_blocks(block_path)
    # blocks were detected at height 800; scale them to the 2560-high image
    resize_block(blocks, det_height=800, tgt_height=2560, bias=0)
    board_block = draw.draw_bounding_box(img, blocks, line=5)
    try:
        tree = Tree.load_tree(tree_path)
    # NOTE(review): bare except also traps SystemExit/KeyboardInterrupt — narrow it
    except:
        bad_num += 1
        print('*** F**k Json: %d %s ***' % (bad_num, tree_path))
        continue
    segments = segment_subtree(blocks, tree)
    if show:
        board_tree = np.full((2560, 1440, 3), 255, dtype=np.uint8)  # used for draw new labels
        Tree.draw_tree(tree, board_tree, 0)
        cv2.imshow('blk_img', cv2.resize(block_img, (300, 500)))
        cv2.imshow('block', cv2.resize(board_block, (300, 500)))
        cv2.imshow('tree', cv2.resize(board_tree, (300, 500)))
def compo_detection(input_img_path, output_root, uied_params=None,
                    resize_by_height=600, block_pad=4, classifier=None, show=False):
    """
    Detect UI components in a screenshot and write the results under output_root.

    :param input_img_path: path to the input screenshot
    :param output_root: directory for result images and json files
    :param uied_params: None for defaults, or a JSON string with keys
                        'param-grad', 'param-block', 'param-minarea'
    :param resize_by_height: target height the image is resized to before detection
    :param block_pad: padding used by block processing (currently unused here)
    :param classifier: optional dict of classifiers; 'Elements' is used if given
    :param show: if True, show intermediate boards via cv2 windows
    """
    if uied_params is None:
        uied_params = {'param-grad': 5, 'param-block': 5, 'param-minarea': 150}
    else:
        uied_params = json.loads(uied_params)
    print(uied_params)

    # Fix: time.clock() was removed in Python 3.8; perf_counter() is the replacement
    start = time.perf_counter()
    name = input_img_path.split('/')[-1][:-4]
    ip_root = file.build_directory(pjoin(output_root, "ip"))

    # *** Step 1 *** pre-processing: read img -> get binary map
    org, grey = pre.read_img(input_img_path, resize_by_height)
    binary = pre.binarization(org, grad_min=int(uied_params['param-grad']))

    # *** Step 2 *** element detection
    det.rm_line(binary, show=show)
    # det.rm_line_v_h(binary, show=show)
    uicompos = det.component_detection(binary)

    # *** Step 4 *** results refinement
    # uicompos = det.rm_top_or_bottom_corners(uicompos, org.shape)
    file.save_corners_json(pjoin(ip_root, name + '_all.json'), uicompos)
    # uicompos = det.merge_text(uicompos, org.shape)
    draw.draw_bounding_box(org, uicompos, show=show, name='no-merge')
    uicompos = det.merge_intersected_corner(uicompos, org)
    Compo.compos_update(uicompos, org.shape)
    Compo.compos_containment(uicompos)
    draw.draw_bounding_box(org, uicompos, show=show, name='no-nesting')

    # *** Step 5 *** nesting inspection: find compos nested inside others
    uicompos += nesting_inspection(org, grey, uicompos)
    uicompos = det.compo_filter(uicompos, min_area=int(uied_params['param-minarea']))
    Compo.compos_update(uicompos, org.shape)
    draw.draw_bounding_box(org, uicompos, show=show, name='ip-nesting',
                           write_path=pjoin(ip_root, 'result.jpg'))

    # *** Step 5 *** Image Inspection (kept disabled): recognize image -> remove
    # noise in image -> binarize with larger threshold and reverse -> rectangular
    # compo detection
    # if classifier is not None:
    #     classifier['Image'].predict(seg.clipping(org, uicompos), uicompos)
    #     draw.draw_bounding_box_class(org, uicompos, show=show)
    #     uicompos = det.rm_noise_in_large_img(uicompos, org)
    #     draw.draw_bounding_box_class(org, uicompos, show=show)
    #     det.detect_compos_in_img(uicompos, binary_org, org)
    #     draw.draw_bounding_box(org, uicompos, show=show)
    # if classifier is not None:
    #     classifier['Noise'].predict(seg.clipping(org, uicompos), uicompos)
    #     draw.draw_bounding_box_class(org, uicompos, show=show)
    #     uicompos = det.rm_noise_compos(uicompos)

    # *** Step 6 *** element classification: all category classification
    if classifier is not None:
        classifier['Elements'].predict(seg.clipping(org, uicompos), uicompos)
        draw.draw_bounding_box_class(org, uicompos, show=show, name='cls',
                                     write_path=pjoin(ip_root, 'result.jpg'))

    Compo.compos_update(uicompos, org.shape)
    draw.draw_bounding_box(org, uicompos, show=show, name='final',
                           write_path=pjoin(output_root, 'result.jpg'))
    file.save_corners_json(pjoin(ip_root, name + '.json'), uicompos)
    file.save_corners_json(pjoin(output_root, 'compo.json'), uicompos)
    seg.dissemble_clip_img_fill(pjoin(output_root, 'clips'), org, uicompos)

    print("[Compo Detection Completed in %.3f s] %s" % (time.perf_counter() - start, input_img_path))
    if show:
        cv2.destroyAllWindows()
def compo_detection(input_img_path, output_root, num=0, resize_by_height=600,
                    block_pad=4, classifier=None, show=False, write_img=True):
    """
    Detect UI components in a screenshot using block-based processing and save
    the results under output_root.

    :param input_img_path: path to the input screenshot (Windows-style path;
                           the name is taken after the last backslash)
    :param output_root: directory for result images and json files
    :param num: sequence number used only for the progress log line
    :param resize_by_height: target height the image is resized to before detection
    :param block_pad: padding applied when processing/erasing blocks
    :param classifier: optional dict of classifiers; 'Elements' is used if given
    :param show: if True, show intermediate boards via cv2 windows
    :param write_img: if True, write intermediate images under the ip directory
    """
    # Fix: time.clock() was removed in Python 3.8; perf_counter() is the replacement
    start = time.perf_counter()
    name = input_img_path.split('\\')[-1][:-4]
    ip_root = file.build_directory(pjoin(output_root, "ip"))

    # *** Step 1 *** pre-processing: read img -> get binary map
    org, grey = pre.read_img(input_img_path, resize_by_height)
    binary = pre.binarization(
        org, show=show,
        write_path=pjoin(ip_root, name + '_binary.png') if write_img else None)
    binary_org = binary.copy()  # untouched copy kept for (disabled) image inspection

    # *** Step 2 *** block processing: detect block -> calculate hierarchy -> detect components in block
    blocks = blk.block_division(
        grey, org, show=show,
        write_path=pjoin(ip_root, name + '_block.png') if write_img else None)
    blk.block_hierarchy(blocks)
    uicompos_in_blk = processing_block(org, binary, blocks, block_pad)

    # *** Step 3 *** non-block part processing: remove lines -> erase blocks from binary -> detect left components
    det.rm_line(binary, show=show)
    blk.block_bin_erase_all_blk(binary, blocks, block_pad)
    uicompos_not_in_blk = det.component_detection(binary)
    uicompos = uicompos_in_blk + uicompos_not_in_blk

    # *** Step 4 *** results refinement: remove top and bottom compos -> merge words into line
    uicompos = det.rm_top_or_bottom_corners(uicompos, org.shape)
    file.save_corners_json(pjoin(ip_root, name + '_all.json'), uicompos)
    uicompos = det.merge_text(uicompos, org.shape)
    draw.draw_bounding_box(org, uicompos, show=show)
    # uicompos = det.merge_intersected_corner(uicompos, org.shape)
    Compo.compos_containment(uicompos)
    # draw.draw_bounding_box(org, uicompos, show=show, write_path=pjoin(ip_root, name + '_ip.png') if write_img else None)

    # *** Step 5 *** Image Inspection (kept disabled): recognize image -> remove
    # noise in image -> binarize with larger threshold and reverse -> rectangular
    # compo detection
    # if classifier is not None:
    #     classifier['Image'].predict(seg.clipping(org, uicompos), uicompos)
    #     draw.draw_bounding_box_class(org, uicompos, show=show)
    #     uicompos = det.rm_noise_in_large_img(uicompos, org)
    #     draw.draw_bounding_box_class(org, uicompos, show=show)
    #     det.detect_compos_in_img(uicompos, binary_org, org)
    #     draw.draw_bounding_box(org, uicompos, show=show)
    # if classifier is not None:
    #     classifier['Noise'].predict(seg.clipping(org, uicompos), uicompos)
    #     draw.draw_bounding_box_class(org, uicompos, show=show)
    #     uicompos = det.rm_noise_compos(uicompos)

    # *** Step 6 *** element classification: all category classification
    if classifier is not None:
        classifier['Elements'].predict(seg.clipping(org, uicompos), uicompos)
        draw.draw_bounding_box_class(org, uicompos, show=show,
                                     write_path=pjoin(ip_root, name + '_cls.png'))

    # uicompos = det.compo_filter(uicompos, org)
    draw.draw_bounding_box(org, uicompos, show=show)
    file.save_corners_json(pjoin(ip_root, name + '.json'), uicompos)
    print("[Compo Detection Completed in %.3f s] %d %s" % (time.perf_counter() - start, num, input_img_path))

    # Record run time
    # Fix: use a with-statement so the handle is closed (was an unclosed open().write())
    with open('time.txt', 'a') as time_file:
        time_file.write(str(round(time.perf_counter() - start, 3)) + '\n')

    if show:
        cv2.destroyAllWindows()