def get_outold_parameters(outold_image_path):
    img = cv2.imread(outold_image_path)
    binary_img = get_binary(img, [125, 255])
    # OpenCV >= 4 returns (contours, hierarchy); the original three-value
    # unpacking only works on OpenCV 3.x
    contours, hierarchy = cv2.findContours(binary_img,
                                           cv2.RETR_EXTERNAL,
                                           cv2.CHAIN_APPROX_SIMPLE)
    cv2.drawContours(img, contours, -1, (255, 225, 0), thickness=-1)
    f_name = get_file_name(outold_image_path)
    cv2.imwrite(f_name + '_marked.png', img)
    return [[OUTOLD_PARAMETER_NAMES[0], len(contours)]]
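
The get_binary helper used above is never defined in these snippets. Judging by the [threshold, max_value] pairs the callers pass, it plausibly converts to grayscale and applies a fixed binary threshold; a minimal sketch under that assumption, not the project's actual implementation:

import cv2

def get_binary(img, my_threshold=(127, 255)):
    # assumed helper: grayscale conversion plus a fixed binary threshold,
    # matching the [threshold, max_value] pairs the callers pass in
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    _, binary = cv2.threshold(gray, my_threshold[0], my_threshold[1],
                              cv2.THRESH_BINARY)
    return binary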
Example No. 2
    def intersection_tables_ocr(self, table_coo, table_num,
                                highlight_readable_paras):
        print('I am working on intersection table OCR')
        bin_table = get_binary(self.image)
        table_lines = find_table(bin_table)
        region = find_text_region(table_lines, cv2.RETR_TREE)
        # read the text of each small table into a dictionary
        text_dict = {}
        for table in region[1:]:
            # sort the corner points and use the minimum coordinate as the
            # representative point of each small table
            represent_point = sorted(table.tolist())[0]
            width = abs(table[1] - table[2])[0]
            height = abs(table[0] - table[1])[1]
            table_region = self.image[represent_point[1]:(represent_point[1] +
                                                          height),
                                      represent_point[0]:(represent_point[0] +
                                                          width)]
            # relative_coo_point = [relative_x_value, relative_y_value,
            #                       relative_rec_width, relative_rec_height]
            relative_width = width / self.image_width
            relative_height = height / self.image_height
            o_coo_point = (np.add(represent_point,
                                  table_coo[table_num][:2])).tolist()
            relative_coo_point = (np.divide(o_coo_point,
                                            self.image_original_coo)).tolist()
            relative_coo_point.append(relative_width)
            relative_coo_point.append(relative_height)
            # get text from the table region, skipping crops that are empty,
            # larger than half the image, or too large for the soft margin
            small_table_height, small_table_width, dim = table_region.shape
            if small_table_height == 0 or small_table_width == 0:
                continue
            elif 2 * width * height > self.image_height * self.image_width:
                continue
            elif (height + self.soft_margin > self.image_height
                  or width + self.soft_margin > self.image_width):
                continue
            # print('table_region:{}'.format(table_region.shape))
            text = pytesseract.image_to_string(table_region)
            text_dict[tuple(relative_coo_point)] = text
            if text and highlight_readable_paras:
                # highlight the readable region on the overlay image
                cv2.rectangle(self.blank_image,
                              (o_coo_point[0] + self.soft_margin,
                               o_coo_point[1] + self.soft_margin),
                              (o_coo_point[0] + width - self.soft_margin,
                               o_coo_point[1] + height - self.soft_margin),
                              (0, 0, 255),
                              thickness=2)
        table_num += 1
        cv2.imwrite('blank_image.png', self.blank_image)
        # table_dicts.append(text_dict)
        return text_dict
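
find_text_region is another helper the snippets assume. The later examples unpack two return values (corner boxes and raw contours), and the corner arithmetic above (sorted(table.tolist())[0], table[1] - table[2]) suggests each box holds the four corner points of a region. A minimal sketch under those assumptions:

import cv2

def find_text_region(binary_img, retrieval_mode):
    # assumed helper: contour detection followed by minimum-area boxes;
    # each box is the set of 4 corner points that the callers sort and
    # subtract to recover a representative point, width and height
    contours, _ = cv2.findContours(binary_img, retrieval_mode,
                                   cv2.CHAIN_APPROX_SIMPLE)
    boxes = [cv2.boxPoints(cv2.minAreaRect(c)).astype(int)
             for c in contours]
    return boxes, contours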
    def get_table_text_dict(self, highlight_readable_paras=True):
        """Save the table text dicts in a list."""
        print('I am working on extracting GM2 information dictionary')

        def _draw_rectangle(_image, color):
            cv2.rectangle(_image, (table_locate_info[0], table_locate_info[1]),
                          (table_locate_info[0] + table_locate_info[2],
                           table_locate_info[1] + table_locate_info[3]),
                          color,
                          thickness=-1)

        table_num = 0
        table_dicts_group = []
        image_save_path = ''
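        # adjust contrast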
        contrast_img = cv2.addWeighted(self.image, 1.3, self.blank_image,
                                       1 - 1.3, 5)
        imgs, table_coo = read_table.extract_table_from_img(self.image_path)
        pure_table_image = np.zeros([
            self.image_height + self.soft_margin,
            self.image_width + self.soft_margin, 3
        ], self.image.dtype)
        os.remove(self.image_path)
        for table in imgs:
            if not intersection_lines_detection(table):
                table_locate_info = self.get_pure_table_region(
                    table_coo, table_num)

                # fill each small table region to prepare for big-table
                # contour detection
                _draw_rectangle(pure_table_image, color=(0, 0, 255))
                _draw_rectangle(self.covered_text_image, color=(0, 0, 0))

                table_num += 1
            else:
                table_num += 1
                continue

        # cv2.MORPH_OPEN is an operation flag, not a kernel shape; its value
        # happens to equal cv2.MORPH_ELLIPSE, so use the shape constant
        dilate_kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7, 7))
        dilate_image = cv2.dilate(pure_table_image,
                                  dilate_kernel,
                                  iterations=2)
        binary_table_region = get_binary(dilate_image, my_threshold=[45, 255])
        cv2.imwrite('binary_table_region.png', binary_table_region)
        table_edge_condition, table_region_contours = find_text_region(
            binary_table_region, cv2.RETR_EXTERNAL)
        print('I am working on tables OCR')
        covered_text_image = cv2.subtract(self.covered_text_image, self.image)
        for edge_condition in table_edge_condition:
            sorted_edge_condition = sorted(edge_condition.tolist())
            min_point = sorted_edge_condition[0]
            max_point = sorted_edge_condition[-1]
            cut_table = self.image[min_point[1]:max_point[1],
                                   min_point[0]:max_point[0]]
            covered_text_table = covered_text_image[min_point[1]:max_point[1],
                                                    min_point[0]:max_point[0]]
            c_x, c_y, c_z = cut_table.shape
            if c_x and c_y and c_z:
                table_ram = 'table_RAM.png'
                cv2.imwrite(table_ram, cut_table)
                min_max_points_group = find_min_max_points_group(
                    covered_text_table, min_point)
                iso_table_dict = self.ocr_detector(table_ram, self.blank_image,
                                                   self.soft_margin, min_point)
                if iso_table_dict:
                    grouped_small_tables = group_gerber_ocr_text(
                        min_max_points_group, iso_table_dict,
                        self.gerber_boxes)
                    table_dicts_group.append(
                        list(grouped_small_tables.values()))
                os.remove(table_ram)
        if highlight_readable_paras:
            print('highlight step 1')
            highlight_step_1 = cv2.addWeighted(contrast_img, 0.8,
                                               self.blank_image, 0.2, 3)
            f_name = get_file_name(self.gm_path)
            file_name = os.path.basename(f_name)
            f_extension = get_extension(self.image_path)
            tmp_file_path = '/data/fastprint/tmp-files'
            current_time = datetime.now().strftime('%Y%m%d%H%M%S')
            save_path = "{tmp_file_path}/{current_time}".format(
                tmp_file_path=tmp_file_path, current_time=current_time)
            # create the folder if it does not exist
            if not os.path.exists(save_path):
                os.mkdir(save_path)

            # path where the marked image is saved
            image_save_path = "{save_path}/{file_name}_Marked_{f_extension}".format(
                save_path=save_path,
                file_name=file_name,
                f_extension=f_extension)
            cv2.imwrite(image_save_path, highlight_step_1)
        return table_dicts_group, image_save_path
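
intersection_lines_detection decides which branch a candidate table takes; judging by its use, it reports whether a crop already contains crossing ruling lines (a real grid). A minimal sketch using morphological line extraction, which is an assumption rather than the project's implementation:

import cv2

def intersection_lines_detection(table_img, min_intersections=4):
    # assumed helper: extract long horizontal and vertical strokes and
    # report whether they cross, i.e. whether the crop holds a table grid
    gray = cv2.cvtColor(table_img, cv2.COLOR_BGR2GRAY)
    _, binary = cv2.threshold(gray, 0, 255,
                              cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)
    h_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (25, 1))
    v_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1, 25))
    horizontal = cv2.morphologyEx(binary, cv2.MORPH_OPEN, h_kernel)
    vertical = cv2.morphologyEx(binary, cv2.MORPH_OPEN, v_kernel)
    crossings = cv2.bitwise_and(horizontal, vertical)
    return cv2.countNonZero(crossings) >= min_intersections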
Example No. 4
    def get_table_text_dict(self, test_gerber=False, save_dict=False, highlight_readable_paras=None,
                            v_cut_save_path=None):
        """
        save table text dict in a list

        """
        print('I am working on extracting table information dictionary')
        table_num = 0
        table_loc = []
        table_dicts_group = []
        pure_text_dict_group = []
        # adjust contrast
        contrast_img = cv2.addWeighted(self.image, 1.3, self.blank_image, 1 - 1.3, 5)
        highlight_step_1 = self.blank_image.copy()
        gerber_file = get_file_type(self.image_path, test_gerber)
        if gerber_file:
            imgs, table_coo = read_table.extract_table_from_img(self.image_path)
            pure_table_image = np.zeros([self.image_height + self.soft_margin, self.image_width + self.soft_margin, 3],
                                        self.image.dtype)
            for table in imgs:
                if not intersection_lines_detection(table):
                    table_locate_info = self.get_pure_table_region(table_coo, table_num)
                    # fill each small table region to prepare for big-table contour detection
                    cv2.rectangle(pure_table_image, (table_locate_info[0], table_locate_info[1]),
                                  (table_locate_info[0] + table_locate_info[2],
                                   table_locate_info[1] + table_locate_info[3]),
                                  (0, 0, 255), thickness=-1)
                    table_loc.append(table_locate_info)
                    # cv2.rectangle(self.covered_text_image, (table_locate_info[0], table_locate_info[1]),
                    #               (table_locate_info[0] + table_locate_info[2],
                    #                table_locate_info[1] + table_locate_info[3]),
                    #               (0, 0, 0), thickness=-1)
                    table_num += 1
                else:
                    table_num += 1
                    continue
            # print(self.covered_text_image.shape)
            # f = open('4S7MD161A0_table_loc.pkl', 'wb')
            # pickle.dump(table_loc, f)
            # f.close()
            # cv2.imwrite('pure_pure_table.png', pure_table_image)

            # cv2.MORPH_OPEN is an operation flag, not a kernel shape; its value
            # happens to equal cv2.MORPH_ELLIPSE, so use the shape constant
            dilate_kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7, 7))
            dilate_image = cv2.dilate(pure_table_image, dilate_kernel, iterations=2)
            binary_table_region = get_binary(dilate_image, my_threshold=[45, 255])
            table_edge_condition, table_region_contours = find_text_region(binary_table_region, cv2.RETR_EXTERNAL)
            print('I am working on pure text OCR')
            o_img = self.image.copy()
            background_color = get_dominant_color(o_img)
            for edge_num in range(len(table_edge_condition)):
                # draw big table contours
                cv2.drawContours(o_img, table_edge_condition, edge_num, background_color, thickness=3)

            pure_text_dict_group = pure_text_region(o_img, background_color, self.blank_image)
            # cv2.imwrite('pure_table.png', binary_table_region)
            print('I am working on tables OCR')
            i = 0

            for edge_condition in table_edge_condition:
                sorted_edge_condition = sorted(edge_condition.tolist())
                min_point = sorted_edge_condition[0]
                max_point = sorted_edge_condition[-1]
                cut_table = self.image[min_point[1]:max_point[1], min_point[0]:max_point[0]]
                c_x, c_y, c_z = cut_table.shape
                if c_x and c_y and c_z:
                    table_ram = 'table_RAM' + str(i) + '.png'
                    # table_ram = 'table_RAM.png'
                    # print(table_ram)
                    cv2.imwrite(table_ram, cut_table)
                    i += 1
                    table_ram = ocr_preprocessed(table_ram)
                    iso_table_dict = self.ocr_detector(table_ram, self.blank_image, self.soft_margin, min_point)
                    if iso_table_dict:
                        table_dicts_group.append(iso_table_dict)
                    # os.remove(table_ram)
            if not table_dicts_group:
                print('no text detected')
                v_cut_detector(self.image, v_cut_save_path)
        else:
            print('I am not a gerber file, using OCR directly')
            self.soft_margin = 0
            try:
                iso_table_dict = self.ocr_detector(self.image_path, self.blank_image, self.soft_margin, [0, 0])
            except Exception as e:
                print('OCR server failed: {}. Using backup solution'.format(e))
                iso_table_dict = self.ocr_detector(self.image_path, self.blank_image, self.soft_margin, [0, 0],
                                                   ocr_type='you_dao')

            if iso_table_dict:
                table_dicts_group.append(iso_table_dict)
        table_dicts_group.extend(pure_text_dict_group)

        if save_dict:
            # save table dictionary in pkl file
            print('save dictionary into pickle file')
            f_name = get_file_name(self.image_path)
            f_extension = get_extension(self.image_path)
            file = open(f_name + f_extension + '.pkl', 'wb')
            print('save path is:{}'.format(f_name + f_extension + '.pkl'))
            table_save_as_list = text_dict2text_list(table_dicts_group)
            print(table_save_as_list)
            pickle.dump(table_save_as_list, file)
            file.close()
        if highlight_readable_paras:
            print('highlight step 1')
            highlight_step_1 = cv2.addWeighted(contrast_img, 0.8, self.blank_image, 0.2, 3)
            f_name = get_file_name(self.image_path)
            f_extension = get_extension(self.image_path)
            cv2.imwrite(f_name + '_Marked_' + f_extension, highlight_step_1)
        return table_dicts_group, highlight_step_1
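
ocr_preprocessed (used in the example above) takes an image path and returns a path that is then handed to the OCR detector. The exact preprocessing is not shown; a plausible minimal sketch, assuming upscaling plus Otsu binarization:

import cv2

def ocr_preprocessed(image_path):
    # assumed helper: upscale and binarize the table crop so Tesseract
    # sees cleaner glyphs, then write the result to a new file
    img = cv2.imread(image_path)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    gray = cv2.resize(gray, None, fx=2, fy=2,
                      interpolation=cv2.INTER_CUBIC)
    _, binary = cv2.threshold(gray, 0, 255,
                              cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    out_path = image_path.replace('.png', '_preprocessed.png')
    cv2.imwrite(out_path, binary)
    return out_path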
Example No. 5
    def get_table_text_dict(self,
                            save_dict_path=None,
                            save_dict=False,
                            highlight_readable_paras=None):
        """
        save table text dict in a list

        """
        print('I am working on extracting table information dictionary')
        table_num = 0
        table_dict = {}
        table_keys_list = []
        table_dicts_group = []
        # adjust contrast
        contrast_img = cv2.addWeighted(self.image, 1.3, self.blank_image,
                                       1 - 1.3, 5)
        imgs, table_coo = read_table.extract_table_from_img(self.image_path)
        highlight_step_1 = self.blank_image.copy()
        pure_table_image = np.zeros([
            self.image_height + self.soft_margin,
            self.image_width + self.soft_margin, 3
        ], self.image.dtype)
        print('I am working on OCR')
        for table in imgs:
            # print('working on region {}'.format(table_num))
            if not intersection_lines_detection(table):
                table_key, table_value = self.no_intersection_tables_ocr(
                    table, table_coo, table_num, highlight_readable_paras)
                cv2.rectangle(
                    pure_table_image, (table_key[0], table_key[1]),
                    (table_key[0] + table_key[2], table_key[1] + table_key[3]),
                    (0, 0, 255),
                    thickness=4)
                table_dict[tuple(table_key)] = table_value
                table_keys_list.append(table_key)
                table_num += 1
            else:
                table_num += 1
                continue
        # image_copy = self.image.copy()
        binary_table_region = get_binary(pure_table_image)
        table_edge_condition, table_region_contours = find_text_region(
            binary_table_region, cv2.RETR_EXTERNAL)
        # cv2.drawContours(image_copy, table_region_contours, -1, (0, 255, 0), 12)
        # cv2.imwrite('pure_table_region.png', image_copy)
        # cv2.imshow('pure table region', pure_table_image)
        # cv2.waitKey(0)
        # cv2.destroyAllWindows()
        # group the discrete small tables that belong to the same big table
        # table_edge_condition = get_iso_table_condition(table_keys_list)
        # print('return table text information dictionary')
        for edge_condition in table_edge_condition:
            sorted_edge_condition = sorted(edge_condition.tolist())
            represent_min_point = sorted_edge_condition[0]
            represent_max_point = sorted_edge_condition[-1]
            min_edge_x, min_edge_y = represent_min_point
            max_edge_x, max_edge_y = represent_max_point
            iso_table_dict = {}
            for key in table_keys_list:
                if (min_edge_x <= key[0] < max_edge_x
                        and min_edge_y <= key[1] < max_edge_y):
                    iso_table_dict[tuple(key)] = table_dict[tuple(key)]
            table_dicts_group.append(iso_table_dict)
        if save_dict:
            # save table dictionary in pkl file
            file = open(save_dict_path, 'wb')
            pickle.dump(table_dict, file)
            file.close()
        if highlight_readable_paras:
            print('highlight step 1')
            highlight_step_1 = cv2.addWeighted(contrast_img, 0.8,
                                               self.blank_image, 0.2, 3)
            # f_name = get_file_name(save_highlight_path)
            # f_extension = get_extension(save_highlight_path)
            # cv2.imwrite('test_highlight_step1_marked.png', highlight_step_1)
        return table_dicts_group, highlight_step_1
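
A rough usage sketch for these methods; the class name TableExtractor and its constructor are hypothetical, since the snippets only show individual methods:

import cv2

# hypothetical driver: TableExtractor stands in for whatever class these
# methods belong to; its name and constructor are assumptions
extractor = TableExtractor('gerber_page.png')
table_dicts_group, highlighted = extractor.get_table_text_dict(
    save_dict=False, highlight_readable_paras=True)
for iso_table_dict in table_dicts_group:
    for coords, text in iso_table_dict.items():
        print(coords, repr(text))
cv2.imwrite('highlighted.png', highlighted)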