Example #1
import math
import os

import numpy as np
import torch

# Project-specific helpers (crop_mode, detect, convert, recog_model,
# text_recognize, rotate_vertices) and globals (alphabetdict, modeldict,
# device, ratio_w, ratio_h) are assumed to be imported/defined elsewhere.


def recognize(shape, rotate_img, rotate_flag):
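    # For each supported mode, crop every detected region of that class and
    # run the corresponding CRNN recognizer on it.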
    for mode in ['handword', 'handnum', 'num', 'word', 'char', 'seal']:
        for i, item in enumerate(shape):
            if item['class'] == mode:
                print('*' * 30)
                print('mode:', mode)

                box = item['box']

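                # Rotate the image so this box becomes axis-aligned, then crop it out.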
                theta = crop_mode.find_min_rect_angle(box)
                tmp_img, vertice = crop_mode.rotate_img(
                    rotate_img, box, -theta / math.pi * 180)
                x_min, x_max, y_min, y_max = crop_mode.get_boundary(vertice)
                crop_img = tmp_img.crop(
                    (int(x_min), int(y_min), int(x_max), int(y_max)))

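                # If the page itself was not rotated, rotate the box 180 degrees (-pi)
                # about the image centre before reporting its coordinates.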
                if rotate_flag == 0:
                    width = rotate_img.width
                    height = rotate_img.height
                    center_x = (width - 1) / 2
                    center_y = (height - 1) / 2
                    new_vertice = np.zeros(box.shape)
                    new_vertice[:] = rotate_vertices(
                        box, -math.pi, np.array([[center_x], [center_y]]))
                    box = new_vertice

                str_box = []
                for site in box:
                    str_box.append(str(int(site)))
                print('box:', str_box)

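                # Build the mode-specific label converter and CRNN recognizer,
                # then load the matching weights on CPU.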
                alphabet = alphabetdict[mode]
                n_class = len(alphabet) + 1

                converter = convert.strLabelConverter(alphabet)
                now_model = recog_model.CRNN(class_num=n_class,
                                             backbone='resnet',
                                             pretrain=False)
                state_dict = torch.load(os.path.join(
                    '/home/flask_web/static/pths', modeldict[mode]),
                                        map_location=torch.device('cpu'))
                now_model.load_state_dict(state_dict=state_dict)
                now_model.to(device)
                now_model.eval()

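                # Recognize the cropped region and print the decoded text.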
                result = text_recognize(crop_img, now_model, converter, device,
                                        mode)
                print('result:', result)


def seal_detect(img, model, device):
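    # Detect seal regions, rotate them upright, and return their annotations
    # together with the raw detection boxes.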
    boxes = detect.detect(img, model, device)
    boxes = detect.adjust_ratio(boxes, ratio_w, ratio_h)

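    # Collect each seal's 8-point box and accumulate its minimum-rect angle.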
    orig_vertices = []
    theta = 0
    if boxes is not None and boxes.size:
        for box in boxes:
            box = np.array(box[:8])
            orig_vertices.append(box)
            theta += crop_mode.find_min_rect_angle(box)

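        # Rotate the whole image by the average angle so the seal boxes become
        # roughly axis-aligned.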
        orig_vertices = np.array(orig_vertices)
        theta /= len(boxes)
        tmp_img, vertices = crop_mode.rotate_allimg(img, orig_vertices,
                                                    -theta / math.pi * 180)

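        # Key each rotated box by its centre so the boxes can be sorted by position.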
        dict_centers = {}
        for i, vertice in enumerate(vertices):
            avg_x = int(crop_mode.averagenum(vertice[::2]))
            avg_y = int(crop_mode.averagenum(vertice[1::2]))
            dict_centers[str(avg_x) + ',' + str(avg_y)] = i

        centers = crop_mode.sort_centers(dict_centers, 1)

        # Each sorted centre maps back to the index of its original box.
        shape = []
        for center in centers:
            anno = {'box': orig_vertices[int(center[1])], 'class': 'seal'}
            shape.append(anno)

        return shape, boxes

        # Excerpt from the main detection routine (not part of seal_detect):
        # img_tmp, rotate_img, detect_model, seal_model, ratio_w and ratio_h
        # are prepared earlier in that routine.
        shape, seal_boxes = seal_detect(img_tmp, seal_model, device)

        boxes = detect.detect(img_tmp, detect_model, device)
        boxes = detect.adjust_ratio(boxes, ratio_w, ratio_h)

        plot_img = detect.plot_boxes(rotate_img, boxes)
        plot_img = detect.plot_boxes(plot_img, seal_boxes)
        plot_img.save('detected.jpg')
        print('detection result saved as:  detected.jpg')

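        # As in seal_detect: collect the text-box vertices and average their
        # minimum-rect angles.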
        orig_vertices = []
        theta = 0
        for box in boxes:
            box = np.array(box[:8])
            orig_vertices.append(box)
            theta += crop_mode.find_min_rect_angle(box)

        orig_vertices = np.array(orig_vertices)
        theta /= len(boxes)

        tmp_img, vertices = crop_mode.rotate_allimg(rotate_img, orig_vertices,
                                                    -theta / math.pi * 180)

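        # Sort the rotated text boxes by their centre coordinates.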
        dict_centers = {}
        for i, vertice in enumerate(vertices):
            avg_x = int(crop_mode.averagenum(vertice[::2]))
            avg_y = int(crop_mode.averagenum(vertice[1::2]))
            dict_centers[str(avg_x) + ',' + str(avg_y)] = i

        centers = crop_mode.sort_centers(dict_centers, 1)