Code example #1
def test():
    os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.gpu_list
    """ Load model """
    input_image = Input(shape=(None, None, 3), name='image', dtype=tf.float32)
    region, affinity = VGG16_UNet(input_tensor=input_image, weights=None)
    model = Model(inputs=[input_image], outputs=[region, affinity])
    model.load_weights(FLAGS.trained_model)
    """ For test images in a folder """
    image_list, _, _ = list_files(FLAGS.test_folder)

    t = time.time()
    """ Test images """
    for k, image_path in enumerate(image_list):

        print("Test image {:d}/{:d}: {:s}".format(k + 1, len(image_list),
                                                  image_path),
              end='\r')
        image = load_image(image_path)
        start_time = time.time()
        bboxes, score_text = predict(model, image, FLAGS.text_threshold,
                                     FLAGS.link_threshold, FLAGS.low_text)
        print("inference time: {:.1f} ms".format((time.time() - start_time) * 1000))

        # save score text
        filename, file_ext = os.path.splitext(os.path.basename(image_path))
        mask_file = result_folder + "/res_" + filename + '_mask.jpg'
        cv2.imwrite(mask_file, score_text)
        # save text
        saveResult(image_path,
                   image[:, :, ::-1],
                   bboxes,
                   dirname=result_folder)
        # save json
        saveJson(image_path, bboxes, dirname=result_folder)
    print("elapsed time : {}s".format(time.time() - t))
Code example #2
    def __getitem__(self, idx):
        fn = self.image_names[idx]
        image = load_image(os.path.join(self.images_dir, fn))
        label_name = self.label_names[idx]
        word_boxes, words = get_wordsList(os.path.join(self.labels_dir, label_name))
        char_boxes_list, affinity_boxes_list, confidence_list = self.get_affinity_boxes_list(image, word_boxes, words)
        height, width = image.shape[:2]  # OpenCV convention: shape is (height, width, channels)
        heat_map_size = (height, width)
        # get pixel-wise confidence map (scaled to 0-255 for an 8-bit score image)
        sc_map = self.get_sc_map(heat_map_size, word_boxes, confidence_list) * 255
        region_scores = self.get_region_scores(heat_map_size, char_boxes_list) * 255
        affinity_scores = self.get_region_scores(heat_map_size, affinity_boxes_list) * 255

        # convert the OpenCV BGR image to a PIL.Image in RGB order
        image = Image.fromarray(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
        # convert the numpy.ndarray score maps to PIL.Image
        region_scores = Image.fromarray(np.uint8(region_scores))
        affinity_scores = Image.fromarray(np.uint8(affinity_scores))
        sc_map = Image.fromarray(np.uint8(sc_map))
        if self.image_transform is not None:
            image = self.image_transform(image)

        if self.label_transform is not None:
            region_scores = self.label_transform(region_scores)
            affinity_scores = self.label_transform(affinity_scores)
            sc_map = self.label_transform(sc_map)
        return image, region_scores, affinity_scores, sc_map
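A minimal sketch of how this __getitem__ might be consumed, assuming the class is a torch.utils.data.Dataset subclass (the image_transform/label_transform hooks and the PIL conversions above point to torchvision-style transforms). The class name CraftDataset, its constructor arguments, and the transform sizes are placeholders, not taken from the excerpt:

import torchvision.transforms as T
from torch.utils.data import DataLoader

# Placeholder transforms: ToTensor() so the default collate_fn can batch samples.
image_transform = T.Compose([T.Resize((768, 768)), T.ToTensor()])
label_transform = T.Compose([T.Resize((384, 384)), T.ToTensor()])  # sizes are a guess

# Hypothetical construction; the real constructor signature is not shown above.
dataset = CraftDataset(images_dir="data/images", labels_dir="data/labels",
                       image_transform=image_transform,
                       label_transform=label_transform)
loader = DataLoader(dataset, batch_size=8, shuffle=True, num_workers=4)

for image, region_scores, affinity_scores, sc_map in loader:
    # image: input batch; region_scores / affinity_scores: character and affinity
    # heat maps; sc_map: pixel-wise confidence used to weight the training loss.
    pass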
Code example #3
def test():
    os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.gpu_list
    """ Load model """
    input_image = Input(shape=(None, None, 3), name='image', dtype=tf.float32)
    region, affinity = VGG16_UNet(input_tensor=input_image, weights=None)
    model = Model(inputs=[input_image], outputs=[region, affinity])
    model.load_weights(FLAGS.trained_model)
    """ For test images in a folder """
    gt_all_imgs = load_data(
        "/home/ldf/CRAFT_keras/data/CTW/gt.pkl")  # SynthText, CTW
    t = time.time()
    PPR_list = list()
    num_list = list()
    dif_list = list()
    """ Test images """
    for k, [image_path, word_boxes, words,
            char_boxes_list] in enumerate(gt_all_imgs):

        image = load_image(image_path)
        start_time = time.time()
        bboxes, score_text = predict(model, image, FLAGS.text_threshold,
                                     FLAGS.link_threshold, FLAGS.low_text)
        """ Compute single pic's PPR and num """
        PPR, single_num, diff = compute_PPR(bboxes, char_boxes_list)

        PPR_list.append(PPR)
        num_list.append(single_num)
        dif_list.append(np.mean(diff))

    print("elapsed time : {}s".format(time.time() - t))
    result, MPD = compute_final_result(PPR_list, num_list, dif_list)
    print("PPR", result)
    print("MPD", MPD)
Code example #4
    def init_sample(self, flag=False):
        for sample_mark in self.sample_mark_list:
            if self.fakes[sample_mark]:
                sample_list = self.train_sample_lists[sample_mark]
                new_sample_list = list()

                for sample in sample_list:
                    if len(sample) == 5:
                        img_path, word_boxes, words, _, _ = sample
                    else:
                        img_path, word_boxes, words, _ = sample
                    img = load_image(img_path)
                    char_boxes_list = list()
                    confidence_list = list()
                    for word_box, word in zip(word_boxes, words):
                        char_boxes, confidence = self.fake_char_boxes(img, word_box, len(word))
                        char_boxes_list.append(char_boxes)
                        confidence_list.append(confidence)
                    new_sample_list.append([img_path, word_boxes, words, char_boxes_list, confidence_list])

                self.train_sample_lists[sample_mark] = new_sample_list
            elif flag:
                sample_list = self.train_sample_lists[sample_mark]
                new_sample_list = list()

                for sample in sample_list:
                    if len(sample) == 5:
                        img_path, word_boxes, words, char_boxes_list, _ = sample
                    else:
                        img_path, word_boxes, words, char_boxes_list = sample
                    confidence_list = [1] * len(word_boxes)
                    new_sample_list.append([img_path, word_boxes, words, char_boxes_list, confidence_list])

                self.train_sample_lists[sample_mark] = new_sample_list
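init_sample rebuilds pseudo character boxes (via fake_char_boxes) and per-word confidences for weakly supervised datasets, and, when flag is True, also attaches unit confidences to fully annotated datasets. A plausible way to drive it from a training loop, assuming an instance of the class above; train_data, num_epochs and train_one_epoch are illustrative names, not from the excerpt:

# Hypothetical usage: attach unit confidences once up front, then refresh the
# pseudo ground truth each epoch as the interim model improves.
train_data.init_sample(flag=True)
for epoch in range(num_epochs):
    train_one_epoch(train_data)   # placeholder for the actual training step
    train_data.init_sample()      # regenerate fake char boxes and confidences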
Code example #5
File: test.py  Project: Yorwxue/Scene_Text_Detection
    result_folder = './result/'
    if not os.path.isdir(result_folder):
        os.mkdir(result_folder)

    # LinkRefiner
    refine_net = None
    # if args.refine:
    #     # TODO
    #     pass

    # load data
    for k, image_path in enumerate(image_list):
        print("Test image {:d}/{:d}: {:s}".format(k + 1, len(image_list),
                                                  image_path),
              end='\r')
        image = load_image(image_path)

        bboxes, polys, score_text = test_net(net, image, args.text_threshold,
                                             args.link_threshold,
                                             args.low_text, args.poly,
                                             refine_net)

        # save score text
        filename, file_ext = os.path.splitext(os.path.basename(image_path))
        mask_file = result_folder + "/%s_res_" % prefix_filename + filename + '_mask.jpg'
        cv2.imwrite(mask_file, score_text)

        saveResult(image_path,
                   image,
                   polys,
                   dirname=result_folder,