tf.app.flags.DEFINE_integer('class_num', 230, '')
tf.app.flags.DEFINE_string('test_data_path', '/path/to/your/testing images/', '')
tf.app.flags.DEFINE_string('test_gt_path', '/path/to/your/testing annotations/', '')
tf.app.flags.DEFINE_string('gpu_list', '0', '')
tf.app.flags.DEFINE_string('checkpoint_path', '/path/to/your/checkpoints/', '')
tf.app.flags.DEFINE_string('output_dir', 'outputs/', '')
tf.app.flags.DEFINE_bool('no_write_images', True, 'do not write images')
# tf.app.flags.DEFINE_bool('use_vacab', True, 'strong, normal or weak')

from module import Backbone_branch, RoI_rotate, classification_branch
from data_provider.data_utils import restore_rectangle

FLAGS = tf.app.flags.FLAGS

detect_part = Backbone_branch.Backbone(is_training=False)
roi_rotate_part = RoI_rotate.RoIRotate()
recognize_part = classification_branch.Recognition(is_training=False)
font = cv2.FONT_HERSHEY_SIMPLEX


def iou_cal(predict_box, gt_box):
    union = np.bitwise_or(predict_box, gt_box)
    inter = np.bitwise_and(predict_box, gt_box)
    # union = cv2.cvtColor(union, cv2.COLOR_GRAY2BGR)
    # inter = cv2.cvtColor(inter, cv2.COLOR_GRAY2BGR)
    contours_union, hierarchy_union = cv2.findContours(union, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    contours_inter, hierarchy_inter = cv2.findContours(inter, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
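
# The source cuts off inside iou_cal after the second findContours call. Below is a
# minimal, self-contained sketch of how such a mask-based IoU is typically finished,
# assuming the intent is to sum the external contour areas of the intersection and
# union masks and take their ratio. The helper name iou_from_masks and that exact
# continuation are assumptions, not taken from the source.
import cv2
import numpy as np

def iou_from_masks(predict_box, gt_box):
    # Both inputs are expected to be uint8 binary masks of the same shape.
    union = np.bitwise_or(predict_box, gt_box)
    inter = np.bitwise_and(predict_box, gt_box)
    contours_union, _ = cv2.findContours(union, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    contours_inter, _ = cv2.findContours(inter, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    # Assumed continuation: area of intersection over area of union, via contour areas.
    area_union = sum(cv2.contourArea(c) for c in contours_union)
    area_inter = sum(cv2.contourArea(c) for c in contours_inter)
    return area_inter / area_union if area_union > 0 else 0.0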
tf.app.flags.DEFINE_string('checkpoint_path', 'checkpoints/', '')
tf.app.flags.DEFINE_boolean('restore', False, 'whether to restore from checkpoint')
tf.app.flags.DEFINE_integer('save_checkpoint_steps', 1000, '')
tf.app.flags.DEFINE_integer('save_summary_steps', 100, '')
tf.app.flags.DEFINE_string('pretrained_model_path', None, '')

import icdar
# import synth
from module import Backbone_branch, Recognition_branch, RoI_rotate

FLAGS = tf.app.flags.FLAGS
# gpus = list(range(len(FLAGS.gpu_list.split(','))))

detect_part = Backbone_branch.Backbone(is_training=True)
roi_rotate_part = RoI_rotate.RoIRotate()
recognize_part = Recognition_branch.Recognition(is_training=True)


def build_graph(input_images, input_transform_matrix, input_box_masks, input_box_widths, input_seq_len):
    shared_feature, f_score, f_geometry = detect_part.model(input_images)
    pad_rois = roi_rotate_part.roi_rotate_tensor_pad(shared_feature, input_transform_matrix, input_box_masks, input_box_widths)
    recognition_logits = recognize_part.build_graph(pad_rois, input_box_widths)
    # _, dense_decode = recognize_part.decode(recognition_logits, input_box_widths)
    return f_score, f_geometry, recognition_logits
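
# Hedged usage sketch (not shown in the source): build_graph is a TF1-style graph
# builder, so a training script would typically call it once with placeholders and
# feed the returned tensors into the detection and recognition losses. The placeholder
# names and shapes below are illustrative assumptions, not taken from the source.
input_images = tf.placeholder(tf.float32, shape=[None, None, None, 3], name='input_images')
input_transform_matrix = tf.placeholder(tf.float32, shape=[None, 6], name='input_transform_matrix')
input_box_masks = [tf.placeholder(tf.int32, shape=[None], name='input_box_masks_0')]
input_box_widths = tf.placeholder(tf.int32, shape=[None], name='input_box_widths')
input_seq_len = tf.placeholder(tf.int32, shape=[None], name='input_seq_len')

f_score, f_geometry, recognition_logits = build_graph(
    input_images, input_transform_matrix, input_box_masks, input_box_widths, input_seq_len)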