def freezing_graph(config, checkpoint, output_dir):
  if not os.path.exists(output_dir):
    os.makedirs(output_dir)

  shape = (None,) + tuple(config.input_shape) # NHWC, dynamic batch
  graph = tf.Graph()
  with graph.as_default():
    with slim.arg_scope([slim.batch_norm, slim.dropout], is_training=False):
      # Input node for running inference on the frozen graph
      input_tensor = tf.placeholder(dtype=tf.float32, shape=shape, name='input')
      prob = inference(config.rnn_cells_num, input_tensor, config.num_classes)
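      # CTC decoding expects time-major input: (max_time, batch, num_classes)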
      prob = tf.transpose(prob, (1, 0, 2))
      data_length = tf.fill([tf.shape(prob)[1]], tf.shape(prob)[0])
      result = tf.nn.ctc_greedy_decoder(prob, data_length, merge_repeated=True)
      predictions = tf.to_int32(result[0][0])
      # Output node for running inference on the frozen graph
      tf.sparse_to_dense(predictions.indices, [tf.shape(input_tensor, out_type=tf.int64)[0], config.max_lp_length],
                         predictions.values, default_value=-1, name='d_predictions')
      init = tf.global_variables_initializer()
      saver = tf.train.Saver(write_version=tf.train.SaverDef.V2)

  sess = tf.Session(graph=graph)
  sess.run(init)
  saver.restore(sess, checkpoint)
  frozen = tf.graph_util.convert_variables_to_constants(sess, sess.graph_def, ["d_predictions"])
  tf.train.write_graph(sess.graph, output_dir, 'graph.pbtxt', as_text=True)
  path_to_frozen_model = graph_io.write_graph(frozen, output_dir, 'graph.pb.frozen', as_text=False)
  return path_to_frozen_model


def export(config, tfmo, batch_size=1, precision='FP32'):
    shape = (None, ) + tuple(config.input_shape)  # NHWC, dynamic batch
    graph = tf.Graph()
    with graph.as_default():
        with slim.arg_scope([slim.batch_norm, slim.dropout],
                            is_training=False):
            input_tensor = tf.placeholder(dtype=tf.float32,
                                          shape=shape,
                                          name='input')
            prob = inference(config.rnn_cells_num, input_tensor,
                             config.num_classes)
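            # CTC decoding expects time-major input: (max_time, batch, num_classes)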
            prob = tf.transpose(prob, (1, 0, 2))
            data_length = tf.fill([tf.shape(prob)[1]], tf.shape(prob)[0])
            result = tf.nn.ctc_greedy_decoder(prob,
                                              data_length,
                                              merge_repeated=True)
            predictions = tf.to_int32(result[0][0])
            tf.sparse_to_dense(predictions.indices, [
                tf.shape(input_tensor, out_type=tf.int64)[0],
                config.max_lp_length
            ],
                               predictions.values,
                               default_value=-1,
                               name='d_predictions')
            init = tf.global_variables_initializer()
            saver = tf.train.Saver(write_version=tf.train.SaverDef.V2)

    sess = tf.Session(graph=graph)
    sess.run(init)
    checkpoints_dir = config.model_dir
    latest_checkpoint = tf.train.latest_checkpoint(checkpoints_dir)
    saver.restore(sess, latest_checkpoint)
    frozen = tf.graph_util.convert_variables_to_constants(
        sess, sess.graph_def, ["d_predictions"])
    tf.train.write_graph(sess.graph,
                         os.path.join(config.model_dir, 'ie_model/'),
                         'graph.pbtxt',
                         as_text=True)
    path_to_frozen_model = graph_io.write_graph(frozen,
                                                os.path.join(
                                                    config.model_dir,
                                                    'ie_model/'),
                                                'graph.pb.frozen',
                                                as_text=False)
    execute_tfmo(tfmo, path_to_frozen_model, shape, batch_size, precision)


def validate(config):
  if hasattr(config.eval, 'random_seed'):
    np.random.seed(config.eval.random_seed)
    tf.set_random_seed(config.eval.random_seed)
    random.seed(config.eval.random_seed)

  if hasattr(config.eval.execution, 'CUDA_VISIBLE_DEVICES'):
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = config.train.execution.CUDA_VISIBLE_DEVICES

  height, width, channels_num = config.input_shape
  rnn_cells_num = config.rnn_cells_num

  graph = tf.Graph()
  with graph.as_default():
    with slim.arg_scope([slim.batch_norm, slim.dropout], is_training=False):
      inp_data, label_val, file_names = data_input(height, width, channels_num,
                                                   config.eval.file_list_path, batch_size=config.eval.batch_size)

      prob = inference(rnn_cells_num, inp_data, config.num_classes)
      prob = tf.transpose(prob, (1, 0, 2))  # prepare for CTC

      data_length = tf.fill([tf.shape(prob)[1]], tf.shape(prob)[0])  # input seq length, batch size

      result = tf.nn.ctc_greedy_decoder(prob, data_length, merge_repeated=True)

      predictions = tf.to_int32(result[0][0])
      d_predictions = tf.sparse_to_dense(predictions.indices,
                                         [tf.shape(inp_data, out_type=tf.int64)[0], config.max_lp_length],
                                         predictions.values, default_value=-1, name='d_predictions')

      init = tf.global_variables_initializer()
      saver = tf.train.Saver(write_version=tf.train.SaverDef.V2)

  # session
  conf = tf.ConfigProto()
  if hasattr(config.eval.execution, 'per_process_gpu_memory_fraction'):
    conf.gpu_options.per_process_gpu_memory_fraction = config.eval.execution.per_process_gpu_memory_fraction
  if hasattr(config.eval.execution, 'allow_growth'):
    conf.gpu_options.allow_growth = config.eval.execution.allow_growth

  sess = tf.Session(graph=graph, config=conf)
  coord = tf.train.Coordinator()
  threads = tf.train.start_queue_runners(sess=sess, coord=coord)

  sess.run(init)

  checkpoints_dir = config.model_dir
  latest_checkpoint = None
  wait_iters = 0

  if not os.path.exists(os.path.join(checkpoints_dir, 'eval')):
    os.mkdir(os.path.join(checkpoints_dir, 'eval'))
  writer = tf.summary.FileWriter(os.path.join(checkpoints_dir, 'eval'), sess.graph)

  while True:
    if config.eval.checkpoint != '':
      new_checkpoint = config.eval.checkpoint
    else:
      new_checkpoint = tf.train.latest_checkpoint(checkpoints_dir)
    if latest_checkpoint != new_checkpoint:
      latest_checkpoint = new_checkpoint
      saver.restore(sess, latest_checkpoint)
      current_step = tf.train.load_variable(latest_checkpoint, 'global_step')

      test_size = dataset_size(config.eval.file_list_path)
      time_start = time.time()

      mean_accuracy, mean_accuracy_minus_1 = 0.0, 0.0

      steps = max(1, test_size // config.eval.batch_size)
      num = 0
      for _ in range(steps):
        val, slabel, _ = sess.run([d_predictions, label_val, file_names])
        acc, acc1, num_ = accuracy(slabel, val, config.vocab, config.r_vocab)
        mean_accuracy += acc
        mean_accuracy_minus_1 += acc1
        num += num_

      writer.add_summary(
        tf.Summary(value=[tf.Summary.Value(tag='evaluation/acc', simple_value=float(mean_accuracy / num)),
                          tf.Summary.Value(tag='evaluation/acc-1', simple_value=float(mean_accuracy_minus_1 / num))]),
        current_step)
      print('Test acc: {}'.format(mean_accuracy / num))
      print('Test acc-1: {}'.format(mean_accuracy_minus_1 / num))
      print('Time per step: {} for test size {}'.format((time.time() - time_start) / steps, test_size))
    else:
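      # No new checkpoint yet: draw a simple console progress indicator and poll again in 5 s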
      if wait_iters % 12 == 0:
        sys.stdout.write('\r')
        for _ in range(11 + wait_iters // 12):
          sys.stdout.write(' ')
        sys.stdout.write('\r')
        for _ in range(1 + wait_iters // 12):
          sys.stdout.write('|')
      else:
        sys.stdout.write('.')
      sys.stdout.flush()
      time.sleep(5)
      wait_iters += 1
    if config.eval.checkpoint != '':
      break

  coord.request_stop()
  coord.join(threads)
  sess.close()


def train(config, init_checkpoint):
  if hasattr(config.train, 'random_seed'):
    np.random.seed(config.train.random_seed)
    tf.set_random_seed(config.train.random_seed)
    random.seed(config.train.random_seed)

  if hasattr(config.train.execution, 'CUDA_VISIBLE_DEVICES'):
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = config.train.execution.CUDA_VISIBLE_DEVICES

  CTCUtils.vocab = config.vocab
  CTCUtils.r_vocab = config.r_vocab

  input_train_data = InputData(batch_size=config.train.batch_size,
                               input_shape=config.input_shape,
                               file_list_path=config.train.file_list_path,
                               apply_basic_aug=config.train.apply_basic_aug,
                               apply_stn_aug=config.train.apply_stn_aug,
                               apply_blur_aug=config.train.apply_blur_aug)

  graph = tf.Graph()
  with graph.as_default():
    global_step = tf.Variable(0, name='global_step', trainable=False)
    input_data, input_labels = input_train_data.input_fn()

    prob = inference(config.rnn_cells_num, input_data, config.num_classes)
    prob = tf.transpose(prob, (1, 0, 2))  # prepare for CTC

    data_length = tf.fill([tf.shape(prob)[1]], tf.shape(prob)[0])  # input seq length, batch size
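    # Convert the dense labels into the sparse form that tf.nn.ctc_loss expects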
    ctc = tf.py_func(CTCUtils.compute_ctc_from_labels, [input_labels], [tf.int64, tf.int64, tf.int64])
    ctc_labels = tf.to_int32(tf.SparseTensor(ctc[0], ctc[1], ctc[2]))

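    # Beam-search decode; expose dense predictions and the edit-distance error rate as named ops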
    predictions = tf.to_int32(
      tf.nn.ctc_beam_search_decoder(prob, data_length, merge_repeated=False, beam_width=10)[0][0])
    tf.sparse_tensor_to_dense(predictions, default_value=-1, name='d_predictions')
    tf.reduce_mean(tf.edit_distance(predictions, ctc_labels, normalize=False), name='error_rate')

    loss = tf.reduce_mean(
      tf.nn.ctc_loss(inputs=prob, labels=ctc_labels, sequence_length=data_length, ctc_merge_repeated=True), name='loss')

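    # Piecewise-constant schedule: drop the learning rate to 10% after 150k steps and to 1% after 200k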
    learning_rate = tf.train.piecewise_constant(global_step, [150000, 200000],
                                                [config.train.learning_rate, 0.1 * config.train.learning_rate,
                                                 0.01 * config.train.learning_rate])
    opt_loss = tf.contrib.layers.optimize_loss(loss, global_step, learning_rate, config.train.opt_type,
                                               config.train.grad_noise_scale, name='train_step')

    init = tf.global_variables_initializer()
    saver = tf.train.Saver(max_to_keep=1000, write_version=tf.train.SaverDef.V2, save_relative_paths=True)

  conf = tf.ConfigProto()
  if hasattr(config.train.execution, 'per_process_gpu_memory_fraction'):
    conf.gpu_options.per_process_gpu_memory_fraction = config.train.execution.per_process_gpu_memory_fraction
  if hasattr(config.train.execution, 'allow_growth'):
    conf.gpu_options.allow_growth = config.train.execution.allow_growth

  session = tf.Session(graph=graph, config=conf)
  coordinator = tf.train.Coordinator()
  threads = tf.train.start_queue_runners(sess=session, coord=coordinator)

  session.run(init)

  if init_checkpoint:
    tf.logging.info('Initialize from: ' + init_checkpoint)
    saver.restore(session, init_checkpoint)
  else:
    latest_checkpoint = tf.train.latest_checkpoint(config.model_dir)
    if latest_checkpoint:
      tf.logging.info('Restore from: ' + latest_checkpoint)
      saver.restore(session, latest_checkpoint)

  writer = None
  if config.train.need_to_save_log:
    writer = tf.summary.FileWriter(config.model_dir, session.graph)

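  # Finalize the graph so the training loop below cannot accidentally add new ops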
  graph.finalize()

  mean_accuracy, mean_accuracy_minus_1 = 0, 0
  num = 0

  for i in range(config.train.steps):
    curr_step, curr_learning_rate, curr_loss, curr_opt_loss = session.run([global_step, learning_rate, loss, opt_loss])

    if i % config.train.display_iter == 0:
      if config.train.need_to_save_log:
        writer.add_summary(tf.Summary(value=[tf.Summary.Value(tag='train/loss',
                                                              simple_value=float(curr_loss)),
                                             tf.Summary.Value(tag='train/learning_rate',
                                                              simple_value=float(curr_learning_rate)),
                                             tf.Summary.Value(tag='train/optimization_loss',
                                                              simple_value=float(curr_opt_loss))
                                             ]),
                           curr_step)
        writer.flush()

      tf.logging.info('Iteration: ' + str(curr_step) + ', Train loss: ' + str(curr_loss))

    if ((curr_step % config.train.save_checkpoints_steps == 0 or curr_step == config.train.steps)
        and config.train.need_to_save_weights):
      saver.save(session, os.path.join(config.model_dir, 'model.ckpt-{:d}.ckpt'.format(curr_step)))

  coordinator.request_stop()
  coordinator.join(threads)
  session.close()


def infer(config):
    if hasattr(config.infer, 'random_seed'):
        np.random.seed(config.infer.random_seed)
        tf.set_random_seed(config.infer.random_seed)
        random.seed(config.infer.random_seed)

    if hasattr(config.infer.execution, 'CUDA_VISIBLE_DEVICES'):
        os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
        os.environ["CUDA_VISIBLE_DEVICES"] = config.infer.execution.CUDA_VISIBLE_DEVICES

    height, width, channels_num = config.input_shape
    rnn_cells_num = config.rnn_cells_num

    graph = tf.Graph()

    with graph.as_default():
        with slim.arg_scope([slim.batch_norm, slim.dropout],
                            is_training=False):
            inp_data, filenames = data_input(
                height,
                width,
                channels_num,
                config.infer.file_list_path,
                batch_size=config.infer.batch_size)

            prob = inference(rnn_cells_num, inp_data, config.num_classes)
            prob = tf.transpose(prob, (1, 0, 2))  # prepare for CTC

            data_length = tf.fill(
                [tf.shape(prob)[1]],
                tf.shape(prob)[0])  # input seq length, batch size

            result = tf.nn.ctc_greedy_decoder(prob,
                                              data_length,
                                              merge_repeated=True)

            predictions = tf.to_int32(result[0][0])
            d_predictions = tf.sparse_to_dense(predictions.indices, [
                tf.shape(inp_data, out_type=tf.int64)[0], config.max_lp_length
            ],
                                               predictions.values,
                                               default_value=-1,
                                               name='d_predictions')

            init = tf.global_variables_initializer()
            saver = tf.train.Saver(write_version=tf.train.SaverDef.V2)

    # session
    conf = tf.ConfigProto()
    if hasattr(config.infer.execution, 'per_process_gpu_memory_fraction'):
        conf.gpu_options.per_process_gpu_memory_fraction = config.infer.execution.per_process_gpu_memory_fraction
    if hasattr(config.infer.execution, 'allow_growth'):
        conf.gpu_options.allow_growth = config.infer.execution.allow_growth

    sess = tf.Session(graph=graph, config=conf)
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    sess.run(init)

    latest_checkpoint = config.infer.checkpoint
    if config.infer.checkpoint == '':
        latest_checkpoint = tf.train.latest_checkpoint(config.model_dir)

    saver.restore(sess, latest_checkpoint)

    infer_size = dataset_size(config.infer.file_list_path)
    steps = max(1, infer_size // config.infer.batch_size)
    ii = 0
    for _ in range(steps):
        vals, batch_filenames = sess.run([d_predictions, filenames])
        #print(batch_filenames)
        pred = decode_beams(vals, config.r_vocab)

        for i, filename in enumerate(batch_filenames):
            filename = filename.decode('utf-8')

            img = cv2.imread(filename)
            size = cv2.getTextSize(pred[i], cv2.FONT_HERSHEY_SIMPLEX, 0.55, 2)
            text_width = size[0][0]
            text_height = size[0][1]

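            # Pad the image at the bottom (and on the right if the caption is wider) so the text fits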
            img_he, img_wi, _ = img.shape
            img = cv2.copyMakeBorder(img,
                                     0,
                                     text_height + 10,
                                     0,
                                     0 if text_width < img_wi else text_width -
                                     img_wi,
                                     cv2.BORDER_CONSTANT,
                                     value=(255, 255, 255))
            cv2.putText(img, pred[i], (0, img_he + text_height + 5),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.55, (0, 0, 0), 2)

            #cv2.imshow('License Plate', img)
            ii += 1
            save_dir = 'results'
            if not os.path.exists(save_dir):
                os.makedirs(save_dir)
            img_path = os.path.join(save_dir, 'result_{}.png'.format(ii))
            cv2.imwrite(img_path, img)
            print('Result saved at: {}'.format(img_path))
            key = cv2.waitKey(1)
            if key == 27:
                break

    coord.request_stop()
    coord.join(threads)
    sess.close()
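
# Minimal usage sketch (hypothetical: `load_config` and the paths below are
# assumptions, not part of this module; they only show how these entry points
# might be wired together):
#
#   config = load_config('configs/lpr.yml')
#   train(config, init_checkpoint=None)   # train from scratch or resume
#   validate(config)                      # evaluate the latest checkpoint
#   export(config, tfmo='mo_tf.py')       # freeze the graph and run the Model Optimizer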