Example #1
def _build_crnn_model(crnn_config,
                      detection_model,
                      is_training,
                      add_summaries=True):
    # Load the CRNN hyperparameters from the JSON file named in the config.
    json_path = crnn_config.json_dir  # placeholder for actual values
    dict_params = import_params_from_json(json_filename=json_path)
    parameters = Params(**dict_params)
    # Assign ground-truth transcriptions to detections via IoU matching.
    crnn_target_assigner = target_assigner.create_target_assigner(
        'CRNN',
        'transcription',
        use_matmul_gather=False,
        iou_threshold=crnn_config.assigner_iou_threshold)
    return CRNN(parameters, detection_model, crnn_target_assigner)
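
The helpers import_params_from_json and Params come from the surrounding project and are not shown in these excerpts. A minimal sketch of what they plausibly do, assuming the JSON file is a flat object of constructor keyword arguments (the implementation here is an assumption, not the project's actual code):

import json

def import_params_from_json(json_filename):
    # Read a flat JSON object of hyperparameters into a plain dict.
    with open(json_filename, mode='r', encoding='utf8') as f:
        return json.load(f)

class Params:
    # Minimal stand-in: store every keyword argument as an attribute,
    # so Params(**dict_params) mirrors the calls in the examples.
    def __init__(self, **kwargs):
        for key, value in kwargs.items():
            setattr(self, key, value)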
Example #2
def main(unused_argv):
    # Create the output directory if it does not exist
    if not os.path.exists(FLAGS.output_model_dir):
        os.makedirs(FLAGS.output_model_dir)

    if FLAGS.params_file:
        dict_params = import_params_from_json(json_filename=FLAGS.params_file)
        parameters = Params(**dict_params)
    else:
        parameters = Params(
            train_batch_size=128,
            eval_batch_size=59,
            learning_rate=0.001,  # 1e-3 recommended
            learning_decay_rate=0.5,
            learning_decay_steps=23438 * 2,
            evaluate_every_epoch=1,
            save_interval=5e3,
            input_shape=(32, 304),
            image_channels=3,
            optimizer='adam',
            digits_only=False,
            alphabet=Alphabet.CHINESECHAR_LETTERS_DIGITS_EXTENDED,
            alphabet_decoding='same',
            csv_delimiter=' ',
            csv_files_train=FLAGS.csv_files_train,
            csv_files_eval=FLAGS.csv_files_eval,
            output_model_dir=FLAGS.output_model_dir,
            n_epochs=FLAGS.nb_epochs,
            gpu=FLAGS.gpu)

    model_params = {
        'Params': parameters,
    }
    # Save the experiment configuration
    parameters.export_experiment_params()

    os.environ['CUDA_VISIBLE_DEVICES'] = parameters.gpu
    config_sess = tf.ConfigProto()
    config_sess.gpu_options.per_process_gpu_memory_fraction = 0.4

    # Count the number of training samples listed in the CSV files
    n_samples = 0
    for csv_file in parameters.csv_files_train:
        with open(csv_file, mode='r', encoding='utf8') as csvfile:
            n_samples += len(csvfile.readlines())

    # One checkpoint per epoch: steps per epoch = ceil(n_samples / batch_size)
    save_checkpoints_steps = int(
        np.ceil(n_samples / parameters.train_batch_size))
    keep_checkpoint_max = parameters.n_epochs
    print('n_samples', n_samples, 'save_checkpoints_steps',
          save_checkpoints_steps, 'keep_checkpoint_max', keep_checkpoint_max)
    # Configure the estimator
    est_config = tf.estimator.RunConfig()
    est_config = est_config.replace(
        keep_checkpoint_max=keep_checkpoint_max,
        save_checkpoints_steps=save_checkpoints_steps,
        session_config=config_sess,
        save_summary_steps=100,
        model_dir=parameters.output_model_dir)

    estimator = tf.estimator.Estimator(model_fn=crnn_fn,
                                       params=model_params,
                                       model_dir=parameters.output_model_dir,
                                       config=est_config)
    try:
        tensors_to_log = {'train_accuracy': 'train_accuracy'}
        logging_hook = tf.train.LoggingTensorHook(tensors=tensors_to_log,
                                                  every_n_iter=100)
        for e in range(0, parameters.n_epochs,
                       parameters.evaluate_every_epoch):
            estimator.train(input_fn=data_loader(
                csv_filename=parameters.csv_files_train,
                params=parameters,
                batch_size=parameters.train_batch_size,
                num_epochs=parameters.evaluate_every_epoch,
                data_augmentation=True,
                image_summaries=True),
                            hooks=[logging_hook])
            eval_results = estimator.evaluate(
                input_fn=data_loader(csv_filename=parameters.csv_files_eval,
                                     params=parameters,
                                     batch_size=parameters.eval_batch_size,
                                     num_epochs=1),
                steps=int(np.floor(n_samples / parameters.eval_batch_size)),
            )
            print('Evaluation results: %s' % (str(eval_results)))
        # for tensorflow1.4
        # estimator.train(input_fn=input_fn(filename=parameters.csv_files_train,
        #                                   is_training=True,
        #                                   params=parameters,
        #                                   batch_size=parameters.train_batch_size,
        #                                   num_epochs=parameters.n_epochs),
        #                 hooks=[logging_hook])
    except KeyboardInterrupt:
        print('Interrupted')
        estimator.export_savedmodel(
            os.path.join(parameters.output_model_dir, 'export'),
            preprocess_image_for_prediction(min_width=10))
        print('Exported model to {}'.format(
            os.path.join(parameters.output_model_dir, 'export')))
        return  # skip the second export below after an interrupt

    estimator.export_savedmodel(
        os.path.join(parameters.output_model_dir, 'export'),
        preprocess_image_for_prediction(min_width=10))
    print('Exported model to {}'.format(
        os.path.join(parameters.output_model_dir, 'export')))
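
Note that data_loader(...) is invoked once and its return value is passed to the estimator as input_fn, so it must return a zero-argument callable that builds the input pipeline. A minimal sketch of that shape, assuming a tf.data pipeline; _parse_csv_line is a hypothetical helper, not part of the original code:

def data_loader(csv_filename, params, batch_size=64, num_epochs=1,
                data_augmentation=False, image_summaries=False):
    # Return the zero-argument input_fn that tf.estimator expects.
    def input_fn():
        filenames = csv_filename if isinstance(csv_filename, list) else [csv_filename]
        dataset = tf.data.TextLineDataset(filenames)
        # _parse_csv_line (hypothetical) maps one CSV row to (features, label).
        dataset = dataset.map(lambda line: _parse_csv_line(line, params))
        dataset = dataset.shuffle(buffer_size=1024).repeat(num_epochs)
        dataset = dataset.batch(batch_size)
        return dataset
    return input_fn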
Example #3
                        help='Output directory (for exported model)',
                        default='./exported_model')
    parser.add_argument('-g',
                        '--gpu',
                        type=str,
                        help="GPU 1, 0 or '' for CPU",
                        default='')
    args = vars(parser.parse_args())

    os.environ['CUDA_VISIBLE_DEVICES'] = args.get('gpu')
    config_sess = tf.ConfigProto()
    config_sess.gpu_options.per_process_gpu_memory_fraction = 0.6

    # Import params from the json file
    params_json = import_params_from_json(args.get('model_dir'))
    params = Params(**params_json)
    print(params)

    # Config
    est_config = tf.estimator.RunConfig()
    # RunConfig.replace returns a new config, so the result must be reassigned.
    est_config = est_config.replace(keep_checkpoint_max=10,
                                    save_checkpoints_steps=params.save_interval,
                                    session_config=config_sess,
                                    save_checkpoints_secs=None,
                                    save_summary_steps=1000)

    model_params = {
        'Params': params,
    }
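
This fragment breaks off before the estimator is built. A plausible continuation, mirroring the pattern of Example #2 (the argument names and the crnn_fn wiring here are assumptions, not part of the original excerpt):

    estimator = tf.estimator.Estimator(model_fn=crnn_fn,
                                       params=model_params,
                                       model_dir=args.get('model_dir'),
                                       config=est_config)
    estimator.export_savedmodel(args.get('output_dir'),
                                preprocess_image_for_prediction(min_width=10))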
                        default=30,
                        help='Number of epochs')
    parser.add_argument('-g',
                        '--gpu',
                        type=str,
                        help="GPU 0,1 or '' ",
                        default='')
    parser.add_argument('-p',
                        '--params-file',
                        type=str,
                        help='Parameters filename',
                        default=None)
    args = vars(parser.parse_args())

    if args.get('params_file'):
        dict_params = import_params_from_json(
            json_filename=args.get('params_file'))
        parameters = Params(**dict_params)
    else:
        parameters = Params(
            train_batch_size=128,
            eval_batch_size=128,
            learning_rate=1e-3,  # 1e-3 recommended
            learning_decay_rate=0.95,
            learning_decay_steps=5000,
            evaluate_every_epoch=5,
            save_interval=5e3,
            input_shape=(32, 450),
            optimizer='adam',
            digits_only=False,
            alphabet=dict_as_str(),
            alphabet_decoding='same',