def _build_crnn_model(crnn_config, detection_model, is_training,
                      add_summaries=True):
    json_path = crnn_config.json_dir  # placeholder for actual values
    dict_params = import_params_from_json(json_filename=json_path)
    parameters = Params(**dict_params)
    crnn_target_assigner = target_assigner.create_target_assigner(
        'CRNN', 'transcription', use_matmul_gather=False,
        iou_threshold=crnn_config.assigner_iou_threshold)
    return CRNN(parameters, detection_model, crnn_target_assigner)
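# Hedged usage sketch (not from the source): _build_crnn_model only needs a
# config object exposing `json_dir` and `assigner_iou_threshold`, plus an
# already-built detection model. The SimpleNamespace config and the
# `detection_model` variable below are hypothetical stand-ins.
from types import SimpleNamespace

example_crnn_config = SimpleNamespace(json_dir='model_params.json',
                                      assigner_iou_threshold=0.5)
# crnn_model = _build_crnn_model(example_crnn_config, detection_model,
#                                is_training=True)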
def convert_cpkt(checkpoint_path, model_output):
    '''
    Convert a ckpt checkpoint to a pb (SavedModel) file.
    :param checkpoint_path: path to the ckpt checkpoint
    :param model_output: directory where the pb file is saved
    :return: None
    '''
    # Raise an error if the input path does not exist
    if not os.path.exists(checkpoint_path):
        raise FileNotFoundError(checkpoint_path)
    # Create the output directory if it does not exist
    if not os.path.exists(model_output):
        print(model_output, 'does not exist, creating it')
        os.mkdir(model_output)

    parameters = Params(
        digits_only=False,
        alphabet=Alphabet.CHINESECHAR_LETTERS_DIGITS_EXTENDED,
        output_model_dir=model_output,
        image_channels=3,
    )
    model_params = {
        'Params': parameters,
    }

    # Config estimator
    est_config = tf.estimator.RunConfig()
    est_config = est_config.replace(model_dir=parameters.output_model_dir)
    estimator = tf.estimator.Estimator(
        model_fn=crnn_fn,
        params=model_params,
        config=est_config,
        model_dir=parameters.output_model_dir,
    )
    try:
        estimator.export_savedmodel(
            os.path.join(model_output, 'export'),
            preprocess_image_for_prediction(
                min_width=10,
                image_channels=parameters.image_channels),
            checkpoint_path=checkpoint_path)
        print('Exported model to {}'.format(
            os.path.join(model_output, 'export')))
    except KeyboardInterrupt:
        print('Interrupted')
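# Hedged usage sketch (paths are hypothetical): point convert_cpkt at a
# checkpoint produced by the Estimator and at a directory for the exported
# SavedModel. Note that a TensorFlow checkpoint path such as
# 'model.ckpt-50000' is a prefix rather than a single file, so the
# os.path.exists check above may not match it exactly.
if __name__ == '__main__':
    convert_cpkt(checkpoint_path='./estimator/model.ckpt-50000',
                 model_output='./exported_pb')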
    dataset = dataset.repeat(num_epochs)

    # Map example_parser over dataset, and batch results by up to batch_size
    dataset = dataset.map(example_parser).prefetch(batch_size)
    dataset = dataset.batch(batch_size)
    iterator = dataset.make_one_shot_iterator()
    images, labels = iterator.get_next()

    return images, labels


if __name__ == '__main__':
    parameters = Params(eval_batch_size=128,
                        input_shape=(32, 304),
                        digits_only=False,
                        alphabet=Alphabet.CHINESECHAR_LETTERS_DIGITS_EXTENDED,
                        alphabet_decoding='same',
                        image_channels=3)

    next_batch = input_fn(filename='/media/zhoujun/文件/val1.tfrecords',
                          is_training=False,
                          params=parameters,
                          batch_size=2)

    # Try it out: retrieve and print one batch of data.
    with tf.Session() as sess:
        first_batch = sess.run(next_batch)
        print(first_batch['images'])
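# Hedged follow-up sketch to the test above: with a one-shot iterator the
# same tensors can be run repeatedly until the dataset (after its finite
# number of repeat() epochs) is exhausted, at which point TensorFlow raises
# tf.errors.OutOfRangeError.
with tf.Session() as sess:
    n_batches = 0
    try:
        while True:
            sess.run(next_batch)
            n_batches += 1
    except tf.errors.OutOfRangeError:
        print('Consumed', n_batches, 'batches')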
def main(unused_argv):
    # Create the output directory if it does not exist
    if not os.path.exists(FLAGS.output_model_dir):
        os.makedirs(FLAGS.output_model_dir)

    if FLAGS.params_file:
        dict_params = import_params_from_json(json_filename=FLAGS.params_file)
        parameters = Params(**dict_params)
    else:
        parameters = Params(
            train_batch_size=128,
            eval_batch_size=59,
            learning_rate=0.001,  # 1e-3 recommended
            learning_decay_rate=0.5,
            learning_decay_steps=23438 * 2,
            evaluate_every_epoch=1,
            save_interval=5e3,
            input_shape=(32, 304),
            image_channels=3,
            optimizer='adam',
            digits_only=False,
            alphabet=Alphabet.CHINESECHAR_LETTERS_DIGITS_EXTENDED,
            alphabet_decoding='same',
            csv_delimiter=' ',
            csv_files_train=FLAGS.csv_files_train,
            csv_files_eval=FLAGS.csv_files_eval,
            output_model_dir=FLAGS.output_model_dir,
            n_epochs=FLAGS.nb_epochs,
            gpu=FLAGS.gpu)

    model_params = {
        'Params': parameters,
    }

    # Save the experiment configuration
    parameters.export_experiment_params()

    os.environ['CUDA_VISIBLE_DEVICES'] = parameters.gpu
    config_sess = tf.ConfigProto()
    config_sess.gpu_options.per_process_gpu_memory_fraction = 0.4

    # Count the number of image filenames listed in the training CSV files
    n_samples = 0
    for file in parameters.csv_files_train:
        with open(file, mode='r', encoding='utf8') as csvfile:
            n_samples += len(csvfile.readlines())
    save_checkpoints_steps = int(
        np.ceil(n_samples / parameters.train_batch_size))
    keep_checkpoint_max = parameters.n_epochs
    print(n_samples, 'save_checkpoints_steps', save_checkpoints_steps,
          ' keep_checkpoint_max', keep_checkpoint_max)

    # Config estimator
    est_config = tf.estimator.RunConfig()
    est_config = est_config.replace(
        keep_checkpoint_max=keep_checkpoint_max,
        save_checkpoints_steps=save_checkpoints_steps,
        session_config=config_sess,
        save_summary_steps=100,
        model_dir=parameters.output_model_dir)

    estimator = tf.estimator.Estimator(model_fn=crnn_fn,
                                       params=model_params,
                                       model_dir=parameters.output_model_dir,
                                       config=est_config)
    try:
        tensors_to_log = {'train_accuracy': 'train_accuracy'}
        logging_hook = tf.train.LoggingTensorHook(tensors=tensors_to_log,
                                                  every_n_iter=100)
        for e in range(0, parameters.n_epochs,
                       parameters.evaluate_every_epoch):
            estimator.train(input_fn=data_loader(
                csv_filename=parameters.csv_files_train,
                params=parameters,
                batch_size=parameters.train_batch_size,
                num_epochs=parameters.evaluate_every_epoch,
                data_augmentation=True,
                image_summaries=True),
                hooks=[logging_hook])
            eval_results = estimator.evaluate(
                input_fn=data_loader(csv_filename=parameters.csv_files_eval,
                                     params=parameters,
                                     batch_size=parameters.eval_batch_size,
                                     num_epochs=1),
                steps=np.floor(n_samples / parameters.eval_batch_size),
            )
            print('Evaluation results: %s' % (str(eval_results)))

        # for tensorflow 1.4
        # estimator.train(input_fn=input_fn(filename=parameters.csv_files_train,
        #                                   is_training=True,
        #                                   params=parameters,
        #                                   batch_size=parameters.train_batch_size,
        #                                   num_epochs=parameters.n_epochs),
        #                 hooks=[logging_hook])
    except KeyboardInterrupt:
        print('Interrupted')
        estimator.export_savedmodel(
            os.path.join(parameters.output_model_dir, 'export'),
            preprocess_image_for_prediction(min_width=10))
        print('Exported model to {}'.format(
            os.path.join(parameters.output_model_dir, 'export')))

    estimator.export_savedmodel(
        os.path.join(parameters.output_model_dir, 'export'),
        preprocess_image_for_prediction(min_width=10))
    print('Exported model to {}'.format(
        os.path.join(parameters.output_model_dir, 'export')))
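# Hedged sketch (assumption): a parameters JSON that could be passed via
# --params_file. The keys mirror the Params(...) keyword arguments above and
# are assumed to be returned unchanged by import_params_from_json; the values
# and the output filename are illustrative only.
import json

example_params = {
    'train_batch_size': 128,
    'eval_batch_size': 59,
    'learning_rate': 1e-3,
    'learning_decay_rate': 0.5,
    'learning_decay_steps': 23438 * 2,
    'input_shape': (32, 304),
    'image_channels': 3,
    'optimizer': 'adam',
    'output_model_dir': './estimator/',
}
with open('model_params.json', 'w', encoding='utf8') as f:
    json.dump(example_params, f, indent=2)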
                    default='./exported_model')
parser.add_argument('-g', '--gpu', type=str,
                    help="GPU 1, 0 or '' for CPU",
                    default='')
args = vars(parser.parse_args())

os.environ['CUDA_VISIBLE_DEVICES'] = args.get('gpu')
config_sess = tf.ConfigProto()
config_sess.gpu_options.per_process_gpu_memory_fraction = 0.6

# Import params from the json file
params_json = import_params_from_json(args.get('model_dir'))
params = Params(**params_json)
print(params)

# Config
est_config = tf.estimator.RunConfig()
est_config = est_config.replace(keep_checkpoint_max=10,
                                save_checkpoints_steps=params.save_interval,
                                session_config=config_sess,
                                save_checkpoints_secs=None,
                                save_summary_steps=1000)

model_params = {
    'Params': params,
}

estimator = tf.estimator.Estimator(
parser.add_argument('-g', '--gpu', type=str,
                    help="GPU 0,1 or '' ",
                    default='')
parser.add_argument('-p', '--params-file', type=str,
                    help='Parameters filename',
                    default=None)
args = vars(parser.parse_args())

if args.get('params_file'):
    dict_params = import_params_from_json(
        json_filename=args.get('params_file'))
    parameters = Params(**dict_params)
else:
    parameters = Params(
        train_batch_size=128,
        eval_batch_size=128,
        learning_rate=1e-3,  # 1e-3 recommended
        learning_decay_rate=0.95,
        learning_decay_steps=5000,
        evaluate_every_epoch=5,
        save_interval=5e3,
        input_shape=(32, 450),
        optimizer='adam',
        digits_only=False,
        alphabet=dict_as_str(),
        alphabet_decoding='same',
        csv_delimiter='\t',
from src.config import Params, import_params_from_json

if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='Train the model according to the specified config in the JSON. '
                    'The optional arguments override the ones from the file.')
    parser.add_argument('params_file', type=str,
                        help='Parameters filename (JSON)')
    parser.add_argument('-o', '--output_model_dir', type=str, required=False,
                        help='Directory for output')
    parser.add_argument('-n', '--nb_epochs', type=int, required=False,
                        help='Number of epochs')
    parser.add_argument('-g', '--gpu', type=str, required=False,
                        help="GPU 0,1 or '' ")
    args = vars(parser.parse_args())
    args = dict(filter(lambda kv: kv[1] is not None, args.items()))

    dict_params = import_params_from_json(
        json_filename=args.get('params_file'))
    dict_params.update(args)
    parameters = Params(**dict_params)

    os.makedirs(parameters.output_model_dir, exist_ok=True)

    model_params = {
        'Params': parameters,
    }

    # The parameters are saved in the output_dir to keep their most recent copy,
    # including the overriding command line arguments
    parameters.export_experiment_params()

    os.environ['CUDA_VISIBLE_DEVICES'] = parameters.gpu
    config_sess = tf.ConfigProto()
    config_sess.gpu_options.allow_growth = True
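# Hedged usage sketch: the script above takes the JSON config as its
# positional argument, while -o/-n/-g override individual values from the
# file. The script name `train.py` is an assumption.
#
#   python train.py model_params.json -o ./output_model -n 30 -g 0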
from src.data_handler import data_loader
from src.data_handler import preprocess_image_for_prediction
from src.read_dict import dict_as_str
from src.config import Params, Alphabet, import_params_from_json
import cv2

parameters = Params(train_batch_size=128,
                    eval_batch_size=128,
                    learning_rate=1e-3,  # 1e-3 recommended
                    learning_decay_rate=0.95,
                    learning_decay_steps=5000,
                    evaluate_every_epoch=5,
                    save_interval=5e3,
                    input_shape=(32, 304),
                    optimizer='adam',
                    digits_only=True,
                    alphabet=dict_as_str(),
                    alphabet_decoding='same',
                    csv_delimiter='\t',
                    csv_files_eval='/Users/samueltin/Projects/jb/crnn_tf_data/Test/sample.csv',
                    csv_files_train='/Users/samueltin/Projects/jb/crnn_tf_data/Train/sample.csv',
                    output_model_dir='./estimator/',
                    n_epochs=1,
                    gpu='')

model_params = {
    'Params': parameters,
}

parameters.export_experiment_params()