def init_args() -> Tuple[argparse.Namespace, EasyDict]:
    """
    :return: parsed arguments and (updated) config.cfg object
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-d', '--dataset_dir', type=str,
                        help='Directory containing train_features.tfrecords')
    parser.add_argument('-c', '--chardict_dir', type=str,
                        help='Directory where character dictionaries for the dataset were stored')
    parser.add_argument('-m', '--model_dir', type=str,
                        help='Directory where to store model checkpoints')
    parser.add_argument('-t', '--tboard_dir', type=str,
                        help='Directory where to store TensorBoard logs')
    parser.add_argument('-f', '--config_file', type=str,
                        help='Use this global configuration file')
    parser.add_argument('-e', '--decode_outputs', action='store_true', default=False,
                        help='Activate decoding of predictions during training (slow!)')
    parser.add_argument('-w', '--weights_path', type=str,
                        help='Path to pre-trained weights to continue training')
    parser.add_argument('-j', '--num_threads', type=int, default=int(os.cpu_count() / 2),
                        help='Number of threads to use in batch shuffling')

    args = parser.parse_args()

    config = load_config(args.config_file)
    if args.dataset_dir:
        config.cfg.PATH.TFRECORDS_DIR = args.dataset_dir
    if args.chardict_dir:
        config.cfg.PATH.CHAR_DICT_DIR = args.chardict_dir
    if args.model_dir:
        config.cfg.PATH.MODEL_SAVE_DIR = args.model_dir
    if args.tboard_dir:
        config.cfg.PATH.TBOARD_SAVE_DIR = args.tboard_dir

    return args, config.cfg
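# Example training invocation (script name and paths are illustrative):
#   python tools/train_shadownet.py -d data/tfrecords -c data/char_dict -m model/shadownet

# All three init_args() variants in this section depend on a load_config()
# helper defined elsewhere in the repository. The sketch below only illustrates
# the contract they assume -- a callable returning an object whose .cfg
# attribute is an EasyDict, optionally overridden from a file. It is a
# hypothetical stand-in, not the project's implementation: the function name,
# the YAML file format, and the default values are all assumptions.
import yaml
from easydict import EasyDict


def load_config_sketch(config_file=None):
    # Hypothetical defaults; the real defaults live in the project's config module.
    cfg = EasyDict({
        'PATH': EasyDict({
            'TFRECORDS_DIR': 'data/tfrecords',
            'CHAR_DICT_DIR': 'data/char_dict',
            'MODEL_SAVE_DIR': 'model/shadownet',
            'TBOARD_SAVE_DIR': 'tboard/shadownet',
        })
    })
    if config_file:
        with open(config_file, 'r') as f:
            # Shallow merge: values from the file replace whole top-level sections.
            cfg.update(EasyDict(yaml.safe_load(f)))
    return EasyDict({'cfg': cfg})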
def init_args() -> Tuple[argparse.Namespace, EasyDict]:
    """
    :return: parsed arguments and (updated) config.cfg object
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-d', '--dataset_dir', type=str,
                        help='Directory containing test_features.tfrecords')
    parser.add_argument('-c', '--chardict_dir', type=str,
                        help='Directory where character dictionaries for the dataset were stored')
    parser.add_argument('-w', '--weights_path', type=str, required=True,
                        help='Path to pre-trained weights')
    parser.add_argument('-n', '--num_classes', type=int, required=True,
                        help='Force number of character classes to this number. '
                             'Use 37 to run with the demo data. '
                             'Set to 0 for auto (read from files in charset_dir)')
    parser.add_argument('-f', '--config_file', type=str,
                        help='Use this global configuration file')
    # argparse's type=bool treats any non-empty string (even "False") as True,
    # so a store_true flag is used instead.
    parser.add_argument('-v', '--visualize', action='store_true', default=False,
                        help='Whether to display images')
    parser.add_argument('-b', '--one_batch', action='store_true', default=False,
                        help='Test only one batch of the dataset')
    parser.add_argument('-j', '--num_threads', type=int, default=int(os.cpu_count() / 2),
                        help='Number of threads to use in batch shuffling')

    args = parser.parse_args()

    config = load_config(args.config_file)
    if args.dataset_dir:
        config.cfg.PATH.TFRECORDS_DIR = args.dataset_dir
    if args.chardict_dir:
        config.cfg.PATH.CHAR_DICT_DIR = args.chardict_dir

    return args, config.cfg
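# Example test invocation (script name and paths are illustrative, not taken
# from the snippet above); -w and -n are required, and -b limits the run to a
# single batch for a quick smoke test:
#   python tools/test_shadownet.py -d data/tfrecords -c data/char_dict \
#       -w model/shadownet/shadownet.ckpt -n 37 -b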
def init_args() -> Tuple[argparse.Namespace, EasyDict]:
    """
    :return: parsed arguments and (updated) config.cfg object
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--image_path', type=str,
                        help='Path to the image to be tested',
                        default='data/test_images/test_01.jpg')
    parser.add_argument('--weights_path', type=str,
                        help='Path to the pre-trained weights to use',
                        default='model/shadownet/shadownet_2017-09-29-19-16-33.ckpt-39999')
    parser.add_argument('-f', '--config_file', type=str,
                        help='Use this global configuration file')
    parser.add_argument('-c', '--chardict_dir', type=str,
                        help='Directory where character dictionaries for the dataset were stored')
    parser.add_argument('-n', '--num_classes', type=int, default=37,
                        help='Force number of character classes to this number. '
                             'Set to 0 for auto (read from charset_dir)')

    args = parser.parse_args()

    config = load_config(args.config_file)
    if args.chardict_dir:
        config.cfg.PATH.CHAR_DICT_DIR = args.chardict_dir

    return args, config.cfg
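# Example demo invocation; --image_path and --weights_path fall back to the
# defaults baked into the parser above, so a bare run only needs overrides
# (the script name is illustrative):
#   python tools/demo_shadownet.py --image_path data/test_images/test_01.jpg -n 37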
    if charset_dir:
        char_dict_path = os.path.join(charset_dir, "char_dict.json")
        ord_map_dict_path = os.path.join(charset_dir, "ord_map.json")
    else:
        char_dict_path = os.path.join("data/char_dict", "char_dict.json")
        ord_map_dict_path = os.path.join("data/char_dict", "ord_map.json")
    feature_io = TextFeatureIO(char_dict_path, ord_map_dict_path)
    feature_io.writer.write_features(tfrecords_path=tfrecord_path, labels=labels,
                                     images=images, imagenames=imagenames)


if __name__ == '__main__':
    args = init_args()

    config = load_config(args.config_file)
    if not ops.exists(args.dataset_dir):
        raise ValueError('Dataset {:s} doesn\'t exist'.format(args.dataset_dir))

    os.makedirs(args.save_dir, exist_ok=True)

    print('Initializing the dataset provider...')
    provider = data_provider.TextDataProvider(
        dataset_dir=args.dataset_dir,
        annotation_name=args.annotation_file,
        validation_set=args.validation_split > 0,
        validation_split=args.validation_split,
        shuffle='every_epoch',