# NOTE(review): whitespace-mangled fragment — the original newlines/indentation
# were lost, and this chunk begins mid-suite: the open `hf` handle,
# `output_path`, `args`, `charset_base`, `max_text_length`, `input_size`, and
# the `if` that the `elif args.image:` below pairs with are all defined OUTSIDE
# this view. Indentation below is reconstructed; code tokens are unchanged.
# TODO: recover the enclosing `with h5py.File(...)` / `if args...:` context
# before running this file.

# Load (at most) the first 256 test images and ground-truth labels from the
# already-open HDF5 handle `hf`.
dt = hf['test']['dt'][:256]
gt = hf['test']['gt'][:256]

# Optional predictions file: keep only lines starting with "TE_P"; line[5:]
# drops the first five characters (presumably the "TE_P" tag plus one
# separator character — confirm the file format). Defaults to empty strings
# when the file is absent so the viewer loop below still works.
predict_file = os.path.join(output_path, "predict.txt")
predicts = [''] * len(dt)
if os.path.isfile(predict_file):
    with open(predict_file, "r") as lg:
        predicts = [line[5:] for line in lg if line.startswith("TE_P")]

# Interactive viewer: print shape / ground truth / prediction for each sample,
# show the image, and block on a keypress before advancing.
for x in range(len(dt)):
    print(f"Image shape:\t{dt[x].shape}")
    print(f"Ground truth:\t{gt[x].decode()}")
    print(f"Predict:\t{predicts[x]}\n")
    cv2.imshow("img", pp.adjust_to_see(dt[x]))
    cv2.waitKey(0)

# NOTE(review): this `elif` belongs to an `if` chain that starts before this
# chunk — its true indentation level cannot be confirmed from here.
elif args.image:
    # Single-image path: build the tokenizer, preprocess and normalize the
    # input image, then assemble the HTR model (top_paths=10 decode outputs).
    tokenizer = Tokenizer(chars=charset_base, max_text_length=max_text_length)
    img = pp.preproc(args.image, input_size=input_size)
    x_test = pp.normalization([img])
    model = HTRModel(architecture=args.arch,
                     input_size=input_size,
                     vocab_size=tokenizer.vocab_size,
                     top_paths=10)
    model.compile()
# NOTE(review): whitespace-mangled fragment — this chunk begins in the middle
# of a function call: `compression="gzip", compression_opts=9)` is the tail of
# a call (an HDF5 dataset-creation call? — confirm) whose opening, like the
# `if` that the `elif`s below belong to and the loop that defines `i`, lies
# outside this view. Indentation below is reconstructed; tokens are unchanged.
compression="gzip", compression_opts=9)
print(f"[OK] {i} partition.")  # `i` comes from a loop outside this chunk
print(f"Transformation finished.")

# NOTE(review): pairs with an `if` preceding this chunk.
elif args.cv2:
    # Visual sanity check of the dataset: show every test image with its
    # ground-truth label; waitKey(0) blocks until a key is pressed.
    with h5py.File(hdf5_src, "r") as hf:
        dt = hf["test"]["dt"][:]
        gt = hf["test"]["gt"][:]

        # NOTE(review): loop kept inside the `with`; the original nesting is
        # ambiguous, but dt/gt are materialized by `[:]` so either placement
        # behaves the same.
        for x in range(len(dt)):
            print(f"Image shape: {dt[x].shape}")
            print(f"Ground truth: {gt[x].decode()}\n")
            cv2.imshow("img", pp.adjust_to_see(dt[x]))
            cv2.waitKey(0)

elif args.train or args.test:
    os.makedirs(output_path, exist_ok=True)

    # Data generator over the HDF5 source; the network builder is looked up
    # by name on the `architecture` module.
    dtgen = DataGenerator(hdf5_src=hdf5_src,
                          batch_size=args.batch_size,
                          charset=charset_base,
                          max_text_length=max_text_length)
    network_func = getattr(architecture, args.arch)

    # vocab_size + 1: presumably reserves one extra output class (e.g. a CTC
    # blank) — TODO confirm against the architecture module.
    ioo = network_func(input_size=input_size,
                       output_size=(dtgen.tokenizer.vocab_size + 1),
                       learning_rate=0.001)