def test_image_no_detection(self):
    """An image containing no detectable objects keeps its shape and yields an empty class list."""
    source = load_image(IMAGE_NO_DETECTION_PATH)
    annotated, found = detect(self.tf_sess, source)
    self.assertEqual((375, 500, 3), annotated.shape)
    self.assertListEqual([], sorted(found))
def test_image_4k_detection(self):
    """A 4K image is returned with its original shape and both expected classes detected."""
    source = load_image(IMAGE_4K_PATH)
    annotated, found = detect(self.tf_sess, source)
    self.assertEqual((2560, 3840, 3), annotated.shape)
    self.assertListEqual(['car', 'person'], sorted(found))
def test_image_fullhd_detection(self):
    """A Full-HD image is returned with its original shape and both expected classes detected."""
    source = load_image(IMAGE_FULL_HD_PATH)
    annotated, found = detect(self.tf_sess, source)
    self.assertEqual((1280, 1920, 3), annotated.shape)
    self.assertListEqual(['car', 'person'], sorted(found))
def process_file_object(mc: Minio, tf_sess: tf.Session, bucket_name: str, key: str, input_prefix: str, output_prefix: str):
    """Download an image object from MinIO, run object detection on it, upload the
    annotated result under ``output_prefix``, and delete the source object.

    Args:
        mc: MinIO client used for all object-storage operations.
        tf_sess: TensorFlow session passed through to ``detect``.
        bucket_name: Bucket holding both the input and the output objects.
        key: Object key of the input image (expected to start with ``input_prefix``).
        input_prefix: Prefix stripped from ``key`` when building the output key.
        output_prefix: Prefix substituted in place of ``input_prefix`` for the upload.
    """
    file_name = os.path.basename(key)
    print('Processing file: {}'.format(key))
    try:
        # Scratch paths for the downloaded original and the annotated copy.
        tmp_input_file_path = os.path.join('/tmp', file_name)
        tmp_output_file_path = os.path.join('/tmp', 'output_' + file_name)
        print('Downloading file: {}'.format(key))
        try:
            # ret is the stat of the fetched object; its metadata is merged into the upload below.
            ret = mc.fget_object(bucket_name, key, tmp_input_file_path)
        except NoSuchKey:
            # Object vanished between listing and fetch — treat as already handled.
            # NOTE(review): the outer finally still runs mc.remove_object on this
            # missing key — presumably a no-op/ignored error; confirm with the SDK.
            print('File not found, ignoring')
            return
        print('Loading image into memory')
        try:
            image_np = load_image(tmp_input_file_path)
        finally:
            # The local download is no longer needed once decoded (or if decoding failed).
            os.remove(tmp_input_file_path)
        print('Detecting objects in file: {}'.format(tmp_input_file_path))
        # detect returns the annotated image plus the list of detected class names.
        image_np, classes = detect(tf_sess, image_np)
        print('Detected classes: {}'.format(','.join(classes)))
        # Carry the source object's metadata forward and record the classes on the output.
        metadata = {
            **(ret.metadata or {}),
            "x-amz-meta-classes": ','.join(classes),
        }
        try:
            save_image(image_np, tmp_output_file_path)
            # Only the first occurrence of input_prefix is rewritten, so keys that
            # repeat the prefix deeper in the path are preserved.
            output_key = key.replace(input_prefix, output_prefix, 1)
            print('Uploading file to: {}'.format(output_key))
            mc.fput_object(bucket_name, output_key, tmp_output_file_path, metadata=metadata)
        finally:
            os.remove(tmp_output_file_path)
    finally:
        # NOTE(review): the source object is deleted even when detection/upload
        # raised, so a failed run loses the input — looks intentional (queue-style
        # consumption) but verify against the caller's retry expectations.
        mc.remove_object(bucket_name, key)
        print('Finished processing file')
def find_dates(tokens, postdate):
    """Detect date mentions in *tokens* relative to *postdate* and post-process them.

    The post date is first normalized into the configured time zone; detection
    and post-processing both operate on that localized timestamp.
    """
    localized = postdate.astimezone(get_time_zone())
    candidates = detect(tokens, localized, TOLERANCE, tag_date, valid_state, parse_date)
    return process_dates(candidates, tokens, localized)
'--input_channel', type=int, default=1, help='the number of input channel of Feature extractor') parser.add_argument( '--output_channel', type=int, default=512, help='the number of output channel of Feature extractor') parser.add_argument('--hidden_size', type=int, default=256, help='the size of the LSTM hidden state') opt = parser.parse_args() images = detection.detect("test/milo.jpeg", "model/craft_mlt_25k.pth") """ vocab / character number configuration """ if opt.sensitive: opt.character = string.printable[: -6] # same with ASTER setting (use 94 char). cudnn.benchmark = True cudnn.deterministic = True opt.num_gpu = torch.cuda.device_count() labels = ["indomi", "milo"] recognition.demo(opt, 'model/TPS-ResNet-BiLSTM-Attn.pth', images, labels) print("elapsed time : {}s".format(time.time() - t))