def test_classification_json():
    task = Tasks.CLASSIFICATION
    image_size = (120, 160)
    classes = ("aaa", "bbb")
    params = {
        "task": task,
        "classes": classes,
        "image_size": image_size,
        "data_format": "NCHW",
    }
    batch_size = 2
    inputs = np.random.uniform(size=[batch_size, len(classes)])
    raw_images = np.zeros((batch_size, 320, 280, 3), dtype=np.uint8)
    image_files = ["dummy.png", "dumpy_2.pny"]

    call = JsonOutput(**params)
    json_output = call(inputs, raw_images, image_files)
    output = json.loads(json_output)

    assert output["classes"] == [{"id": i, "name": name} for i, name in enumerate(classes)]
    assert output["task"] == str(task.value)

    results = output["results"]
    assert [result["file_path"] for result in results] == image_files

    for i in range(batch_size):
        predictions = results[i]["prediction"]
        assert [prediction["probability"] for prediction in predictions] == inputs[i, :].astype(str).tolist()
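
# For reference, the assertions above imply a JSON payload roughly shaped as
# follows. This is only a sketch reconstructed from the test assertions; key
# ordering, the exact task string, and any fields not asserted here are
# assumptions.
#
# {
#   "classes": [{"id": 0, "name": "aaa"}, {"id": 1, "name": "bbb"}],
#   "task": "<str(Tasks.CLASSIFICATION.value)>",
#   "results": [
#     {"file_path": "dummy.png",
#      "prediction": [{"probability": "<stringified score>"}, ...]},
#     {"file_path": "dumpy_2.pny",
#      "prediction": [{"probability": "<stringified score>"}, ...]}
#   ]
# }
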
def test_object_detection_json():
    task = Tasks.OBJECT_DETECTION
    image_size = (120, 160)
    classes = ("aaa", "bbb")
    params = {
        "task": task,
        "classes": classes,
        "image_size": image_size,
        "data_format": "NCHW",
    }
    batch_size = 2
    box_sizes = (3, 5)

    # Each box row is [x, y, w, h, class_id, score].
    boxes_1 = np.concatenate([
        np.random.randint(120, size=(box_sizes[0], 4)),
        np.random.randint(len(classes), size=(box_sizes[0], 1)),
        np.random.uniform(size=(box_sizes[0], 1)),
    ], axis=1)
    boxes_2 = np.concatenate([
        np.random.randint(120, size=(box_sizes[1], 4)),
        np.random.randint(len(classes), size=(box_sizes[1], 1)),
        np.random.uniform(size=(box_sizes[1], 1)),
    ], axis=1)
    inputs = [boxes_1, boxes_2]
    raw_images = np.zeros((batch_size, 320, 280, 3), dtype=np.uint8)
    image_files = ["dummy.png", "dumpy_2.pny"]

    call = JsonOutput(**params)
    json_output = call(inputs, raw_images, image_files)
    output = json.loads(json_output)

    assert output["classes"] == [{"id": i, "name": name} for i, name in enumerate(classes)]
    assert output["task"] == str(task.value)

    results = output["results"]
    assert [result["file_path"] for result in results] == image_files

    for i in range(batch_size):
        predictions = results[i]["prediction"]

        assert [prediction["score"] for prediction in predictions] == inputs[i][:, 5].astype(str).tolist()
        assert [prediction["class"]["id"] for prediction in predictions] == inputs[i][:, 4].astype(int).tolist()

        # Boxes are rescaled from the network input size to the raw image size (320x280).
        resized_boxes = np.stack([
            inputs[i][:, 0] * 280 / image_size[1],
            inputs[i][:, 1] * 320 / image_size[0],
            inputs[i][:, 2] * 280 / image_size[1],
            inputs[i][:, 3] * 320 / image_size[0],
        ], axis=1)
        assert np.allclose([prediction["box"] for prediction in predictions], resized_boxes)
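
# The resized_boxes expectation above encodes a simple rescaling rule: the
# network emits boxes in input-image coordinates (image_size is (height, width))
# and the test expects JsonOutput to map them back to the raw image resolution
# (320x280 here). A minimal sketch of that mapping, assuming the
# [x, y, w, h, class_id, score] row layout used in this test; _rescale_boxes is
# a hypothetical helper, not part of the library:

def _rescale_boxes(boxes, image_size, raw_shape):
    """Rescale the [x, y, w, h] columns from network input size to raw image size."""
    raw_h, raw_w = raw_shape[:2]
    scale_x = raw_w / image_size[1]  # width scale
    scale_y = raw_h / image_size[0]  # height scale
    scaled = boxes.astype(float).copy()
    scaled[:, 0] *= scale_x  # x
    scaled[:, 1] *= scale_y  # y
    scaled[:, 2] *= scale_x  # w
    scaled[:, 3] *= scale_y  # h
    return scaled
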
def test_semantic_segmentation_json():
    task = Tasks.SEMANTIC_SEGMENTATION
    image_size = (120, 160)
    classes = ("aaa", "bbb")
    params = {
        "task": task,
        "classes": classes,
        "image_size": image_size,
        "data_format": "NCHW",
    }
    batch_size = 2
    predict = np.random.uniform(size=(batch_size, len(classes), image_size[0], image_size[1]))
    raw_images = np.zeros((batch_size, 320, 280, 3), dtype=np.uint8)
    image_files = ["dummy.png", "dumpy_2.pny"]

    call = JsonOutput(**params)
    json_output = call(predict, raw_images, image_files)
    output = json.loads(json_output)

    assert output["classes"] == [{"id": i, "name": name} for i, name in enumerate(classes)]
    assert output["task"] == str(task.value)

    results = output["results"]
    assert [result["file_path"] for result in results] == image_files

    for i in range(batch_size):
        predictions = results[i]["prediction"]

        for class_id in range(len(classes)):
            # each per-class mask is a base64-encoded PNG at the raw image resolution
            mask = predictions[class_id]["mask"]
            mask_data = base64.b64decode(mask)
            mask_pil_image = PIL.Image.open(BytesIO(mask_data))
            mask_image = np.array(mask_pil_image)
            assert mask_image.shape == (320, 280)
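
# The loop above assumes each per-class mask is serialized as a base64-encoded
# PNG at the raw image resolution. A minimal round-trip sketch under that
# assumption (the encoder is hypothetical; the decoder mirrors what the test does):

def _encode_mask(mask):
    """Hypothetical helper: encode an (H, W) uint8 mask as a base64 PNG string."""
    buffer = BytesIO()
    PIL.Image.fromarray(mask).save(buffer, format="PNG")
    return base64.b64encode(buffer.getvalue()).decode("utf-8")


def _decode_mask(encoded):
    """Decode a base64 PNG string back into a numpy array, as the test does."""
    return np.array(PIL.Image.open(BytesIO(base64.b64decode(encoded))))

# Round trip: a 320x280 mask keeps its shape through encode/decode.
# assert _decode_mask(_encode_mask(np.zeros((320, 280), dtype=np.uint8))).shape == (320, 280)
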
def run_prediction(input_image, model, config_file, trial=1):
    if not input_image or not model or not config_file:
        logger.error('Please check usage with --help option')
        exit(1)

    config = load_yaml(config_file)

    # load the image
    image_data = load_image(input_image)
    raw_image = image_data

    # initialize Network
    nn = _init(model, config)

    # pre process for image
    image_data, bench_pre = _timerfunc(
        _pre_process, (image_data, config.PRE_PROCESSOR, config.DATA_FORMAT), trial)

    # add the batch dimension
    image_data = np.expand_dims(image_data, axis=0)

    # run the model to inference
    output, bench_inference = _timerfunc(_run, (nn, image_data), trial)

    logger.info('Output: (before post process)\n{}'.format(output))

    # post process for output
    output, bench_post = _timerfunc(_post_process, (output, config.POST_PROCESSOR), trial)

    logger.info('Output: (after post process)\n{}'.format(output))

    # json output
    json_output = JsonOutput(
        task=Tasks(config.TASK),
        classes=config.CLASSES,
        image_size=config.IMAGE_SIZE,
        data_format=config.DATA_FORMAT,
        bench={
            "total": bench_pre + bench_post + bench_inference,
            "pre": bench_pre,
            "post": bench_post,
            "inference": bench_inference,
        },
    )

    image_from_json = ImageFromJson(
        task=Tasks(config.TASK),
        classes=config.CLASSES,
        image_size=config.IMAGE_SIZE,
    )

    output_dir = "output"
    outputs = output
    raw_images = [raw_image]
    image_files = [input_image]

    json_obj = json_output(outputs, raw_images, image_files)
    _save_json(output_dir, json_obj)

    filename_images = image_from_json(json_obj, raw_images, image_files)
    _save_images(output_dir, filename_images)

    logger.info(
        "Benchmark avg result(sec) for {} trials: pre_process: {} inference: {} post_process: {} Total: {}"
        .format(
            trial,
            bench_pre,
            bench_inference,
            bench_post,
            bench_pre + bench_post + bench_inference,
        ))
def __init__(self, task, classes, image_size, data_format):
    self.json_output = JsonOutput(task, classes, image_size, data_format)
    self.image_from_json = ImageFromJson(task, classes, image_size)
def run_prediction(input_image, model, config_file, trial=1):
    if not input_image or not model or not config_file:
        logger.error('Please check usage with --help option')
        exit(1)

    config = load_yaml(config_file)

    # load the image
    image_data = load_image(input_image)
    raw_image = image_data

    # initialize Network
    nn = _init(model, config)

    pre_process = build_pre_process(config.PRE_PROCESSOR)
    post_process = build_post_process(config.POST_PROCESSOR)

    # call the functions once to exclude the first result, which includes some initialization overhead
    init_output = _pre_process(image_data, pre_process, config.DATA_FORMAT)
    init_output = _run(nn, init_output)
    init_output = _post_process(init_output, post_process)

    results_total = []
    results_pre = []
    results_run = []
    results_post = []

    for _ in range(trial):
        # pre process for image
        output, bench_pre = _timerfunc(
            _pre_process, (image_data, pre_process, config.DATA_FORMAT))

        # run the model to inference
        output, bench_run = _timerfunc(_run, (nn, output))

        # post process for output
        output, bench_post = _timerfunc(_post_process, (output, post_process))

        results_total.append(bench_pre + bench_run + bench_post)
        results_pre.append(bench_pre)
        results_run.append(bench_run)
        results_post.append(bench_post)

    time_stat = {
        "total": {"mean": np.mean(results_total), "std": np.std(results_total)},
        "pre": {"mean": np.mean(results_pre), "std": np.std(results_pre)},
        "post": {"mean": np.mean(results_post), "std": np.std(results_post)},
        "run": {"mean": np.mean(results_run), "std": np.std(results_run)},
    }

    # json output
    json_output = JsonOutput(
        task=Tasks(config.TASK),
        classes=config.CLASSES,
        image_size=config.IMAGE_SIZE,
        data_format=config.DATA_FORMAT,
        bench=time_stat,
    )

    image_from_json = ImageFromJson(
        task=Tasks(config.TASK),
        classes=config.CLASSES,
        image_size=config.IMAGE_SIZE,
    )

    output_dir = "output"
    outputs = output
    raw_images = [raw_image]
    image_files = [input_image]

    json_obj = json_output(outputs, raw_images, image_files)
    _save_json(output_dir, json_obj)

    filename_images = image_from_json(json_obj, raw_images, image_files)
    _save_images(output_dir, filename_images)

    logger.info("Benchmark avg result(sec) for {} trials".format(trial))
    logger.info(time_stat)
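
# _timerfunc is used above as "output, elapsed = _timerfunc(func, args)". A
# minimal sketch of such a helper, assuming it times a single call and returns
# the result together with the elapsed wall-clock seconds (the earlier variant
# of run_prediction also passes a trial count, so the real helper may differ):

import time


def _timerfunc(func, args):
    """Hypothetical timing helper: call func(*args) once and return (result, elapsed_seconds)."""
    start = time.perf_counter()
    result = func(*args)
    elapsed = time.perf_counter() - start
    return result, elapsed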