def run_test_tsv(times=1):
    """Replay the TSV-driven inference regression suite.

    Loads the expected-results TSV, starts an InferenceServer, runs the
    bad-input sanity checks, then feeds every case through the server
    ``times`` times in shuffled order, asserting each response's sorted
    confidences match the expected JSON to 3 significant digits.

    Args:
        times: number of full shuffled passes over the case list.
    """
    cases = parse_tsv_key(tsv_path)
    srv = InferenceServer(const.app_name, const.cfg)

    # Sanity checks on malformed input before the real cases.
    run_test_empty_image(srv)
    run_test_bad_image(srv)
    # run_test_bad_arg(srv)

    for _ in range(times):
        shuffle(cases)
        for idx, case in enumerate(cases):
            t0 = time.time()
            log.info('run case {}/{} image:{}'.format(
                idx + 1, len(cases), case[0]))
            req = pb.InferenceRequest(
                data=pb.InferenceRequest.RequestData(
                    body=read_test_image_with_cache(tsv_prefix + case[0])),
                params=json.dumps({'limit': 3}))
            resp = srv.net_inference_wrap(req)
            assert isinstance(resp, pb.InferenceResponse)
            assert resp.result
            assert resp.code == 200

            payload = json.loads(resp.result)
            confs = payload['confidences']
            confs.sort(key=lambda c: c['index'])
            # Round-trip through JSON so numeric types compare uniformly.
            got = json.loads(json.dumps(confs))
            want = json.loads(case[1])
            assert DeepDiff(want, got, significant_digits=3) == {}
            log.info('use time {}'.format(time.time() - t0))
def run_test_bad_image(srv):
    """Assert the server returns 400 when the body is not a decodable image."""
    bad_req = pb.InferenceRequest(
        data=pb.InferenceRequest.RequestData(body=b'xxxx'),
        params=json.dumps({'limit': 3}))
    resp = srv.net_inference_wrap(bad_req)
    assert resp.code == 400
    assert resp.message == 'cv2 load image from body failed'
def serve(self):
    """Blocking worker loop.

    Connects a ZMQ REP socket to the inference endpoint, reports a
    successful startup to the monitor, then forever receives serialized
    InferenceRequest messages and replies with serialized responses.
    Never returns.
    """
    ctx = zmq.Context()
    sock = ctx.socket(zmq.REP)
    sock.connect(const.INFERENCE_ZMQ_IN)

    # Tell the monitor process this worker came up successfully.
    self.monitor_push.send(
        pb.MonitorMetric(
            kind="inference_started_success",
            pid=str(self.pid),
        ).SerializeToString())

    while True:
        raw = sock.recv()
        req = pb.InferenceRequest()
        req.ParseFromString(raw)
        reply = self.net_inference_wrap(req)
        sock.send(reply.SerializeToString())
def run_test_empty_image(srv):
    """Assert the server returns 400 when the request body is empty."""
    req = pb.InferenceRequest(
        data=pb.InferenceRequest.RequestData(body=b''))
    resp = srv.net_inference_wrap(req)
    assert resp.code == 400
    assert resp.message == 'cv2 load image from body failed'