def infer_zero(tester, pf, batch_size, tensor_dtype, input_shapes, output_shapes,
               model_version=None, use_http=True, use_grpc=True,
               use_http_json_tensors=True, use_streaming=True,
               shm_region_name_prefix=None, use_system_shared_memory=False,
               use_cuda_shared_memory=False, priority=0, timeout_us=0):
    """Run an identity ("zero") model over every requested protocol config and
    verify that each output tensor exactly matches its input tensor."""
    tester.assertTrue(
        use_http or use_grpc or use_http_json_tensors or use_streaming)
    configs = []
    if use_http:
        configs.append(("localhost:8000", "http", False, True))
        # Float16 not supported for input and output via JSON
        if use_http_json_tensors and (tensor_dtype != np.float16):
            configs.append(("localhost:8000", "http", False, False))
    if use_grpc:
        configs.append(("localhost:8001", "grpc", False, False))
    if use_streaming:
        configs.append(("localhost:8001", "grpc", True, False))
    tester.assertEqual(len(input_shapes), len(output_shapes))
    io_cnt = len(input_shapes)

    if shm_region_name_prefix is None:
        shm_region_name_prefix = ["input", "output"]

    input_dict = {}
    expected_dict = {}
    shm_ip_handles = list()
    shm_op_handles = list()

    for io_num in range(io_cnt):
        if pf == "libtorch" or pf == "libtorch_nobatch":
            input_name = "INPUT__{}".format(io_num)
            output_name = "OUTPUT__{}".format(io_num)
        else:
            input_name = "INPUT{}".format(io_num)
            output_name = "OUTPUT{}".format(io_num)

        input_shape = input_shapes[io_num]
        output_shape = output_shapes[io_num]

        rtensor_dtype = _range_repr_dtype(tensor_dtype)
        if (rtensor_dtype != np.bool):
            input_array = np.random.randint(low=np.iinfo(rtensor_dtype).min,
                                            high=np.iinfo(rtensor_dtype).max,
                                            size=input_shape,
                                            dtype=rtensor_dtype)
        else:
            input_array = np.random.choice(a=[False, True], size=input_shape)
        if tensor_dtype != np.object:
            input_array = input_array.astype(tensor_dtype)
            expected_array = np.ndarray.copy(input_array)
        else:
            expected_array = np.array([
                unicode(str(x), encoding='utf-8')
                for x in input_array.flatten()
            ], dtype=object)
            input_array = np.array([str(x) for x in input_array.flatten()],
                                   dtype=object).reshape(input_array.shape)

        expected_array = expected_array.reshape(output_shape)
        expected_dict[output_name] = expected_array

        output_byte_size = expected_array.nbytes

        if batch_size == 1:
            input_list = [input_array]
        else:
            input_list = [x for x in input_array]

        # Serialization of string tensors in the case of shared memory must be
        # done manually
        if tensor_dtype == np.object:
            input_list_tmp = serialize_byte_tensor_list(input_list)
        else:
            input_list_tmp = input_list

        input_byte_size = sum([ip.nbytes for ip in input_list_tmp])

        # Create and register shared memory regions for inputs and outputs
        shm_io_handles = su.create_set_either_shm_region(
            [shm_region_name_prefix[0] + str(io_num),
             shm_region_name_prefix[1] + str(io_num)],
            input_list_tmp, input_byte_size, output_byte_size,
            use_system_shared_memory, use_cuda_shared_memory)

        if len(shm_io_handles) != 0:
            shm_ip_handles.append(shm_io_handles[0])
            shm_op_handles.append(shm_io_handles[1])
        input_dict[input_name] = input_array

    if model_version is not None:
        model_version = str(model_version)
    else:
        model_version = ""

    # Run inference and check results for each config
    for config in configs:
        model_name = tu.get_zero_model_name(pf, io_cnt, tensor_dtype)

        if config[1] == "http":
            triton_client = httpclient.InferenceServerClient(config[0],
                                                             verbose=True)
        else:
            triton_client = grpcclient.InferenceServerClient(config[0],
                                                             verbose=True)

        inputs = []
        output_req = []
        for io_num, (input_name, output_name) in enumerate(
                zip(input_dict.keys(), expected_dict.keys())):
            input_data = input_dict[input_name]
            input_byte_size = input_data.nbytes
            output_byte_size = expected_dict[output_name].nbytes
            if config[1] == "http":
                inputs.append(
                    httpclient.InferInput(input_name, input_data.shape,
                                          np_to_triton_dtype(tensor_dtype)))
                output_req.append(
                    httpclient.InferRequestedOutput(output_name,
                                                    binary_data=config[3]))
            else:
                inputs.append(
                    grpcclient.InferInput(input_name, input_data.shape,
                                          np_to_triton_dtype(tensor_dtype)))
                output_req.append(grpcclient.InferRequestedOutput(output_name))

            if not (use_cuda_shared_memory or use_system_shared_memory):
                if config[1] == "http":
                    inputs[-1].set_data_from_numpy(input_data,
                                                   binary_data=config[3])
                else:
                    inputs[-1].set_data_from_numpy(input_data)
            else:
                # Register necessary shared memory regions/handles
                su.register_add_either_shm_regions(
                    inputs, output_req, shm_region_name_prefix,
                    (shm_ip_handles, shm_op_handles), io_num, input_byte_size,
                    output_byte_size, use_system_shared_memory,
                    use_cuda_shared_memory, triton_client)

        if config[2]:
            user_data = UserData()
            triton_client.start_stream(partial(completion_callback, user_data))
            try:
                results = triton_client.async_stream_infer(
                    model_name,
                    inputs,
                    model_version=model_version,
                    outputs=output_req,
                    request_id=str(_unique_request_id()),
                    priority=priority,
                    timeout=timeout_us)
            except Exception as e:
                triton_client.stop_stream()
                raise e
            triton_client.stop_stream()
            (results, error) = user_data._completed_requests.get()
            if error is not None:
                raise error
        else:
            results = triton_client.infer(model_name,
                                          inputs,
                                          model_version=model_version,
                                          outputs=output_req,
                                          request_id=str(_unique_request_id()),
                                          priority=priority,
                                          timeout=timeout_us)

        last_response = results.get_response()

        if config[1] == "http":
            response_model_name = last_response["model_name"]
            if model_version != "":
                response_model_version = last_response["model_version"]
            response_outputs = last_response["outputs"]
        else:
            response_model_name = last_response.model_name
            if model_version != "":
                response_model_version = last_response.model_version
            response_outputs = last_response.outputs

        tester.assertEqual(response_model_name, model_name)
        if model_version != "":
            tester.assertEqual(response_model_version, model_version)
        tester.assertEqual(len(response_outputs), io_cnt)

        for result in response_outputs:
            if config[1] == "http":
                result_name = result["name"]
            else:
                result_name = result.name

            tester.assertTrue(result_name in expected_dict)
            if use_system_shared_memory or use_cuda_shared_memory:
                if pf == "libtorch" or pf == "libtorch_nobatch":
                    io_num = int(result_name.split("OUTPUT__")[1])
                else:
                    io_num = int(result_name.split("OUTPUT")[1])
                shm_handle = shm_op_handles[io_num]

                output = results.get_output(result_name)
                if config[1] == "http":
                    output_datatype = output['datatype']
                    output_shape = output['shape']
                else:
                    output_datatype = output.datatype
                    output_shape = output.shape
                output_dtype = triton_to_np_dtype(output_datatype)
            if use_system_shared_memory:
                output_data = shm.get_contents_as_numpy(
                    shm_handle, output_dtype, output_shape)
            elif use_cuda_shared_memory:
                output_data = cudashm.get_contents_as_numpy(
                    shm_handle, output_dtype, output_shape)
            else:
                output_data = results.as_numpy(result_name)

            if (output_data.dtype == np.object) and (config[3] == False):
                output_data = output_data.astype(np.bytes_)

            expected = expected_dict[result_name]
            tester.assertEqual(output_data.shape, expected.shape)
            tester.assertTrue(
                np.array_equal(output_data, expected),
                "{}, {}, expected: {}, got {}".format(model_name, result_name,
                                                      expected, output_data))

    if len(shm_ip_handles) != 0:
        for io_num in range(io_cnt):
            if use_cuda_shared_memory:
                # Unregister and free both the input and output regions for
                # this IO pair.
                triton_client.unregister_cuda_shared_memory(
                    shm_region_name_prefix[0] + str(io_num) + '_data')
                triton_client.unregister_cuda_shared_memory(
                    shm_region_name_prefix[1] + str(io_num) + '_data')
                cudashm.destroy_shared_memory_region(shm_ip_handles[io_num])
                cudashm.destroy_shared_memory_region(shm_op_handles[io_num])
            else:
                triton_client.unregister_system_shared_memory(
                    shm_region_name_prefix[0] + str(io_num) + '_data')
                triton_client.unregister_system_shared_memory(
                    shm_region_name_prefix[1] + str(io_num) + '_data')
                shm.destroy_shared_memory_region(shm_ip_handles[io_num])
                shm.destroy_shared_memory_region(shm_op_handles[io_num])

    return results
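# Illustrative only: a minimal sketch of how infer_zero might be driven from a
# unittest.TestCase. The 'savedmodel' platform and the (1, 16) shapes are
# assumptions for the example; any identity ("zero") model produced by the QA
# model generator could be substituted.
import unittest

class InferZeroExample(unittest.TestCase):

    def test_savedmodel_zero1(self):
        # One input/output pair, batch size 1, FP32, no shared memory.
        infer_zero(self,
                   'savedmodel',
                   1,
                   np.float32,
                   input_shapes=[(1, 16)],
                   output_shapes=[(1, 16)])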
def infer_exact(tester, pf, tensor_shape, batch_size, input_dtype, output0_dtype,
                output1_dtype, output0_raw=True, output1_raw=True,
                model_version=None, swap=False, outputs=("OUTPUT0", "OUTPUT1"),
                use_http=True, use_grpc=True, use_http_json_tensors=True,
                skip_request_id_check=False, use_streaming=True,
                correlation_id=0, shm_region_names=None,
                precreated_shm_regions=None, use_system_shared_memory=False,
                use_cuda_shared_memory=False, priority=0, timeout_us=0):
    """Run an add/sub style model (OUTPUT0 = INPUT0 + INPUT1, OUTPUT1 =
    INPUT0 - INPUT1, reversed when swap=True) over every requested protocol
    config, checking raw and classification outputs exactly."""
    tester.assertTrue(
        use_http or use_http_json_tensors or use_grpc or use_streaming)
    configs = []
    if use_http:
        configs.append(("localhost:8000", "http", False, True))
        if output0_raw == output1_raw:
            # Float16 not supported for Input and Output via JSON
            if use_http_json_tensors and (input_dtype != np.float16) and \
                    (output0_dtype != np.float16) and (output1_dtype != np.float16):
                configs.append(("localhost:8000", "http", False, False))
    if use_grpc:
        configs.append(("localhost:8001", "grpc", False, False))
    if use_streaming:
        configs.append(("localhost:8001", "grpc", True, False))

    # outputs are sum and difference of inputs so set max input
    # values so that they will not overflow the output. This
    # allows us to do an exact match. For float types use 8, 16,
    # 32 int range for fp 16, 32, 64 respectively. When getting
    # class outputs the result value/probability is returned as a
    # float so must use fp32 range in that case.
    rinput_dtype = _range_repr_dtype(input_dtype)
    routput0_dtype = _range_repr_dtype(
        output0_dtype if output0_raw else np.float32)
    routput1_dtype = _range_repr_dtype(
        output1_dtype if output1_raw else np.float32)
    val_min = max(np.iinfo(rinput_dtype).min,
                  np.iinfo(routput0_dtype).min,
                  np.iinfo(routput1_dtype).min) / 2
    val_max = min(np.iinfo(rinput_dtype).max,
                  np.iinfo(routput0_dtype).max,
                  np.iinfo(routput1_dtype).max) / 2

    num_classes = 3

    input0_array = np.random.randint(low=val_min, high=val_max,
                                     size=tensor_shape, dtype=rinput_dtype)
    input1_array = np.random.randint(low=val_min, high=val_max,
                                     size=tensor_shape, dtype=rinput_dtype)
    if input_dtype != np.object:
        input0_array = input0_array.astype(input_dtype)
        input1_array = input1_array.astype(input_dtype)

    if not swap:
        output0_array = input0_array + input1_array
        output1_array = input0_array - input1_array
    else:
        output0_array = input0_array - input1_array
        output1_array = input0_array + input1_array

    if output0_dtype == np.object:
        output0_array = np.array([
            unicode(str(x), encoding='utf-8')
            for x in (output0_array.flatten())
        ], dtype=object).reshape(output0_array.shape)
    else:
        output0_array = output0_array.astype(output0_dtype)
    if output1_dtype == np.object:
        output1_array = np.array([
            unicode(str(x), encoding='utf-8')
            for x in (output1_array.flatten())
        ], dtype=object).reshape(output1_array.shape)
    else:
        output1_array = output1_array.astype(output1_dtype)

    if input_dtype == np.object:
        in0n = np.array([str(x) for x in input0_array.reshape(input0_array.size)],
                        dtype=object)
        input0_array = in0n.reshape(input0_array.shape)
        in1n = np.array([str(x) for x in input1_array.reshape(input1_array.size)],
                        dtype=object)
        input1_array = in1n.reshape(input1_array.shape)

    # prepend size of string to output string data
    if output0_dtype == np.object:
        if batch_size == 1:
            output0_array_tmp = serialize_byte_tensor_list([output0_array])
        else:
            output0_array_tmp = serialize_byte_tensor_list(output0_array)
    else:
        output0_array_tmp = output0_array

    if output1_dtype == np.object:
        if batch_size == 1:
            output1_array_tmp = serialize_byte_tensor_list([output1_array])
        else:
            output1_array_tmp = serialize_byte_tensor_list(output1_array)
    else:
        output1_array_tmp = output1_array

    OUTPUT0 = "OUTPUT0"
    OUTPUT1 = "OUTPUT1"
    INPUT0 = "INPUT0"
    INPUT1 = "INPUT1"
    if pf == "libtorch" or pf == "libtorch_nobatch":
        OUTPUT0 = "OUTPUT__0"
        OUTPUT1 = "OUTPUT__1"
        INPUT0 = "INPUT__0"
        INPUT1 = "INPUT__1"

    output0_byte_size = sum([o0.nbytes for o0 in output0_array_tmp])
    output1_byte_size = sum([o1.nbytes for o1 in output1_array_tmp])

    if batch_size == 1:
        input0_list = [input0_array]
        input1_list = [input1_array]
    else:
        input0_list = [x for x in input0_array]
        input1_list = [x for x in input1_array]

    # Serialization of string tensors in the case of shared memory must be
    # done manually
    if input_dtype == np.object:
        input0_list_tmp = serialize_byte_tensor_list(input0_list)
        input1_list_tmp = serialize_byte_tensor_list(input1_list)
    else:
        input0_list_tmp = input0_list
        input1_list_tmp = input1_list

    input0_byte_size = sum([i0.nbytes for i0 in input0_list_tmp])
    input1_byte_size = sum([i1.nbytes for i1 in input1_list_tmp])

    # Create system/cuda shared memory regions if needed
    shm_regions, shm_handles = su.create_set_shm_regions(
        input0_list_tmp, input1_list_tmp, output0_byte_size, output1_byte_size,
        outputs, shm_region_names, precreated_shm_regions,
        use_system_shared_memory, use_cuda_shared_memory)

    if model_version is not None:
        model_version = str(model_version)
    else:
        model_version = ""

    # Run inference and check results for each config
    for config in configs:
        model_name = tu.get_model_name(pf, input_dtype, output0_dtype,
                                       output1_dtype)

        if config[1] == "http":
            triton_client = httpclient.InferenceServerClient(config[0],
                                                             verbose=True)
        else:
            triton_client = grpcclient.InferenceServerClient(config[0],
                                                             verbose=True)

        inputs = []
        if config[1] == "http":
            inputs.append(
                httpclient.InferInput(INPUT0, tensor_shape,
                                      np_to_triton_dtype(input_dtype)))
            inputs.append(
                httpclient.InferInput(INPUT1, tensor_shape,
                                      np_to_triton_dtype(input_dtype)))
        else:
            inputs.append(
                grpcclient.InferInput(INPUT0, tensor_shape,
                                      np_to_triton_dtype(input_dtype)))
            inputs.append(
                grpcclient.InferInput(INPUT1, tensor_shape,
                                      np_to_triton_dtype(input_dtype)))

        if not (use_cuda_shared_memory or use_system_shared_memory):
            if config[1] == "http":
                inputs[0].set_data_from_numpy(input0_array,
                                              binary_data=config[3])
                inputs[1].set_data_from_numpy(input1_array,
                                              binary_data=config[3])
            else:
                inputs[0].set_data_from_numpy(input0_array)
                inputs[1].set_data_from_numpy(input1_array)
        else:
            # Register necessary shared memory regions/handles
            su.register_add_shm_regions(
                inputs, outputs, shm_regions, precreated_shm_regions,
                shm_handles, input0_byte_size, input1_byte_size,
                output0_byte_size, output1_byte_size, use_system_shared_memory,
                use_cuda_shared_memory, triton_client)

        if batch_size == 1:
            expected0_sort_idx = [
                np.flip(np.argsort(x.flatten()), 0)
                for x in output0_array.reshape((1,) + tensor_shape)
            ]
            expected1_sort_idx = [
                np.flip(np.argsort(x.flatten()), 0)
                for x in output1_array.reshape((1,) + tensor_shape)
            ]
        else:
            expected0_sort_idx = [
                np.flip(np.argsort(x.flatten()), 0)
                for x in output0_array.reshape(tensor_shape)
            ]
            expected1_sort_idx = [
                np.flip(np.argsort(x.flatten()), 0)
                for x in output1_array.reshape(tensor_shape)
            ]

        # Force binary_data = False for shared memory and class
        output_req = []
        i = 0
        if "OUTPUT0" in outputs:
            if len(shm_regions) != 0:
                if config[1] == "http":
                    output_req.append(
                        httpclient.InferRequestedOutput(OUTPUT0,
                                                        binary_data=config[3]))
                else:
                    output_req.append(grpcclient.InferRequestedOutput(OUTPUT0))
                output_req[-1].set_shared_memory(shm_regions[2] + '_data',
                                                 output0_byte_size)
            else:
                if output0_raw:
                    if config[1] == "http":
                        output_req.append(
                            httpclient.InferRequestedOutput(
                                OUTPUT0, binary_data=config[3]))
                    else:
                        output_req.append(
                            grpcclient.InferRequestedOutput(OUTPUT0))
                else:
                    if config[1] == "http":
                        output_req.append(
                            httpclient.InferRequestedOutput(
                                OUTPUT0, binary_data=config[3],
                                class_count=num_classes))
                    else:
                        output_req.append(
                            grpcclient.InferRequestedOutput(
                                OUTPUT0, class_count=num_classes))
            i += 1
        if "OUTPUT1" in outputs:
            if len(shm_regions) != 0:
                if config[1] == "http":
                    output_req.append(
                        httpclient.InferRequestedOutput(OUTPUT1,
                                                        binary_data=config[3]))
                else:
                    output_req.append(grpcclient.InferRequestedOutput(OUTPUT1))
                output_req[-1].set_shared_memory(shm_regions[2 + i] + '_data',
                                                 output1_byte_size)
            else:
                if output1_raw:
                    if config[1] == "http":
                        output_req.append(
                            httpclient.InferRequestedOutput(
                                OUTPUT1, binary_data=config[3]))
                    else:
                        output_req.append(
                            grpcclient.InferRequestedOutput(OUTPUT1))
                else:
                    if config[1] == "http":
                        output_req.append(
                            httpclient.InferRequestedOutput(
                                OUTPUT1, binary_data=config[3],
                                class_count=num_classes))
                    else:
                        output_req.append(
                            grpcclient.InferRequestedOutput(
                                OUTPUT1, class_count=num_classes))

        if config[2]:
            user_data = UserData()
            triton_client.start_stream(partial(completion_callback, user_data))
            try:
                results = triton_client.async_stream_infer(
                    model_name,
                    inputs,
                    model_version=model_version,
                    outputs=output_req,
                    request_id=str(_unique_request_id()))
            except Exception as e:
                triton_client.stop_stream()
                raise e
            triton_client.stop_stream()
            (results, error) = user_data._completed_requests.get()
            if error is not None:
                raise error
        else:
            results = triton_client.infer(model_name,
                                          inputs,
                                          model_version=model_version,
                                          outputs=output_req,
                                          request_id=str(_unique_request_id()))

        last_response = results.get_response()

        if not skip_request_id_check:
            global _seen_request_ids
            if config[1] == "http":
                request_id = int(last_response["id"])
            else:
                request_id = int(last_response.id)
            tester.assertFalse(request_id in _seen_request_ids,
                               "request_id: {}".format(request_id))
            _seen_request_ids.add(request_id)

        if config[1] == "http":
            response_model_name = last_response["model_name"]
            if model_version != "":
                response_model_version = last_response["model_version"]
            response_outputs = last_response["outputs"]
        else:
            response_model_name = last_response.model_name
            if model_version != "":
                response_model_version = last_response.model_version
            response_outputs = last_response.outputs

        tester.assertEqual(response_model_name, model_name)
        if model_version != "":
            tester.assertEqual(str(response_model_version), model_version)
        tester.assertEqual(len(response_outputs), len(outputs))

        for result in response_outputs:
            if config[1] == "http":
                result_name = result["name"]
            else:
                result_name = result.name

            if ((result_name == OUTPUT0 and output0_raw) or
                    (result_name == OUTPUT1 and output1_raw)):
                if use_system_shared_memory or use_cuda_shared_memory:
                    if result_name == OUTPUT0:
                        shm_handle = shm_handles[2]
                    else:
                        shm_handle = shm_handles[3]

                    output = results.get_output(result_name)
                    if config[1] == "http":
                        output_datatype = output['datatype']
                        output_shape = output['shape']
                    else:
                        output_datatype = output.datatype
                        output_shape = output.shape
                    output_dtype = triton_to_np_dtype(output_datatype)
                if use_system_shared_memory:
                    output_data = shm.get_contents_as_numpy(
                        shm_handle, output_dtype, output_shape)
                elif use_cuda_shared_memory:
                    output_data = cudashm.get_contents_as_numpy(
                        shm_handle, output_dtype, output_shape)
                else:
                    output_data = results.as_numpy(result_name)

                if (output_data.dtype == np.object) and (config[3] == False):
                    output_data = output_data.astype(np.bytes_)

                if result_name == OUTPUT0:
                    tester.assertTrue(
                        np.array_equal(output_data, output0_array),
                        "{}, {} expected: {}, got {}".format(
                            model_name, OUTPUT0, output0_array, output_data))
                elif result_name == OUTPUT1:
                    tester.assertTrue(
                        np.array_equal(output_data, output1_array),
                        "{}, {} expected: {}, got {}".format(
                            model_name, OUTPUT1, output1_array, output_data))
                else:
                    tester.assertTrue(
                        False, "unexpected raw result {}".format(result_name))
            else:
                for b in range(batch_size):
                    # num_classes values must be returned and must
                    # match expected top values
                    if "nobatch" in pf:
                        class_list = results.as_numpy(result_name)
                    else:
                        class_list = results.as_numpy(result_name)[b]

                    tester.assertEqual(len(class_list), num_classes)
                    if batch_size == 1:
                        expected0_flatten = output0_array.flatten()
                        expected1_flatten = output1_array.flatten()
                    else:
                        expected0_flatten = output0_array[b].flatten()
                        expected1_flatten = output1_array[b].flatten()

                    for idx, class_label in enumerate(class_list):
                        # can't compare indices since could have different
                        # indices with the same value/prob, so check that
                        # the value of each index equals the expected value.
                        # Only compare labels when the indices are equal.
                        if type(class_label) == str:
                            ctuple = class_label.split(':')
                        else:
                            ctuple = "".join(
                                chr(x) for x in class_label).split(':')
                        cval = float(ctuple[0])
                        cidx = int(ctuple[1])
                        if result_name == OUTPUT0:
                            tester.assertEqual(cval, expected0_flatten[cidx])
                            tester.assertEqual(
                                cval,
                                expected0_flatten[expected0_sort_idx[b][idx]])
                            if cidx == expected0_sort_idx[b][idx]:
                                tester.assertEqual(
                                    ctuple[2], 'label{}'.format(
                                        expected0_sort_idx[b][idx]))
                        elif result_name == OUTPUT1:
                            tester.assertEqual(cval, expected1_flatten[cidx])
                            tester.assertEqual(
                                cval,
                                expected1_flatten[expected1_sort_idx[b][idx]])
                        else:
                            tester.assertTrue(
                                False,
                                "unexpected class result {}".format(result_name))

    # Unregister system/cuda shared memory regions if they exist
    su.unregister_cleanup_shm_regions(shm_regions, shm_handles,
                                      precreated_shm_regions, outputs,
                                      use_system_shared_memory,
                                      use_cuda_shared_memory)

    return results
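# Illustrative only: the add/sub helper above exercised with CUDA shared memory
# and an explicit model version. The 'plan' platform, the (1, 16) shape and the
# region names are assumptions for this sketch, not values taken from the
# original test suite.
def _example_infer_exact_shm(tester):
    return infer_exact(tester,
                       'plan', (1, 16), 1,
                       np.float32, np.float32, np.float32,
                       model_version=1,
                       shm_region_names=['ip0', 'ip1', 'op0', 'op1'],
                       use_cuda_shared_memory=True)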
request_count = 2
try:
    # Need to specify large enough concurrency to issue all the
    # inference requests to the server in parallel.
    triton_client = tritonhttpclient.InferenceServerClient(
        url=FLAGS.url, verbose=FLAGS.verbose, concurrency=request_count)
except Exception as e:
    print("context creation failed: " + str(e))
    sys.exit()

model_name = 'simple'

# Infer
inputs = []
outputs = []
inputs.append(tritonhttpclient.InferInput('INPUT0', [1, 16], "INT32"))
inputs.append(tritonhttpclient.InferInput('INPUT1', [1, 16], "INT32"))

# Create the data for the two input tensors. Initialize the first
# to unique integers and the second to all ones.
input0_data = np.arange(start=0, stop=16, dtype=np.int32)
input0_data = np.expand_dims(input0_data, axis=0)
input1_data = np.ones(shape=(1, 16), dtype=np.int32)

# Initialize the data
inputs[0].set_data_from_numpy(input0_data, binary_data=True)
inputs[1].set_data_from_numpy(input1_data, binary_data=True)

outputs.append(
    tritonhttpclient.InferRequestedOutput('OUTPUT0', binary_data=True))
outputs.append(
    tritonhttpclient.InferRequestedOutput('OUTPUT1', binary_data=True))
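# A plausible continuation of the snippet above (the original is truncated
# here): issue `request_count` asynchronous requests, made possible by the
# `concurrency` setting on the client, then block on each result.
async_requests = []
for i in range(request_count):
    async_requests.append(
        triton_client.async_infer(model_name, inputs, outputs=outputs))

for async_request in async_requests:
    # get_result() blocks until this request's response has been received.
    result = async_request.get_result()
    print(result.as_numpy('OUTPUT0'))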
def _test_unicode_bytes(self, model_name):
    # We use a simple model that takes an input tensor of 8 byte strings
    # and returns an output tensor of 8 strings. The output tensor is the
    # same as the input tensor.

    # Create the inference server client for the model.
    triton_client = tritonhttpclient.InferenceServerClient("localhost:8000",
                                                           verbose=True)

    # Create the data for the input tensor. Initialize the tensor to 8
    # byte strings (dtype of np.bytes_).
    # Sample strings that should no longer cause failure
    in0 = np.array([
        [
            b'\nF\n\'\n\x01a\x12"\x1a \n\x1e\xfa\x03\x94\x01\x0f\xd7\x02\xf1\x05\xdf\x01\x82\x03\xb5\x05\xc1\x07\xba\x06\xff\x06\xc7\x07L\xf5\x03\xe2\x07\xa9\x03\n\x0c\n\x01b\x12\x07\x1a\x05\n\x03\x89\xcc=\n\r\n\x01c\x12\x08\x12\x06\n\x04\xdf\\\xcb\xbf'
        ],
        [
            b'\n:\n\x1a\n\x01a\x12\x15\x1a\x13\n\x11*\xe3\x05\xc5\x06\xda\x07\xcb\x06~\xb1\x05\xb3\x01\xa9\x02\x15\n\r\n\x01b\x12\x08\x1a\x06\n\x04\xf6\xa2\xc5\x01\n\r\n\x01c\x12\x08\x12\x06\n\x04\xbb[\n\xbf'
        ],
        [
            b'\nL\n-\n\x01a\x12(\x1a&\n$\x87\x07\xce\x01\xe7\x06\xee\x04\xe1\x03\xf1\x03\xd7\x07\xbe\x02\xb8\x05\xe0\x05\xe4\x01\x88\x06\xb6\x03\xb9\x05\x83\x06\xf8\x04\xe2\x04\xf4\x06\n\x0c\n\x01b\x12\x07\x1a\x05\n\x03\x89\xcc=\n\r\n\x01c\x12\x08\x12\x06\n\x04\xbc\x99+@'
        ],
        [
            b'\n2\n\x12\n\x01a\x12\r\x1a\x0b\n\t\x99\x02\xde\x04\x9f\x04\xc5\x053\n\r\n\x01b\x12\x08\x1a\x06\n\x04\xf6\xa2\xc5\x01\n\r\n\x01c\x12\x08\x12\x06\n\x04\x12\x07\x83\xbe'
        ],
        [
            b'\nJ\n\r\n\x01b\x12\x08\x1a\x06\n\x04\x9b\x94\xad\x04\n\r\n\x01c\x12\x08\x12\x06\n\x04\xc3\x8a\x08\xbf\n*\n\x01a\x12%\x1a#\n!\x9c\x02\xb2\x02\xcd\x02\x9d\x07\x8d\x01\xb6\x05a\xf1\x01\xf0\x05\xdb\x02\xac\x04\xbd\x05\xe0\x04\xd2\x06\xaf\x02\xa8\x01\x8b\x04'
        ],
        [
            b'\n3\n\x13\n\x01a\x12\x0e\x1a\x0c\n\n<\xe2\x05\x8a\x01\xb3\x07?\xfd\x01\n\r\n\x01b\x12\x08\x1a\x06\n\x04\xf6\xa2\xc5\x01\n\r\n\x01c\x12\x08\x12\x06\n\x04\x1b\x931\xbf'
        ],
        [
            b'\n&\n\x07\n\x01a\x12\x02\x1a\x00\n\x0c\n\x01b\x12\x07\x1a\x05\n\x03\x89\xcc=\n\r\n\x01c\x12\x08\x12\x06\n\x04{\xbc\x0e>'
        ],
        [
            b'\nF\n\'\n\x01a\x12"\x1a \n\x1e\x97\x01\x93\x02\x9e\x01\xac\x06\xff\x01\xd8\x05\xe1\x07\xd8\x04g]\x9a\x05\xff\x06\xde\x07\x8f\x04\x97\x04\xda\x03\n\x0c\n\x01b\x12\x07\x1a\x05\n\x03\x9a\xb7I\n\r\n\x01c\x12\x08\x12\x06\n\x04\xfb\x87\x83\xbf'
        ]
    ], dtype='|S78').flatten()

    # Send inference request to the inference server. Get results for
    # both output tensors.
    inputs = []
    outputs = []
    inputs.append(tritonhttpclient.InferInput('INPUT0', in0.shape, "BYTES"))
    inputs[0].set_data_from_numpy(in0)
    outputs.append(tritonhttpclient.InferRequestedOutput('OUTPUT0'))
    results = triton_client.infer(model_name=model_name,
                                  inputs=inputs,
                                  outputs=outputs)
    out0 = results.as_numpy('OUTPUT0')
    # We expect there to be 1 result (with batch-size 1). Verify
    # that all 8 result elements are the same as the input.
    self.assertTrue(np.array_equal(in0, out0))

    # Same test but for np.object_
    in0_object = in0.astype(np.object)
    inputs = []
    outputs = []
    inputs.append(
        tritonhttpclient.InferInput('INPUT0', in0_object.shape, "BYTES"))
    inputs[0].set_data_from_numpy(in0_object)
    outputs.append(tritonhttpclient.InferRequestedOutput('OUTPUT0'))
    results = triton_client.infer(model_name=model_name,
                                  inputs=inputs,
                                  outputs=outputs)
    output0_object = results.as_numpy('OUTPUT0')
    # We expect there to be 1 result (with batch-size 1). Verify
    # that all 8 result elements are the same as the input.
    self.assertTrue(np.array_equal(in0_object, output0_object))

    # Verify that np.bytes_ and np.object_ give the same result.
    self.assertTrue(np.array_equal(out0, output0_object))

    # Same test but for np.bytes_
    in0_bytes = in0.astype(np.bytes_)
    inputs = []
    outputs = []
    inputs.append(
        tritonhttpclient.InferInput('INPUT0', in0_bytes.shape, "BYTES"))
    inputs[0].set_data_from_numpy(in0_bytes)
    outputs.append(tritonhttpclient.InferRequestedOutput('OUTPUT0'))
    results = triton_client.infer(model_name=model_name,
                                  inputs=inputs,
                                  outputs=outputs)
    output0_byte = results.as_numpy('OUTPUT0')
    # We expect there to be 1 result (with batch-size 1). Verify
    # that all 8 result elements are the same as the input.
    self.assertTrue(np.array_equal(in0_bytes, output0_byte))

    # Verify that the output is the same as before.
    self.assertTrue(np.array_equal(out0, output0_byte))
# to occupy an SM
model_name = FLAGS.model
model_version = "1"

# Create the data for the input tensor.
input_data = np.array([FLAGS.delay], dtype=np.int32)

# Create the inference context for the model.
if FLAGS.protocol.lower() == "grpc":
    triton_client = tritongrpcclient.InferenceServerClient(
        FLAGS.url, verbose=FLAGS.verbose)
    inputs = [tritongrpcclient.InferInput('in', input_data.shape, "INT32")]
else:
    triton_client = tritonhttpclient.InferenceServerClient(
        FLAGS.url, verbose=FLAGS.verbose)
    inputs = [tritonhttpclient.InferInput('in', input_data.shape, "INT32")]

inputs[0].set_data_from_numpy(input_data)

# Send N inference requests to the inference server. Time the inference for
# both requests
start_time = time()
for i in range(FLAGS.count):
    triton_client.async_infer(model_name,
                              inputs,
                              partial(completion_callback),
                              model_version=model_version,
                              request_id=str(i),
                              headers=FLAGS.http_headers)
def test_nobatch_request_for_batching_model(self):
    input_size = 16

    # graphdef_int32_int8_int8 has a batching version with max batch size of 8.
    # The server should return an error if the batch size is not included in
    # the input shapes.
    tensor_shape = (input_size,)
    for protocol in ["http", "grpc"]:
        model_name = tu.get_model_name("graphdef", np.int32, np.int8, np.int8)
        in0 = np.random.randint(low=0, high=100, size=tensor_shape,
                                dtype=np.int32)
        in1 = np.random.randint(low=0, high=100, size=tensor_shape,
                                dtype=np.int32)

        inputs = []
        outputs = []
        if protocol == "http":
            triton_client = tritonhttpclient.InferenceServerClient(
                url='localhost:8000', verbose=True)
            inputs.append(
                tritonhttpclient.InferInput('INPUT0', tensor_shape, "INT32"))
            inputs.append(
                tritonhttpclient.InferInput('INPUT1', tensor_shape, "INT32"))
            outputs.append(tritonhttpclient.InferRequestedOutput('OUTPUT0'))
            outputs.append(tritonhttpclient.InferRequestedOutput('OUTPUT1'))
        else:
            triton_client = tritongrpcclient.InferenceServerClient(
                url='localhost:8001', verbose=True)
            inputs.append(
                tritongrpcclient.InferInput('INPUT0', tensor_shape, "INT32"))
            inputs.append(
                tritongrpcclient.InferInput('INPUT1', tensor_shape, "INT32"))
            outputs.append(tritongrpcclient.InferRequestedOutput('OUTPUT0'))
            outputs.append(tritongrpcclient.InferRequestedOutput('OUTPUT1'))

        # Initialize the data
        inputs[0].set_data_from_numpy(in0)
        inputs[1].set_data_from_numpy(in1)

        try:
            results = triton_client.infer(model_name, inputs, outputs=outputs)
            self.assertTrue(
                False,
                "expected failure with no batch request for batching model")
        except InferenceServerException as ex:
            pass
requests = []
responses = []
result_filenames = []
request_ids = []
image_idx = 0
last_request = False
user_data = UserData()

# Holds the handles to the ongoing HTTP async requests.
async_requests = []

sent_count = 0

try:
    for image in image_data:
        sent_count += 1
        inputs = [tritonhttpclient.InferInput(input_name, image.shape, dtype)]
        outputs = [tritonhttpclient.InferRequestedOutput(output_name)]
        inputs[0].set_data_from_numpy(image, binary_data=True)
        responses.append(
            triton_client.infer(model_name,
                                inputs,
                                request_id=str(sent_count),
                                model_version=model_version,
                                outputs=outputs))
except InferenceServerException as e:
    print("inference failed: " + str(e))
    sys.exit(1)

for response in responses:
    this_id = response.get_response()["id"]
]], dtype='float32')  # np.random.rand(*shape).astype(np.float32)
input1_data = np.array([[
    123, 630, 1741, 169492, 439138, 549150, 549420, 559916, 561648, 562203,
    595960, 617230, 785371, 951890, 954587, 961209, 1127998, 1268021, 1272637,
    1273122, 1274952, 1284808, 1599234, 1599246, 1661028, 1679074, 1713689
]], dtype='uint32')
input2_data = np.array([[
    0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
    21, 22, 23, 24, 25, 26
]], dtype='int32')

inputs = [
    httpclient.InferInput("DES", input0_data.shape,
                          np_to_triton_dtype(input0_data.dtype)),
    httpclient.InferInput("CATCOLUMN", input1_data.shape,
                          np_to_triton_dtype(input1_data.dtype)),
    httpclient.InferInput("ROWINDEX", input2_data.shape,
                          np_to_triton_dtype(input2_data.dtype)),
]

inputs[0].set_data_from_numpy(input0_data)
inputs[1].set_data_from_numpy(input1_data)
inputs[2].set_data_from_numpy(input2_data)

outputs = [httpclient.InferRequestedOutput("OUTPUT0")]

response = client.infer(model_name, inputs, request_id=str(1), outputs=outputs)
for idx in range(batch_size):
    input_filenames.append(filenames[idx])
    repeated_image_data.append(image_data[idx])

batched_image_data = np.stack(repeated_image_data, axis=0)

# Set the input data
inputs = []
if FLAGS.protocol.lower() == "grpc":
    inputs.append(
        tritongrpcclient.InferInput(input_name, batched_image_data.shape,
                                    "BYTES"))
    inputs[0].set_data_from_numpy(batched_image_data)
else:
    inputs.append(
        tritonhttpclient.InferInput(input_name, batched_image_data.shape,
                                    "BYTES"))
    inputs[0].set_data_from_numpy(batched_image_data, binary_data=True)

outputs = []
if FLAGS.protocol.lower() == "grpc":
    outputs.append(
        tritongrpcclient.InferRequestedOutput(output_name,
                                              class_count=FLAGS.classes))
else:
    outputs.append(
        tritonhttpclient.InferRequestedOutput(output_name,
                                              binary_data=False,
                                              class_count=FLAGS.classes))

# Send request
result = triton_client.infer(model_name, inputs, outputs=outputs)
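# A short, hedged sketch of consuming the classification output requested
# above. Each returned element encodes "<value>:<class index>[:<label>]";
# splitting on ':' recovers the pieces. This mirrors how Triton's image-client
# example post-processes results, but is shown here only as an illustration.
output_array = result.as_numpy(output_name)
for image_results in output_array:
    for cls in image_results:
        if type(cls) != str:
            cls = "".join(chr(x) for x in cls)
        fields = cls.split(':')
        print("    {} ({}) = {}".format(fields[1], fields[0],
                                        fields[2] if len(fields) > 2 else ''))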
# Put input data values into shared memory
shm.set_shared_memory_region(shm_ip0_handle, [input0_data_serialized])
shm.set_shared_memory_region(shm_ip1_handle, [input1_data_serialized])

# Register Input0 and Input1 shared memory with Triton Server
triton_client.register_system_shared_memory("input0_data", "/input0_simple",
                                             input0_byte_size)
triton_client.register_system_shared_memory("input1_data", "/input1_simple",
                                             input1_byte_size)

# Set the parameters to use data from shared memory
inputs = []
inputs.append(httpclient.InferInput('INPUT0', [1, 16], "BYTES"))
inputs[-1].set_shared_memory("input0_data", input0_byte_size)

inputs.append(httpclient.InferInput('INPUT1', [1, 16], "BYTES"))
inputs[-1].set_shared_memory("input1_data", input1_byte_size)

outputs = []
outputs.append(httpclient.InferRequestedOutput('OUTPUT0', binary_data=True))
outputs[-1].set_shared_memory("output0_data", output0_byte_size)

outputs.append(httpclient.InferRequestedOutput('OUTPUT1', binary_data=True))
outputs[-1].set_shared_memory("output1_data", output1_byte_size)

results = triton_client.infer(model_name=model_name,
                              inputs=inputs,
                              outputs=outputs)
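# Hedged sketch (not part of the snippet above): once the inference completes,
# the outputs live in the "output0_data"/"output1_data" regions and can be
# mapped back into numpy. shm_op0_handle is assumed to be the handle returned
# by shm.create_shared_memory_region() when the output region was created
# earlier in the example.
output0 = results.get_output("OUTPUT0")
if output0 is not None:
    output0_data = shm.get_contents_as_numpy(
        shm_op0_handle, triton_to_np_dtype(output0['datatype']),
        output0['shape'])
    print("OUTPUT0 read from shared memory: {}".format(output0_data))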
from tritonclientutils import *
import tritongrpcclient as grpcclient
import tritonhttpclient as httpclient
import numpy as np

model_name = "python_float32_float32_float32"
shape = [4]

with httpclient.InferenceServerClient("localhost:8000") as client:
    input0_data = np.random.rand(*shape).astype(np.float32)
    input1_data = np.random.rand(*shape).astype(np.float32)
    inputs = [
        httpclient.InferInput("INPUT0", input0_data.shape,
                              np_to_triton_dtype(input0_data.dtype)),
        httpclient.InferInput("INPUT1", input1_data.shape,
                              np_to_triton_dtype(input1_data.dtype)),
    ]

    inputs[0].set_data_from_numpy(input0_data)
    inputs[1].set_data_from_numpy(input1_data)

    outputs = [
        httpclient.InferRequestedOutput("OUTPUT0"),
        httpclient.InferRequestedOutput("OUTPUT1"),
    ]

    response = client.infer(model_name,
                            inputs,
                            request_id=str(1),
                            outputs=outputs)
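    # A possible continuation inside the same `with` block (hedged): read back
    # the two outputs. The sum/difference check assumes the model behaves like
    # the add/sub examples elsewhere in this file; adjust for other models.
    output0_data = response.as_numpy("OUTPUT0")
    output1_data = response.as_numpy("OUTPUT1")
    if not np.allclose(input0_data + input1_data, output0_data):
        print("error: expected OUTPUT0 = INPUT0 + INPUT1")
    if not np.allclose(input0_data - input1_data, output1_data):
        print("error: expected OUTPUT1 = INPUT0 - INPUT1")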