Example 1
def requestGenerator(input_name, output_name, c, h, w, format, dtype, FLAGS,
                     input_filenames):
    """Yield a single ModelInferRequest batching FLAGS.batch_size images.

    Builds a KFServing-v2 style gRPC inference request: one requested
    output tensor (with top-K classification enabled) and one input tensor
    whose raw contents are FLAGS.batch_size preprocessed images
    concatenated back to back. If fewer images are provided than the batch
    size, the image list wraps around.

    Args:
        input_name: name of the model's input tensor.
        output_name: name of the model's output tensor.
        c, h, w: channel / height / width expected by the model.
        format: layout constant (mc.ModelInput.FORMAT_NHWC or NCHW).
        dtype: datatype string for the input tensor.
        FLAGS: parsed command-line flags (model_name, model_version,
            image_filename, batch_size, classes, scaling).
        input_filenames: output list; the filename backing each batch slot
            is appended to it (side effect, mirrors the request contents).

    Yields:
        A populated grpc_service_v2_pb2.ModelInferRequest (exactly one).
    """
    request = grpc_service_v2_pb2.ModelInferRequest()
    request.model_name = FLAGS.model_name
    request.model_version = FLAGS.model_version

    # Collect image paths: every regular file of a directory, or the single
    # file given directly. Sorted so batch composition is deterministic.
    if os.path.isdir(FLAGS.image_filename):
        filenames = [
            os.path.join(FLAGS.image_filename, f)
            for f in os.listdir(FLAGS.image_filename)
            if os.path.isfile(os.path.join(FLAGS.image_filename, f))
        ]
    else:
        filenames = [FLAGS.image_filename]

    filenames.sort()

    output = grpc_service_v2_pb2.ModelInferRequest(
    ).InferRequestedOutputTensor()
    output.name = output_name
    # Request the top FLAGS.classes classification results for this output.
    output.parameters['classification'].int64_param = FLAGS.classes
    request.outputs.extend([output])

    # 'input_tensor' avoids shadowing the builtin 'input'.
    input_tensor = grpc_service_v2_pb2.ModelInferRequest().InferInputTensor()
    input_tensor.name = input_name
    input_tensor.datatype = dtype
    if format == mc.ModelInput.FORMAT_NHWC:
        input_tensor.shape.extend([FLAGS.batch_size, h, w, c])
    else:
        input_tensor.shape.extend([FLAGS.batch_size, c, h, w])

    # Preprocess the images into input data according to model
    # requirements. Use a context manager so each image file handle is
    # closed promptly instead of leaking until GC.
    image_data = []
    for filename in filenames:
        with Image.open(filename) as img:
            image_data.append(
                preprocess(img, format, dtype, c, h, w, FLAGS.scaling))

    # Assemble FLAGS.batch_size images, wrapping over the provided images
    # if the requested batch size exceeds their number. b"".join avoids
    # quadratic bytes concatenation and yields b"" for batch_size == 0
    # instead of leaving the contents unset.
    chunks = []
    for idx in range(FLAGS.batch_size):
        img_idx = idx % len(filenames)
        input_filenames.append(filenames[img_idx])
        chunks.append(image_data[img_idx].tobytes())

    input_contents = grpc_service_v2_pb2.InferTensorContents()
    input_contents.raw_contents = b"".join(chunks)
    input_tensor.contents.CopyFrom(input_contents)
    request.inputs.extend([input_tensor])

    yield request
Example 2
def requestGenerator(input_name, output_name, c, h, w, format, dtype, FLAGS):
    """Yield a single ModelInferRequest for one image repeated batch_size times.

    Single-image variant: FLAGS.image_filename is preprocessed once and its
    bytes are replicated FLAGS.batch_size times to fill the batch.

    Args:
        input_name: name of the model's input tensor.
        output_name: name of the model's output tensor.
        c, h, w: channel / height / width expected by the model.
        format: layout constant (mc.ModelInput.FORMAT_NHWC or NCHW).
        dtype: datatype string for the input tensor.
        FLAGS: parsed command-line flags (model_name, model_version,
            image_filename, batch_size, scaling).

    Yields:
        A populated grpc_service_v2_pb2.ModelInferRequest (exactly one).
    """
    request = grpc_service_v2_pb2.ModelInferRequest()
    request.model_name = FLAGS.model_name
    request.model_version = FLAGS.model_version

    output = grpc_service_v2_pb2.ModelInferRequest(
    ).InferRequestedOutputTensor()
    output.name = output_name
    request.outputs.extend([output])

    # 'input_tensor' avoids shadowing the builtin 'input'.
    input_tensor = grpc_service_v2_pb2.ModelInferRequest().InferInputTensor()
    input_tensor.name = input_name
    input_tensor.datatype = dtype
    if format == mc.ModelInput.FORMAT_NHWC:
        input_tensor.shape.extend([FLAGS.batch_size, h, w, c])
    else:
        input_tensor.shape.extend([FLAGS.batch_size, c, h, w])

    # Preprocess the image into input data according to model requirements.
    with Image.open(FLAGS.image_filename) as img:
        image_data = preprocess(img, format, dtype, c, h, w, FLAGS.scaling)

    # The batch is the same image repeated: a single bytes multiplication
    # replaces the original quadratic '+=' loop and correctly produces b""
    # (instead of None) when batch_size == 0.
    input_bytes = image_data.tobytes() * FLAGS.batch_size

    input_contents = grpc_service_v2_pb2.InferTensorContents()
    input_contents.raw_contents = input_bytes
    input_tensor.contents.CopyFrom(input_contents)
    request.inputs.extend([input_tensor])

    yield request
Example 3
    # NOTE(review): this snippet is a fragment — the enclosing function's
    # signature was lost in extraction; 'img', 'request' and 'grpc_stub'
    # come from the missing surrounding code. Presumably 'img' is a BGR
    # OpenCV image (HWC uint8) — confirm against the caller.
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

    target_shape = (360, 360)
    height, width, _ = img.shape
    resized_img = cv2.resize(img, target_shape)
    # Scale pixel values to [0, 1], add a batch axis, and convert
    # NHWC -> NCHW for the model's conv input.
    image_np = resized_img / 255.0
    image_exp = np.expand_dims(image_np, axis=0)
    image_transposed = image_exp.transpose((0, 3, 1, 2))

    # Serialize as float32 bytes for the raw tensor contents.
    raw_input = np.float32(image_transposed).flatten().tobytes()

    # Input tensor "conv2d__0": shape [1, 3, 360, 360] (NCHW).
    conv2d = grpc_service_v2_pb2.ModelInferRequest().InferInputTensor()
    conv2d.name = "conv2d__0"
    conv2d.shape.extend([1, 3, 360, 360])

    conv2d_contents = grpc_service_v2_pb2.InferTensorContents()
    conv2d_contents.raw_contents = raw_input
    conv2d.contents.CopyFrom(conv2d_contents)

    request.inputs.extend([conv2d])

    # Request the two detection-head outputs (localization and
    # classification branches).
    loc_conv = grpc_service_v2_pb2.ModelInferRequest(
    ).InferRequestedOutputTensor()
    loc_conv.name = "loc_branch_concat__0"
    cls_conv = grpc_service_v2_pb2.ModelInferRequest(
    ).InferRequestedOutputTensor()
    cls_conv.name = "cls_branch_concat__1"
    request.outputs.extend([loc_conv, cls_conv])

    # Synchronous inference call over the gRPC stub.
    response = grpc_stub.ModelInfer(request)
    print("model infer:\n{}".format(response))
def requestGenerator(input_name, output_name, c, h, w, format, dtype, FLAGS,
                     result_filenames):
    """Yield a sequence of ModelInferRequests covering all provided images.

    Builds batches of FLAGS.batch_size preprocessed images and yields one
    request per batch. If the image count is not an exact multiple of the
    batch size, the final batch wraps back to the first images.

    Args:
        input_name: name of the model's input tensor.
        output_name: name of the model's output tensor.
        c, h, w: channel / height / width expected by the model.
        format: layout constant (mc.ModelInput.FORMAT_NHWC or NCHW).
        dtype: datatype string for the input tensor.
        FLAGS: parsed command-line flags (model_name, model_version,
            image_filename, batch_size, classes, scaling).
        result_filenames: output list; for each yielded request, the list
            of filenames making up that batch is appended (side effect).

    Yields:
        The same grpc_service_v2_pb2.ModelInferRequest object, repopulated
        for each batch (consumers must process it before the next yield).
    """
    request = grpc_service_v2_pb2.ModelInferRequest()
    request.model_name = FLAGS.model_name
    request.model_version = FLAGS.model_version

    # Collect image paths: every regular file of a directory, or the single
    # file given directly. Sorted so batch composition is deterministic.
    if os.path.isdir(FLAGS.image_filename):
        filenames = [
            os.path.join(FLAGS.image_filename, f)
            for f in os.listdir(FLAGS.image_filename)
            if os.path.isfile(os.path.join(FLAGS.image_filename, f))
        ]
    else:
        filenames = [FLAGS.image_filename]

    filenames.sort()

    output = grpc_service_v2_pb2.ModelInferRequest(
    ).InferRequestedOutputTensor()
    output.name = output_name
    # Request the top FLAGS.classes classification results for this output.
    output.parameters['classification'].int64_param = FLAGS.classes
    request.outputs.extend([output])

    # 'input_tensor' avoids shadowing the builtin 'input'.
    input_tensor = grpc_service_v2_pb2.ModelInferRequest().InferInputTensor()
    input_tensor.name = input_name
    input_tensor.datatype = dtype
    if format == mc.ModelInput.FORMAT_NHWC:
        input_tensor.shape.extend([FLAGS.batch_size, h, w, c])
    else:
        input_tensor.shape.extend([FLAGS.batch_size, c, h, w])

    # Preprocess the images into input data according to model
    # requirements. Use a context manager so each image file handle is
    # closed promptly instead of leaking until GC.
    image_data = []
    for filename in filenames:
        with Image.open(filename) as img:
            image_data.append(
                preprocess(img, format, dtype, c, h, w, FLAGS.scaling))

    # Send requests of FLAGS.batch_size images. If the number of
    # images isn't an exact multiple of FLAGS.batch_size then just
    # start over with the first images until the batch is filled.
    image_idx = 0
    last_request = False
    while not last_request:
        chunks = []
        input_filenames = []
        # The request object is reused; drop the previous batch's input.
        request.ClearField("inputs")
        for _ in range(FLAGS.batch_size):
            input_filenames.append(filenames[image_idx])
            chunks.append(image_data[image_idx].tobytes())

            image_idx = (image_idx + 1) % len(image_data)
            if image_idx == 0:
                # Wrapped past the last image: this batch is the final one.
                last_request = True

        input_contents = grpc_service_v2_pb2.InferTensorContents()
        # b"".join avoids quadratic bytes '+=' concatenation.
        input_contents.raw_contents = b"".join(chunks)
        input_tensor.contents.CopyFrom(input_contents)
        request.inputs.extend([input_tensor])
        result_filenames.append(input_filenames)
        yield request
Example 5
    # NOTE(review): this snippet is a fragment — the enclosing function's
    # signature was lost in extraction ('img', 'height', 'width' come from
    # the missing surrounding code) and it is also cut off below: the
    # "width" tensor's contents are never attached within this view.
    target_shape = (260, 260)
    resized_img = cv2.resize(img, target_shape)
    # Scale pixel values to [0, 1] and add a batch axis; layout stays NHWC
    # (the declared shape below is [1, 260, 260, 3]).
    image_np = resized_img / 255.0
    image_exp = np.expand_dims(image_np, axis=0)
    processed_data = np.float32(image_exp)

    # Serialize the image and the original frame dimensions as raw bytes.
    raw_input = processed_data.flatten().tobytes()
    raw_height = np.array([np.int64(height)]).tobytes()
    raw_width = np.array([np.int64(width)]).tobytes()

    # Input tensor "data": the resized image, shape [1, 260, 260, 3].
    input_data = grpc_service_v2_pb2.ModelInferRequest().InferInputTensor()
    input_data.name = "data"
    input_data.shape.extend([1, 260, 260, 3])

    input_data_contents = grpc_service_v2_pb2.InferTensorContents()
    input_data_contents.raw_contents = raw_input
    input_data.contents.CopyFrom(input_data_contents)

    # Input tensor "height": original image height as a single int64.
    input_height = grpc_service_v2_pb2.ModelInferRequest().InferInputTensor()
    input_height.name = "height"
    input_height.shape.extend([1, 1])

    input_height_contents = grpc_service_v2_pb2.InferTensorContents()
    input_height_contents.raw_contents = raw_height
    input_height.contents.CopyFrom(input_height_contents)

    # Input tensor "width": original image width (contents presumably
    # attached just past this view — confirm in the full source).
    input_width = grpc_service_v2_pb2.ModelInferRequest().InferInputTensor()
    input_width.name = "width"
    input_width.shape.extend([1, 1])