Example #1
def main(_):
    channel = grpc.insecure_channel(FLAGS.server)
    stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
    request = predict_pb2.PredictRequest()
    request.model_spec.name = FLAGS.model
    request.model_spec.signature_name = "serving_default"

    img = cv2.imread(FLAGS.img)
    retr, buffer = cv2.imencode(".jpg", img)
    jpg_str = base64.urlsafe_b64encode(buffer).decode("utf-8")
    difference_sum = 0
    for i in range(FLAGS.num_tests):
        request.inputs['image'].CopyFrom(make_tensor_proto(jpg_str, shape=[1]))
        start = time.time()
        response = stub.Predict(request, 10.0)  # 10-second RPC timeout
        diff = time.time() - start
        print("Predict num: {} consumed time: {}".format(i,diff))
        difference_sum += diff
        if FLAGS.vis:
            classes = np.squeeze(make_ndarray(response.outputs["class"]))
            box = np.squeeze(make_ndarray(response.outputs["box"]))
            confidence = np.squeeze(make_ndarray(response.outputs["confidence"]))
            visimg = cv2.imread(FLAGS.img)
            for o in range(len(classes)):
                draw_boxes(visimg,box[o],confidence[o],classes[o])
            cv2.imshow("Vis",visimg)
            cv2.waitKey(1)
    print("Average time: {}".format(difference_sum / FLAGS.num_tests))
def _padding_size_pad_layer(node, name_to_node):
  """Computes padding size given a TF padding node.

  Args:
    node: Tensorflow node (NodeDef proto).
    name_to_node: Dict keyed by node name, each entry containing the node's
      NodeDef.

  Returns:
    total_padding_x: Total padding size for horizontal direction (integer).
    padding_x: Padding size for horizontal direction, left side (integer).
    total_padding_y: Total padding size for vertical direction (integer).
    padding_y: Padding size for vertical direction, top side (integer).

  Raises:
    ValueError: If padding layer is invalid.
  """
  paddings_layer_name = node.input[1]
  if not paddings_layer_name.endswith("/paddings"):
    raise ValueError("Padding layer name does not end with '/paddings'")
  paddings_node = name_to_node[paddings_layer_name]
  if paddings_node.op != "Const":
    raise ValueError("Padding op is not Const")
  value = paddings_node.attr["value"]
  t = make_ndarray(value.tensor)
  padding_y = t[1][0]
  padding_x = t[2][0]
  total_padding_y = padding_y + t[1][1]
  total_padding_x = padding_x + t[2][1]
  if (t[0][0] != 0) or (t[0][1] != 0):
    raise ValueError("padding is not zero for first tensor dim")
  if (t[3][0] != 0) or (t[3][1] != 0):
    raise ValueError("padding is not zero for last tensor dim")
  return total_padding_x, padding_x, total_padding_y, padding_y
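A hedged usage sketch for the function above (not from the original source): build a tiny TF1-style graph containing a Pad op, index its nodes by name, and query the padding sizes. make_ndarray is assumed to be TensorFlow's tf.make_ndarray.

import tensorflow.compat.v1 as tf

make_ndarray = tf.make_ndarray  # helper assumed by _padding_size_pad_layer

g = tf.Graph()
with g.as_default():
    x = tf.placeholder(tf.float32, [1, 8, 8, 3], name="x")
    # tf.pad materializes the paddings list as a Const node "pad/paddings"
    tf.pad(x, [[0, 0], [1, 2], [3, 4], [0, 0]], name="pad")

name_to_node = {n.name: n for n in g.as_graph_def().node}
print(_padding_size_pad_layer(name_to_node["pad"], name_to_node))
# -> (7, 3, 3, 1): total_x, left padding_x, total_y, top padding_y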
Example #3
def _pool_kernel_size(node, name_to_node):
    """Computes kernel size given a TF pooling node.

  Args:
    node: Tensorflow node (NodeDef proto).
    name_to_node: For MaxPoolV2, mapping from node name to the corresponding
      NodeDef.

  Returns:
    kernel_size_x: Kernel size for horizontal direction (integer).
    kernel_size_y: Kernel size for vertical direction (integer).

  Raises:
    ValueError: If pooling is invalid.
  """
    if node.op == "MaxPoolV2":
        ksize_input_name = node.input[1]
        if not ksize_input_name.endswith("/ksize"):
            raise ValueError("Kernel size name does not end with '/ksize'")
        ksize_node = name_to_node[ksize_input_name]
        value = ksize_node.attr["value"]
        t = make_ndarray(value.tensor)
        kernel_size_y = t[1]
        kernel_size_x = t[2]
        if t[0] != 1:
            raise ValueError("pool ksize for first dim is not 1")
        if t[3] != 1:
            raise ValueError("pool ksize for last dim is not 1")
    else:
        ksize = node.attr["ksize"]
        kernel_size_y = ksize.list.i[1]
        kernel_size_x = ksize.list.i[2]
        if ksize.list.i[0] != 1:
            raise ValueError("pool ksize for first dim is not 1")
        if ksize.list.i[3] != 1:
            raise ValueError("pool ksize for last dim is not 1")
    return kernel_size_x, kernel_size_y
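A hedged usage sketch (assumed TF1-style graph): create a plain MaxPool node and read its kernel size back through the ksize-attribute branch above.

import tensorflow.compat.v1 as tf

g = tf.Graph()
with g.as_default():
    x = tf.placeholder(tf.float32, [1, 8, 8, 3])
    tf.nn.max_pool(x, ksize=[1, 2, 3, 1], strides=[1, 1, 1, 1],
                   padding="VALID", name="pool")

name_to_node = {n.name: n for n in g.as_graph_def().node}
print(_pool_kernel_size(name_to_node["pool"], name_to_node))  # -> (3, 2)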
def _padding_size_pad_layer(node, name_to_node):
    """Computes padding size given a TF padding node.

  Args:
    node: Tensorflow node (NodeDef proto).
    name_to_node: Dict keyed by node name, each entry containing the node's
      NodeDef.

  Returns:
    total_padding_x: Total padding size for horizontal direction (integer).
    padding_x: Padding size for horizontal direction, left side (integer).
    total_padding_y: Total padding size for vertical direction (integer).
    padding_y: Padding size for vertical direction, top side (integer).

  Raises:
    ValueError: If padding layer is invalid.
  """
    paddings_layer_name = node.input[1]
    if not paddings_layer_name.endswith("/paddings"):
        raise ValueError("Padding layer name does not end with '/paddings'")
    paddings_node = name_to_node[paddings_layer_name]
    if paddings_node.op != "Const":
        raise ValueError("Padding op is not Const")
    value = paddings_node.attr["value"]
    t = make_ndarray(value.tensor)
    padding_y = t[1][0]
    padding_x = t[2][0]
    total_padding_y = padding_y + t[1][1]
    total_padding_x = padding_x + t[2][1]
    if (t[0][0] != 0) or (t[0][1] != 0):
        raise ValueError("padding is not zero for first tensor dim")
    if (t[3][0] != 0) or (t[3][1] != 0):
        raise ValueError("padding is not zero for last tensor dim")
    return total_padding_x, padding_x, total_padding_y, padding_y
Example #5
def _padding_size_pad_layer(node, name_to_order_node):
  """Computes padding size given a TF padding node.

  Args:
    node: Tensorflow node (NodeDef proto).
    name_to_order_node: Map from name to {order, node}. Output of
      graph_compute_order.get_compute_order().

  Returns:
    padding_x: Padding size for horizontal direction (integer).
    padding_y: Padding size for vertical direction (integer).

  Raises:
    ValueError: If padding layer is invalid.
  """
  paddings_layer_name = node.input[1]
  if not paddings_layer_name.endswith("/paddings"):
    raise ValueError("Padding layer name does not end with '/paddings'")
  paddings_node = name_to_order_node[paddings_layer_name].node
  if paddings_node.op != "Const":
    raise ValueError("Padding op is not Const")
  value = paddings_node.attr["value"]
  t = make_ndarray(value.tensor)
  padding_y = t[1][0]
  padding_x = t[2][0]
  if t[0][0] != 0:
    raise ValueError("padding is not zero for first tensor dim")
  if t[3][0] != 0:
    raise ValueError("padding is not zero for last tensor dim")
  return padding_x, padding_y
def _stride_size(node, name_to_node):
  """Computes stride size given a TF node.

  Args:
    node: Tensorflow node (NodeDef proto).
    name_to_node: For MaxPoolV2, mapping from node name to the corresponding
      NodeDef.

  Returns:
    stride_x: Stride size for horizontal direction (integer).
    stride_y: Stride size for vertical direction (integer).
  """
  if node.op == "MaxPoolV2":
    strides_input_name = node.input[2]
    if not strides_input_name.endswith("/strides"):
      raise ValueError("Strides name does not end with '/strides'")
    strides_node = name_to_node[strides_input_name]
    value = strides_node.attr["value"]
    t = make_ndarray(value.tensor)
    stride_y = t[1]
    stride_x = t[2]
  else:
    strides_attr = node.attr["strides"]
    logging.vlog(4, "strides_attr = %s", strides_attr)
    stride_y = strides_attr.list.i[1]
    stride_x = strides_attr.list.i[2]
  return stride_x, stride_y
def _pool_kernel_size(node, name_to_node):
  """Computes kernel size given a TF pooling node.

  Args:
    node: Tensorflow node (NodeDef proto).
    name_to_node: For MaxPoolV2, mapping from node name to the corresponding
      NodeDef.

  Returns:
    kernel_size_x: Kernel size for horizontal direction (integer).
    kernel_size_y: Kernel size for vertical direction (integer).

  Raises:
    ValueError: If pooling is invalid.
  """
  if node.op == "MaxPoolV2":
    ksize_input_name = node.input[1]
    if not ksize_input_name.endswith("/ksize"):
      raise ValueError("Kernel size name does not end with '/ksize'")
    ksize_node = name_to_node[ksize_input_name]
    value = ksize_node.attr["value"]
    t = make_ndarray(value.tensor)
    kernel_size_y = t[1]
    kernel_size_x = t[2]
    if t[0] != 1:
      raise ValueError("pool ksize for first dim is not 1")
    if t[3] != 1:
      raise ValueError("pool ksize for last dim is not 1")
  else:
    ksize = node.attr["ksize"]
    kernel_size_y = ksize.list.i[1]
    kernel_size_x = ksize.list.i[2]
    if ksize.list.i[0] != 1:
      raise ValueError("pool ksize for first dim is not 1")
    if ksize.list.i[3] != 1:
      raise ValueError("pool ksize for last dim is not 1")
  return kernel_size_x, kernel_size_y
def _stride_size(node, name_to_node):
    """Computes stride size given a TF node.

  Args:
    node: Tensorflow node (NodeDef proto).
    name_to_node: For MaxPoolV2, mapping from variable name to the Tensorflow
      node.

  Returns:
    stride_x: Stride size for horizontal direction (integer).
    stride_y: Stride size for vertical direction (integer).

  Raises:
    ValueError: If the strides input name does not end with '/strides'.
  """
    if node.op == "MaxPoolV2":
        strides_input_name = node.input[2]
        if not strides_input_name.endswith("/strides"):
            raise ValueError("Strides name does not end with '/strides'")
        strides_node = name_to_node[strides_input_name]
        value = strides_node.attr["value"]
        t = make_ndarray(value.tensor)
        stride_y = t[1]
        stride_x = t[2]
    else:
        strides_attr = node.attr["strides"]
        logging.vlog(4, "strides_attr = %s", strides_attr)
        stride_y = strides_attr.list.i[1]
        stride_x = strides_attr.list.i[2]
    return stride_x, stride_y
Example #9
def _padding_size_pad_layer(node, name_to_order_node):
    """Computes padding size given a TF padding node.

  Args:
    node: Tensorflow node (NodeDef proto).
    name_to_order_node: Map from name to {order, node}. Output of
      graph_compute_order.get_compute_order().

  Returns:
    padding_x: Padding size for horizontal direction (integer).
    padding_y: Padding size for vertical direction (integer).

  Raises:
    ValueError: If padding layer is invalid.
  """
    paddings_layer_name = node.input[1]
    if not paddings_layer_name.endswith("/paddings"):
        raise ValueError("Padding layer name does not end with '/paddings'")
    paddings_node = name_to_order_node[paddings_layer_name].node
    if paddings_node.op != "Const":
        raise ValueError("Padding op is not Const")
    value = paddings_node.attr["value"]
    t = make_ndarray(value.tensor)
    padding_y = t[1][0]
    padding_x = t[2][0]
    if t[0][0] != 0:
        raise ValueError("padding is not zero for first tensor dim")
    if t[3][0] != 0:
        raise ValueError("padding is not zero for last tensor dim")
    return padding_x, padding_y
Example #10
def _make_ndarray(values):
    '''
    A wrapper function around tensorflow's `make_ndarray` method.

    See TensorFlow's `make_ndarray` documentation for details.
    '''
    return make_ndarray(values)
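A minimal round-trip sketch for the wrapper, assuming make_ndarray here is TensorFlow's public tf.make_ndarray: make_tensor_proto serializes a numpy array into a TensorProto and the wrapper recovers it.

import numpy as np
import tensorflow as tf

arr = np.arange(6, dtype=np.float32).reshape(2, 3)
proto = tf.make_tensor_proto(arr, shape=arr.shape)
assert np.array_equal(_make_ndarray(proto), arr)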
Example #11
def do_inference(hostport, work_dir, concurrency, X, batch_size=200, y=None):
    """Tests PredictionService with concurrent requests.
    Args:
    hostport: Host:port address of the PredictionService.
    work_dir: The full path of working directory for test data set.
    concurrency: Maximum number of concurrent requests.
    num_tests: Number of test images to use.
    Returns:
    The classification error rate.
    Raises:
    IOError: An error occurred processing test data set.
    """
    host, port = hostport.split(':')
    channel = implementations.insecure_channel(host, int(port))
    #stub = prediction_service_pb2.PredictionServiceStub(channel)
    stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)

    request = predict_pb2.PredictRequest()
    request.model_spec.name = 'helitrax' # Note this is associated with --model_name argument to tensorflow_model_server
    #request.model_spec.signature_name = signature_constants.CLASSIFY_INPUTS
    request.model_spec.signature_name = 'classify'


    #import pdb; pdb.set_trace()
    num_batches = X.shape[1] // batch_size  # integer division: np.hsplit needs an int
    X_batches = np.hsplit(X, num_batches)
    check = False
    if y is not None:
        check = True
        y_batches = np.hsplit(y, num_batches)

    results = []
    scores = [0,0,0,0] # tp, tn, fp, fn
    for i in range(len(X_batches)):
        X = X_batches[i]
        request.inputs[signature_constants.CLASSIFY_INPUTS].CopyFrom(
            tf.contrib.util.make_tensor_proto(X, dtype=dtypes.float32, shape=[20, 200, 79]))
        result = stub.Predict(request, 100.0)
        ary = util.make_ndarray(result.outputs['classes'])
        results.append(ary)

        if check:
            predictions = ary
            for j in range(len(y_batches[i])):
                if y_batches[i][j][1] == 1:
                    if predictions[j] == 1:
                        scores[0] += 1  # true positive
                    else:
                        scores[3] += 1  # false negative
                if y_batches[i][j][1] == 0:
                    if predictions[j] == 1:
                        scores[2] += 1  # false positive
                    else:
                        scores[1] += 1  # true negative

    return (np.concatenate(results), scores)
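The original interface was meant to report a classification error rate; since do_inference returns the raw (tp, tn, fp, fn) counts, a small follow-up helper (not part of the original snippet) can derive the usual metrics:

def summarize_scores(scores):
    tp, tn, fp, fn = scores
    total = tp + tn + fp + fn
    error_rate = (fp + fn) / float(total) if total else 0.0
    precision = tp / float(tp + fp) if (tp + fp) else 0.0
    recall = tp / float(tp + fn) if (tp + fn) else 0.0
    return error_rate, precision, recall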
Example #12
def inference(stub, request, imgs, kwargs):
    print("Start processing:")
    print('\tModel name: {}'.format(kwargs['model_name']))
    print('\tImages in shape: {}\n'.format(imgs.shape))
    processing_times = np.zeros((0), int)
    batch_size = int(kwargs['batch_size'])
    # If images_number is missing or zero, iterate over the whole input set.
    if not kwargs.get('images_number'):
        iterations = imgs.shape[0] // batch_size
    else:
        iterations = int(kwargs['images_number'])
    iteration = 0
    while iteration <= iterations:
        for x in range(0, imgs.shape[0] - batch_size + 1, batch_size):
            iteration += 1
            if iteration > iterations:
                break
            end_batch = x + batch_size
            if end_batch > imgs.shape[0]:
                end_batch = imgs.shape[0]
            batch = imgs[x:end_batch]
            request.inputs[kwargs['input_name']].CopyFrom(
                tf_contrib_util.make_tensor_proto(batch, shape=(batch.shape)))
            start_time = datetime.datetime.now()
            result = stub.Predict(request, RPC_TIMEOUT)
            # result includes a dictionary with all model outputs
            end_time = datetime.datetime.now()
            if kwargs['output_name'] not in result.outputs:
                print("Invalid output name", kwargs['output_name'])
                print("Available outputs:")
                for Y in result.outputs:
                    print(Y)
                exit(1)
            duration = (end_time - start_time).total_seconds() * 1000
            processing_times = \
                np.append(processing_times, np.array([int(duration)]))
            output = \
                tf_contrib_util.make_ndarray(result.outputs[kwargs['output_name']])

            print('Iteration {}; Processing time: {:.2f} ms; speed {:.2f} fps'.
                  format(iteration, round(np.average(duration), 2),
                         round(1000 * batch_size / np.average(duration), 2)))

            if kwargs['no_imagenet_classes']:
                continue

            print('\tImagenet top results in a single batch:')
            for i in range(output.shape[0]):
                single_result = output[[i], ...]
                max_class = np.argmax(single_result)
                print('\t\t {} image in batch: {}'.format(
                    i + 1, classes.imagenet_classes[max_class]))

    if kwargs['performance']:
        get_processing_performance(processing_times, batch_size)

    return output
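get_processing_performance is not defined in this snippet; a plausible sketch (an assumption, not the original implementation) that reports latency and throughput statistics:

import numpy as np

def get_processing_performance(processing_times, batch_size):
    # processing_times holds one latency per request, in milliseconds
    mean_ms = np.mean(processing_times)
    print('Latency: mean {:.2f} ms; median {:.2f} ms; p90 {:.2f} ms'.format(
        mean_ms, np.median(processing_times),
        np.percentile(processing_times, 90)))
    print('Throughput: {:.2f} fps'.format(1000 * batch_size / mean_ms))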
Example #13
def doTest(host, port):
    from tensorflow_serving.apis.predict_pb2 import PredictRequest
    from tensorflow_serving.apis.prediction_service_pb2_grpc import PredictionServiceStub
    from grpc import insecure_channel, StatusCode
    from tensorflow.contrib.util import make_tensor_proto, make_ndarray
    from tensorflow import float32

    target = "%s:%s"%(host, port)

    print "Sending prediction request to", target, "\n"

    channel = insecure_channel(target)
    stub = PredictionServiceStub(channel)

    request = PredictRequest()
    request.model_spec.name = "campaign"
    request.model_spec.signature_name = ""

    request.inputs["hour"].CopyFrom(make_tensor_proto(6, shape=[1], dtype=float32))
    request.inputs["week"].CopyFrom(make_tensor_proto(5, shape=[1], dtype=float32))
    request.inputs["sid"].CopyFrom(make_tensor_proto("47320", shape=[1]))
    request.inputs["sspid"].CopyFrom(make_tensor_proto("3", shape=[1]))
    request.inputs["country"].CopyFrom(make_tensor_proto("DK", shape=[1]))
    request.inputs["os"].CopyFrom(make_tensor_proto("6", shape=[1]))
    request.inputs["domain"].CopyFrom(make_tensor_proto("video9.in", shape=[1]))
    request.inputs["isp"].CopyFrom(make_tensor_proto("Tele Danmark", shape=[1]))
    request.inputs["browser"].CopyFrom(make_tensor_proto("4", shape=[1]))
    request.inputs["type"].CopyFrom(make_tensor_proto("site", shape=[1]))
    request.inputs["lat"].CopyFrom(make_tensor_proto(35000, shape=[1], dtype=float32))
    request.inputs["lon"].CopyFrom(make_tensor_proto(105000, shape=[1], dtype=float32))
    request.inputs["connectiontype"].CopyFrom(make_tensor_proto("2", shape=[1]))
    request.inputs["devicetype"].CopyFrom(make_tensor_proto("1", shape=[1]))
    request.inputs["donottrack"].CopyFrom(make_tensor_proto("0", shape=[1]))
    request.inputs["userid"].CopyFrom(make_tensor_proto("984273063", shape=[1]))
    request.inputs["ua"].CopyFrom(make_tensor_proto("Mozilla/5.0 (Linux; U; Android 5.1.1; en-US; Redmi Note 3 Build/LMY47V) AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 UCBrowser/11.0.8.855 U3/0.8.0 Mobile Safari/534.30", shape=[1]))

    (result, status) = stub.Predict.with_call(request)

    if status.code() != StatusCode.OK:
        print "call failed", status
        return

    predictions = make_ndarray(result.outputs["classes"])

    if predictions.size == 0:
        print "no predition replied"
        return

    cidIndex = predictions[0]
    print "Server predict with index", cidIndex
Example #14
    def get_generic_value(cls, value):
        """quantized numpy array is mapped to np.uint8 typed

    FIXME: I'm not sure if it's a good idea
    """
        np_array = make_ndarray(value)
        dtype = np_array.dtype
        if dtype.fields is None:
            pass
        elif dtype[0] in [np.uint8, np.int8]:
            np_array = np_array.astype(dtype[0])
        else:
            raise ValueError('Unsupported numpy dtype: %s' % dtype)
        return cls.__utensor_generic_type__(np_array=np_array, dtype=dtype)
Example #15
def infer_batch(batch_input, input_tensor, grpc_stub, model_spec_name,
                model_spec_version, output_tensors):
    request = predict_pb2.PredictRequest()
    request.model_spec.name = model_spec_name
    if model_spec_version is not None:
        request.model_spec.version.value = model_spec_version
    print("input shape", list(batch_input.shape))
    request.inputs[input_tensor].CopyFrom(
        make_tensor_proto(batch_input, shape=list(batch_input.shape)))
    result = grpc_stub.Predict(request, 10.0)
    data = {}
    for output_tensor in output_tensors:
        data[output_tensor] = make_ndarray(result.outputs[output_tensor])
    return data
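A hedged usage sketch for infer_batch; the endpoint, model name, and tensor names below are hypothetical:

import grpc
import numpy as np
from tensorflow_serving.apis import prediction_service_pb2_grpc

channel = grpc.insecure_channel("localhost:9000")
stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
batch = np.zeros((4, 224, 224, 3), dtype=np.float32)
outputs = infer_batch(batch, "input", stub, "resnet", None, ["prob"])
print(outputs["prob"].shape)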
Example #16
def infer(imgs, slice_number, input_tensor, grpc_stub, model_spec_name,
          model_spec_version, output_tensors):
    request = predict_pb2.PredictRequest()
    request.model_spec.name = model_spec_name
    if model_spec_version is not None:
        request.model_spec.version.value = model_spec_version
    img = imgs[slice_number, ...]
    print("input shape", list((1, ) + img.shape))
    request.inputs[input_tensor].CopyFrom(
        make_tensor_proto(img, shape=list((1, ) + img.shape)))
    result = grpc_stub.Predict(request, 10.0)
    data = {}
    for output_tensor in output_tensors:
        data[output_tensor] = make_ndarray(result.outputs[output_tensor])
    return data
def inference(stub, request, imgs, kwargs):
    print("Start processing:")
    print(f"\tModel name: {kwargs['model_name']}")
    print(f"\tImages in shape: {imgs.shape}\n")

    processing_times = np.zeros((0), int)
    batch_size = int(kwargs['batch_size'])
    iteration = 0
    for x in range(0, imgs.shape[0], batch_size):
        iteration += 1
        if iteration > imgs.shape[0]: break
        end_batch = x + batch_size
        if end_batch > imgs.shape[0]: end_batch = imgs.shape[0]
        batch = imgs[x:end_batch]
        request.inputs[kwargs['input_name']].CopyFrom(
            tf_contrib_util.make_tensor_proto(batch, shape=(batch.shape)))
        start_time = datetime.datetime.now()
        result = stub.Predict(
            request,
            10.0)  # result includes a dictionary with all model outputs
        end_time = datetime.datetime.now()
        duration = (end_time - start_time).total_seconds() * 1000
        processing_times = np.append(processing_times,
                                     np.array([int(duration)]))
        output = tf_contrib_util.make_ndarray(
            result.outputs[kwargs['output_name']])

        print(
            f'Iteration {iteration}; '
            f'Processing time: {round(np.average(duration), 2):.2f} ms; '
            f'speed {round(1000 * batch_size / np.average(duration), 2):.2f} fps'
        )

        if kwargs['no_imagenet']:
            continue

        print(f'\tImagenet top results in a single batch:')
        for i in range(output.shape[0]):
            single_result = output[[i], ...]
            max_class = np.argmax(single_result)
            print(
                f'\t\t {i+1} image in batch: {classes.imagenet_classes[max_class]}'
            )

    if kwargs['performance']:
        get_processing_performance(processing_times, batch_size)

    return output
Example #18
def _parse_graph_info(graph_def):
    """Parse GraphDef
  Fetch input and output tensor names for reconstructing the
  graph in a uTensor Context object

  Argument
  ========
  - graph_def <tf.GraphDef>: a GraphDef object

  Return
  ======
  - graph_nodes <defaultdict>: a dict with key as operation name and
    value as a defaultdict with keys 'input_tensor' and 'output_tensor'
    which maps to a set of input/output tensor names respectively

  Note
  ====
  - though the output tensor names are irrelevant for TensorFlow, they
    are necessary for uTensor
  """
    OperationInfo = namedtuple('OperationInfo',
                               field_names=[
                                   'input_tensor', 'output_tensor', 'op_type',
                                   'output_content', 'op_attr'
                               ])
    graph = Graph()
    with graph.as_default():  # pylint: disable=E1129
        import_graph_def(graph_def, name="")
    graph_info = {}
    with Session(graph=graph):
        for node in graph_def.node:
            op = graph.get_operation_by_name(node.name)
            input_tensor = [(t.name, t.dtype, _parse_shape(t.shape))
                            for t in op.inputs]
            output_tensor = [(t.name, t.dtype, _parse_shape(t.shape))
                             for t in op.outputs]
            op_type = node.op
            output_content = {}
            op_attr = node.attr
            if node.op in ["Const"]:
                for tensor_name, _, _ in output_tensor:
                    output_content[tensor_name] = make_ndarray(
                        node.attr['value'].tensor)
            graph_info[node.name] = OperationInfo(input_tensor, output_tensor,
                                                  op_type, output_content,
                                                  op_attr)
    return graph_info
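A hedged usage sketch for _parse_graph_info: load a frozen GraphDef from disk (the file name is hypothetical) and inspect the parsed records.

import tensorflow.compat.v1 as tf

graph_def = tf.GraphDef()
with tf.gfile.GFile("frozen_model.pb", "rb") as f:
    graph_def.ParseFromString(f.read())
info = _parse_graph_info(graph_def)
for name, op_info in info.items():
    print(name, op_info.op_type, [t[0] for t in op_info.output_tensor])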
Example #19
def test_prepare_output_as_list(outputs_names, shapes, types):
    outputs = {}
    x = 0
    for key, value in outputs_names.items():
        outputs[value] = np.ones(shape=shapes[x], dtype=types[x])
        x += 1

    output = predict_utils.\
        prepare_output_as_list(inference_output=outputs,
                               model_available_outputs=outputs_names)

    x = 0
    for key, value in outputs_names.items():
        temp_output = tf_contrib_util.make_ndarray(output.outputs[key])
        assert temp_output.shape == shapes[x]
        assert temp_output.dtype == types[x]
        x += 1
Example #20
def test_predict_successful_version(mocker, get_grpc_service_for_predict):
    infer_mocker = mocker.patch('ie_serving.models.ir_engine.IrEngine.infer')
    expected_response = np.ones(shape=(2, 2))
    infer_mocker.return_value = ({'output': expected_response}, None)
    requested_version = 1
    request = get_fake_request(model_name='test', data_shape=(1, 1, 1),
                               input_blob='input', version=requested_version)
    grpc_server = get_grpc_service_for_predict
    rpc = grpc_server.invoke_unary_unary(
        PREDICT_SERVICE.methods_by_name['Predict'],
        (),
        request, None)
    rpc.initial_metadata()
    response, trailing_metadata, code, details = rpc.termination()

    encoded_response = make_ndarray(response.outputs['output'])
    assert requested_version == response.model_spec.version.value
    assert grpc.StatusCode.OK == code
    assert expected_response.shape == encoded_response.shape
def get_weights_from_pb(pb_file):
    """
    Takes as input a pb file, restores the network weights, and returns a
    list containing, for each layer, an ndarray with its weights and biases.
    :param pb_file: path to the pb file of the network
    :return: a list containing for each layer a ndarray with its parameters
    """
    weights_to_ret = []
    with tf.gfile.GFile(pb_file, "rb") as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        # get all the weights tensor as np.array
        weights = [
            make_ndarray(node_def.attr['value'].tensor).flatten().tolist()
            for node_def in graph_def.node
            if node_def.attr['value'].tensor.dtype != 0 and (
                'Variable' in node_def.name or 'kernel' in node_def.name
                or 'bias' in node_def.name or 'weights' in node_def.name)
        ]
        # flatten the elements in weights
        for i in range(0, len(weights), 2):
            weights_to_ret.append(weights[i] + weights[i + 1])
        return weights_to_ret
def thread_function(thr_id, network_name, input_layer, output_layer, input_dimension,
                    ip, port, disp_buf, src_type, src_name):

  if src_type == "Camera":
    # UVC camera init - camera threads always come first and we use it
    # to generate the camera indexes
    cam = cv2.VideoCapture(thr_id)
    if not cam.isOpened():
      log.error("Failed to open the UVC camera {}".format(thr_id))
      return

    cam.set(cv2.CAP_PROP_FRAME_WIDTH, CAM_WIDTH)
    cam.set(cv2.CAP_PROP_FRAME_HEIGHT, CAM_HEIGHT)
    # not all UVC cameras honor below request
    cam.set(cv2.CAP_PROP_FPS, CAM_FPS)
    # If your camera sends other than MJPEG, change below
    cam.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*"MJPG"))
  elif src_type == "Video":
    # Assumption: src_name will be valid
    cam = cv2.VideoCapture(src_name)

  # inference stats
  fps = 0                   # camera fps
  inf_fps = 0               # inference fps
  dropped_fps = 0           # dropped frame fps
  cam_start_time = time()

  # ovms connection
  channel = grpc.insecure_channel("{}:{}".format(ip, port))
  stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)

  request = predict_pb2.PredictRequest()
  # Note: please use the same model name when launching the OVMS docker container
  request.model_spec.name = network_name

  global exit_ok
  while not exit_ok:
    ret, frame = cam.read()

    if src_type == "Video":
      # restart the video file when it reaches the end
      if not ret:
        cam.set(cv2.CAP_PROP_POS_FRAMES, 0)
        continue
      # normalize the video frame dimension to that of the camera
      else:
        # to maintain the frame inferencing parity with the cameras, lets sleep
        # here to maintain cam_fps speed
        sleep((1000 / CAM_FPS) / 1000)
        # enable below line to keep video file & camera output window dimensions the same
        # frame = cv2.resize(frame, (CAM_WIDTH, CAM_HEIGHT))

    fps = fps + 1
    if (time() - cam_start_time) * 1000 >= 1000:
      log.warning('{}{} fps: {}, Inf fps: {}, dropped fps: {}'
                  .format(src_type, thr_id, fps, inf_fps, dropped_fps))
      fps = 0
      inf_fps = 0
      dropped_fps = 0
      cam_start_time = time()

    # resize the frame to what network input layer expects it to be
    image = cv2.resize(frame, (input_dimension, input_dimension))
    image = image.transpose(2, 0, 1).reshape(1, 3, input_dimension, input_dimension)

    inf_time = time()
    # send the input as protobuf
    request.inputs[input_layer].CopyFrom(
        tf_contrib_util.make_tensor_proto(image, shape=None))

    try:
      result = stub.Predict(request, 10.0)
    except Exception as e:
      log.error('Caught exception {}'.format(e))
      cam.release()
      return
    duration = time() - inf_time

    # decode the received output as protobuf
    res = tf_contrib_util.make_ndarray(result.outputs[output_layer])

    if not res.any():
      log.error('Thr{}: Predictions came back with wrong output layer name'.format(thr_id))
      dropped_fps = dropped_fps + 1
      disp_buf[thr_id] = frame
    else:
      log.debug('Predictions came back fine')
      inf_fps = inf_fps + 1
      disp_buf[thr_id] = parse_output(thr_id, res, frame)

  # while not exit_ok

  cam.release()
  log.warning('Exiting thread {}'.format(thr_id))
Example #23
def initialize_tf():
    # Warm-up: round-trip a dummy array through TensorProto serialization
    # so later requests avoid one-time initialization cost. make_ndarray
    # expects a TensorProto, so one is built first with make_tensor_proto.
    initialization_list = np.zeros((1, 1), dtype=float)
    tensor_proto = tf_contrib_util.make_tensor_proto(
        initialization_list, shape=initialization_list.shape,
        dtype=types_pb2.DT_FLOAT)
    tf_contrib_util.make_ndarray(tensor_proto)
Example #24
for x in range(0, imgs.shape[0] - batch_size + 1, batch_size):
    iteration += 1
    request = predict_pb2.PredictRequest()
    request.model_spec.name = "face-detection"
    img = imgs[x:(x + batch_size)]
    print("\nRequest shape", img.shape)
    request.inputs["data"].CopyFrom(
        tf_contrib_util.make_tensor_proto(img, shape=(img.shape)))
    start_time = datetime.datetime.now()
    result = stub.Predict(
        request, 10.0)  # result includes a dictionary with all model outputs
    end_time = datetime.datetime.now()

    duration = (end_time - start_time).total_seconds() * 1000
    processing_times = np.append(processing_times, np.array([int(duration)]))
    output = tf_contrib_util.make_ndarray(result.outputs["detection_out"])
    print("Response shape", output.shape)
    # iterate over responses from all images in the batch
    for y in range(0, img.shape[0]):
        img_out = img[y, :, :, :]

        print("image in batch item", y, ", output shape", img_out.shape)
        img_out = img_out.transpose(1, 2, 0)
        # 200 detections are returned for each image in the batch
        for i in range(0, 200 * batch_size - 1):
            detection = output[:, :, i, :]
            # each detection has shape 1,1,7 where last dimension represent:
            # image_id - ID of the image in the batch
            # label - predicted class ID
            # conf - confidence for the predicted class
def _make_ndarray(tensor):
    # Use TensorFlow's make_ndarray when available; otherwise fall back to
    # the protobuf-only MakeNdarray helper.
    if _TENSORFLOW_AVAILABLE:
        return make_ndarray(tensor)
    return MakeNdarray(tensor)
        img = imgs[x:(x + batch_size)]
        if args.get('labels_numpy_path') is not None:
            lb = lbs[x:(x + batch_size)]
        request.inputs[args['input_name']].CopyFrom(tf_contrib_util.make_tensor_proto(img, shape=(img.shape)))
        start_time = datetime.datetime.now()
        result = stub.Predict(request, 10.0) # result includes a dictionary with all model outputs
        end_time = datetime.datetime.now()
        if args['output_name'] not in result.outputs:
            print("Invalid output name", args['output_name'])
            print("Available outputs:")
            for Y in result.outputs:
                print(Y)
            exit(1)
        duration = (end_time - start_time).total_seconds() * 1000
        processing_times = np.append(processing_times, np.array([int(duration)]))
        output = tf_contrib_util.make_ndarray(result.outputs[args['output_name']])

        nu = np.array(output)
        # for object classification models show imagenet class
        print('Iteration {}; Processing time: {:.2f} ms; speed {:.2f} fps'
              .format(iteration, round(np.average(duration), 2),
                      round(1000 * batch_size / np.average(duration), 2)))
        # Comment out this section for non imagenet datasets
        print("imagenet top results in a single batch:")
        for i in range(nu.shape[0]):
            single_result = nu[[i],...]
            ma = np.argmax(single_result)
            mark_message = ""
            if args.get('labels_numpy_path') is not None:
                total_executed += 1
                if ma == lb[i]: