Example #1
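These snippets are excerpted from larger TensorFlow Serving client scripts, so none of them show their imports; FLAGS.server / FLAGS.image and the decode_image_* helpers are defined elsewhere in the original files. A minimal set of imports the snippets appear to assume (Python 2 / TF 1.x; module paths may differ by version) is:

import sys
import time
import threading
import multiprocessing

import grpc
import numpy as np
import tensorflow as tf
from google.protobuf import text_format
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2_grpc
# label map protos from the TF Object Detection API (referenced as `labelmap` below);
# this exact import path is an assumption
from object_detection.protos import string_int_label_map_pb2 as labelmap
# import cv2  # only needed if the commented-out visualization code is enabled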
def do_inference(server, batch_size, num_tests, img_path):

    channel = grpc.insecure_channel(server)            
    stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)      
    request = predict_pb2.PredictRequest()                      
    request.model_spec.name = 'ssd_inception_v2_coco'                        
    request.model_spec.signature_name = 'serving_default'
    print("Image path",img_path)
    # pre-process the image
    IMAGENET_MEAN = (103.939, 116.779, 123.68)
    IMAGENET_MEAN = (0, 0, 0)  # overrides the line above: no mean subtraction here
    image,org= decode_image_opencv(img_path,max_height=800,swapRB=True,
        imagenet_mean=IMAGENET_MEAN)
    #image,org = decode_image_tf_reader(img_path,max_height=800)
    image = image.astype(np.uint8)
    global _draw
    _draw = org.copy()
    print ("in image shape",image.shape)
    
    
    global _start
    _start = time.time()
    global _response_awaiting
    _response_awaiting =True
    for i in range(num_tests):
      #print("Going to send the request")
      # batching  
      # If using anything other than decode_opencv uncomment line below   
      #input = np.expand_dims(image, axis=0)
      #('Input shape=', (1, 480, 640, 3))
      input = image # comment this if using anything other than retinanet
      print("Input shape=",input.shape )
      inputs = input
      for _ in range(batch_size-1):
        inputs = np.append(inputs, input, axis=0)
      
      print("Input-s shape",inputs.shape)
      request.inputs['inputs'].CopyFrom(
          tf.contrib.util.make_tensor_proto(inputs, shape=inputs.shape))
      # request.inputs['input_image'].CopyFrom(
      #     tf.contrib.util.make_tensor_proto(inputs, shape=inputs.shape))
      # callback way - this is faster
      result_future = stub.Predict.future(request, 60.25)  # initial request takes longer
      result_future.add_done_callback(_callback)

      # request-response way - this is slower
      # result = stub.Predict(request, 10.25)  #  seconds  
      # parse_result(result)
      _response_awaiting = True

      #print("Send the request")
      # End for loop

    while(_response_awaiting):
      time.sleep(.000010)
    print("Response Received Exiting")
Example #2
def main(_):
    channel = grpc.insecure_channel(FLAGS.server)
    stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)

    # model_name = 'ssd_inception_v2_coco'
    model_name = 'ssd_resnet50'
    # model_name = 'ssd_mobilenet'

    thread_num = 1
    run_num = 100
    batch_size = 1

    image, org = decode_image_opencv(FLAGS.image)
    image = image.astype(np.uint8)
    inputs = image
    for i in range(batch_size - 1):
        inputs = np.append(inputs, image, axis=0)

    request = predict_pb2.PredictRequest()
    request.model_spec.name = model_name
    request.model_spec.signature_name = 'serving_default'
    request.inputs['inputs'].CopyFrom(
        tf.contrib.util.make_tensor_proto(inputs, shape=inputs.shape))

    # warmup
    warmup_num = 3
    for i in range(warmup_num):

        start = time.time()
        result = stub.Predict(request, 10.0)
        end = time.time()
        duration = end - start
        print("warmup duration = %f" % duration)
    time.sleep(2.0)

    start = time.time()

    thread_pool = []
    for i in range(thread_num):
        t = threading.Thread(target=send_request,
                             args=(
                                 model_name,
                                 stub,
                                 inputs,
                                 run_num,
                                 batch_size,
                                 i,
                             ))
        thread_pool.append(t)
        t.start()

    for t in thread_pool:
        t.join()

    end = time.time()
    print("overall time = %f" % (end - start))
Example #3
def main(_):
  s = open('mscoco_complete_label_map.pbtxt', 'r').read()
  mymap = labelmap.StringIntLabelMap()
  global _label_map 
  _label_map = text_format.Parse(s, mymap)

  channel = grpc.insecure_channel(FLAGS.server)
  stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)

  run_num = 20

  image, org = decode_image_opencv(FLAGS.image)
  image = image.astype(np.uint8)

  request_array = []
  # batch_size_array = [1, 1, 4, 4, 1, 4]
  batch_size_array = [1, 2, 4, 8, 16, 32, 64, 128]
  # batch_size_array = [32, 16, 8, 4, 2, 1]
  # batch_size_array = [10, 9, 8, 7, 6, 5]

  for batch_size in batch_size_array:
    inputs = image
    for i in range(batch_size - 1):
      inputs = np.append(inputs, image, axis = 0)

    request = predict_pb2.PredictRequest()    
    request.model_spec.name = model_name  # model_name is not defined here; presumably a module-level constant
    request.model_spec.signature_name = 'serving_default'
    request.inputs['inputs'].CopyFrom(tf.contrib.util.make_tensor_proto(inputs, shape=inputs.shape))

    request_array.append(request)

  print("Done with input preparation")
  raw_input("Press Enter to continue...\n")

  for i in range(len(batch_size_array)):
    batch_size = batch_size_array[i]
    request = request_array[i]

    durationSum = 0.0

    for j in range(run_num):
      start = time.time()
      result = stub.Predict(request, 10.0)
      end = time.time()
      duration = end - start
      print("duration = %f" % duration)
      if (j != 0):
        durationSum += duration

    print("average duration (warm-up excluded) for batch size of %d = %f" % (batch_size, durationSum / (run_num - 1)))
Example #4
def main(_):
  s = open('mscoco_complete_label_map.pbtxt', 'r').read()
  mymap = labelmap.StringIntLabelMap()
  global _label_map 
  _label_map = text_format.Parse(s, mymap)

  channel = grpc.insecure_channel(FLAGS.server)
  stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)

  # durationSum = 0.0
  thread_num = 1
  run_num = 100
  batch_size = 1

  image, org = decode_image_opencv(FLAGS.image)
  image = image.astype(np.uint8)
  inputs = image
  for i in range(batch_size - 1):
    inputs = np.append(inputs, image, axis = 0)

  # request = predict_pb2.PredictRequest()    
  # request.model_spec.name = model_name
  # request.model_spec.signature_name = 'serving_default'
  # request.inputs['inputs'].CopyFrom(tf.contrib.util.make_tensor_proto(inputs, shape=inputs.shape))

  # # warmup
  # warmup_num = 3
  # for i in range(warmup_num):

  #   start = time.time()
  #   result = stub.Predict(request, 10.0)
  #   end = time.time()
  #   duration = end - start
  #   print("warmup duration = %f" % duration)
  # # time.sleep(2.0)
  raw_input("Press Enter to continue...")

  start = time.time()

  jobs = []
  for i in range(thread_num):
    p = multiprocessing.Process(target = send_request, args = (inputs, run_num, batch_size, i,))
    jobs.append(p)
    p.start()

  for p in jobs:
    p.join()

  end = time.time()
  print("overall time = %f" % (end - start))
Example #5
def main(_):
    s = open('mscoco_complete_label_map.pbtxt', 'r').read()
    mymap = labelmap.StringIntLabelMap()
    global _label_map
    _label_map = text_format.Parse(s, mymap)

    channel = grpc.insecure_channel(FLAGS.server)
    stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)

    image, org = decode_image_opencv(FLAGS.image)
    image = image.astype(np.uint8)

    model_1 = 'ssd_inception_v2_coco'
    model_2 = 'ssd_mobilenet'

    # thread_pool = []
    t1 = threading.Thread(target=send_request,
                          args=(
                              stub,
                              image,
                              model_1,
                              10,
                          ))
    t2 = threading.Thread(target=send_request,
                          args=(
                              stub,
                              image,
                              model_2,
                              1,
                          ))

    t1.start()
    t2.start()

    t1.join()
    t2.join()
Example #6
def main(_):
    s = open('mscoco_complete_label_map.pbtxt', 'r').read()
    mymap = labelmap.StringIntLabelMap()
    global _label_map
    _label_map = text_format.Parse(s, mymap)

    channel = grpc.insecure_channel(FLAGS.server)
    stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)

    durationSum = 0.0
    run_num = 10
    batch_size = 64

    image, org = decode_image_opencv(FLAGS.image)
    # _draw = org.copy()

    image = image.astype(np.uint8)
    inputs = image
    for i in range(batch_size - 1):
        inputs = np.append(inputs, image, axis=0)

    request = predict_pb2.PredictRequest()
    request.model_spec.name = model_name  # model_name is not defined here; presumably a module-level constant
    request.model_spec.signature_name = 'serving_default'
    request.inputs['inputs'].CopyFrom(
        tf.contrib.util.make_tensor_proto(inputs, shape=inputs.shape))

    for i in range(run_num):

        start = time.time()
        result = stub.Predict(request, 10.0)
        end = time.time()

        if (i != 0):
            duration = end - start
            durationSum += duration
            print("duration = %f" % duration)
            # print(result)

        # boxes = result.outputs['detection_boxes']
        # scores = result.outputs['detection_scores']
        # labels = result.outputs['detection_classes']
        # num_detections= result.outputs['num_detections']

        # boxes= tf.make_ndarray(boxes)
        # scores= tf.make_ndarray(scores)
        # labels= tf.make_ndarray(labels)
        # num_detections= tf.make_ndarray(num_detections)

        # print("boxes output",(boxes).shape)
        # print("scores output",(scores).shape)
        # print("labels output",(labels).shape)
        # print('num_detections',num_detections[0])

        # # visualize detections hints from
        # # # https://github.com/tensorflow/models/blob/master/research/object_detection/object_detection_tutorial.ipynb

        # for box, score, label in zip(boxes[0], scores[0], labels[0]):
        #   # scores are sorted so we can break
        #   if score < 0.3:
        #       break
        #   #dim = image.shape[0:2]
        #   dim = _draw.shape
        #   #print("Label-raw",labels_to_names[label-1]," at ",box," Score ",score)
        #   box = box_normal_to_pixel(box, dim)
        #   b = box.astype(int)
        #   class_label = get_label(int(label))
        #   print("Label",class_label ," at ",b," Score ",score)
        #   # draw the image and write out
        #   cv2.rectangle(_draw,(b[0],b[1]),(b[2],b[3]),(0,0,255),1)
        #   cv2.putText(_draw,class_label + "-"+str(round(score,2)), (b[0]+2,b[1]+8),\
        #      cv2.FONT_HERSHEY_SIMPLEX, .45, (0,0,255))

        # cv2.imshow("test", _draw)
        # cv2.waitKey(0)

    print("average duration for batch size of %d = %f" %
          (batch_size, durationSum / (run_num - 1)))
Example #7
def main(_):
    s = open('mscoco_complete_label_map.pbtxt', 'r').read()
    mymap = labelmap.StringIntLabelMap()
    global _label_map
    _label_map = text_format.Parse(s, mymap)

    channel = grpc.insecure_channel(FLAGS.server)
    stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)

    # durationSum = 0.0
    thread_num = int(sys.argv[1])
    # run_num = 10
    batch_size = int(sys.argv[2])

    run_num = 1024 / (thread_num * batch_size)
    print("[INFO] thread_num = %d, batch_size = %d, run_num = %d" %
          (thread_num, batch_size, run_num))

    image, org = decode_image_opencv(FLAGS.image)
    image = image.astype(np.uint8)

    t0 = time.time()

    inputs = image
    for i in range(batch_size - 1):
        inputs = np.append(inputs, image, axis=0)

    t1 = time.time()
    print("\n[INFO] preparing input data takes %.3f sec\n" % (t1 - t0))

    request = predict_pb2.PredictRequest()
    request.model_spec.name = model_name  # model_name is not defined here; presumably a module-level constant
    request.model_spec.signature_name = 'serving_default'
    request.inputs['inputs'].CopyFrom(
        tf.contrib.util.make_tensor_proto(inputs, shape=inputs.shape))

    # warmup
    warmup_num = 3
    for i in range(warmup_num):

        start = time.time()
        result = stub.Predict(request, 10.0)
        end = time.time()
        duration = end - start
        print("warmup duration = %f" % duration)
    # time.sleep(2.0)
    raw_input("Press Enter to continue...")

    start = time.time()

    thread_pool = []
    for i in range(thread_num):
        t = threading.Thread(target=send_request,
                             args=(
                                 stub,
                                 inputs,
                                 run_num,
                                 batch_size,
                                 i,
                             ))
        thread_pool.append(t)
        t.start()

    for t in thread_pool:
        t.join()

    end = time.time()
    print("overall time = %f" % (end - start))
Example #8
def prepareInputs(batch_size):
    image, org = decode_image_opencv(FLAGS.image)
    image = image.astype(np.uint8)

    # much more efficient way to build a batch...
    return np.tile(image, (batch_size, 1, 1, 1))
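np.tile allocates the whole batch in one step instead of reallocating on every np.append, which is why the comment calls it more efficient. A usage sketch, assuming decode_image_opencv returns a (1, H, W, 3) array as the other examples imply (model name is a placeholder):

inputs = prepareInputs(batch_size=8)   # shape (8, H, W, 3)
request = predict_pb2.PredictRequest()
request.model_spec.name = 'ssd_inception_v2_coco'  # whichever model the server actually serves
request.model_spec.signature_name = 'serving_default'
request.inputs['inputs'].CopyFrom(
    tf.contrib.util.make_tensor_proto(inputs, shape=inputs.shape))
result = stub.Predict(request, 10.0)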
Example #9
def main(_):
  if (sys.argv[1] == "mobilenet"):
    model_name = "ssd_mobilenet_v1_coco"
  elif (sys.argv[1] == "inception"):
    model_name = "ssd_inception_v2_coco"
  else:
    model_name = "ssd_resnet50_v1_fpn"

  s = open('/home/yitao/Documents/fun-project/tensorflow-related/traffic-jammer/single_dnn_client/obj_det/mscoco_complete_label_map.pbtxt','r').read()
  mymap = labelmap.StringIntLabelMap()
  global _label_map
  _label_map = text_format.Parse(s, mymap)

  channel = grpc.insecure_channel(FLAGS.server)
  stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)

  run_num = 10
  batch_size = 1
  for i in range(run_num):
    start = time.time()

    image, org = decode_image_opencv(FLAGS.image)
    _draw = org.copy()

    image = image.astype(np.uint8)
    inputs = image
    for _ in range(batch_size - 1):  # use _ so the outer loop variable i is not clobbered
      inputs = np.append(inputs, image, axis = 0)

    request = predict_pb2.PredictRequest()    
    request.model_spec.name = model_name
    request.model_spec.signature_name = 'serving_default'
    request.inputs['inputs'].CopyFrom(
        tf.contrib.util.make_tensor_proto(inputs, shape=inputs.shape))

    result = stub.Predict(request, 10.0)
    # print(result)
    boxes = result.outputs['detection_boxes']
    scores = result.outputs['detection_scores']
    labels = result.outputs['detection_classes']
    num_detections = result.outputs['num_detections']

    # print("???")
    # print(boxes)

    boxes = tf.make_ndarray(boxes)
    scores = tf.make_ndarray(scores)
    labels = tf.make_ndarray(labels)
    num_detections = tf.make_ndarray(num_detections)

    # print("boxes output",(boxes).shape)
    # print("scores output",(scores).shape)
    # print("labels output",(labels).shape)
    # print('num_detections',num_detections[0])

    # # visualize detections hints from 
    # # # https://github.com/tensorflow/models/blob/master/research/object_detection/object_detection_tutorial.ipynb

    # for box, score, label in zip(boxes[0], scores[0], labels[0]):
    #   # scores are sorted so we can break
    #   if score < 0.3:
    #       break
    #   #dim = image.shape[0:2]
    #   dim = _draw.shape
    #   #print("Label-raw",labels_to_names[label-1]," at ",box," Score ",score)
    #   box = box_normal_to_pixel(box, dim)
    #   b = box.astype(int)
    #   class_label = get_label(int(label))
    #   print("Label",class_label ," at ",b," Score ",score)
    #   # draw the image and write out
    #   cv2.rectangle(_draw,(b[0],b[1]),(b[2],b[3]),(0,0,255),1)
    #   cv2.putText(_draw,class_label + "-"+str(round(score,2)), (b[0]+2,b[1]+8),\
    #      cv2.FONT_HERSHEY_SIMPLEX, .45, (0,0,255))

    # cv2.imshow("test", _draw)
    # cv2.waitKey(0)

    end = time.time()
    duration = end - start
    print("duration = %s sec" % str(duration))