Example #1
 def __init__(self):
     print "init dualcross"
     self.gateway_channel = implementations.insecure_channel('localhost', 50051)
     self.gateway = beta_create_BfGatewayService_stub(self.gateway_channel)
     self.datafeed_channel = implementations.insecure_channel('localhost',50052)
     self.datafeed = beta_create_BfDatafeedService_stub(self.datafeed_channel)
     self.connectivity = interfaces.ChannelConnectivity.IDLE
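Note: every example on this page uses the deprecated grpc.beta API, which was removed after the stable grpc (GA) API shipped in grpcio 1.0. A minimal sketch of the GA equivalent of the setup above, assuming the usual *_pb2_grpc codegen convention (the bfgateway_pb2_grpc module name is illustrative, not confirmed by the source):

import grpc
import bfgateway_pb2_grpc  # hypothetical generated module

# The GA API takes a single 'host:port' target string instead of (host, port).
channel = grpc.insecure_channel('localhost:50051')
stub = bfgateway_pb2_grpc.BfGatewayServiceStub(channel)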
Example #2
def main():
    print("Testing namenode...")
    port = 5000
    ip = "127.0.0.1"
    
    channel = implementations.insecure_channel(str(ip),int(port))
    stub = namenode_pb2.beta_create_NameNode_stub(channel)
    
    #parameters required for request
    pfn = "./test.txt"
    file_size = 1337
    ts = "123121234"
    
    f = open(pfn, 'r')
    d = f.read()
    f.close()
    
    #req =namenode_pb2.StoreRequest(file_path=pfn,file_size=file_size,timestamp=ts)
    #response = stub.Store(req,10)
    req = namenode_pb2.ReadRequest(file_path=pfn,timestamp=ts)
    response = stub.Read(req,10)
    datanodes = json.loads(response.datanodes)
    dn_channel = implementations.insecure_channel('localhost',5000)
    dn_stub = datanode_pb2.beta_create_DataNode_stub(dn_channel)
    pathy = hashlib.sha1(pfn).hexdigest()
    dn_req = datanode_pb2.ReadRequest(blockname=pathy,timestamp=ts)
    response = dn_stub.Read(dn_req,10)
    print(response.data)
Example #3
 def __init__(self):
     print "init datarecorder"
     self.gateway_channel = implementations.insecure_channel('localhost', 50051)
     self.gateway = beta_create_BfGatewayService_stub(self.gateway_channel)
     self.datafeed_channel = implementations.insecure_channel('localhost',50052)
     self.datafeed = beta_create_BfDatafeedService_stub(self.datafeed_channel)
     self._service = beta_create_BfProxyService_server(self)
     self._service.add_insecure_port('[::]:50060')
Example #4
 def __init__(self):
     print "init robot"
     self.gateway_channel = implementations.insecure_channel('localhost', 50051)
     self.gateway = bfgateway_pb2.beta_create_BfGatewayService_stub(self.gateway_channel)
     self.datafeed_channel = implementations.insecure_channel('localhost', 50052)
     self.datafeed = bfdatafeed_pb2.beta_create_BfDatafeedService_stub(self.datafeed_channel)
     self._service = bfrobot_pb2.beta_create_BfRobotService_server(self)
     self._service.add_insecure_port('[::]:50053')
Example #5
    def __init__(self):
        print "init datarecorder"
        self.gateway_channel = implementations.insecure_channel('localhost', 50051)
        self.gateway = beta_create_BfGatewayService_stub(self.gateway_channel)
        self.datafeed_channel = implementations.insecure_channel('localhost',50052)
        self.datafeed = beta_create_BfDatafeedService_stub(self.datafeed_channel)
        self.connectivity = interfaces.ChannelConnectivity.IDLE

        # --------------------------------------------------
        # Timestamp of the last 1-minute bar
        self.last_dt_bar = dt.datetime.now()
        # Bar currently being accumulated
        self.CntBar = BfBarData()
        self.last_volume = 1
Example #6
def run():
  channel = implementations.insecure_channel('localhost', 50051)
  stub = ss_pb2.beta_create_ImageAnalysis_stub(channel)
  img_region = ss_pb2.ImageRegion()
  # img_file = open("/home/mythxcq/source_codes/caffe/examples/images/cat.jpg", "rb")
  img_file = open("/home/mythxcq/2.jpg", "rb")
  img_region.img = img_file.read()
  # img_region.x = 0
  # img_region.w = 1000
  # img_region.y = 0
  # img_region.h = 1000
  img_file.close()
  # string_buffer = StringIO.StringIO(img_region.img)
  # img = caffe.io.load_image(string_buffer)
  # inputs = [img]
  # model_def = "/home/mythxcq/caffe_person_classification_models/google_net/deploy_112.prototxt"
  # pretrained_model = "/home/mythxcq/caffe_person_classification_models/google_net/finetune_person_googlenet_112.caffemodel"
  # mean_file = ""
  # mean = np.load(mean_file)
  # mean = np.empty((3,112,112),dtype=np.float32)
  # mean[0] = 104
  # mean[1] = 117
  # mean[2] = 123
  # global classifier
  # mean = np.array([104,117,123])
  # classifier = caffe.Classifier(model_def, pretrained_model)
  # predictions = classifier.predict(inputs, False)
  # print(predictions)

  futures = [stub.ImageClassify.future(img_region, _TIMEOUT_SECONDS) for i in range(5)]

  print("send all tasks")

  for future in futures:
      print(future.result().type)
Example #7
  def __init__(self, server, config, hist):
    # Create the stub
    host, port = server.split(':')
    port = int(port)
    if config.HasField('security_params'):
      creds = implementations.ssl_channel_credentials(
          resources.test_root_certificates())
      channel = test_utilities.not_really_secure_channel(
          host, port, creds, config.security_params.server_host_override)
    else:
      channel = implementations.insecure_channel(host, port)

    connected_event = threading.Event()
    def wait_for_ready(connectivity):
      if connectivity == grpc.ChannelConnectivity.READY:
        connected_event.set()
    channel.subscribe(wait_for_ready, try_to_connect=True)
    connected_event.wait()

    if config.payload_config.WhichOneof('payload') == 'simple_params':
      self._generic = False
      self._stub = services_pb2.beta_create_BenchmarkService_stub(channel)
      payload = messages_pb2.Payload(
          body='\0' * config.payload_config.simple_params.req_size)
      self._request = messages_pb2.SimpleRequest(
          payload=payload,
          response_size=config.payload_config.simple_params.resp_size)
    else:
      self._generic = True
      self._stub = implementations.generic_stub(channel)
      self._request = '\0' * config.payload_config.bytebuf_params.req_size

    self._hist = hist
    self._response_callbacks = []
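The subscribe/Event pattern above is how beta-era clients blocked until the channel reached READY. Under the GA API the same wait is usually a one-liner; a sketch, assuming grpcio 1.x:

import grpc

channel = grpc.insecure_channel('%s:%d' % (host, port))
# Blocks until the channel is READY, raising on timeout.
grpc.channel_ready_future(channel).result(timeout=10)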
Example #8
def main():
  host = "0.0.0.0"
  port = 8502
  model_name = "default"
  model_version = -1
  signature_name = ""
  request_timeout = 10.0

  # Generate inference data
  image_b64_string = base64.urlsafe_b64encode(open("./0.jpg", "rb").read())
  images_tensor_proto = tf.contrib.util.make_tensor_proto(
      [image_b64_string], dtype=tf.string)

  # Create gRPC client
  channel = implementations.insecure_channel(host, port)
  stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
  request = predict_pb2.PredictRequest()
  request.model_spec.name = model_name
  if model_version > 0:
    request.model_spec.version.value = model_version
  if signature_name != "":
    request.model_spec.signature_name = signature_name
  request.inputs["images"].CopyFrom(images_tensor_proto)

  # Send request
  start_time = time.time()
  for i in range(10):
    result = stub.Predict(request, request_timeout)
  end_time = time.time()
  print("Cost time: {}".format(end_time - start_time))

  print(result)
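TensorFlow Serving clients like the one above port mechanically to the GA stubs: beta_create_PredictionService_stub becomes PredictionServiceStub from the *_pb2_grpc module. A minimal sketch under that assumption:

import grpc
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2_grpc

channel = grpc.insecure_channel('0.0.0.0:8502')
stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
request = predict_pb2.PredictRequest()
request.model_spec.name = 'default'
request.inputs['images'].CopyFrom(images_tensor_proto)  # as built above
# GA stubs accept the deadline as a timeout keyword argument.
result = stub.Predict(request, timeout=10.0)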
Example #9
  def testRegress(self):
    """Test PredictionService.Regress implementation."""
    model_path = self._GetSavedModelBundlePath()

    atexit.register(self.TerminateProcs)
    model_server_address = self.RunServer(PickUnusedPort(), 'default',
                                          model_path)

    print 'Sending Regress request...'
    # Prepare request
    request = regression_pb2.RegressionRequest()
    request.model_spec.name = 'default'
    request.model_spec.signature_name = 'regress_x_to_y'

    example = request.input.example_list.examples.add()
    example.features.feature['x'].float_list.value.extend([2.0])

    # Send request
    host, port = model_server_address.split(':')
    channel = implementations.insecure_channel(host, int(port))
    stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
    result = stub.Regress(request, RPC_TIMEOUT)  # 5 secs timeout
    # Verify response
    self.assertEquals(1, len(result.result.regressions))
    expected_output = 3.0
    self.assertEquals(expected_output, result.result.regressions[0].value)
    self._VerifyModelSpec(result.model_spec, request.model_spec.name,
                          request.model_spec.signature_name,
                          self._GetModelVersion(model_path))
Example #10
def run(gobgpd_addr, vrf_name, route_dist, import_rt, export_rt):
    channel = implementations.insecure_channel(gobgpd_addr, 50051)
    with gobgp_pb2.beta_create_GobgpApi_stub(channel) as stub:

        bin_rd = str_to_bin_for_rd(route_dist)
        import_rts = []
        bin_import_rt = str_to_bin_for_rt(import_rt)
        import_rts.append(str(bin_import_rt))

        export_rts = []
        bin_export_rt = str_to_bin_for_rt(export_rt)
        export_rts.append(str(bin_export_rt))

        vrf = {}
        vrf['name'] = vrf_name
        vrf['rd'] = str(bin_rd)
        vrf['import_rt'] = import_rts
        vrf['export_rt'] = export_rts


        ret = stub.ModVrf(gobgp_pb2.ModVrfArguments(operation=Operation_ADD, vrf=vrf), _TIMEOUT_SECONDS)
        if ret.code == 0:
            print "Success!"
        else:
            print "Error!"
Example #11
    def test_immediately_connectable_channel_connectivity(self):
        server = implementations.server({})
        port = server.add_insecure_port('[::]:0')
        server.start()
        channel = implementations.insecure_channel('localhost', port)
        callback = _Callback()

        try:
            ready_future = utilities.channel_ready_future(channel)
            ready_future.add_done_callback(callback.accept_value)
            self.assertIsNone(
                ready_future.result(timeout=test_constants.LONG_TIMEOUT))
            value_passed_to_callback = callback.block_until_called()
            self.assertIs(ready_future, value_passed_to_callback)
            self.assertFalse(ready_future.cancelled())
            self.assertTrue(ready_future.done())
            self.assertFalse(ready_future.running())
            # Cancellation after maturity has no effect.
            ready_future.cancel()
            self.assertFalse(ready_future.cancelled())
            self.assertTrue(ready_future.done())
            self.assertFalse(ready_future.running())
        finally:
            ready_future.cancel()
            server.stop(0)
Example #12
def run(gobgpd_addr, vrf_name, prefix, nexthop):
    channel = implementations.insecure_channel(gobgpd_addr, 50051)
    with gobgp_pb2.beta_create_GobgpApi_stub(channel) as stub:

        subnet = IPNetwork(prefix)
        ipaddr = subnet.ip
        masklen = subnet.prefixlen

        nlri = IPAddrPrefix(addr=ipaddr, length=masklen)
        bin_nlri = nlri.serialize()

        nexthop = BGPPathAttributeNextHop(value=nexthop)
        bin_nexthop = nexthop.serialize()

        origin = BGPPathAttributeOrigin(value=2)
        bin_origin = origin.serialize()

        pattrs = []
        pattrs.append(str(bin_nexthop))
        pattrs.append(str(bin_origin))

        path = {}
        path['nlri'] = str(bin_nlri)
        path['pattrs'] = pattrs

        uuid = stub.ModPath(gobgp_pb2.ModPathArguments(resource=Resource_VRF, name=vrf_name, path=path), _TIMEOUT_SECONDS)

        if uuid:
            print "Success!"
        else:
            print "Error!"
Example #13
def _get_service(service_url, service_class):
    channel = implementations.insecure_channel(service_url, api_settings.DEFAULT_GRPC_PORT)

    stub = import_class(_PROTO_GENERATED_PATH + service_class)
    service = stub(channel)

    return service
Example #14
def main():
  host = FLAGS.host
  port = FLAGS.port
  model_name = FLAGS.model_name
  model_version = FLAGS.model_version
  request_timeout = FLAGS.request_timeout

  # Create gRPC client and request
  channel = implementations.insecure_channel(host, port)
  stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
  request = predict_pb2.PredictRequest()
  request.model_spec.name = model_name
  if model_version > 0:
    request.model_spec.version.value = model_version

  inputs_np = numpy.asarray([float(sys.argv[1])])
  inputs_tensor_proto = tf.contrib.util.make_tensor_proto(inputs_np,
                                                          dtype=tf.float32)
  request.inputs['x_observed'].CopyFrom(inputs_tensor_proto)

  # Send request
  result = stub.Predict(request, request_timeout)
  print(result)
  
  result_np = tf.contrib.util.make_ndarray(result.outputs['y_pred'])
  print('\n%s\n' % result_np) 
Example #15
 def test_connect(self):
     channel = implementations.insecure_channel('localhost', 9713)
     print "Connected"
     stub = agro_pb2.beta_create_Scheduler_stub(channel)
     
     task_ids = []
     for i in range(5):
         task = agro_pb2.Task()
         task_id = str(uuid.uuid4())
         task.id = task_id
         task_ids.append(task_id)
         task.command = "/bin/echo"
         
         task.args.add( arg="Testing" )
         task.args.add( arg="Hello" )
         task.args.add( arg="World" )
         task.args.add( arg="%s" % (i) )            
         task.container = "ubuntu"
         task.tags.extend( ['testing'] )
         print "Adding task"
         stub.AddTask(task, 10)
     
     for a in stub.SearchTasks(agro_pb2.TagArray(tags=[]), 10):
         print "Found", a
     
     count = 0
     c = pyagro.wait(stub, task_ids)
     assert(c == 0)
     print "Quiting"
     channel = None
     stub = None
     
     #import pdb; pdb.set_trace()
     
Example #16
def run(gobgpd_addr):
    channel = implementations.insecure_channel(gobgpd_addr, 50051)
    with gobgp_pb2.beta_create_GobgpApi_stub(channel) as stub:
        vrfs = stub.GetVrfs(gobgp_pb2.Arguments(), _TIMEOUT_SECONDS)
        for vrf in vrfs:
            print(" Vrf.name : %s" % ( vrf.name))
            routeDist = _RouteDistinguisher.parser(vrf.rd)
            if routeDist.type == 0:
                print(" Vrf.rd   : %s:%s" % ( routeDist.admin, routeDist.assigned))
            else:
                print(" Vrf.rd   : ???")

            import_rt = vrf.import_rt
            for import_route_target in import_rt:
                import_rt_tmp = _ExtendedCommunity.parse(import_route_target)
                import_rt = import_rt_tmp[0]

                if isinstance(import_rt, BGPTwoOctetAsSpecificExtendedCommunity):
                    print(" Vrf.import_rt   : %s:%s" % ( import_rt.as_number, import_rt.local_administrator))
                else:
                    print(" Vrf.import_rt   : ???")

            export_rt = vrf.export_rt
            for export_route_target in export_rt:
                export_rt_tmp = _ExtendedCommunity.parse(export_route_target)
                export_rt = export_rt_tmp[0]

                if isinstance(export_rt, BGPTwoOctetAsSpecificExtendedCommunity):
                    print(" Vrf.export_rt   : %s:%s" % ( export_rt.as_number, export_rt.local_administrator))
                else:
                    print(" Vrf.export_rt   : ???")
            print "----------------------------"
Example #17
def run(gobgpd_addr, vrf_name):
    channel = implementations.insecure_channel(gobgpd_addr, 50051)
    with gobgp_pb2.beta_create_GobgpApi_stub(channel) as stub:
        rib = stub.GetRib(gobgp_pb2.Table(type=Resource_VRF, family=RF_IPv4_UC, name=vrf_name), _TIMEOUT_SECONDS)

        destinations_target = rib.destinations
        for destination_target in destinations_target:
            paths_target = destination_target.paths
            for path_target in paths_target:

                nlri = LabelledVPNIPAddrPrefix.parser(path_target.nlri)
                print (" Rib.prefix     : %s" % nlri[0].prefix)
                print (" Rib.route_dist : %s" % nlri[0].route_dist)
                print (" Rib.label_list : %s" % nlri[0].label_list)
                for pattr in path_target.pattrs:
                    path_attr = _PathAttribute.parser(pattr)
                    if isinstance(path_attr[0], BGPPathAttributeOrigin):
                        print (" Rib.origin     : %s" % path_attr[0].value)
                    elif isinstance(path_attr[0], BGPPathAttributeAsPath):
                        if path_attr[0].type == 2:
                            print(" Rib.aspath     : %s" % path_attr[0].value)
                        else:
                            print(" Rib.aspath     : ???")
                    elif isinstance(path_attr[0], BGPPathAttributeMultiExitDisc):
                        print (" Rib.med        : %s" % path_attr[0].value)
                    elif isinstance(path_attr[0], BGPPathAttributeExtendedCommunities):
                        for community in path_attr[0].communities:
                            if isinstance(community, BGPTwoOctetAsSpecificExtendedCommunity):
                                print(" Rib.community  : %s:%s" % ( community.as_number,
                                                                   community.local_administrator))
                            else:
                                print(" Rib.community  : ???")
                    elif isinstance(path_attr[0], BGPPathAttributeMpReachNLRI):
                        print (" Rib.nexthop    : %s" % path_attr[0].next_hop)
                print "----------------------------"
Example #18
def main(argv):
    filename = argv[0]
    img = Image.open(filename)
    width, height = img.size

    # crop out the center 300x300
    crop_h = (width - image_size)/2
    crop_v = (height - image_size)/2
    img = img.crop((crop_h, crop_v, width-crop_h, height-crop_v))

    # resize the resulting image to 150x150
    img = img.resize((image_size, image_size))

    # convert to grayscale
    img = img.convert('L')

    arr = array(img).reshape(image_size * image_size).astype(float)
    print(arr) #debug

    # build the request
    request = tf_convnet_inference_pb2.BoxImageRequest()
    for pixel in arr:
        request.image_data.append(pixel)

    # call the gRPC server
    channel = implementations.insecure_channel(host, port)
    stub = tf_convnet_inference_pb2.beta_create_BoxImageService_stub(channel)
    result = stub.Classify(request, 8.0)

    print(result.value)
Example #19
  def test_immediately_connectable_channel_connectivity(self):
    server_completion_queue = _low.CompletionQueue()
    server = _low.Server(server_completion_queue, [])
    port = server.add_http2_port('[::]:0')
    server.start()
    server_completion_queue_thread = threading.Thread(
        target=_drive_completion_queue, args=(server_completion_queue,))
    server_completion_queue_thread.start()
    channel = implementations.insecure_channel('localhost', port)
    callback = _Callback()

    try:
      ready_future = utilities.channel_ready_future(channel)
      ready_future.add_done_callback(callback.accept_value)
      self.assertIsNone(
          ready_future.result(test_constants.SHORT_TIMEOUT))
      value_passed_to_callback = callback.block_until_called()
      self.assertIs(ready_future, value_passed_to_callback)
      self.assertFalse(ready_future.cancelled())
      self.assertTrue(ready_future.done())
      self.assertFalse(ready_future.running())
      # Cancellation after maturity has no effect.
      ready_future.cancel()
      self.assertFalse(ready_future.cancelled())
      self.assertTrue(ready_future.done())
      self.assertFalse(ready_future.running())
    finally:
      ready_future.cancel()
      server.shutdown()
      server_completion_queue.shutdown()
      server_completion_queue_thread.join()
Example #20
def main():
  host = "0.0.0.0"
  port = 8502
  model_name = "default"
  model_version = -1
  signature_name = ""
  request_timeout = 10.0

  # Generate inference data
  keys = numpy.asarray([[1]])
  keys_tensor_proto = tf.contrib.util.make_tensor_proto(keys, dtype=tf.int32)
  features = numpy.asarray([[1, 2, 3, 4, 5, 6, 7, 8, 9]])
  features_tensor_proto = tf.contrib.util.make_tensor_proto(
      features, dtype=tf.float32)

  # Create gRPC client
  channel = implementations.insecure_channel(host, port)
  stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
  request = predict_pb2.PredictRequest()
  request.model_spec.name = model_name
  if model_version > 0:
    request.model_spec.version.value = model_version
  if signature_name != "":
    request.model_spec.signature_name = signature_name
  request.inputs["keys"].CopyFrom(keys_tensor_proto)
  request.inputs["features"].CopyFrom(features_tensor_proto)

  # Send request
  start_time = time.time()
  for i in range(100):
    result = stub.Predict(request, request_timeout)
  end_time = time.time()
  print("Cost time: {}".format(end_time - start_time))

  print(result)
Example #21
def run(gobgpd_addr, neighbor_address, local_as, peer_as):
    channel = implementations.insecure_channel(gobgpd_addr, 50051)
    with gobgp_pb2.beta_create_GobgpApi_stub(channel) as stub:

        conf = {}
        if local_as == peer_as:
            conf['peer_type'] = PEER_TYPE_INTERNAL
        else:
            conf['peer_type'] = PEER_TYPE_EXTERNAL

        conf['neighbor_address'] = neighbor_address
        conf['local_as'] = local_as
        conf['peer_as'] = peer_as

        families = []
        families.append(RF_IPv4_UC)

        peer = {}
        peer['conf'] = conf
        peer['families'] = families

        uuid = stub.ModNeighbor(gobgp_pb2.ModNeighborArguments(operation=Operation_ADD, peer=peer), _TIMEOUT_SECONDS)

        if uuid:
            print "Success!"
        else:
            print "Error!"
Example #22
def _stub(args):
    if args.oauth_scope:
        if args.test_case == "oauth2_auth_token":
            # TODO(jtattermusch): This testcase sets the auth metadata key-value
            # manually, which also means that the user would need to do the same
            # thing every time he/she would like to use and out of band oauth token.
            # The transformer function that produces the metadata key-value from
            # the access token should be provided by gRPC auth library.
            access_token = _oauth_access_token(args)
            metadata_transformer = lambda x: [("authorization", "Bearer %s" % access_token)]
        else:
            metadata_transformer = lambda x: [("authorization", "Bearer %s" % _oauth_access_token(args))]
    else:
        metadata_transformer = lambda x: []
    if args.use_tls:
        if args.use_test_ca:
            root_certificates = resources.test_root_certificates()
        else:
            root_certificates = None  # will load default roots.

        channel = test_utilities.not_really_secure_channel(
            args.server_host,
            args.server_port,
            implementations.ssl_channel_credentials(root_certificates),
            args.server_host_override,
        )
        stub = test_pb2.beta_create_TestService_stub(channel, metadata_transformer=metadata_transformer)
    else:
        channel = implementations.insecure_channel(args.server_host, args.server_port)
        stub = test_pb2.beta_create_TestService_stub(channel)
    return stub
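For the TLS branch in the example above, the GA API folds credentials into grpc.secure_channel directly and replaces metadata_transformer with per-call metadata or call credentials. A rough sketch of the channel setup only, under those assumptions:

import grpc

creds = grpc.ssl_channel_credentials(root_certificates=root_certificates)
# Override the target name when testing against a cert issued for another host.
channel = grpc.secure_channel(
    '%s:%d' % (args.server_host, args.server_port), creds,
    options=(('grpc.ssl_target_name_override', args.server_host_override),))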
Example #23
  def VerifyPredictRequest(self,
                           model_server_address,
                           expected_output,
                           model_name='default',
                           specify_output=True):
    """Send PredictionService.Predict request and verify output."""
    print 'Sending Predict request...'
    # Prepare request
    request = predict_pb2.PredictRequest()
    request.model_spec.name = model_name
    request.inputs['x'].dtype = types_pb2.DT_FLOAT
    request.inputs['x'].float_val.append(2.0)
    dim = request.inputs['x'].tensor_shape.dim.add()
    dim.size = 1

    if specify_output:
      request.output_filter.append('y')
    # Send request
    host, port = model_server_address.split(':')
    channel = implementations.insecure_channel(host, int(port))
    stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
    result = stub.Predict(request, 5.0)  # 5 secs timeout
    # Verify response
    self.assertTrue('y' in result.outputs)
    self.assertIs(types_pb2.DT_FLOAT, result.outputs['y'].dtype)
    self.assertEquals(1, len(result.outputs['y'].float_val))
    self.assertEquals(expected_output, result.outputs['y'].float_val[0])
Example #24
  def dial(self):
    if self.stub:
      self.stub.close()

    p = urlparse('http://' + self.addr)
    channel = implementations.insecure_channel(p.hostname, p.port)
    self.stub = vtgateservice_pb2.beta_create_Vitess_stub(channel)
Example #25
def _stub(args):
  if args.test_case == 'oauth2_auth_token':
    creds = oauth2client_client.GoogleCredentials.get_application_default()
    scoped_creds = creds.create_scoped([args.oauth_scope])
    access_token = scoped_creds.get_access_token().access_token
    call_creds = implementations.access_token_call_credentials(access_token)
  elif args.test_case == 'compute_engine_creds':
    creds = oauth2client_client.GoogleCredentials.get_application_default()
    scoped_creds = creds.create_scoped([args.oauth_scope])
    call_creds = implementations.google_call_credentials(scoped_creds)
  elif args.test_case == 'jwt_token_creds':
    creds = oauth2client_client.GoogleCredentials.get_application_default()
    call_creds = implementations.google_call_credentials(creds)
  else:
    call_creds = None
  if args.use_tls:
    if args.use_test_ca:
      root_certificates = resources.test_root_certificates()
    else:
      root_certificates = None  # will load default roots.

    channel_creds = implementations.ssl_channel_credentials(root_certificates)
    if call_creds is not None:
      channel_creds = implementations.composite_channel_credentials(
          channel_creds, call_creds)

    channel = test_utilities.not_really_secure_channel(
        args.server_host, args.server_port, channel_creds,
        args.server_host_override)
    stub = test_pb2.beta_create_TestService_stub(channel)
  else:
    channel = implementations.insecure_channel(
        args.server_host, args.server_port)
    stub = test_pb2.beta_create_TestService_stub(channel)
  return stub
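Each implementations.*_credentials helper used in Example #25 has a same-shaped GA counterpart. A hedged sketch of the composite-credentials path:

import grpc

call_creds = grpc.access_token_call_credentials(access_token)
channel_creds = grpc.composite_channel_credentials(
    grpc.ssl_channel_credentials(root_certificates), call_creds)
channel = grpc.secure_channel(
    '%s:%d' % (args.server_host, args.server_port), channel_creds)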
Example #26
def main():
    logging.basicConfig(stream=sys.stderr, level=logging.INFO)
    try:
        master = subprocess.Popen(['python', 'typhoon/master.py'])
        time.sleep(1)
        worker = subprocess.Popen(['./build/latest/typhoon_worker'], env={ 'MASTER_ADDRESS': '127.0.0.1:29999' })
        graph = test_kmer()

        channel = implementations.insecure_channel('localhost', 29999)
        time.sleep(1)
        stub = typhoon_pb2.beta_create_TyphoonMaster_stub(channel)
        response = stub.execute(graph, 3600)
        logging.info('Response: %s', response)
        
        ready = False
        while not ready:
            ready = True
            empty = typhoon_pb2.EmptyMessage()
            status = stub.status(empty, 100)
#             logging.info('Status: %s', status)
            job = status.job[0]
            for t in job.task:
                if t.status != typhoon_pb2.SUCCESS:
                    ready = False
            
            time.sleep(1)
                    
        logging.info('Success!')
    except:
        logging.info('Error.', exc_info=1)
    finally:
        master.kill()
        worker.kill()
        os._exit(1)
Example #27
def connect():
    # create a channel
    channel = implementations.insecure_channel('localhost', 50051)

    # create a stub
    stub = structure_pb2.beta_create_StructService_stub(channel)
    return stub, channel
Example #28
def do_inference(hostport, input):
  """Sends a single asynchronous request to the mnist_inference service.

  Args:
    hostport: Host:port address of the mnist_inference service.
    input: Image data to classify, as a numpy array.

  Raises:
    IOError: An error occurred processing the test data set.
  """
  host, port = hostport.split(':')
  channel = implementations.insecure_channel(host, int(port))
  stub = mnist_inference_pb2.beta_create_MnistService_stub(channel)
  result = {'active': 0, 'error': 0, 'done': 0}
  request = inference_pb2.VerifyRequest()
  for pixel in input.flatten():
    request.image_data.append(pixel.item())
  result_future = stub.Classify.future(request, 5.0)
  exception = result_future.exception()
  if exception:
    print exception
  else:
    sys.stdout.write('.')
    sys.stdout.flush()
    response = numpy.array(result_future.message)
    print(response)
Example #29
 def __init__(self, host, port, timeout, user, password, creds=None, options=None):
     """This class creates grpc calls using python.
         :param username: Username for device login
         :param password: Password for device login
         :param host: The ip address for the device
         :param port: The port for the device
         :param timeout: how long before the rpc call timesout
         :param creds: Input of the pem file
         :param options: TLS server name
         :type password: str
         :type username: str
         :type server: str
         :type port: int
         :type timeout:int
         :type creds: str
         :type options: str
     """
     if creds is not None:
         self._target = '%s:%d' % (host, port)
         self._creds = implementations.ssl_channel_credentials(creds)
         self._options = options
         channel = grpc.secure_channel(
             self._target, self._creds,
             (('grpc.ssl_target_name_override', self._options,),))
         self._channel = implementations.Channel(channel)
     else:
         self._host = host
         self._port = port
         self._channel = implementations.insecure_channel(self._host, self._port)
     self._stub = ems_grpc_pb2.beta_create_gRPCConfigOper_stub(self._channel)
     self._timeout = int(timeout)
     self._metadata = [('username', user), ('password', password)]
Example #30
def main():
    parser = argparse.ArgumentParser(description="Translation client example")
    parser.add_argument("--model_name", required=True,
                        help="model name")
    parser.add_argument("--host", default="localhost",
                        help="model server host")
    parser.add_argument("--port", type=int, default=9000,
                        help="model server port")
    parser.add_argument("--timeout", type=float, default=10.0,
                        help="request timeout")
    parser.add_argument("--concurrency", type=int, default=10,
                        help="number of concurrent requests")
    parser.add_argument('--spm_model', type=str,
                        help="sentencepiece model file")
    parser.add_argument('input', type=str, help="string to split")
    args = parser.parse_args()

    channel = implementations.insecure_channel(args.host, args.port)
    stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
    sp = spm.SentencePieceProcessor()
    sp.Load(args.spm_model)
    future = translate(stub, args.model_name, sp, args.input,
                       timeout=args.timeout)
    output = parse_translation_result(future.result(), sp)
    print("Input:", args.input)
    print("Split:", output)
Example #31
    def predict_from_tf_serving(self, line):
        from grpc.beta import implementations
        from tensorflow_serving.apis import predict_pb2
        from tensorflow_serving.apis import prediction_service_pb2_grpc

        # def pretty_print(line, preds):
        #     words = line.strip().split()
        #     lengths = [max(len(w), len(p)) for w, p in zip(words, preds)]
        #     padded_words = [w + (l - len(w)) * ' ' for w, l in zip(words, lengths)]
        #     padded_preds = [p.decode() + (l - len(p)) * ' ' for p, l in zip(preds, lengths)]
        #     # print('words: {}'.format(' '.join(padded_words)))
        #     # print('preds: {}'.format(' '.join(padded_preds)))
        #     # res1 = 'words: {}'.format(' '.join(padded_words))
        #     # res2 = 'preds: {}'.format(' '.join(padded_preds))
        #     res1 = ' '.join(padded_words)
        #     res2 = ' '.join(padded_preds)
        #     return res1, res2
        #
        # def predict_input_fn(line):
        #     # Words
        #     words = [w.encode() for w in line.strip().split()]
        #     nwords = len(words)
        #
        #     # Wrapping in Tensors
        #     # words = tf.constant([words], dtype=tf.string)
        #     # nwords = tf.constant([nwords], dtype=tf.int32)
        #
        #     # return (words, nwords), None
        #     return words, nwords
        def pretty_print(line, preds):
            line = repr(line).replace('\\', '/')[1:-1]
            line = list(line.replace(' ', ''))
            words = [w for w in line]
            lengths = [max(len(w), len(p)) for w, p in zip(words, preds)]
            padded_words = [w + (l - len(w)) * ' ' for w, l in zip(words, lengths)]
            padded_preds = [p + (l - len(p)) * ' ' for p, l in zip(preds, lengths)]
            print('words: {}'.format('\t'.join(padded_words)))
            print('preds: {}'.format('\t'.join(padded_preds)))

        def predict_input_fn(line):
            # Words
            # line = ''.join([' ' + c + ' ' if len(c.encode()) > 1 else c for c in line]).split()
            line = repr(line).replace('\\', '/')[1:-1]
            line = list(line.replace(' ', ''))
            words = [w.encode() for w in line]
            nwords = len(words)

            # Wrapping in Tensors
            # words = tf.constant(words, shape=(1, nwords), dtype = tf.string)
            # nwords = tf.constant([nwords], shape=(1,), dtype=tf.int32)

            # return (words, nwords), None
            return words, nwords

        def build_sentence(data, tags):
            res = ''
            sub_res = []
            for word, tag in zip(data.strip().split(), tags.strip().split()):

                if tag == 'B':
                    if sub_res:
                        sub_str = ''.join(sub_res)
                        res = res + sub_str + ' '
                        # idx = i
                        sub_res = []
                    else:
                        # idx = i
                        sub_res.append(word)
                elif tag == 'S':
                    # res[i] = word
                    # ' '.join(res, word)
                    res = res + word + ' '
                    # idx = -1
                elif tag == 'M':
                    sub_res.append(word)

                    # idx = -1

                elif tag == 'E':
                    if len(sub_res) > 0:
                        sub_res.append(word)
                        sub_str = ''.join(sub_res)
                        res = res + sub_str + ' '

                        sub_res = []
                    else:
                        # res[idx] = word
                        res = res + word + ' '

            return res.strip()

        # @timecost
        # def predict(testStr):
        host = '43.247.185.201'
        # port='8031'
        port = '8036'
        # port='30000'
        channel = implementations.insecure_channel(host, int(port))
        stub = prediction_service_pb2_grpc.PredictionServiceStub(channel._channel)

        request = predict_pb2.PredictRequest()
        request.model_spec.name = "saved_model"
        words, nwords = predict_input_fn(line)
        request.inputs['words'].CopyFrom(
            tf.contrib.util.make_tensor_proto(words, shape=(1, nwords), dtype=tf.string))
        # tf.contrib.util.make_tensor_proto(words, shape=words.shape, dtype=tf.string))
        request.inputs['nwords'].CopyFrom(
            tf.contrib.util.make_tensor_proto([nwords], shape=(1,), dtype=tf.int32))

        future = stub.Predict.future(request, 10.0)
        result = future.result()
        result_list = tf.make_ndarray(result.outputs["pred_ids"]).tolist()
        tags_id = [self.tags_from_table[id] for id in result_list[0]]
        pretty_print(line, tags_id)
Example #32
def _create_stub(server):
  host, port = server.split(":")
  channel = implementations.insecure_channel(host, int(port))
  # TODO(bgb): Migrate to GA API.
  return prediction_service_pb2.beta_create_PredictionService_stub(channel)
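The TODO in Example #32 amounts to the following GA rewrite; a sketch, assuming the standard prediction_service_pb2_grpc codegen module:

import grpc
from tensorflow_serving.apis import prediction_service_pb2_grpc

def _create_stub(server):
  # The GA API accepts the "host:port" string directly; no split needed.
  channel = grpc.insecure_channel(server)
  return prediction_service_pb2_grpc.PredictionServiceStub(channel)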
Example #33
import sys

from grpc.beta import implementations
import numpy as np
import tensorflow as tf
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2
import threading

hostport = "0.0.0.0:8500"
host, port = hostport.split(':')
print(host, port)

# Create the Python gRPC stub as a module-level global; do not recreate it on every call.
channel = implementations.insecure_channel(host, int(port))
stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)


# Call the gRPC service: convert the image to an array, then put it into the request.
def do_inference(hostport, img_obj):
    request = predict_pb2.PredictRequest()
    request.model_spec.name = 'mnist'
    request.model_spec.signature_name = 'predict_images'
    # image, label = test_data_set.next_batch(1)
    label = np.array([1], dtype=np.uint8)

    img_array = np.array(img_obj, dtype=np.float32)
    img_array = img_array.reshape(img_array.size)

    # New approach; very close to the raw data.
Example #34
    for s in sentence_words:
        print(s)
        for i, w in enumerate(words):
            if w.strip() == s.strip():
                bow[i] = 1

    return (np.array(bow))


query = "hello this is a new query"

x = get_tf_record(query)

print(x)

channel = implementations.insecure_channel(TF_MODEL_SERVER_HOST,
                                           TF_MODEL_SERVER_PORT)
stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)

request = predict_pb2.PredictRequest()
request.model_spec.name = "helix"
request.model_spec.signature_name = "predict_class"
request.inputs['query'].CopyFrom(
    tf.contrib.util.make_tensor_proto(x, shape=[1, len(words)]))
#request.inputs['classes'].CopyFrom(
#   tf.contrib.util.make_tensor_proto(np.array(1,2,3), shape=[3]))

result = stub.Predict(request, 10.0)  # 10 secs timeout

print(result)
arr = tf.contrib.util.make_ndarray(result.outputs["classes"])[0]
print(arr)
Example #35
def application(environ, start_response):

  request_method = environ['REQUEST_METHOD'].upper()

  response_body = dict()

  if request_method == 'GET':

    response_body.update(
      {
        'service': 'slim-unified-client-rest',
        'message': 'No support on GET, please use POST.',
        'usage': {
          'endpoint': 'POST /',
          'payload': {
            'format': 'application/json',
            'body': {
              'host': 'optional, host for tensorflow serving model server, default "127.0.0.1"',
              'port': 'optional, port for tensorflow serving model server, default "9000"',
              'model_name': 'optional, tensorflow serving model name, default "slim_inception_resnet_v2", ' +
                'all available models: slim_inception_resnet_v2 at port 9000, and slim_inception_v4 at port 9090',
              'image_urls': 'required, image urls in list'
            }
          },
          'returns': {
            'classes': 'top 5 classes of each input image_urls, in shape `n x 5`',
            'scores': 'top 5 classes scores (probabilities) of each input image_urls, in shape `n x 5`',
            'prelogits': 'a numeric vector of 1536 of each input image_urls, in shape `n x 1536`, ' +
              'this vector can be viewed as features of each input image_urls for transfer learning or etc.'
          }
        }
      }
    )

  if request_method == 'POST':

    # the environment variable CONTENT_LENGTH may be empty or missing
    try:

      request_body_size = int(environ.get('CONTENT_LENGTH', 0))

      # the request body is expected to be json
      request_body = json.loads(
        environ['wsgi.input'].read(request_body_size)
      )

      # connect to tfs-slim
      host = request_body.get('host', '127.0.0.1')
      port = request_body.get('port', '9000')
      model_name = request_body.get('model_name', 'slim_inception_resnet_v2')
      image_urls = request_body.get('image_urls', [])
      # print('channel:', host+':'+port)
      # print('model_name:', model_name)
      # print('image_urls:', image_urls)

      assert len(image_urls) > 0, "payload should contain image_urls as a list of image URLs."
      assert len(image_urls) <= 128, "payload should contain image_urls as a list of image URLs, " + \
                                     "with length less than or equal to 128."

      # fetch image urls into bytes, in parallel
      # multiprocessing.pool.ThreadPool is undocumented, as its implementation was never completed;
      # on modern computer architectures, starting a process has a similar cost to starting a thread,
      # so multiprocessing.Pool is often preferred, with the following exception:
      # CPU bound jobs -> multiprocessing.Pool
      # IO bound jobs -> multiprocessing.pool.ThreadPool
      _thread_pool = ThreadPool(len(image_urls))
      image_fetch_results = _thread_pool.map(fetch_url, image_urls)
      _thread_pool.close() # close the thread pool explicitly to avoid `can't start new thread`

      image_bytes = [
        result[1] for result in image_fetch_results if result[2] is None
      ]
      # for result in image_fetch_results:
      #   if result[2] is not None:
      #     print('Error in Fetch URL: {}'.format(result[2]))

      # establish a stub with tensorflow serving service
      channel = implementations.insecure_channel(host, int(port))
      stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
      request = predict_pb2.PredictRequest()
      request.model_spec.name = model_name
      request.model_spec.signature_name = 'predict_images'
      request.inputs['images'].CopyFrom(
        tf.contrib.util.make_tensor_proto(
          image_bytes, shape=[len(image_bytes)]
        )
      )
      result = stub.Predict(request, 60.0)  # 60 secs timeout

      # post-processing the result protobuf into json

      # stringVal have to go the hard way
      result_classes_tensorproto = text_format.MessageToString(
        result.outputs['classes']
      )
      result_classes_tensorproto = result_classes_tensorproto.strip().split('\n')
      result_classes_value = [
        x.split(": ")[1][1:-1] for x in result_classes_tensorproto if 'string_val' in x
      ]  # use ...[1:-1] to remove double quotes around synset names
      # assume in classes are in 2 dims
      result_classes_shape = [
        int(x.split(": ")[1]) for x in result_classes_tensorproto if 'size' in x
      ]
      assert len(result_classes_shape) == 2
      result_classes = [
        result_classes_value[i:i+result_classes_shape[1]] for i in range(
          0, len(result_classes_value), result_classes_shape[1]
        )
      ]

      # floatVal is able to go the easy way
      result_scores_tensorproto = json_format.MessageToDict(
        result.outputs['scores']
      )
      result_scores_value = result_scores_tensorproto['floatVal']
      result_scores_shape = [
        int(x['size']) for x in result_scores_tensorproto['tensorShape']['dim']
      ]
      result_scores = np.reshape(
        result_scores_value, result_scores_shape
      ).tolist()

      # prelogits as image feature for further development
      result_prelogits_tensorproto = json_format.MessageToDict(
        result.outputs['prelogits']
      )
      result_prelogits_value = result_prelogits_tensorproto['floatVal']
      result_prelogits_shape = [
        int(x['size']) for x in result_prelogits_tensorproto['tensorShape']['dim']
      ]
      result_prelogits = np.reshape(
        result_prelogits_value, result_prelogits_shape
      ).tolist()

      response_body.update(
        {
          'data': {
            'image_urls': [
              x for x, y in zip(image_urls, image_fetch_results) if y[2] is None
            ],
            'classes': result_classes,
            'scores': result_scores,
            'prelogits': result_prelogits
          },
          'message': {
            'image_urls_fetch_failed': [
              x for x, y in zip(image_urls, image_fetch_results) if y[2] is not None
            ]
          }
        }
      )

    except Exception as e:

      response_body.update(
        {
          'error': {
            'type': str(type(e)),
            'args': str(e.args)
          }
        }
      )

  # convert response body into str
  response_body = json.dumps(response_body)

  # construct start_response with status, and response headers
  status = '200 OK'

  response_headers = [
    ('Content-Type', 'application/json'),
    ('Content-Length', str(len(response_body)))
  ]

  start_response(status, response_headers)

  # return: wrap response body into a list
  return [response_body]
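Aside: parsing string_val out of the text-format dump above is the hard way; tensor_util.MakeNdarray (already used by a later example on this page) decodes both string and float TensorProtos directly. A sketch:

from tensorflow.python.framework import tensor_util

classes = tensor_util.MakeNdarray(result.outputs['classes'])  # ndarray of bytes, shape n x 5
scores = tensor_util.MakeNdarray(result.outputs['scores'])    # ndarray of float32, shape n x 5
result_classes = [[c.decode() for c in row] for row in classes]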
Example #36
def web_client_serving():
    args_batch_size = 10
    args_model_name = "aver_ende"
    args_host = "localhost"
    args_tf_port= 9000
    args_redis_port = 6379
    args_timeout = 100
    args_src = None
    args_tgt = None
    # Redis
    red0 = redis.Redis(host=args_host, port=args_redis_port, db=0) # for hash caching
    red1 = redis.Redis(host=args_host, port=args_redis_port, db=1) # for message queue
    # TensorFlow Serving
    channel = implementations.insecure_channel(args_host, args_tf_port)
    stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
    # Fast Execution
    fast_execution = 0
    fast_model_name = "toy_ende"
    fast_timeout = 100000
    # Looping User Serving
    while True:
        try:
            if red1.get("timeout_exist"):
                print("Fast Execution for Timeout user !")
                fast_execution = 1
                user_to_serve = red1.lpop("timeout_user_list")
                if user_to_serve is None:
                    print("Well.. False Alarm. No user there.")
                    fast_execution = 0
                    red1.delete("timeout_exist")
                    print("Waiting for new users ...")
                    while not red1.get("timeout_exist"):
                        user_to_serve = red1.blpop('web_user_list', 100)[1] # pick the first user, if none then wait
                    print("Serving: "+user_to_serve)
            else:
                print("Waiting for new users ...")
                while not red1.get("timeout_exist"):
                    user_to_serve = red1.blpop('web_user_list', 100)[1] # pick the first user, if none then wait
                print("Serving: "+user_to_serve)
            src_list_id = user_to_serve + "_src" # e.g. "web_1_src"
            tgt_list_id = user_to_serve + "_tgt" # e.g. "web_1_tgt"

            #: retrieve all queries of this user
            queries = red1.lrange(src_list_id, 0, -1)
            #: try caching first
            length_of_queries = len(queries)
            queries_for_loop = queries[:]
            for item in queries_for_loop:
                # print(str(length_of_queries)+repr(item))
                result = red0.hget(item, args_model_name)
                if result is None:
                    # print(str(length_of_queries))
                    break
                    # no hope, go to TensorFlow Serving :(
                else:
                    red1.rpush(tgt_list_id, result) # for users to retrieve from list
                    queries.remove(item)
                    length_of_queries = length_of_queries - 1

            if length_of_queries > 0:
                batch_tokens = []
                for query in queries:
                    batch_token = [str(item) for item in query.split()]
                    batch_tokens.append(batch_token)
                    # red1.rpush(tgt_list_id, str(batch_token)) # too good to be true :)
                
                # batch_tokens = [
                #     ["Hello", "world", "!"],
                #     ["My", "name", "is", "John", "."],
                #     ["I", "live", "on", "the", "West", "coast", "."]]

                #: ready for TensorFlow Serving
                tf_start_time = time.time()
                futures = []
                for tokens in batch_tokens:
                    if 0 == len(tokens):
                        continue
                    if fast_execution:
                        future = translate(stub, fast_model_name, tokens, timeout=fast_timeout)
                        futures.append(future)
                    else:
                        future = translate(stub, args_model_name, tokens, timeout=args_timeout)
                        futures.append(future)
                
                for tokens, future in zip(batch_tokens, futures):
                    result_tokens = parse_translation_result(future.result())
                    #: get results from tensorflow serving
                    #: result_tokens = ["Hallo", "Welt", "!"]
                    query = ' '.join(tokens)
                    result = ' '.join(result_tokens)
                    if not fast_execution: # only store full execution
                        red0.hset(query, args_model_name, result) # cache result
                        red0.expire(query, 1200) # key expires after 20 minutes
                    red1.rpush(tgt_list_id, result) # return to users
                    if not fast_execution:
                        print(tgt_list_id + '||' + result + "|| Latency: " + str(time.time() - tf_start_time))
                    else:
                        print("Fast Execution "+tgt_list_id + '||' + result + "|| Latency: " + str(time.time() - tf_start_time))
                        fast_execution = 0
                        red1.delete("timeout_exist")
            print("Well served: "+user_to_serve)
        except:
            red1.lpush('web_user_list', user_to_serve)
            print("Fail to serve: "+ str(user_to_serve))
Example #37
            result[n_sample] = dest
            pool.add_task(work, src, n_sample, counter)
            n_sample += 1
            if n_sample % 100 == 0:
                bleu, _ = counter.get()
                print '%d %.3f' % (n_sample, (0. + bleu) / n_sample)
            if n_sample == total:
                break
        pool.wait_completion()
        bleu, _ = counter.get()
        print '%d %.3f' % (n_sample, (0. + bleu) / n_sample)


if __name__ == '__main__':

    argparser = argparse.ArgumentParser(sys.argv[0])
    argparser.add_argument('--port', type=int, default=30031)
    argparser.add_argument('--host', type=str, default='localhost')
    argparser.add_argument('--num_thread', type=int, default=1)
    argparser.add_argument('--test_file', type=str)
    argparser.add_argument('--num_sample', type=int, default=100)
    argparser.add_argument('--verbose', type=int, default=1)
    argparser.add_argument('--num_skip', type=int, default=0)
    args = argparser.parse_args()

    global channel
    global results

    channel = implementations.insecure_channel(args.host, args.port)
    test()
Example #38
    def predict(self, sents):
        _, self.sents, self.segs, self.tags = self.sents2id(sents)
        # hostport = '192.168.31.186:6000'

        # host, port = hostport.split(':')
        # channel = implementations.insecure_channel(host, int(port))
        channel = implementations.insecure_channel(TF_SERVING_HOST,
                                                   TF_SERVING_PORT)

        stub = prediction_service_pb2.beta_create_PredictionService_stub(
            channel)
        # build request
        request = predict_pb2.PredictRequest()
        request.model_spec.name = self.model_name
        request.model_spec.signature_name = self.signature_name
        request.inputs['input_w'].CopyFrom(
            tf.contrib.util.make_tensor_proto(self.sents, dtype=tf.int32))
        request.inputs['input_seg'].CopyFrom(
            tf.contrib.util.make_tensor_proto(self.segs, dtype=tf.int32))
        request.inputs['target'].CopyFrom(
            tf.contrib.util.make_tensor_proto(self.tags, dtype=tf.int32))
        request.inputs['dropout'].CopyFrom(
            tf.contrib.util.make_tensor_proto(1.0, dtype=tf.float32))
        model_results = stub.Predict(request, 60.0)

        trans = tensor_util.MakeNdarray(model_results.outputs["trans"])
        scores = tensor_util.MakeNdarray(model_results.outputs["scores"])
        lengths = tensor_util.MakeNdarray(model_results.outputs["lengths"])
        batch_paths = self.decode(scores, lengths, trans)
        tags = [self.id_to_tag[idx] for idx in batch_paths[0]]
        item = self.result_to_json(sents, tags)
        lbl_list = ["O"] * len(sents)
        for lbldict in item["entities"]:
            start, end, lbl = lbldict["start"], lbldict["end"], lbldict["type"]
            lbl_list[start:end] = [lbl] * (end - start)
        ner_str = ""
        for c, lbl in zip(sents, lbl_list):
            ner_str += c + "/" + lbl + " "
        ner_str = ner_str.rstrip()

        year_dict = {"YEAR": 1}
        year_str = self.str_spec(sents, lbl_list, year_dict)
        # print("year_str:{}".format(year_str))
        month_dict = {"MONTH": 1}
        month_str = self.str_spec(sents, lbl_list, month_dict)
        # print("month_str:{}".format(month_str))
        day_dict = {"DAY": 1}
        day_str = self.str_spec(sents, lbl_list, day_dict)
        # print("day_str:{}".format(day_str))
        part_dict = {"PART": 1}
        part_str = self.str_spec(sents, lbl_list, part_dict)
        # print("part_str:{}".format(part_str))
        speed_dict = {"SPEED": 1}
        speed_str = self.str_spec(sents, lbl_list, speed_dict)
        # print("speed_str:{}".format(speed_str))
        type_dict = {"TYPE": 1}
        type_str = self.str_spec(sents, lbl_list, type_dict)
        # print("type_str:{}".format(type_str))

        loc_dict = {"SLOC": 1, "ELOC": 2}
        loc_str = self.str_spec(sents, lbl_list, loc_dict)
        # print("loc_str:{}".format(loc_str))
        time_dict = {"STIME": 1, "ETIME": 2}
        time_str = self.str_spec(sents, lbl_list, time_dict)
        # print("time_str:{}".format(time_str))

        return loc_str, time_str, year_str, month_str, day_str, part_str, speed_str, type_str
Example #39
def index():
    #host, port, image = parse_args()
    try:
        host = "localhost"
        port = 9000
        channel = implementations.insecure_channel(host, int(port))
        stub = prediction_service_pb2.beta_create_PredictionService_stub(
            channel)

        # dataset = input_data.read_data_sets('/tmp/Mnist', one_hot=True)
        # batch = dataset.train.next_batch(1)
        # print(batch[1])
        if (request.method == 'GET'):
            #img = [[0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,0,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]]
            numpyarray = np.array(img, np.float32)
            print("Expected result: %d" % (2))
        if (request.method == 'POST'):
            modelUrl = request.form.get('modelUrl')
            imgUrl = request.form.get('imageUrl')
            modelName = request.form.get('modelName')
            imageSize = request.form.get('imageSize')
            checkClearModelPath = 1
            modelFolder = "/home/ubuntu/model"
            if (os.listdir(modelFolder) != []):
                #checkClearModelPath = subprocess.check_call(["sudo", "rm", "-r", "/home/ubuntu/model/*"])
                for the_file in os.listdir(modelFolder):
                    file_path = os.path.join(modelFolder, the_file)
                    try:
                        if os.path.isfile(file_path):
                            os.unlink(file_path)
                        elif os.path.isdir(file_path):
                            shutil.rmtree(file_path)
                    except Exception:
                        raise  # preserve the original traceback

            #if(checkClearModelPath == 0):
            downloadModel = subprocess.check_call(
                ["wget", "-O", "/home/ubuntu/model/model.zip", modelUrl])
            if (downloadModel == 0):
                unzipModel = subprocess.check_call([
                    "unzip", "/home/ubuntu/model/model.zip", "-d",
                    "/home/ubuntu/model/"
                ])
                if (unzipModel == 0):
                    try:
                        thread.start_new_thread(runServingService,
                                                (modelName, ))
                    except Exception:
                        raise  # preserve the original traceback

            checkRunningProcess = ""
            while ("tensorflow_model_server" not in checkRunningProcess):
                checkRunningProcess = subprocess.check_output(['ps', '-au'])
                print(checkRunningProcess)
                time.sleep(0.5)  # avoid a tight busy-poll while the server starts

            time.sleep(1.5)
            downloadImg = subprocess.check_call(
                ["wget", "-O", "/home/ubuntu/serveimg/img.jpg", imgUrl])
            if (downloadImg == 0):
                print("downloaded image")
                img = cv2.imread('/home/ubuntu/serveimg/img.jpg')
                imgarray = []
                for i in range(0, len(img)):
                    for j in range(0, len(img[i])):
                        tmp = img[i][j]
                        px = 0
                        for item in tmp:
                            if (item > 5):
                                px = 1
                                break
                        imgarray.append(px)
                numpyarray = np.array(imgarray, np.float32)
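                # A vectorized equivalent of the loop above (same threshold,
                # assuming a 3-channel image): any channel value > 5 maps the
                # pixel to 1, otherwise 0
                # numpyarray = (img > 5).any(axis=2).astype(np.float32).ravel()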
            else:
                raise Exception("cannot download image")

        start = time.time()

        servingrequest = predict_pb2.PredictRequest()

        servingrequest.model_spec.name = modelName

        servingrequest.model_spec.signature_name = 'predict_images'

        #servingrequest.inputs['images'].CopyFrom(make_tensor_proto(numpyarray, shape=[1, 784]))
        servingrequest.inputs['images'].CopyFrom(
            make_tensor_proto(numpyarray, shape=[1, imageSize]))

        result = stub.Predict(servingrequest, 60.0)  # 60 secs timeout

        end = time.time()

        time_diff = end - start

        print(result.outputs['scores'].float_val)
        print(result)

        print('time elapsed: {}'.format(time_diff))

        result_list = result.outputs['scores'].float_val

        # the index of the highest score is the predicted class
        num_result = int(np.argmax(result_list))

        return str(num_result)
    except Exception:
        with open('/home/ubuntu/myproject/log.txt', 'a+') as f:
            f.write("\n\n Internal Server Error \n" +
                    str(traceback.format_exc()))
        return "Internal Server Error", 500
Example #40
0
def getGRPCChannel(host='localhost', port=7050):
    channel = implementations.insecure_channel(host, port)
    print("Returning GRPC for address: {0}:{1}".format(host,port))
    return channel
Example #41
0
 def __init__(self, host, port):
     channel = implementations.insecure_channel(host, port)
     self.client = Transactor_pb2.beta_create_TransactorService_stub(channel)
Example #42
0
def get_stub(hostport):
    host, port = hostport.split(':')
    channel = implementations.insecure_channel(host, int(port))
    return prediction_service_pb2.beta_create_PredictionService_stub(channel)
def main(_):
    host, port = FLAGS.server.split(':')
    channel = implementations.insecure_channel(host, int(port))
    stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)

    request = predict_pb2.PredictRequest()
    request.model_spec.name = FLAGS.model
    request.model_spec.signature_name = 'serving_default'

    # True label value (taken from the eval.py portion of this example)
    label = 1

    # Hard-coded inputs; they could easily be made configurable. The names
    # are derived from the signature shown by the saved_model_cli show command
    request.inputs['C1'].CopyFrom(
        tf.contrib.util.make_tensor_proto("68fd1e64", shape=[1]))
    request.inputs['C10'].CopyFrom(
        tf.contrib.util.make_tensor_proto("547c0ffe", shape=[1]))
    request.inputs['C11'].CopyFrom(
        tf.contrib.util.make_tensor_proto("bc8c9f21", shape=[1]))
    request.inputs['C12'].CopyFrom(
        tf.contrib.util.make_tensor_proto("60ab2f07", shape=[1]))
    request.inputs['C13'].CopyFrom(
        tf.contrib.util.make_tensor_proto("46f42a63", shape=[1]))
    request.inputs['C14'].CopyFrom(
        tf.contrib.util.make_tensor_proto("07d13a8f", shape=[1]))
    request.inputs['C15'].CopyFrom(
        tf.contrib.util.make_tensor_proto("18231224", shape=[1]))
    request.inputs['C16'].CopyFrom(
        tf.contrib.util.make_tensor_proto("e6b6bdc7", shape=[1]))
    request.inputs['C17'].CopyFrom(
        tf.contrib.util.make_tensor_proto("e5ba7672", shape=[1]))
    request.inputs['C18'].CopyFrom(
        tf.contrib.util.make_tensor_proto("74ef3502", shape=[1]))
    request.inputs['C19'].CopyFrom(
        tf.contrib.util.make_tensor_proto("0", shape=[1]))
    request.inputs['C2'].CopyFrom(
        tf.contrib.util.make_tensor_proto("2c16a946", shape=[1]))
    request.inputs['C20'].CopyFrom(
        tf.contrib.util.make_tensor_proto("0", shape=[1]))
    request.inputs['C21'].CopyFrom(
        tf.contrib.util.make_tensor_proto("5316a17f", shape=[1]))
    request.inputs['C22'].CopyFrom(
        tf.contrib.util.make_tensor_proto("0", shape=[1]))
    request.inputs['C23'].CopyFrom(
        tf.contrib.util.make_tensor_proto("32c7478e", shape=[1]))
    request.inputs['C24'].CopyFrom(
        tf.contrib.util.make_tensor_proto("9117a34a", shape=[1]))
    request.inputs['C25'].CopyFrom(
        tf.contrib.util.make_tensor_proto("0", shape=[1]))
    request.inputs['C26'].CopyFrom(
        tf.contrib.util.make_tensor_proto("", shape=[1]))
    request.inputs['C3'].CopyFrom(
        tf.contrib.util.make_tensor_proto("503b9dbc", shape=[1]))
    request.inputs['C4'].CopyFrom(
        tf.contrib.util.make_tensor_proto("e4dbea90", shape=[1]))
    request.inputs['C5'].CopyFrom(
        tf.contrib.util.make_tensor_proto("f3474129", shape=[1]))
    request.inputs['C6'].CopyFrom(
        tf.contrib.util.make_tensor_proto("13718bbd", shape=[1]))
    request.inputs['C7'].CopyFrom(
        tf.contrib.util.make_tensor_proto("38eb9cf4", shape=[1]))
    request.inputs['C8'].CopyFrom(
        tf.contrib.util.make_tensor_proto("1f89b562", shape=[1]))
    request.inputs['C9'].CopyFrom(
        tf.contrib.util.make_tensor_proto("a73ee510", shape=[1]))
    request.inputs['I1'].CopyFrom(
        tf.contrib.util.make_tensor_proto(1.0, shape=[1]))
    request.inputs['I10'].CopyFrom(
        tf.contrib.util.make_tensor_proto(1.0, shape=[1]))
    request.inputs['I11'].CopyFrom(
        tf.contrib.util.make_tensor_proto(1.0, shape=[1]))
    request.inputs['I12'].CopyFrom(
        tf.contrib.util.make_tensor_proto(0.0, shape=[1]))
    request.inputs['I13'].CopyFrom(
        tf.contrib.util.make_tensor_proto(0.0, shape=[1]))
    request.inputs['I2'].CopyFrom(
        tf.contrib.util.make_tensor_proto(4.0, shape=[1]))
    request.inputs['I3'].CopyFrom(
        tf.contrib.util.make_tensor_proto(2.0, shape=[1]))
    request.inputs['I4'].CopyFrom(
        tf.contrib.util.make_tensor_proto(0.0, shape=[1]))
    request.inputs['I5'].CopyFrom(
        tf.contrib.util.make_tensor_proto(0.0, shape=[1]))
    request.inputs['I6'].CopyFrom(
        tf.contrib.util.make_tensor_proto(0.0, shape=[1]))
    request.inputs['I7'].CopyFrom(
        tf.contrib.util.make_tensor_proto(1.0, shape=[1]))
    request.inputs['I8'].CopyFrom(
        tf.contrib.util.make_tensor_proto(0.0, shape=[1]))
    request.inputs['I9'].CopyFrom(
        tf.contrib.util.make_tensor_proto(0.0, shape=[1]))
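
    # The repetitive CopyFrom block above could be driven by a dict; a sketch,
    # assuming the same feature names and values (categorical features are
    # strings, continuous features floats):
    # feature_values = {'C1': '68fd1e64', 'I1': 1.0}  # ...and so on
    # for name, value in feature_values.items():
    #     request.inputs[name].CopyFrom(
    #         tf.contrib.util.make_tensor_proto(value, shape=[1]))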

    result_future = stub.Predict.future(request, 5.0)
    prediction = result_future.result().outputs

    # Uncomment this if you want to see the output of the entire TensorProto
    # print('Prediction: ' + str(prediction))

    # True label value
    print('True label: ' + str(label))

    # Convert the TensorProto to an ndarray to extract the output
    probList = tensor_util.MakeNdarray(prediction['probabilities'])[0]
    if probList[0] < probList[1]:
        print("Prediction: 1")
    else:
        print("Prediction: 0")
Example #44
0
 def __init__(self, host, port):
     self.channel = implementations.insecure_channel(host, port)
     self.stub = graphresponse_pb2.beta_create_Dgraph_stub(self.channel)
Example #45
0
def main(_):
    host, port = FLAGS.server.split(':')
    channel = implementations.insecure_channel(host, int(port))
    stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)

    # Send request
    #with open(FLAGS.image, 'rb') as f:
    # See prediction_service.proto for gRPC request/response details.
    #data = f.read()
    request = predict_pb2.PredictRequest()
    images = load_and_align_data(['images/12.jpg'], 160, 44, 0.9)

    # Call Facenet model to make prediction on the image
    request.model_spec.name = 'face128'
    request.model_spec.signature_name = 'calculate_embeddings'
    request.inputs['images'].CopyFrom(
        tf.contrib.util.make_tensor_proto(images, dtype=tf.float32))
    request.inputs['phase'].CopyFrom(
        tf.contrib.util.make_tensor_proto(False, dtype=tf.bool))

    start_time = time.time()
    result = stub.Predict(request, 60.0)  # 60 secs timeout

    # Convert to friendly python object
    results_dict = {}
    for key in result.outputs:
        tensor_proto = result.outputs[key]
        nd_array = tf.contrib.util.make_ndarray(tensor_proto)
        results_dict[key] = nd_array

    # store the returned embeddings
    emb = results_dict.get("embeddings")
    feature_time = time.time() - start_time

    # calculate the distance between the query embedding and each known
    # embedding (labels, embeds and class_names are assumed to be preloaded)
    nrof_embeds = labels.size
    dist_array = np.zeros((nrof_embeds, 1))
    for i in range(nrof_embeds):
        tmp = embeds[i, :] - emb[0, :]
        sum_squared = np.dot(tmp.T, tmp)
        dist = math.sqrt(sum_squared)  # AVGTIME = 0.09sec
        dist_array[i][0] = dist

    # index of the smallest distance, i.e. the closest known face
    pred_array = dist_array.argmin(0)

    # accept the match only if the distance is below the 0.8 threshold
    if dist_array[pred_array[0]][0] < 0.8:
        pred_label = labels[pred_array[0]]
        pred_face = class_names[int(pred_label)]

    else:
        pred_face = 'Unknown'

    print('Face name:  {}'.format(pred_face))
    print('Face Dist:  {}'.format(dist_array[pred_array[0]][0]))
    print(' ')
    distance_time = time.time() - start_time - feature_time

    print('Feature Calculation time:  {}'.format(feature_time))
    print('Distance Calculation time: {}'.format(distance_time))
    print('Image Recognition time:    {}'.format(time.time() - start_time))
    print(' ')
Example #46
0
def create_channel_for_batching_server():
    # Note: despite its name, this helper returns the stub, not the channel
    channel = implementations.insecure_channel('localhost', 9003)
    stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
    return stub
Example #47
0
def main():
    # Connect to server
    client = InfluxDBClient(host=HOST,
                            port=PORT,
                            username=USER,
                            password=PASSWORD,
                            database=DBNAME)
    print("connect to Influxdb", DBNAME, HOST, PORT)
    # Time
    dt1 = datetime.now()
    dt1 = dt1 - timedelta(hours=6)
    dt = dt1.isoformat()
    # Query to database
    query = 'SELECT "value_gasflow", "value_eta_a", "value_ngp", "value_npt" FROM "example"."autogen"."unit3" WHERE time > now() - 4h'
    data = DataFrameClient(host=HOST,
                           port=PORT,
                           username=USER,
                           password=PASSWORD,
                           database=DBNAME)
    dict_query = data.query(query)
    df = pandas.DataFrame(data=dict_query['unit3'])
    index = df.index
    empty_df = pandas.DataFrame(
        columns=['gas_fuel_flow', 'hpc_eta', 'ngp', 'npt', 'prediction'],
        index=index)
    empty_df.gas_fuel_flow = df['value_gasflow']
    empty_df.hpc_eta = df['value_eta_a']
    empty_df.ngp = df['value_ngp']
    empty_df.npt = df['value_npt']
    strafe = numpy.array(empty_df.values[:, 0:3])
    out_pp = numpy.float32(strafe)
    # Prepare request
    request = predict_pb2.PredictRequest()
    request.model_spec.name = 'deka'
    # make_tensor_proto infers DT_FLOAT from out_pp (already float32), and
    # CopyFrom replaces the whole TensorProto, so no explicit dtype is needed
    request.inputs['inputs'].CopyFrom(
        tf.contrib.util.make_tensor_proto(out_pp))
    request.output_filter.append('outputs')
    # Send request
    host, port = FLAGS.server.split(':')
    channel = implementations.insecure_channel(host, int(port))
    stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
    prediction = stub.Predict(request, 5.0)  # 5 secs timeout
    floats = prediction.outputs['outputs'].float_val
    predicted_array = numpy.array(floats)
    empty_df.prediction = predicted_array
    #print(empty_df)
    json_body = [{
        "measurement": "prediction",
        "tags": {
            "type": "npt predict"
        },
        "time": dt,
        "fields": {
            "gas_fuel_flow": empty_df.gas_fuel_flow[-1],
            "hpc_eta": empty_df.hpc_eta[-1],
            "ngp": empty_df.ngp[-1],
            "npt": empty_df.npt[-1],
            "prediction": empty_df.prediction[-1]
        }
    }]
    client.write_points(json_body, database="example")
    #print(json_body)

    client.close()
Example #48
0
            pass_flag = 1

    else:
        if eval(test_condition):
            pass_count += 1
            logger.info("\nRESULT ===>\n{0} -> PASS\n".format(log_str))
        else:
            fail_count += 1
            logger.info("\nRESULT ===>\n{0} -> FAIL\n".format(log_str))
            pass_flag = 1


try:

    # Channel Creation and Authentication
    channel = implementations.insecure_channel(host=device, port=port)
    stub = authentication_service_pb2.beta_create_Login_stub(channel)
    login_response = stub.LoginCheck(
        authentication_service_pb2.LoginRequest(user_name=APP_USER,
                                                password=APP_PASSWORD,
                                                client_id=client_id),
        login_timeout)

    # Service stub creation
    AclService_stub = beta_create_AclService_stub(channel)

    # All the valid combinations

    logger.info('All the valid combinations')

    api_request = AccessList(ace_list=[
Example #49
0
import tensorflow as tf
from grpc.beta import implementations
from tensorflow_serving.apis import predict_pb2, prediction_service_pb2
import os

channel = implementations.insecure_channel("10.100.51.111", 8877)
stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
model_name = "deepseg_test"


def serving(source):
    inputs = source
    inputs_length = len(source)
    req = predict_pb2.PredictRequest()
    req.model_spec.name = model_name
    req.inputs["inputs"].CopyFrom(
        tf.make_tensor_proto([source], shape=[1, inputs_length]))
    req.inputs["inputs_length"].CopyFrom(
        tf.make_tensor_proto([inputs_length], shape=[1]))
    response = stub.Predict.future(req, 10)
    res = response.result()
    outputs = tf.make_ndarray(res.outputs["predict_tags"])[0]
    outputs = [b.decode("utf-8") for b in outputs]
    return translate(inputs, outputs)


def translate(src_list, tag_list):
    if not src_list or not tag_list:
        return False, None
    if len(src_list) != len(tag_list):
        return False, None
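
    # Hypothetical completion, assuming BMES-style segmentation tags: start a
    # new word at each "B" or "S" tag and append the remaining characters
    words, buf = [], ""
    for ch, tag in zip(src_list, tag_list):
        if tag.startswith(("B", "S")) and buf:
            words.append(buf)
            buf = ""
        buf += ch
    if buf:
        words.append(buf)
    return True, words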
Example #50
0
from grpc.beta import implementations
import tensorflow as tf

from tensorflow_serving.apis import predict_pb2, prediction_service_pb2_grpc

# Get the stub; ._channel unwraps the grpc.Channel inside the beta channel so
# it can be used with the newer *_pb2_grpc stub API
channel = implementations.insecure_channel('localhost', 8500)
stub = prediction_service_pb2_grpc.PredictionServiceStub(channel._channel)

# Model signature
request = predict_pb2.PredictRequest()
request.model_spec.name = 'ner'
# request.model_spec.version = 'latest'
request.model_spec.signature_name = 'predict'

# Build the request inputs
x_data = [[
    13, 45, 13, 13, 49, 1, 49, 196, 594, 905, 48, 231, 318, 712, 1003, 477,
    259, 291, 287, 161, 65, 62, 82, 68, 2, 10
]]
drop_out = 1
sequence_length = [26]
request.inputs['input'].CopyFrom(tf.make_tensor_proto(x_data, dtype=tf.int32))
request.inputs['sequence_length'].CopyFrom(
    tf.make_tensor_proto(sequence_length, dtype=tf.int32))
request.inputs['drop_out'].CopyFrom(
    tf.make_tensor_proto(drop_out, dtype=tf.float32))

result = stub.Predict(request, 10.0)  # 10 secs timeout

print(result)
Example #51
0
class ModelHandler(tornado.web.RequestHandler):
    __metaclass__ = abc.ABCMeta
    serverlg.info(
        '[ModelServer] [Initialization: service %s, schedule %d] [%s]' %
        (options.service, options.schedule,
         time.strftime('%Y-%m-%d %H:%M:%S')))
    for conf_name in schedule:
        #may be deprecated
        graph = create(conf_name)
        graph.apply_deploy_conf(schedule[conf_name])

        host, port = schedule[conf_name]["tf_server"].split(":")
        channel = implementations.insecure_channel(host, int(port))
        stub = prediction_service_pb2.beta_create_PredictionService_stub(
            channel)
        schedule[conf_name]["graph_stub"] = (graph, stub)

    @abc.abstractmethod
    def handle(self):
        query = self.get_argument('query', None)
        if not query:
            ret = {}
            ret["status"] = "missing params"
            serverlg.info(
                '[chatbot] [ERROR: missing params] [REQUEST] [%s] [%s]' %
                (time.strftime('%Y-%m-%d %H:%M:%S'), self.request.uri))
            self.write(json.dumps(ret, ensure_ascii=False))
            self.finish()
            return
        results = []
        debug_infos = []

        graph_stubs = [schedule[name]["graph_stub"] for name in schedule]
        # Multi model compatible, but here just one model exists
        multi_models = []
        for graph, stub in graph_stubs:
            multi_models.append(
                self.run_model(graph, stub, [query.encode("utf-8")]))
        outs = yield multi(multi_models)
        serverlg.info(outs)

        raise gen.Return(None)

    @abc.abstractmethod
    def form_multi_results(self, model_name, model_out):
        return

    def set_default_headers(self):
        self.set_header('Access-Control-Allow-Origin', "*")

    @tornado.gen.coroutine
    def run_model(self, graph, stub, records, use_seg=True):
        # Use model specific preprocess
        feed_data = graph.preproc(records, use_seg=use_seg, for_deploy=True)
        # make request
        request = predict_pb2.PredictRequest()
        request.model_spec.name = graph.name
        values = list(feed_data.values())
        N = len(values[0]) if len(values[0]) < 2 else 2
        see_feed = {k: v[0:N] for k, v in feed_data.items()}
        serverlg.info('[DispatcherServer] [sample %d/%d] %s' %
                      (N, len(values), str(see_feed)))
        for key, value in feed_data.items():
            v = np.array(value)
            value_tensor = tensor_util.make_tensor_proto(value, shape=v.shape)
            # For compatibility to the old placeholder key
            request.inputs[key].CopyFrom(value_tensor)

        # query the model
        #result = stub.Predict(request, 4.0)
        result = yield fwrap(stub.Predict.future(request, 3.0))
        out = {}
        for key, value in result.outputs.items():
            out[key] = tensor_util.MakeNdarray(value)
        model_results = graph.after_proc(out)

        raise gen.Return(model_results)

    @tornado.web.asynchronous
    @tornado.gen.coroutine
    def post(self):
        serverlg.info('[DispatcherServer] [BEGIN] [REQUEST] [%s] [%s]' %
                      (time.strftime('%Y-%m-%d %H:%M:%S'), self.request.uri))
        gc.disable()

        # prepare locks, events, and results container for coroutine
        #self.results = [None] * len(deployments)
        #self.model_results = []
        #self.evt = Event()
        #self.lock = locks.Lock()

        # query all models
        #for name in self.servings:
        model_results, debug_infos, desc = yield self.handle()
        results = self.form_multi_results(model_results, debug_infos)

        # wait until told to proceed
        #yield self.evt.wait()

        #self.run()

        # form response
        ret = {"status": "ok", "result": results}
        #self.write(json.dumps(ret))
        self.write(ret)
        #self.finish()

    @tornado.web.asynchronous
    @tornado.gen.coroutine
    def get(self):
        gc.disable()
        serverlg.info('[DispatcherServer] [BEGIN] [REQUEST] [%s] [%s]' %
                      (time.strftime('%Y-%m-%d %H:%M:%S'), self.request.uri))
        # preproc
        model_results, debug_infos, desc = yield self.handle()
        results = self.form_multi_results(model_results, debug_infos)

        # form response
        ret = {"status": "ok", "result": results, "desc": desc}
        self.write(json.dumps(ret, ensure_ascii=False))
Example #52
0
def run():
    channel = implementations.insecure_channel('localhost', 50051)
    stub = helloworld_pb2.beta_create_Greeter_stub(channel)
    response = stub.SayHello(helloworld_pb2.HelloRequest(name='you'),
                             _TIMEOUT_SECONDS)
    print("Greeter client received: " + response.message)
def main(unused_argv):

    test_file_path = FLAGS.test_file_path
    id_data_dir = FLAGS.id_data_dir

    batch_size = FLAGS.batch_size
    seed_num = FLAGS.seed_num
    max_timesteps = FLAGS.max_timesteps
    vocab_size = FLAGS.vocab_size
    test_size = FLAGS.test_size
    use_local = FLAGS.use_local

    DR_path = os.path.join(id_data_dir, 'DataReader.pkl')
    with open(DR_path, 'rb') as f:
        DR = pickle.load(f)

    input_pinyin_data, input_word_data, target_data = DR.make_data_from_dataframe(
        file_path=test_file_path, build_dictionary=False, max_rows=test_size)

    np.random.seed(seed_num)
    np.random.shuffle(input_pinyin_data)
    np.random.seed(seed_num)
    np.random.shuffle(input_word_data)
    np.random.seed(seed_num)
    np.random.shuffle(target_data)

    test_data_full = batch_generator_triple_with_length(
        input_pinyin_data, input_word_data, target_data, batch_size,
        max_timesteps, DR.word2id, DR.pinyin2id)

    n_iter_per_epoch = len(input_pinyin_data) // (batch_size)

    for t in range(1, n_iter_per_epoch + 1):
        batch_full = next(test_data_full)
        src_pinyin_list, src_word_list, src_length_list, tgt_list, tgt_length_list = batch_full
        src_pinyin_list = np.asarray(src_pinyin_list, dtype=np.int32)
        src_word_list = np.asarray(src_word_list, dtype=np.int32)
        src_length_list = np.asarray(src_length_list, dtype=np.int32)
        tgt_list = np.asarray(tgt_list, dtype=np.int32)

        hostport = '0.0.0.0:9012'
        host, port = hostport.split(':')

        # NOTE: creating the channel once outside the batch loop would avoid a
        # reconnect on every iteration
        channel = implementations.insecure_channel(host, int(port))
        #channel = insecure_channel(host,int(port))

        stub = prediction_service_pb2.beta_create_PredictionService_stub(
            channel)
        request = predict_pb2.PredictRequest()

        request.model_spec.name = 'spell'
        request.model_spec.signature_name = "predict"

        request.inputs['pinyin_ids'].CopyFrom(
            tf.contrib.util.make_tensor_proto(
                src_pinyin_list,
                shape=[src_pinyin_list.shape[0], src_pinyin_list.shape[1]]))
        request.inputs['word_ids'].CopyFrom(
            tf.contrib.util.make_tensor_proto(
                src_word_list,
                shape=[src_word_list.shape[0], src_word_list.shape[1]]))
        request.inputs['input_lengths'].CopyFrom(
            tf.contrib.util.make_tensor_proto(src_length_list))
        request.inputs['keep_ratio'].CopyFrom(
            tf.contrib.util.make_tensor_proto(FLAGS.keep_ratio))

        print('Predict:')
        proba = stub.Predict(request, 50.0)
        results = {}
        for key in proba.outputs:
            tensor_proto = proba.outputs[key]
            nd_array = tf.contrib.util.make_ndarray(tensor_proto)
            results[key] = nd_array

        #import pdb
        #pdb.set_trace()

        predict_ids = np.argmax(results['predict'], axis=1)
        print(predict_ids)
Example #54
0
def getGRPCChannel(ipAddress):
    channel = implementations.insecure_channel(ipAddress, 5005)
    print("Returning GRPC for address: {0}".format(ipAddress))
    return channel
Example #55
0
            except:
                LOG.debug("SocketIO: Write failed")
        requestQ.task_done()


try:
    CONNECTION_LIST = list()
    for i in range(1):
        t = Thread(target=allRouteApis)
        t.setDaemon(True)
        t.start()
    dispatch_thread = Thread(target=sendtoPS)
    dispatch_thread.setDaemon(True)
    dispatch_thread.start()

    channel = implementations.insecure_channel(host=HOST, port=GRPC_PORT)
    stub = authentication_service_pb2.beta_create_Login_stub(channel)
    login_response = stub.LoginCheck(
        authentication_service_pb2.LoginRequest(user_name=USER,
                                                password=PASSWORD,
                                                client_id=CLIENT_ID), TIMEOUT)
    LOG.info("Connected to the JET GRPC request response server")

    bgp = bgp_route_service_pb2.beta_create_BgpRoute_stub(channel)
    prpd = prpd_service_pb2.beta_create_Base_stub(channel)

    purgeTime = 30
    # Variables for RouteGet Operation
    eod = 0
    count = 0
    getroutelist = []
Example #56
0
from grpc.beta import implementations
import tensorflow as tf

# TensorFlow serving stuff to send messages
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2
from tensorflow.contrib.util import make_tensor_proto

app = Flask(__name__)

crash_vocab = [
    'Crash', 'Earthquake', 'Explosion', 'Fire', 'Floods', 'Terrorism',
    'Typhoon', 'None'
]

vector_channel = implementations.insecure_channel('35.184.14.40', 80)
disaster_channel = implementations.insecure_channel('0.0.0.0', 8500)

vector_stub = prediction_service_pb2.beta_create_PredictionService_stub(
    vector_channel)
disaster_stub = prediction_service_pb2.beta_create_PredictionService_stub(
    disaster_channel)


def sendVectorRequest(sents):
    req = predict_pb2.PredictRequest()
    req.model_spec.name = 'serving_saved_model'
    req.model_spec.signature_name = 'serving_default'
    req.inputs['text'].CopyFrom(
        make_tensor_proto(sents, shape=[len(sents)], dtype=tf.string))
    result = vector_stub.Predict(req, 60.0)
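    # the original tail of this function is missing; returning the result is
    # an assumed minimal completion
    return result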


#
# Setup the GRPC channel with the server, and issue RPCs
#
if __name__ == '__main__':
    from util import util
    server_ip, server_port = util.get_server_ip_port()

    print "Using GRPC Server IP(%s) Port(%s)" % (server_ip, server_port)

    # Create the channel for gRPC.
    channel = implementations.insecure_channel(server_ip, server_port)

    # Spawn a thread to Initialize the client and listen on notifications
    # The thread will run in the background
    client_init.global_init(channel)

    # Create another channel for gRPC requests.
    channel = implementations.insecure_channel(server_ip, server_port)

    # Send an RPC for VRF registrations
    vrf.vrf_operation(channel, sl_common_types_pb2.SL_REGOP_REGISTER)

    # RPC EOF to cleanup any previous stale routes
    vrf.vrf_operation(channel, sl_common_types_pb2.SL_REGOP_EOF)

    # RPC route operations
Example #58
0
 def __init__(self):
     print "init GatewayClient"
     self.gateway_channel = implementations.insecure_channel(
         'localhost', 50051)
     self.gateway = beta_create_BfGatewayService_stub(self.gateway_channel)
     self.connectivity = interfaces.ChannelConnectivity.IDLE
Example #59
0
 def setUp(self):
     self._server = implementations.server({})
     port = self._server.add_insecure_port('[::]:0')
     channel = implementations.insecure_channel('localhost', port)
     self._generic_stub = implementations.generic_stub(channel)
     self._server.start()
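
# The opening of the snippet below was cut off; a plausible reconstruction of
# the missing head, assuming it reads the image bytes and names the model:
def make_request(stub, image_path, model_name):
    with open(image_path, 'rb') as f:
        data = f.read()

    request = predict_pb2.PredictRequest()
    request.model_spec.name = model_name

    feature_dict = {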
        'image': tf.train.Feature(bytes_list=tf.train.BytesList(value=[data]))
    }
    example = tf.train.Example(features=tf.train.Features(
        feature=feature_dict))
    serialized = example.SerializeToString()

    request.inputs['images'].CopyFrom(
        tf.contrib.util.make_tensor_proto(serialized,
                                          shape=[1],
                                          dtype=tf.string))

    result_future = stub.Predict.future(request, 10.0)
    prediction = result_future.result()

    # print(prediction.outputs['classes'].int64_val)
    # print(prediction.outputs['probabilities'].float_val)

    pred_class = (prediction.outputs['classes'].int64_val)[0]
    pred_probs = prediction.outputs['probabilities'].float_val
    pred_class_prob = pred_probs[pred_class]

    return pred_class, pred_class_prob


channel = implementations.insecure_channel(IP_ADDR, PORT)
stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)

dog_path = os.path.expanduser(IMAGE_PATH)
output = make_request(stub, dog_path, MODEL_NAME)
print(output)