def __init__(self, sdk_harness_factory=None):
      self.sdk_harness_factory = sdk_harness_factory
      self.control_server = grpc.server(
          futures.ThreadPoolExecutor(max_workers=10))
      self.control_port = self.control_server.add_insecure_port('[::]:0')

      self.data_server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
      self.data_port = self.data_server.add_insecure_port('[::]:0')

      self.control_handler = streaming_rpc_handler(
          beam_fn_api_pb2_grpc.BeamFnControlServicer, 'Control')
      beam_fn_api_pb2_grpc.add_BeamFnControlServicer_to_server(
          self.control_handler, self.control_server)

      self.data_plane_handler = data_plane.GrpcServerDataChannel()
      beam_fn_api_pb2_grpc.add_BeamFnDataServicer_to_server(
          self.data_plane_handler, self.data_server)

      # TODO(robertwb): Is sharing the control channel fine?  Alternatively,
      # how should this be plumbed?
      self.state_handler = FnApiRunner.GrpcStateServicer()
      beam_fn_api_pb2_grpc.add_BeamFnStateServicer_to_server(
          self.state_handler, self.control_server)

      logging.info('starting control server on port %s', self.control_port)
      logging.info('starting data server on port %s', self.data_port)
      self.data_server.start()
      self.control_server.start()

      self.worker = (self.sdk_harness_factory or sdk_worker.SdkHarness)(
          'localhost:%s' % self.control_port)
      self.worker_thread = threading.Thread(
          name='run_worker', target=self.worker.run)
      logging.info('starting worker')
      self.worker_thread.start()
Example n. 2
    def test_reconnect(self):
        server_pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
        handler = grpc.method_handlers_generic_handler('test', {
            'UnaryUnary':
            grpc.unary_unary_rpc_method_handler(_handle_unary_unary)
        })
        sock_opt = _get_reuse_socket_option()
        port = _pick_and_bind_port(sock_opt)
        self.assertIsNotNone(port)

        server = grpc.server(server_pool, (handler,))
        server.add_insecure_port('[::]:{}'.format(port))
        server.start()
        channel = grpc.insecure_channel('localhost:%d' % port)
        multi_callable = channel.unary_unary(_UNARY_UNARY)
        self.assertEqual(_RESPONSE, multi_callable(_REQUEST))
        server.stop(None)
        # By default, the channel connectivity is checked every 5s
        # GRPC_CLIENT_CHANNEL_BACKUP_POLL_INTERVAL_MS can be set to change
        # this.
        time.sleep(5.1)
        server = grpc.server(server_pool, (handler,))
        server.add_insecure_port('[::]:{}'.format(port))
        server.start()
        self.assertEqual(_RESPONSE, multi_callable(_REQUEST))
        server.stop(None)
        channel.close()
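
Side note (not part of the original example): the 5.1-second sleep above tracks gRPC's default backup connectivity poll interval. As the comment says, GRPC_CLIENT_CHANNEL_BACKUP_POLL_INTERVAL_MS can override it; a minimal sketch, assuming the variable is set before gRPC creates the channel:

# Sketch only (assumed setup, not from the original test).
import os
os.environ['GRPC_CLIENT_CHANNEL_BACKUP_POLL_INTERVAL_MS'] = '250'
channel = grpc.insecure_channel('localhost:%d' % port)
# ... restart the server on the same port; a sleep slightly longer than
# 250 ms should then be enough for the channel to notice it ...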
    def __init__(self, sdk_harness_factory=None):
      self.sdk_harness_factory = sdk_harness_factory
      self.state_handler = FnApiRunner.SimpleState()
      self.control_server = grpc.server(
          futures.ThreadPoolExecutor(max_workers=10))
      self.control_port = self.control_server.add_insecure_port('[::]:0')

      self.data_server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
      self.data_port = self.data_server.add_insecure_port('[::]:0')

      self.control_handler = streaming_rpc_handler(
          beam_fn_api_pb2.BeamFnControlServicer, 'Control')
      beam_fn_api_pb2.add_BeamFnControlServicer_to_server(
          self.control_handler, self.control_server)

      self.data_plane_handler = data_plane.GrpcServerDataChannel()
      beam_fn_api_pb2.add_BeamFnDataServicer_to_server(
          self.data_plane_handler, self.data_server)

      logging.info('starting control server on port %s', self.control_port)
      logging.info('starting data server on port %s', self.data_port)
      self.data_server.start()
      self.control_server.start()

      self.worker = (self.sdk_harness_factory or sdk_worker.SdkHarness)(
          'localhost:%s' % self.control_port)
      self.worker_thread = threading.Thread(target=self.worker.run)
      logging.info('starting worker')
      self.worker_thread.start()
Example n. 4
 def test_not_a_generic_rpc_handler_at_construction(self):
     with self.assertRaises(AttributeError) as exception_context:
         grpc.server(
             futures.ThreadPoolExecutor(max_workers=5),
             handlers=[
                 _ActualGenericRpcHandler(),
                 object(),
             ])
     self.assertIn('grpc.GenericRpcHandler',
                   str(exception_context.exception))
Example n. 5
 def __init__(self, port, adapter, log):
     self.port = port
     self.thread_pool = futures.ThreadPoolExecutor(max_workers=10)
     self.server = grpc.server(self.thread_pool)
     self.services = []
     self.adapter = adapter
     self.log = log
  def _stage_files(self, files):
    """Utility method to stage files.

      Args:
        files: a list of tuples of the form [(local_name, remote_name),...]
          describing the name of the artifacts in local temp folder and desired
          name in staging location.
    """
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    staging_service = TestLocalFileSystemArtifactStagingServiceServicer(
        self._remote_dir)
    beam_artifact_api_pb2_grpc.add_ArtifactStagingServiceServicer_to_server(
        staging_service, server)
    test_port = server.add_insecure_port('[::]:0')
    server.start()
    stager = portable_stager.PortableStager(
        artifact_service_channel=grpc.insecure_channel(
            'localhost:%s' % test_port),
        staging_session_token='token')
    for from_file, to_file in files:
      stager.stage_artifact(
          local_path_to_artifact=os.path.join(self._temp_dir, from_file),
          artifact_name=to_file)
    stager.commit_manifest()
    return staging_service.manifest.artifact, staging_service.retrieval_tokens
    def setUp(self):
        self._servicer = _Servicer()
        self._server_pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
        self._server = grpc.server(
            self._server_pool, handlers=(_generic_handler(self._servicer),))
        port = self._server.add_insecure_port('[::]:0')
        self._server.start()

        channel = grpc.insecure_channel('localhost:{}'.format(port))
        self._unary_unary = channel.unary_unary(
            '/'.join((
                '',
                _SERVICE,
                _UNARY_UNARY,)),
            request_serializer=_REQUEST_SERIALIZER,
            response_deserializer=_RESPONSE_DESERIALIZER,)
        self._unary_stream = channel.unary_stream('/'.join((
            '',
            _SERVICE,
            _UNARY_STREAM,)),)
        self._stream_unary = channel.stream_unary('/'.join((
            '',
            _SERVICE,
            _STREAM_UNARY,)),)
        self._stream_stream = channel.stream_stream(
            '/'.join((
                '',
                _SERVICE,
                _STREAM_STREAM,)),
            request_serializer=_REQUEST_SERIALIZER,
            response_deserializer=_RESPONSE_DESERIALIZER,)
  def run_server(self):
    """Start running the server.

    Blocks until `stop_server` is invoked.

    Raises:
      ValueError: If server stop has already been requested, or if the server
        has already started running.
    """
    self._server_lock.acquire()
    try:
      if self._stop_requested:
        raise ValueError("Server has already stopped")
      if self._server_started:
        raise ValueError("Server has already started running")

      self.server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
      debug_service_pb2_grpc.add_EventListenerServicer_to_server(self,
                                                                 self.server)
      self.server.add_insecure_port("[::]:%d" % self._server_port)
      self.server.start()
      self._server_started = True
    finally:
      self._server_lock.release()

    while not self._stop_requested:
      time.sleep(1.0)
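
A hedged usage sketch (the servicer variable is assumed, not from the original): since run_server blocks until stop_server is requested, it is typically driven from a separate thread.

# Sketch only: `servicer` stands in for the EventListener subclass that
# defines run_server()/stop_server() above.
import threading
server_thread = threading.Thread(target=servicer.run_server)
server_thread.start()
# ... exercise the debug server ...
servicer.stop_server()  # counterpart named in the run_server docstring
server_thread.join()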
    def test_immediately_connectable_channel_connectivity(self):
        recording_thread_pool = thread_pool.RecordingThreadPool(
            max_workers=None)
        server = grpc.server(
            recording_thread_pool, options=(('grpc.so_reuseport', 0),))
        port = server.add_insecure_port('[::]:0')
        server.start()
        channel = grpc.insecure_channel('localhost:{}'.format(port))
        callback = _Callback()

        ready_future = grpc.channel_ready_future(channel)
        ready_future.add_done_callback(callback.accept_value)
        self.assertIsNone(
            ready_future.result(timeout=test_constants.LONG_TIMEOUT))
        value_passed_to_callback = callback.block_until_called()
        self.assertIs(ready_future, value_passed_to_callback)
        self.assertFalse(ready_future.cancelled())
        self.assertTrue(ready_future.done())
        self.assertFalse(ready_future.running())
        # Cancellation after maturity has no effect.
        ready_future.cancel()
        self.assertFalse(ready_future.cancelled())
        self.assertTrue(ready_future.done())
        self.assertFalse(ready_future.running())
        self.assertFalse(recording_thread_pool.was_used())

        channel.close()
        server.stop(None)
Example n. 10
 def __init__(self, port, ponsim, x_pon_sim, device_type):
     self.port = port
     self.thread_pool = futures.ThreadPoolExecutor(max_workers=10)
     self.server = grpc.server(self.thread_pool)
     self.ponsim = ponsim
     self.x_pon_sim = x_pon_sim
     self.device_type = device_type
Example n. 11
    def testSecureNoCert(self):
        server_pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
        handler = grpc.method_handlers_generic_handler('test', {
            'UnaryUnary':
            grpc.unary_unary_rpc_method_handler(handle_unary_unary)
        })
        server = grpc.server(server_pool, (handler,))
        server_cred = grpc.ssl_server_credentials(_SERVER_CERTS)
        port = server.add_secure_port('[::]:0', server_cred)
        server.start()

        channel_creds = grpc.ssl_channel_credentials(
            root_certificates=_TEST_ROOT_CERTIFICATES)
        channel = grpc.secure_channel(
            'localhost:{}'.format(port),
            channel_creds,
            options=_PROPERTY_OPTIONS)
        response = channel.unary_unary(_UNARY_UNARY)(_REQUEST)
        server.stop(None)

        auth_data = pickle.loads(response)
        self.assertIsNone(auth_data[_ID])
        self.assertIsNone(auth_data[_ID_KEY])
        self.assertDictEqual({
            'transport_security_type': [b'ssl']
        }, auth_data[_AUTH_CTX])
Example n. 12
    def testSecureClientCert(self):
        server_pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
        handler = grpc.method_handlers_generic_handler('test', {
            'UnaryUnary':
            grpc.unary_unary_rpc_method_handler(handle_unary_unary)
        })
        server = grpc.server(server_pool, (handler,))
        server_cred = grpc.ssl_server_credentials(
            _SERVER_CERTS,
            root_certificates=_TEST_ROOT_CERTIFICATES,
            require_client_auth=True)
        port = server.add_secure_port('[::]:0', server_cred)
        server.start()

        channel_creds = grpc.ssl_channel_credentials(
            root_certificates=_TEST_ROOT_CERTIFICATES,
            private_key=_PRIVATE_KEY,
            certificate_chain=_CERTIFICATE_CHAIN)
        channel = grpc.secure_channel(
            'localhost:{}'.format(port),
            channel_creds,
            options=_PROPERTY_OPTIONS)

        response = channel.unary_unary(_UNARY_UNARY)(_REQUEST)
        server.stop(None)

        auth_data = pickle.loads(response)
        auth_ctx = auth_data[_AUTH_CTX]
        six.assertCountEqual(self, _CLIENT_IDS, auth_data[_ID])
        self.assertEqual('x509_subject_alternative_name', auth_data[_ID_KEY])
        self.assertSequenceEqual([b'ssl'], auth_ctx['transport_security_type'])
        self.assertSequenceEqual([b'*.test.google.com'],
                                 auth_ctx['x509_common_name'])
Example n. 13
  def _create_server(self, config):
    if config.async_server_threads == 0:
      # This is the default concurrent.futures thread pool size, but
      # None doesn't seem to work
      server_threads = multiprocessing.cpu_count() * 5
    else:
      server_threads = config.async_server_threads
    server = grpc.server((), futures.ThreadPoolExecutor(
        max_workers=server_threads))
    if config.server_type == control_pb2.ASYNC_SERVER:
      servicer = benchmark_server.BenchmarkServer()
      services_pb2.add_BenchmarkServiceServicer_to_server(servicer, server)
    elif config.server_type == control_pb2.ASYNC_GENERIC_SERVER:
      resp_size = config.payload_config.bytebuf_params.resp_size
      servicer = benchmark_server.GenericBenchmarkServer(resp_size)
      method_implementations = {
          'StreamingCall':
          grpc.stream_stream_rpc_method_handler(servicer.StreamingCall),
          'UnaryCall':
          grpc.unary_unary_rpc_method_handler(servicer.UnaryCall),
      }
      handler = grpc.method_handlers_generic_handler(
          'grpc.testing.BenchmarkService', method_implementations)
      server.add_generic_rpc_handlers((handler,))
    else:
      raise Exception('Unsupported server type {}'.format(config.server_type))

    if config.HasField('security_params'):  # Use SSL
      server_creds = grpc.ssl_server_credentials(
          ((resources.private_key(), resources.certificate_chain()),))
      port = server.add_secure_port('[::]:{}'.format(config.port), server_creds)
    else:
      port = server.add_insecure_port('[::]:{}'.format(config.port))

    return (server, port)
Example n. 14
def serve():
    parser = argparse.ArgumentParser()
    parser.add_argument('--port', help='the port on which to serve', type=int)
    parser.add_argument(
        '--use_tls',
        help='require a secure connection',
        default=False,
        type=resources.parse_bool)
    args = parser.parse_args()

    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    test_pb2_grpc.add_TestServiceServicer_to_server(methods.TestService(),
                                                    server)
    if args.use_tls:
        private_key = resources.private_key()
        certificate_chain = resources.certificate_chain()
        credentials = grpc.ssl_server_credentials((
            (private_key, certificate_chain),))
        server.add_secure_port('[::]:{}'.format(args.port), credentials)
    else:
        server.add_insecure_port('[::]:{}'.format(args.port))

    server.start()
    logging.info('Server serving.')
    try:
        while True:
            time.sleep(_ONE_DAY_IN_SECONDS)
    except BaseException as e:
        logging.info('Caught exception "%s"; stopping server...', e)
        server.stop(None)
        logging.info('Server stopped; exiting.')
Example n. 15
def _CreateService():
  """Provides a servicer backend and a stub.

  Returns:
    A _Service with which to test RPCs.
  """
  servicer_methods = _ServicerMethods()

  class Servicer(getattr(service_pb2, SERVICER_IDENTIFIER)):

    def UnaryCall(self, request, context):
      return servicer_methods.UnaryCall(request, context)

    def StreamingOutputCall(self, request, context):
      return servicer_methods.StreamingOutputCall(request, context)

    def StreamingInputCall(self, request_iter, context):
      return servicer_methods.StreamingInputCall(request_iter, context)

    def FullDuplexCall(self, request_iter, context):
      return servicer_methods.FullDuplexCall(request_iter, context)

    def HalfDuplexCall(self, request_iter, context):
      return servicer_methods.HalfDuplexCall(request_iter, context)

  server = grpc.server(
      (), futures.ThreadPoolExecutor(max_workers=test_constants.POOL_SIZE))
  getattr(service_pb2, ADD_SERVICER_TO_SERVER_IDENTIFIER)(Servicer(), server)
  port = server.add_insecure_port('[::]:0')
  server.start()
  channel = grpc.insecure_channel('localhost:{}'.format(port))
  stub = getattr(service_pb2, STUB_IDENTIFIER)(channel)
  return _Service(servicer_methods, server, stub)
    def run(self):
        listenerPort = -1
        server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
        vrpAPI_pb2_grpc.add_SolverMessagesServicer_to_server(SolverMessagesImpl(self.simulationQueue, self.solutionsQueue, self.timeUnitQueue, self.eventSMQueue, self.eventLock), server)
        configPath = os.path.join(os.path.dirname(__file__), '../Config/config.txt')
        with open(configPath, 'r') as configFile:
            # loop to find the port in the config file
            line = configFile.readline()
            while listenerPort == -1:
                if line.startswith("SimulatorPort ="):
                    line = line.replace("SimulatorPort =", "")
                    line = line.replace("\n", "")
                    listenerPort = int(line)
                else:
                    line = configFile.readline()

        portAdded = server.add_insecure_port('localhost:' + str(listenerPort))
    if portAdded == listenerPort:
            server.start()
            keepLooping = True
            try:
                while keepLooping:
                    if not self.queue.empty():
                        msg = self.queue.get()
                        if msg == 'close':
                            keepLooping = False
                            server.stop(0)
                    time.sleep(1.5)
            except KeyboardInterrupt:
                server.stop(0)
        else:
            print("Error with the port of the simulator")
 def initialize(self, inputs: UnityInput) -> UnityOutput:
     try:
         # Establish communication grpc
         self.server = grpc.server(ThreadPoolExecutor(max_workers=10))
         self.unity_to_external = UnityToExternalServicerImplementation()
         add_UnityToExternalServicer_to_server(self.unity_to_external, self.server)
         self.server.add_insecure_port('[::]:'+str(self.port))
         self.server.start()
     except:
         raise UnityTimeOutException(
             "Couldn't start socket communication because worker number {} is still in use. "
             "You may need to manually close a previously opened environment "
             "or use a different worker number.".format(str(self.worker_id)))
     if not self.unity_to_external.parent_conn.poll(30):
         raise UnityTimeOutException(
             "The Unity environment took too long to respond. Make sure that :\n"
             "\t The environment does not need user interaction to launch\n"
             "\t The Academy and the External Brain(s) are attached to objects in the Scene\n"
             "\t The environment and the Python interface have compatible versions.")
     aca_param = self.unity_to_external.parent_conn.recv().unity_output
     self.is_open = True
     message = UnityMessage()
     message.header.status = 200
     message.unity_input.CopyFrom(inputs)
     self.unity_to_external.parent_conn.send(message)
     self.unity_to_external.parent_conn.recv()
     return aca_param
Example n. 18
def serve(settings):
    logger = logging.getLogger(__name__)
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=3))
    spider_pb2_grpc.add_SpiderRpcServicer_to_server(
            SpiderRpcServicer(),
            server
    )
    server.add_insecure_port(settings['GRPC_URI'])
    server.start()
    logger.info('grpc server running...')
    try:
        while True:
            time.sleep(60)
            if len(processes) == 0:
                continue
            p = processes[0]
            if p.is_alive():
                continue
            code = p.exitcode
            if code is None:
                p.start()
            else:
                p.join()
                processes.popleft()
                if len(processes) > 0:
                    processes[0].start()
    except KeyboardInterrupt:
        server.stop(0)  # grpc.Server.stop() requires an explicit grace period
Example n. 19
 def setUp(self):
     self._server_pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
     self._server = grpc.server(
         self._server_pool, handlers=(_GenericHandler(),))
     port = self._server.add_insecure_port('[::]:0')
     self._server.start()
     self._channel = grpc.insecure_channel('localhost:%d' % port)
Example n. 20
 def start(cls, worker_threads=1):
   worker_server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
   worker_address = 'localhost:%s' % worker_server.add_insecure_port('[::]:0')
   beam_fn_api_pb2_grpc.add_BeamFnExternalWorkerPoolServicer_to_server(
       cls(worker_threads), worker_server)
   worker_server.start()
   return worker_address, worker_server
Example n. 21
  def run(self):
    logging_server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    logging_port = logging_server.add_insecure_port('[::]:0')
    logging_server.start()
    logging_servicer = BeamFnLoggingServicer()
    beam_fn_api_pb2_grpc.add_BeamFnLoggingServicer_to_server(
        logging_servicer, logging_server)
    logging_descriptor = text_format.MessageToString(
        endpoints_pb2.ApiServiceDescriptor(url='localhost:%s' % logging_port))

    control_descriptor = text_format.MessageToString(
        endpoints_pb2.ApiServiceDescriptor(url=self._control_address))

    p = subprocess.Popen(
        self._worker_command_line,
        shell=True,
        env=dict(
            os.environ,
            CONTROL_API_SERVICE_DESCRIPTOR=control_descriptor,
            LOGGING_API_SERVICE_DESCRIPTOR=logging_descriptor))
    try:
      p.wait()
      if p.returncode:
        raise RuntimeError(
            'Worker subprocess exited with return code %s' % p.returncode)
    finally:
      if p.poll() is None:
        p.kill()
      logging_server.stop(0)
Example n. 22
 def test_shutdown_handler(self):
   server = grpc.server(logging_pool.pool(1))
   handler = _ShutDownHandler()
   server.add_shutdown_handler(handler.shutdown_handler)
   server.start()
   server.stop(0, shutdown_handler_grace=SHUTDOWN_GRACE).wait()
   self.assertEqual(SHUTDOWN_GRACE, handler.seen_handler_grace)
Example n. 23
 def start_grpc_server(self, port=0):
   self._server = grpc.server(futures.ThreadPoolExecutor(max_workers=3))
   port = self._server.add_insecure_port('localhost:%d' % port)
   beam_job_api_pb2_grpc.add_JobServiceServicer_to_server(self, self._server)
   self._server.start()
   logging.info('Grpc server started on port %s', port)
   return port
Example n. 24
 def setUpClass(cls):
     server = grpc.server(futures.ThreadPoolExecutor(max_workers=2))
     service_pb2.add_BESSControlServicer_to_server(
         DummyServiceImpl(),
         server)
     server.add_insecure_port('[::]:%d' % cls.PORT)
     server.start()
     cls.server = server
Example n. 25
 def setUp(self):
     self.server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
     test_pb2.add_TestServiceServicer_to_server(methods.TestService(),
                                                self.server)
     port = self.server.add_insecure_port('[::]:0')
     self.server.start()
     self.stub = test_pb2.TestServiceStub(
         grpc.insecure_channel('localhost:{}'.format(port)))
Example n. 26
def serve(port, pyinstance):
  server_instance = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
  InstanceCommunication_pb2_grpc.add_InstanceControlServicer_to_server(
    InstanceCommunicationServicer(pyinstance), server_instance)
  server_instance.add_insecure_port('[::]:%d' % port)
  Log.info("Serving InstanceCommunication on port %d" % int(port))
  server_instance.start()
  return server_instance
Example n. 27
def run_worker_server(port):
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=5))
    servicer = worker_server.WorkerServer()
    services_pb2_grpc.add_WorkerServiceServicer_to_server(servicer, server)
    server.add_insecure_port('[::]:{}'.format(port))
    server.start()
    servicer.wait_for_quit()
    server.stop(0)
Example n. 28
def test_server(max_workers=10):
    """Creates an insecure grpc server.

    These servers have SO_REUSEPORT disabled to prevent cross-talk.
    """
    return grpc.server(
        futures.ThreadPoolExecutor(max_workers=max_workers),
        options=(('grpc.so_reuseport', 0),))
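
A hedged usage sketch of the helper above: the caller still binds a port, starts the server, and stops it when the test is done.

# Sketch only: typical use of test_server() inside a test.
server = test_server()
port = server.add_insecure_port('[::]:0')  # port 0: let the OS choose
server.start()
channel = grpc.insecure_channel('localhost:%d' % port)
# ... run RPCs against `channel` ...
channel.close()
server.stop(None)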
Example n. 29
 def setUp(self):
   self._server_pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
   self._server = grpc.server((_GenericHandler(weakref.proxy(self)),),
                              self._server_pool)
   port = self._server.add_insecure_port('[::]:0')
   self._server.start()
   self._channel = grpc.insecure_channel('localhost:%d' % port,
                                         options=_CHANNEL_ARGS)
    def setUp(self):
        server_pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
        self._server = grpc.server(server_pool)
        reflection.enable_server_reflection(_SERVICE_NAMES, self._server)
        port = self._server.add_insecure_port('[::]:0')
        self._server.start()

        channel = grpc.insecure_channel('localhost:%d' % port)
        self._stub = reflection_pb2_grpc.ServerReflectionStub(channel)
Example n. 31
def launch_graphscope():
    args = parse_sys_args()
    logger.info("Launching with args %s", args)

    if args.enable_k8s:
        launcher = KubernetesClusterLauncher(
            namespace=args.k8s_namespace,
            service_type=args.k8s_service_type,
            gs_image=args.k8s_gs_image,
            etcd_image=args.k8s_etcd_image,
            zookeeper_image=args.k8s_zookeeper_image,
            gie_graph_manager_image=args.k8s_gie_graph_manager_image,
            coordinator_name=args.k8s_coordinator_name,
            coordinator_service_name=args.k8s_coordinator_service_name,
            etcd_cpu=args.k8s_etcd_cpu,
            etcd_mem=args.k8s_etcd_mem,
            zookeeper_cpu=args.k8s_zookeeper_cpu,
            zookeeper_mem=args.k8s_zookeeper_mem,
            gie_graph_manager_cpu=args.k8s_gie_graph_manager_cpu,
            gie_graph_manager_mem=args.k8s_gie_graph_manager_mem,
            engine_cpu=args.k8s_engine_cpu,
            engine_mem=args.k8s_engine_mem,
            vineyard_cpu=args.k8s_vineyard_cpu,
            vineyard_mem=args.k8s_vineyard_mem,
            vineyard_shared_mem=args.k8s_vineyard_shared_mem,
            image_pull_policy=args.k8s_image_pull_policy,
            image_pull_secrets=args.k8s_image_pull_secrets,
            volumes=args.k8s_volumes,
            num_workers=args.num_workers,
            instance_id=args.instance_id,
            log_level=args.log_level,
            timeout_seconds=args.timeout_seconds,
            waiting_for_delete=args.waiting_for_delete,
            delete_namespace=args.k8s_delete_namespace,
        )
    else:
        launcher = LocalLauncher(
            num_workers=args.num_workers,
            hosts=args.hosts,
            vineyard_socket=args.vineyard_socket,
            log_level=args.log_level,
            timeout_seconds=args.timeout_seconds,
        )

    coordinator_service_servicer = CoordinatorServiceServicer(
        launcher=launcher,
        dangling_seconds=args.timeout_seconds,
        log_level=args.log_level,
    )

    # after GraphScope ready, fetch logs via gRPC.
    sys.stdout.drop(False)

    # register gRPC server
    server = grpc.server(futures.ThreadPoolExecutor(os.cpu_count() or 1))
    coordinator_service_pb2_grpc.add_CoordinatorServiceServicer_to_server(
        coordinator_service_servicer, server)
    server.add_insecure_port("0.0.0.0:{}".format(args.port))
    logger.info("Coordinator server listen at 0.0.0.0:%d", args.port)

    server.start()
    try:
        # Grpc has handled SIGINT/SIGTERM
        server.wait_for_termination()
    except KeyboardInterrupt:
        del coordinator_service_servicer
Example n. 32
def serve():
    print('Configuring Server...')

    print('Detecting Workers...')
    FACE_WORKER_LIST = {}
    # Scan for faro workers
    import_dir = faro.__path__[0]
    scripts = os.listdir(os.path.join(import_dir, 'face_workers'))
    scripts = filter(lambda x: x.endswith('FaceWorker.py'), scripts)
    import importlib
    sys.path.append(os.path.join(import_dir, 'face_workers'))
    scripts = list(scripts)
    scripts.sort()

    # Scan for other workers
    if 'FARO_WORKER_PATH' in os.environ:
        worker_dirs = os.environ['FARO_WORKER_PATH'].split(":")
        print("Workers Dirs:", worker_dirs)
        for worker_dir in worker_dirs:

            #import_dir = faro.__path__[0]
            try:
                worker_scripts = os.listdir(worker_dir)
            except:
                print("ERROR - Could not read directory in FARO_WORKER_PATH:",
                      worker_dir)
                raise
            worker_scripts = list(
                filter(lambda x: x.endswith('FaceWorker.py'), worker_scripts))
            sys.path.append(worker_dir)
            scripts += list(worker_scripts)
            scripts.sort()

    for each in scripts:
        name = each[:-13].lower()
        try:
            module = importlib.import_module(each[:-3])
            class_obj = getattr(module, each[:-3])
            print("    Loaded: ", name, '-', class_obj)

            FACE_WORKER_LIST[name] = [class_obj, None, None]
            if 'getOptionsGroup' in dir(module):
                FACE_WORKER_LIST[name][1] = module.getOptionsGroup
            if 'getGalleryWorker' in dir(module):
                FACE_WORKER_LIST[name][2] = module.getGalleryWorker
        except Exception as e:
            print("Could not load worker ", name, ": ", e)
    options, _ = parseOptions(FACE_WORKER_LIST)

    if options.verbose:
        print("storage", os.environ['HOME'])
    if options.verbose:
        print("initializing gRPC server...")
    server = grpc.server(
        futures.ThreadPoolExecutor(max_workers=2 * options.worker_count),
        options=[('grpc.max_send_message_length', options.max_message_size),
                 ('grpc.max_receive_message_length', options.max_message_size)
                 ])
    if options.verbose:
        print('starting Face Service')
    face_client = FaceService(options)
    zcinfo = None
    if Zeroconf is not None:
        zcinfo = face_client.wsInfo
    print("Batch loading a watchlist.")
    #face_client.batchLoad("../tests/watchlist.csv", 'authorized')

    fs.add_FaceRecognitionServicer_to_server(face_client, server)

    server.add_insecure_port(options.port)
    print('Starting Server on port: %s' % options.port)
    server.start()
    try:
        while True:
            time.sleep(60)
    except KeyboardInterrupt:
        face_client.cleanexit()
        server.stop(0)
        print('Server Stopped.')
    try:
        if zcinfo is not None and Zeroconf is not None:
            zc = Zeroconf()
            zc.unregister_service(zcinfo)
            zc.close()
    except:
        pass
        self.stateErrors[task_pb2.TaskState.DONE] = "Task state done."
        self.stateErrors[task_pb2.TaskState.CANCELLED] = "Task state cancel"

    # This function will return next possible states for a given state.
    def nextStates(self, state: task_pb2.TaskState) -> [task_pb2.TaskState]:
        return self.states[state]
    
    # This function will return error msg for a given state.
    def getErrorMsg(self, state: task_pb2.TaskState) -> str:
        return self.stateErrors[state]


TASKFILE = "tasklist.protobuf"
if __name__ == "__main__":
    Path(TASKFILE).touch()
    logging.basicConfig(level=logging.DEBUG)

    with ThreadPoolExecutor(max_workers=1) as pool, TaskapiImpl(
        TASKFILE
    ) as taskapiImpl:
        taskserver = server(pool)
        task_pb2_grpc.add_TaskapiServicer_to_server(taskapiImpl, taskserver)
        taskserver.add_insecure_port("[::]:50051")
        try:
            taskserver.start()
            logging.info("Taskapi ready to serve requests")
            taskserver.wait_for_termination()
        except:
            logging.info("Shutting down server")
            taskserver.stop(None)
Example n. 34
def serve():
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    hello_pb2_grpc.add_GreeterServicer_to_server(Server(), server)
    server.add_insecure_port('[::]:9999')
    server.start()
    server.wait_for_termination()
Example n. 35
def serve():
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    hello_pb2_grpc.add_HelloServiceServicer_to_server(GreetServer(), server)
    server.add_insecure_port("[::]:50505")
    server.start()
    server.wait_for_termination()
Example n. 36
 def __init__(self):
     self.channel = grpc.insecure_channel('localhost:50051')
     self.stub = govox_pb2_grpc.GovoxStub(self.channel)
     self.server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
def serve():
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    batch_request_pb2_grpc.add_batchServicer_to_server(Batch(), server)
    server.add_insecure_port('[::]:50051')
    server.start()
    server.wait_for_termination()
Example n. 38
def create_server(server_address):
    server = grpc.server(futures.ThreadPoolExecutor())
    helloworld_pb2_grpc.add_GreeterServicer_to_server(LimitedGreeter(), server)
    port = server.add_insecure_port(server_address)
    return server, port
Example n. 39
    def __init__(self):
        self.server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
        add_DiscountServicer_to_server(DiscountServer(), self.server)

        self.server.add_insecure_port('[::]:{}'.format(PORT))
Example n. 40
    def __init__(self, name, empty_mconfig, loop=None):
        self._name = name
        self._port = 0
        self._get_status_callback = None
        self._get_operational_states_cb = None
        self._log_count_handler = MsgCounterHandler()

        # Init logging before doing anything
        logging.basicConfig(
            level=logging.INFO,
            format='[%(asctime)s %(levelname)s %(name)s] %(message)s',
        )
        # Add a handler to count errors
        logging.root.addHandler(self._log_count_handler)

        # Set gRPC polling strategy
        self._set_grpc_poll_strategy()

        # Load the managed config if present
        self._mconfig = empty_mconfig
        self._mconfig_metadata = None
        self._mconfig_manager = get_mconfig_manager()
        self.reload_mconfig()

        self._state = ServiceInfo.STARTING
        self._health = ServiceInfo.APP_UNHEALTHY
        if loop is None:
            loop = asyncio.get_event_loop()
        self._loop = loop
        self._start_time = int(time.time())
        self._register_signal_handlers()

        # Load the service config if present
        self._config = None
        self.reload_config()

        # Count errors
        self.log_counter = ServiceLogErrorReporter(
            loop=self._loop,
            service_config=self._config,
            handler=self._log_count_handler,
        )
        self.log_counter.start()

        # Operational States
        self._operational_states = []

        self._version = '0.0.0'
        # Load the service version if available
        try:
            # Check if service on docker
            if self._config and 'init_system' in self._config \
                    and self._config['init_system'] == 'docker':
                # image comes in form of "feg_gateway_python:<IMAGE_TAG>\n"
                # Skip the "feg_gateway_python:" part
                image = os.popen(
                    'docker ps --filter name=magmad --format "{{.Image}}" | '
                    'cut -d ":" -f 2', )
                image_tag = image.read().strip('\n')
                self._version = image_tag
            else:
                self._version = pkg_resources.get_distribution('orc8r').version
        except pkg_resources.ResolutionError as e:
            logging.info(e)

        if self._config and 'grpc_workers' in self._config:
            self._server = grpc.server(
                futures.ThreadPoolExecutor(
                    max_workers=self._config['grpc_workers'], ), )
        else:
            self._server = grpc.server(
                futures.ThreadPoolExecutor(max_workers=MAX_DEFAULT_WORKER), )
        add_Service303Servicer_to_server(self, self._server)
Example n. 41
    def authenticate(self, request, context):
        response = py.auth_pb2.AuthenticateResponse()
        result = response.result
        if request.provider == py.auth_pb2.GOOGLE:
            result.success = True
            response.sessionKey = "gg0123456789"
        elif request.provider == py.auth_pb2.FACEBOOK:
            result.success = True
            response.sessionKey = "fb0123456789"
        else:
            result.success = False
        print('authenticate() -> %s' % response.result.success)
        return response


db.init()
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
py.auth_pb2_grpc.add_FunnifyServiceServicer_to_server(FunnifyServiceServicer(), server)
print('Starting "Funnify-gRPC-Server" on port 50051.')
server.add_insecure_port('165.246.42.172:50051')
server.start()

# since server.start() will not block, a sleep-loop is added to keep alive
try:
    while True:
        time.sleep(86400)
except KeyboardInterrupt:
    server.stop(0)
    db.end()
    print('Server has stopped.')
Example n. 42
def run():
    if len(sys.argv) != 4:
        print("Error, correct usage is {} [my id] [my port] [k]".format(
            sys.argv[0]))
        sys.exit(-1)

    local_id = int(sys.argv[1])
    my_port = str(int(sys.argv[2]))  # add_insecure_port() will want a string
    k = int(sys.argv[3])
    my_hostname = socket.gethostname()  # Gets my host name
    my_address = socket.gethostbyname(
        my_hostname)  # Gets my IP address from my hostname

    # Create server
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=16))
    servicer = KadImplServicer()
    csci4220_hw4_pb2_grpc.add_KadImplServicer_to_server(servicer, server)
    server.add_insecure_port('[::]:' + my_port)
    server.start()

    servicer.k = k
    for i in range(n):
        servicer.k_buckets.append([])

    servicer.this_node = csci4220_hw4_pb2.Node(id=local_id,
                                               port=int(my_port),
                                               address=my_address)

    # Listen for commands from standard input
    while 1:
        for line in sys.stdin:
            command = line.split(" ")
            for i in range(len(command)):
                command[i] = command[i].strip()

            # BOOTSTRAP command
            if (command[0] == "BOOTSTRAP"):
                remote_addr = socket.gethostbyname(command[1])
                remote_port = int(command[2])
                with grpc.insecure_channel(remote_addr + ':' +
                                           str(remote_port)) as channel:
                    stub = csci4220_hw4_pb2_grpc.KadImplStub(channel)

                    nodeList = stub.FindNode(
                        csci4220_hw4_pb2.IDKey(node=servicer.this_node,
                                               idkey=servicer.this_node.id))

                    k_buckets_add(servicer, nodeList.responding_node)

                    # Add nodes in nodeList to k_buckets
                    for node in nodeList.nodes:
                        k_buckets_add(servicer, node)

                    print("After BOOTSTRAP(" +
                          str(nodeList.responding_node.id) +
                          "), k_buckets now look like:")
                    print_k_buckets(servicer.k_buckets)

            # FIND_NODE command
            if (command[0] == "FIND_NODE"):
                print("Before FIND_NODE command, k-buckets are:")
                print_k_buckets(servicer.k_buckets)

                key = int(command[1])
                found = 0

                if local_id == key:
                    print("Found destination id " + str(key))
                else:
                    for i in range(n):
                        for j in range(len(servicer.k_buckets[i])):
                            if found == 0:
                                with grpc.insecure_channel(
                                        servicer.k_buckets[i][j].address +
                                        ':' +
                                        str(servicer.k_buckets[i][j].port)
                                ) as channel:
                                    stub = csci4220_hw4_pb2_grpc.KadImplStub(
                                        channel)

                                    nodelist = stub.FindNode(
                                        csci4220_hw4_pb2.IDKey(
                                            node=servicer.this_node,
                                            idkey=key))

                                    for node in nodelist.nodes:
                                        k_buckets_add(servicer, node)
                                        if node.id == key:
                                            print("Found destination id " +
                                                  str(key))
                                            found = 1

                if found == 0:
                    print("Could not find destination id " + str(key))

                print("After FIND_NODE command, k-buckets are:")
                print_k_buckets(servicer.k_buckets)

            # FIND_VALUE command
            if (command[0] == "FIND_VALUE"):
                print("Before FIND_VALUE command, k-buckets are:")
                print_k_buckets(servicer.k_buckets)

                key = int(command[1])
                found = 0

                # First check if key is stored locally
                if key in servicer.values:
                    print('Found data "' + servicer.values[key] +
                          '" for key ' + str(key))
                    found = 1
                else:
                    # Find node with id closest to key
                    closest = None
                    for i in range(n):
                        for j in range(len(servicer.k_buckets[i])):
                            if closest is None:
                                closest = servicer.k_buckets[i][j]
                            if (servicer.k_buckets[i][j].id
                                    ^ key) < (closest.id ^ key):
                                closest = servicer.k_buckets[i][j]

                    # Check if bucket is empty
                    if closest is not None:
                        # Ask closest node for value
                        with grpc.insecure_channel(
                                closest.address + ':' +
                                str(closest.port)) as channel:
                            stub = csci4220_hw4_pb2_grpc.KadImplStub(channel)

                            kv_wrapper = stub.FindValue(
                                csci4220_hw4_pb2.IDKey(node=servicer.this_node,
                                                       idkey=key))

                            k_buckets_add(servicer, kv_wrapper.responding_node)

                            # If value was found for key
                            if kv_wrapper.mode_kv:
                                print('Found value "' + kv_wrapper.kv.value +
                                      '" for key ' + str(key))
                                found = 1
                            else:
                                for node in kv_wrapper.nodes:
                                    k_buckets_add(servicer, node)
                                    # Correct node found, ask for value
                                    if node.id == key:
                                        with grpc.insecure_channel(
                                                node.address + ':' +
                                                str(node.port)) as channel:
                                            stub = csci4220_hw4_pb2_grpc.KadImplStub(
                                                channel)

                                            kv_wrapper1 = stub.FindValue(
                                                csci4220_hw4_pb2.IDKey(
                                                    node=servicer.this_node,
                                                    idkey=key))

                                            if kv_wrapper1.mode_kv:
                                                print('Found value "' +
                                                      kv_wrapper1.kv.value +
                                                      '" for key ' + str(key))
                                                found = 1

                if found == 0:
                    print("Could not find key " + str(key))

                print("After FIND_VALUE command, k-buckets are:")
                print_k_buckets(servicer.k_buckets)

            # STORE command
            if (command[0] == "STORE"):
                key = int(command[1])
                value = command[2]

                # Find node with id closest to key
                closest = servicer.this_node
                for i in range(n):
                    for j in range(len(servicer.k_buckets[i])):
                        if (servicer.k_buckets[i][j].id ^ key) < (closest.id
                                                                  ^ key):
                            closest = servicer.k_buckets[i][j]

                # Check if this is the closest node -> store locally
                if closest.id == servicer.this_node.id:
                    servicer.values[key] = value
                else:
                    # Send value to closest node
                    with grpc.insecure_channel(closest.address + ':' +
                                               str(closest.port)) as channel:
                        stub = csci4220_hw4_pb2_grpc.KadImplStub(channel)

                        stub.Store(
                            csci4220_hw4_pb2.KeyValue(node=servicer.this_node,
                                                      key=key,
                                                      value=value))

                print("Storing key " + str(key) + " at node " +
                      str(closest.id))

            # QUIT command
            if (command[0] == "QUIT"):
                for i in reversed(range(n)):
                    for node in reversed(servicer.k_buckets[i]):
                        with grpc.insecure_channel(node.address + ':' +
                                                   str(node.port)) as channel:
                            stub = csci4220_hw4_pb2_grpc.KadImplStub(channel)

                            print("Letting " + str(node.id) +
                                  " know I'm quitting.")
                            stub.Quit(
                                csci4220_hw4_pb2.IDKey(
                                    node=servicer.this_node,
                                    idkey=servicer.this_node.id))

                print("Shut down node " + str(local_id))
                sys.exit()
Example n. 43
def serve(max_workers=10, port=7777):
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=max_workers))
    grpc_bt_grpc.add_ForecastServicer_to_server(ForecastServicer(), server)
    server.add_insecure_port("[::]:{}".format(port))
    return server
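
A hedged usage note: unlike most serve() variants in these examples, this one returns the server without starting it, so the caller is expected to do roughly the following.

# Sketch only.
server = serve(max_workers=4, port=7777)
server.start()
server.wait_for_termination()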
Example n. 44
def serve():
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    status_pb2_grpc.add_StatusServicer_to_server(StatusServicer(), server)
    server.add_insecure_port(DOCKER_BRIDGE + ':' + RPC_PORT)
    server.start()
    server.wait_for_termination()
Example n. 45
def serve():
    server = grpc.server(ThreadPoolExecutor(max_workers=10))
    proto.helloworld_pb2_grpc.add_GreeterServicer_to_server(Greeter(), server)
    server.add_insecure_port('[::]:50051')
    server.start()
    server.wait_for_termination()
Example n. 46
def serve() -> None:
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=2))
    client_pb2_grpc.add_ClientServicer_to_server(ResponseReceiver(), server)
    server.add_insecure_port(f'localhost:{CLIENT_PORT}')
    server.start()
    server.wait_for_termination()
Example n. 47
def serve(jobs):
    # Cache the jobs list for later processing.
    # We first have to translate given jobs to different structure.
    for job in jobs:
        # Create proto object
        p = plugin_pb2.Job()

        # Manual interaction
        if job.interaction != None:
            p.interaction.description = job.interaction.description
            p.interaction.type = job.interaction.inputType
            p.interaction.value = job.interaction.value

        # Arguments
        args = []
        if job.args:
            for arg in job.args:
                a = plugin_pb2.Argument()
                a.description = arg.description
                a.type = arg.inputType.value
                a.key = arg.key
                a.value = arg.value

                args.append(a)

        # Set the rest of the fields
        p.unique_id = fnv1a_32(bytes(job.title))
        p.title = job.title
        p.description = job.description
        p.args.extend(args)

        # Resolve dependencies
        if job.dependsOn:
            for depJob in job.dependsOn:
                # Reset for each dependency so an unresolved one is reported.
                foundDep = False
                for currJob in jobs:
                    if depJob.lower() == currJob.title.lower():
                        p.dependson.append(fnv1a_32(bytes(currJob.title)))
                        foundDep = True
                        break

                if not foundDep:
                    raise Exception("job '" + job.title +
                                    "' has dependency '" + depJob +
                                    "' which is not declared")

        # job wrapper object for this job
        w = JobWrapper(job.handler, p)
        cachedJobs.append(w)

    # Check if two jobs have the same title which is restricted
    for x, job in enumerate(cachedJobs):
        for y, innerJob in enumerate(cachedJobs):
            if x != y and job.job.unique_id == innerJob.job.unique_id:
                raise Exception(
                    "duplicate job found (two jobs with same title)")

    # get certificate path from environment variables
    certPath = os.environ['GAIA_PLUGIN_CERT']
    keyPath = os.environ['GAIA_PLUGIN_KEY']
    caCertPath = os.environ['GAIA_PLUGIN_CA_CERT']

    # check if all certs are available
    if not os.path.isfile(certPath):
        raise Exception("cannot find path to certificate")
    if not os.path.isfile(keyPath):
        raise Exception("cannot find path to key")
    if not os.path.isfile(caCertPath):
        raise Exception("cannot find path to root certificate")

    # Open files
    private_key = open(keyPath).read()
    certificate_chain = open(certPath).read()
    root_cert = open(caCertPath).read()

    # We need to build a health service to work with go-plugin
    health = HealthServicer()
    health.set("plugin",
               health_pb2.HealthCheckResponse.ServingStatus.Value('SERVING'))

    # Start the server.
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=20))
    private_key_certificate_chain_pairs = ((private_key, certificate_chain), )
    server_credentials = grpc.ssl_server_credentials(
        private_key_certificate_chain_pairs, root_cert, True)
    plugin_pb2_grpc.add_PluginServicer_to_server(GRPCServer(), server)
    health_pb2_grpc.add_HealthServicer_to_server(health, server)
    port = server.add_secure_port('127.0.0.1:0', server_credentials)
    server.start()

    # Output information
    print("1|2|tcp|127.0.0.1:" + str(port) + "|grpc")
    sys.stdout.flush()

    try:
        while True:
            time.sleep(_ONE_DAY_IN_SECONDS)
    except KeyboardInterrupt:
        server.stop(0)
Example n. 48
def serve():
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    API_gRPC.add_APIServicer_to_server(APIServicer(), server)
    server.add_insecure_port('[::]:8080')
    server.start()
    server.wait_for_termination()
Example n. 49
                    if response is None:
                        _server._status(rpc_event, state, None)
                        break
                    else:
                        serialized_response = _server._serialize_response(
                            rpc_event, state, response, response_serializer
                        )
                        print(response)
                        if serialized_response is not None:
                            print("Serialized Correctly")
                            proceed = _server._send_response(
                                rpc_event, state, serialized_response
                            )
                            if not proceed:
                                break
                        else:
                            break
                else:
                    break


_server._unary_response_in_pool = _unary_response_in_pool
_server._stream_response_in_pool = _stream_response_in_pool


if __name__ == "__main__":
    import grpc

    server = grpc.server(AsyncioExecutor())
    # Add Servicer and Start Server Here
Example n. 50
 def _start(self):
     self.server = grpc.server(futures.ThreadPoolExecutor(max_workers=1))
     tpc_pb2_grpc.add_TwoPhaseCommitServicer_to_server(self, self.server)
     self.server.add_insecure_port('[::]:{}'.format(self.port))
     self.server.start()
     self.server.wait_for_termination()
Example n. 51
 def __init__(self):
     self.server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
     message_pb2_grpc.add_MessageServicer_to_server(MessageServicer(),
                                                    self.server)
     file_pb2_grpc.add_FileServicer_to_server(FileServicer(), self.server)
Example n. 52
 def test_server(self):
     grpc.server(
         futures.ThreadPoolExecutor(max_workers=1),
         options=TEST_CHANNEL_ARGS)
Example n. 53
 def start(self):
     self.server = grpc.server(futures.ThreadPoolExecutor(max_workers=THREAD_CONCURRENCY_PER_SERVER))
     nic_simulator_grpc_mgmt_service_pb2_grpc.add_DualTorMgmtServiceServicer_to_server(self, self.server)
     self.server.add_insecure_port("%s:%s" % (self.binding_address, self.binding_port))
     self.server.start()
     self.server.wait_for_termination()
Example n. 54
def serve():
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    wordcloud_pb2_grpc.add_WordcloudServicer_to_server(Wordcloud(), server)
    server.add_insecure_port('[::]:50052')
    server.start()
    server.wait_for_termination()
Example n. 55
    def __init__(
        self,
        host="localhost",
        port=None,
        socket=None,
        max_workers=None,
        loadable_target_origin=None,
        heartbeat=False,
        heartbeat_timeout=30,
        lazy_load_user_code=False,
        ipc_output_file=None,
        fixed_server_id=None,
    ):
        check.opt_str_param(host, "host")
        check.opt_int_param(port, "port")
        check.opt_str_param(socket, "socket")
        check.opt_int_param(max_workers, "max_workers")
        check.opt_inst_param(loadable_target_origin, "loadable_target_origin",
                             LoadableTargetOrigin)
        check.invariant(
            port is not None if seven.IS_WINDOWS else True,
            "You must pass a valid `port` on Windows: `socket` not supported.",
        )
        check.invariant(
            (port or socket) and not (port and socket),
            "You must pass one and only one of `port` or `socket`.",
        )
        check.invariant(
            host is not None if port else True,
            "Must provide a host when serving on a port",
        )
        check.bool_param(heartbeat, "heartbeat")
        check.int_param(heartbeat_timeout, "heartbeat_timeout")
        self._ipc_output_file = check.opt_str_param(ipc_output_file,
                                                    "ipc_output_file")
        check.opt_str_param(fixed_server_id, "fixed_server_id")

        check.invariant(heartbeat_timeout > 0,
                        "heartbeat_timeout must be greater than 0")
        check.invariant(
            max_workers is None or max_workers > 1 if heartbeat else True,
            "max_workers must be greater than 1 or set to None if heartbeat is True. "
            "If set to None, the server will use the gRPC default.",
        )

        self.server = grpc.server(
            ThreadPoolExecutor(max_workers=max_workers),
            compression=grpc.Compression.Gzip,
        )
        self._server_termination_event = threading.Event()

        try:
            self._api_servicer = DagsterApiServer(
                server_termination_event=self._server_termination_event,
                loadable_target_origin=loadable_target_origin,
                heartbeat=heartbeat,
                heartbeat_timeout=heartbeat_timeout,
                lazy_load_user_code=lazy_load_user_code,
                fixed_server_id=fixed_server_id,
            )
        except Exception:
            if self._ipc_output_file:
                with ipc_write_stream(self._ipc_output_file) as ipc_stream:
                    ipc_stream.send(
                        GrpcServerLoadErrorEvent(
                            error_info=serializable_error_info_from_exc_info(
                                sys.exc_info())))
            raise

        # Create a health check servicer
        self._health_servicer = health.HealthServicer()
        health_pb2_grpc.add_HealthServicer_to_server(self._health_servicer,
                                                     self.server)

        add_DagsterApiServicer_to_server(self._api_servicer, self.server)

        if port:
            server_address = host + ":" + str(port)
        else:
            server_address = "unix:" + os.path.abspath(socket)

        # grpc.Server.add_insecure_port returns:
        # - 0 on failure
        # - port number when a port is successfully bound
        # - 1 when a UDS is successfully bound
        res = self.server.add_insecure_port(server_address)
        if socket and res != 1:
            if self._ipc_output_file:
                with ipc_write_stream(self._ipc_output_file) as ipc_stream:
                    ipc_stream.send(GrpcServerFailedToBindEvent())
            raise CouldNotBindGrpcServerToAddress(socket)
        if port and res != port:
            if self._ipc_output_file:
                with ipc_write_stream(self._ipc_output_file) as ipc_stream:
                    ipc_stream.send(GrpcServerFailedToBindEvent())
            raise CouldNotBindGrpcServerToAddress(port)
Example n. 56
def infinite_request_iterator():
    while True:
        yield REQUEST


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('scenario', type=str)
    parser.add_argument('--wait_for_interrupt',
                        dest='wait_for_interrupt',
                        action='store_true')
    args = parser.parse_args()

    if args.scenario == UNSTARTED_SERVER:
        server = grpc.server(DaemonPool(),
                             options=(('grpc.so_reuseport', 0), ))
        if args.wait_for_interrupt:
            time.sleep(WAIT_TIME)
    elif args.scenario == RUNNING_SERVER:
        server = grpc.server(DaemonPool(),
                             options=(('grpc.so_reuseport', 0), ))
        port = server.add_insecure_port('[::]:0')
        server.start()
        if args.wait_for_interrupt:
            time.sleep(WAIT_TIME)
    elif args.scenario == POLL_CONNECTIVITY_NO_SERVER:
        channel_config = grpc_gcp.api_config_from_text_pb('')
        channel = grpc_gcp.insecure_channel(
            'localhost:12345',
            options=((grpc_gcp.API_CONFIG_CHANNEL_ARG, channel_config), ))
Example n. 57
    except Exception:
        logger.error("could not enable debugger")
        logger.error(traceback.format_exc())

    port = os.environ.get('PORT', "8080")
    catalog_addr = os.environ.get('PRODUCT_CATALOG_SERVICE_ADDR', '')
    if catalog_addr == "":
        raise Exception(
            'PRODUCT_CATALOG_SERVICE_ADDR environment variable not set')
    logger.info("product catalog address: " + catalog_addr)
    channel = grpc.insecure_channel(catalog_addr)
    product_catalog_stub = demo_pb2_grpc.ProductCatalogServiceStub(channel)

    # create gRPC server
    server = grpc.server(futures.ThreadPoolExecutor(
        max_workers=10))  # ,interceptors=(tracer_interceptor,))

    # add class to gRPC server
    service = RecommendationService()
    demo_pb2_grpc.add_RecommendationServiceServicer_to_server(service, server)
    health_pb2_grpc.add_HealthServicer_to_server(service, server)

    # start server
    logger.info("listening on port: " + port)
    server.add_insecure_port('[::]:' + port)
    server.start()

    # keep alive
    try:
        while True:
            time.sleep(10000)
Example n. 58
 def __init__(self, routes: List[AbstractRoute]):
     self.server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
     self.server.add_insecure_port('[::]:50051')
     for route in routes:
         route.add_to_server(self.server)
Example n. 59
from concurrent import futures
from time import sleep

import grpc

import quote_pb2_grpc
from cowsay_client import CowsayClient
from cowsay_pb2_grpc import CowsayStub
from key_auth_server_interceptor import KeyAuthServerInterceptor
from quote_service import QuoteService
from tracer_client_interceptor import TracerClientInterceptor

if __name__ == '__main__':
    secret_key = "password"
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10),
                         interceptors=[KeyAuthServerInterceptor(secret_key)])

    tracer_client_interceptor = TracerClientInterceptor()
    cowsay_channel = grpc.insecure_channel('localhost:50050')
    cowsay_intercepted_channel = grpc.intercept_channel(
        cowsay_channel, tracer_client_interceptor)
    cowsay_stub = CowsayStub(cowsay_intercepted_channel)
    cowsay_client = CowsayClient(cowsay_stub)

    quote_pb2_grpc.add_QuoteServicer_to_server(
        QuoteService(cowsay_client, secret_key), server)
    server.add_insecure_port('[::]:50051')
    server.start()
    print('Quote Service has started with port 50051.')
    try:
        while True:
Example n. 60
def get(scene_class):
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    frameserver_pb2_grpc.add_FrameServerServicer_to_server(
        FrameServer(server, scene_class), server)
    server.add_insecure_port("localhost:50051")
    return server