def __init__(self, control_address, worker_count, credentials=None):
    self._worker_count = worker_count
    self._worker_index = 0
    if credentials is None:
        logging.info('Creating insecure control channel.')
        self._control_channel = grpc.insecure_channel(control_address)
    else:
        logging.info('Creating secure control channel.')
        self._control_channel = grpc.secure_channel(control_address, credentials)
    grpc.channel_ready_future(self._control_channel).result(timeout=60)
    logging.info('Control channel established.')

    self._control_channel = grpc.intercept_channel(
        self._control_channel, WorkerIdInterceptor())
    self._data_channel_factory = data_plane.GrpcClientDataChannelFactory(
        credentials)
    self._state_handler_factory = GrpcStateHandlerFactory()
    self.workers = queue.Queue()
    # One thread is enough for getting the progress report.
    # Assumption:
    # Progress report generation should not do IO or wait on other resources.
    # Without waits, having multiple threads will not improve performance and
    # will only add complexity.
    self._progress_thread_pool = futures.ThreadPoolExecutor(max_workers=1)
    self._process_thread_pool = futures.ThreadPoolExecutor(
        max_workers=self._worker_count)
    self._instruction_id_vs_worker = {}
    self._fns = {}
    self._responses = queue.Queue()
    self._process_bundle_queue = queue.Queue()
    self._unscheduled_process_bundle = set()
    logging.info('Initializing SDKHarness with %s workers.', self._worker_count)
def __init__(self, server, config, hist):
    # Create the stub
    if config.HasField('security_params'):
        creds = grpc.ssl_channel_credentials(
            resources.test_root_certificates())
        channel = test_common.test_secure_channel(
            server, creds, config.security_params.server_host_override)
    else:
        channel = grpc.insecure_channel(server)

    # Wait for the channel to be ready before we start sending messages.
    grpc.channel_ready_future(channel).result()

    if config.payload_config.WhichOneof('payload') == 'simple_params':
        self._generic = False
        self._stub = services_pb2.BenchmarkServiceStub(channel)
        payload = messages_pb2.Payload(
            body='\0' * config.payload_config.simple_params.req_size)
        self._request = messages_pb2.SimpleRequest(
            payload=payload,
            response_size=config.payload_config.simple_params.resp_size)
    else:
        self._generic = True
        self._stub = GenericStub(channel)
        self._request = '\0' * config.payload_config.bytebuf_params.req_size

    self._hist = hist
    self._response_callbacks = []
def run(flags):
    # The deadline is absolute - time taken by wait_for_ready is not available
    # to the RPC.
    deadline = time.time() + flags.deadline_ms / 1000.0

    logging.info('Creating channel to greeter server at %s',
                 flags.greeter_server)
    channel = grpc.insecure_channel(flags.greeter_server)
    if flags.wait_for_ready:
        logging.info('Waiting for channel to become ready...')
        try:
            grpc.channel_ready_future(channel).result(
                timeout=deadline - time.time())
        except Exception:
            # Ignore timeouts and other errors here; the RPC below will
            # surface any real failure.
            pass

    logging.info('Creating Greeter stub.')
    stub = greeter_pb2_grpc.GreeterStub(channel)

    logging.info('Sending request')
    try:
        response = stub.SayHello(
            greeter_pb2.HelloRequest(name=flags.user, locale=flags.locale),
            timeout=deadline - time.time())
        logging.info('Greeting received: %s', response.message)
        print('Greeting received: %s' % response.message)
    except grpc.RpcError:
        logging.exception('Failed to fetch greeting:')
        print('Failed to fetch greeting.')
def test_java_expansion(self):
    if not self.expansion_service_jar:
        raise unittest.SkipTest('No expansion service jar provided.')

    # The actual definitions of these transforms are in
    # org.apache.beam.runners.core.construction.TestExpansionService.
    TEST_COUNT_URN = "pytest:beam:transforms:count"
    TEST_FILTER_URN = "pytest:beam:transforms:filter_less_than"

    # Run as cheaply as possible on the portable runner.
    # TODO(robertwb): Support this directly in the direct runner.
    options = beam.options.pipeline_options.PipelineOptions(
        runner='PortableRunner',
        experiments=['beam_fn_api'],
        environment_type=python_urns.EMBEDDED_PYTHON,
        job_endpoint='embed')

    try:
        # Start the java server and wait for it to be ready.
        port = '8091'
        address = 'localhost:%s' % port
        server = subprocess.Popen(
            ['java', '-jar', self.expansion_service_jar, port])
        with grpc.insecure_channel(address) as channel:
            grpc.channel_ready_future(channel).result()

        # Run a simple count-filtered-letters pipeline.
        with beam.Pipeline(options=options) as p:
            res = (
                p
                | beam.Create(list('aaabccxyyzzz'))
                | beam.Map(unicode)
                # TODO(BEAM-6587): Use strings directly rather than ints.
                | beam.Map(lambda x: int(ord(x)))
                | beam.ExternalTransform(TEST_FILTER_URN, b'middle', address)
                | beam.ExternalTransform(TEST_COUNT_URN, None, address)
                # TODO(BEAM-6587): Remove when above is removed.
                | beam.Map(lambda kv: (chr(kv[0]), kv[1]))
                | beam.Map(lambda kv: '%s: %s' % kv))
            assert_that(res, equal_to(['a: 3', 'b: 1', 'c: 2']))

        # Test GenerateSequence Java transform.
        with beam.Pipeline(options=options) as p:
            res = (
                p
                | GenerateSequence(start=1, stop=10,
                                   expansion_service=address))
            assert_that(res, equal_to([i for i in range(1, 10)]))
    finally:
        server.kill()
def send_options_request(max_retries=5):
    num_retries = 0
    while True:
        try:
            # This reports channel is READY but connections may fail.
            # Seems to be only an issue on Mac with port forwardings.
            if channel:
                grpc.channel_ready_future(channel).result()
            return job_service.DescribePipelineOptions(
                beam_job_api_pb2.DescribePipelineOptionsRequest())
        except grpc._channel._Rendezvous as e:
            num_retries += 1
            if num_retries > max_retries:
                raise e
def send_prepare_request(max_retries=5):
    num_retries = 0
    while True:
        try:
            # This reports channel is READY but connections may fail.
            # Seems to be only an issue on Mac with port forwardings.
            grpc.channel_ready_future(channel).result()
            return job_service.Prepare(
                beam_job_api_pb2.PrepareJobRequest(
                    job_name='job',
                    pipeline=proto_pipeline,
                    pipeline_options=job_utils.dict_to_struct(options)))
        except grpc._channel._Rendezvous as e:
            num_retries += 1
            if num_retries > max_retries:
                raise e
def _create_client_stub(
        port,
        expect_success,
        root_certificates=None,
        private_key=None,
        certificate_chain=None):
    channel = grpc.secure_channel(
        'localhost:{}'.format(port),
        grpc.ssl_channel_credentials(
            root_certificates=root_certificates,
            private_key=private_key,
            certificate_chain=certificate_chain))
    if expect_success:
        # per Nathaniel: there's some robustness issue if we start
        # using a channel without waiting for it to be actually ready
        grpc.channel_ready_future(channel).result(timeout=10)
    return services_pb2_grpc.FirstServiceStub(channel)
def test_immediately_connectable_channel_connectivity(self):
    recording_thread_pool = thread_pool.RecordingThreadPool(max_workers=None)
    server = grpc.server(
        recording_thread_pool, options=(('grpc.so_reuseport', 0),))
    port = server.add_insecure_port('[::]:0')
    server.start()
    channel = grpc.insecure_channel('localhost:{}'.format(port))
    callback = _Callback()

    ready_future = grpc.channel_ready_future(channel)
    ready_future.add_done_callback(callback.accept_value)
    self.assertIsNone(
        ready_future.result(timeout=test_constants.LONG_TIMEOUT))
    value_passed_to_callback = callback.block_until_called()
    self.assertIs(ready_future, value_passed_to_callback)
    self.assertFalse(ready_future.cancelled())
    self.assertTrue(ready_future.done())
    self.assertFalse(ready_future.running())
    # Cancellation after maturity has no effect.
    ready_future.cancel()
    self.assertFalse(ready_future.cancelled())
    self.assertTrue(ready_future.done())
    self.assertFalse(ready_future.running())
    self.assertFalse(recording_thread_pool.was_used())

    channel.close()
    server.stop(None)
def __init__(self, log_service_descriptor):
    super(FnApiLogRecordHandler, self).__init__()
    # Make sure the channel is ready to avoid [BEAM-4649]
    ch = grpc.insecure_channel(log_service_descriptor.url)
    grpc.channel_ready_future(ch).result(timeout=60)
    self._log_channel = grpc.intercept_channel(ch, WorkerIdInterceptor())
    self._logging_stub = beam_fn_api_pb2_grpc.BeamFnLoggingStub(
        self._log_channel)
    self._log_entry_queue = queue.Queue()

    log_control_messages = self._logging_stub.Logging(self._write_log_entries())
    self._reader = threading.Thread(
        target=lambda: self._read_log_control_messages(log_control_messages),
        name='read_log_control_messages')
    self._reader.daemon = True
    self._reader.start()
def _get_channel(target, args):
    if args.use_tls:
        if args.use_test_ca:
            root_certificates = resources.test_root_certificates()
        else:
            root_certificates = None  # will load default roots.
        channel_credentials = grpc.ssl_channel_credentials(
            root_certificates=root_certificates)
        options = (('grpc.ssl_target_name_override',
                    args.server_host_override,),)
        channel = grpc.secure_channel(
            target, channel_credentials, options=options)
    else:
        channel = grpc.insecure_channel(target)

    # Wait for the channel to be ready before we start sending messages.
    grpc.channel_ready_future(channel).result()
    return channel
def main():
    logging.getLogger().setLevel(logging.INFO)
    parser = argparse.ArgumentParser()
    parser.add_argument('--input',
                        dest='input',
                        default='gs://dataflow-samples/shakespeare/kinglear.txt',
                        help='Input file to process.')
    parser.add_argument('--output',
                        dest='output',
                        required=True,
                        help='Output file to write results to.')
    parser.add_argument('--expansion_service_jar',
                        dest='expansion_service_jar',
                        required=True,
                        help='Jar file for expansion service')
    known_args, pipeline_args = parser.parse_known_args()

    pipeline_options = PipelineOptions(pipeline_args)

    assert (
        pipeline_options.view_as(StandardOptions).runner.lower()
        == "portablerunner"), "Only PortableRunner is supported."

    # We use the save_main_session option because one or more DoFn's in this
    # workflow rely on global context (e.g., a module imported at module level).
    pipeline_options.view_as(SetupOptions).save_main_session = True

    p = beam.Pipeline(options=pipeline_options)
    p.runner.init_dockerized_job_server()

    try:
        server = subprocess.Popen([
            'java', '-jar', known_args.expansion_service_jar,
            EXPANSION_SERVICE_PORT])

        with grpc.insecure_channel(EXPANSION_SERVICE_ADDR) as channel:
            grpc.channel_ready_future(channel).result()

        run(p, known_args.input, known_args.output)
    finally:
        server.kill()
def reset(self):
    if self._channel is not None:
        # Client surrenders in the current game and starts the next one.
        self._channel.close()
        self._channel = None

    # Get game server address and side id from master.
    start_game_request = master_pb2.StartGameRequest(
        game_version=config.game_version,
        username=self._username,
        token=self._token,
        model_name=self._model_name,
        include_rendering=self._include_rendering)
    response = self._reset_with_retries(start_game_request)
    self._game_id = response.game_id
    self._channel = utils.get_grpc_channel(response.game_server_address)
    grpc.channel_ready_future(self._channel).result()

    get_env_result_request = game_server_pb2.GetEnvResultRequest(
        game_version=config.game_version,
        game_id=self._game_id,
        username=self._username,
        token=self._token,
        model_name=self._model_name)
    obs = self._get_env_result(get_env_result_request, 'GetEnvResult')
    assert 'frame' not in obs[-1], obs
    # assert 0, obs
    return obs[0]
def __init__(self, args):
    self.args = args
    self.logger = set_logger(self.__class__.__name__, self.args.verbose)
    self.logger.info('setting up grpc insecure channel...')
    # A gRPC channel provides a connection to a remote gRPC server.
    self._channel = grpc.insecure_channel(
        '%s:%d' % (self.args.grpc_host, self.args.grpc_port),
        options={
            'grpc.max_send_message_length': -1,
            'grpc.max_receive_message_length': -1,
        }.items(),
    )
    self.logger.info('waiting channel to be ready...')
    grpc.channel_ready_future(self._channel).result()

    # create new stub
    self.logger.info('create new stub...')
    self._stub = gnes_pb2_grpc.GnesRPCStub(self._channel)

    # attach the response handler
    self.handler._context = self

    self.logger.critical('gnes client ready at %s:%d!' %
                         (self.args.grpc_host, self.args.grpc_port))
def wait_for_rpc(self):
    chan = grpc.insecure_channel("127.0.0.1:{}".format(self._config.rpc_port))
    ready = grpc.channel_ready_future(chan)
    while not ready.done():
        def test(conn):
            print(conn)
        chan.subscribe(test, True)
        time.sleep(1)
    self._rpc = relayerrpc_pb2_grpc.RelayerRPCStub(chan)
def func(address, process, timeout=20):
    listener = Thread(target=_listen, args=(process,))
    listener.start()
    try:
        if process.returncode is not None:
            raise ValueError('subprocess terminated')
        with grpc.insecure_channel(address) as channel:
            future = grpc.channel_ready_future(channel)
            future.result(timeout=timeout)
            yield address
    finally:
        process.send_signal(signal.SIGINT)
        listener.join()
        print("processor exited with code: ", process.returncode)
def _connect_core(self, skip_if_connected=True):
    """Connect to Core API"""
    if skip_if_connected and self._core_service_stub:
        return

    if not self.core_url:
        raise ValueError("Please set Feast Core URL.")

    if self.__core_channel is None:
        self.__core_channel = grpc.insecure_channel(self.core_url)

    try:
        grpc.channel_ready_future(self.__core_channel).result(
            timeout=GRPC_CONNECTION_TIMEOUT_DEFAULT)
    except grpc.FutureTimeoutError:
        print(
            f"Connection timed out while attempting to connect to Feast Core gRPC server {self.core_url}"
        )
        sys.exit(1)
    else:
        self._core_service_stub = CoreServiceStub(self.__core_channel)
def run_server(bank_ip_address, merchant_ip_address, customer_port):
    # Declare the gRPC server with 10 max_workers.
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    digitalCashServer = DigitalCashServer()

    # Add the digital cash service to the server.
    digitalCashService_pb2_grpc.add_digitalCashServiceServicer_to_server(
        digitalCashServer, server)

    # Start the server on customer_port.
    server.add_insecure_port('[::]:{}'.format(customer_port))
    server.start()
    print("******* Customer server has been started on port {} *******".format(
        customer_port))

    with grpc.insecure_channel(bank_ip_address) as channel:
        try:
            grpc.channel_ready_future(channel).result(timeout=1)
        except grpc.FutureTimeoutError:
            print("<--- Connection timeout. Unable to connect to port. --->")
            # exit()
        else:
            print("** Connected to bank server. **")
            stub = digitalCashService_pb2_grpc.digitalCashServiceStub(channel)
            response = stub.ping(
                digitalCashService_pb2.pingMessage(message="Trying to ping you!!"))
            handleUserInputs(merchant_ip_address, stub, digitalCashServer)

    # Keep the server running for '_ONE_DAY_IN_SECONDS' seconds.
    try:
        while True:
            time.sleep(_ONE_DAY_IN_SECONDS)
    except KeyboardInterrupt:
        server.stop(0)
def user_authentication(session_id: bytes, cert: bytes, ip: str, name: str) -> str:
    # Pin the robot certificate for opening the channel.
    output = ""
    creds = grpc.ssl_channel_credentials(root_certificates=cert)
    output += f"Attempting to download guid from {name} at {ip}:443... \n"
    channel = grpc.secure_channel(
        "{}:443".format(ip), creds,
        options=(("grpc.ssl_target_name_override", name,),))

    # Verify the connection to Vector is able to be established (client-side).
    try:
        # Wait for the channel to become ready before issuing the request.
        grpc.channel_ready_future(channel).result(timeout=15)
    except grpc.FutureTimeoutError:
        output += f"\nUnable to connect to Vector\n Please be sure to connect via the Vector companion app first, and connect your computer to the same network as your Vector."

    try:
        interface = messaging.client.ExternalInterfaceStub(channel)
        request = messaging.protocol.UserAuthenticationRequest(
            user_session_id=session_id.encode("utf-8"),
            client_name=socket.gethostname().encode("utf-8"),
        )
        response = interface.UserAuthentication(request)

        if (response.code !=
                messaging.protocol.UserAuthenticationResponse.AUTHORIZED):  # pylint: disable=no-member
            output += f"\nFailed to authorize request:\n Please be sure to first set up Vector using the companion app."
    except grpc.RpcError as e:
        output += f"\nFailed to authorize request:\n An unknown error occurred '{e}'"

    output += "Done. \n"
    return response.client_token_guid, output
def __init__(
        self, control_address, worker_count, credentials=None, worker_id=None,
        profiler_factory=None):
    self._alive = True
    self._worker_count = worker_count
    self._worker_index = 0
    self._worker_id = worker_id
    if credentials is None:
        logging.info('Creating insecure control channel for %s.', control_address)
        self._control_channel = grpc.insecure_channel(control_address)
    else:
        logging.info('Creating secure control channel for %s.', control_address)
        self._control_channel = grpc.secure_channel(control_address, credentials)
    grpc.channel_ready_future(self._control_channel).result(timeout=60)
    logging.info('Control channel established.')

    self._control_channel = grpc.intercept_channel(
        self._control_channel, WorkerIdInterceptor(self._worker_id))
    self._data_channel_factory = data_plane.GrpcClientDataChannelFactory(
        credentials)
    self._state_handler_factory = GrpcStateHandlerFactory()
    self._profiler_factory = profiler_factory
    self.workers = queue.Queue()
    # One thread is enough for getting the progress report.
    # Assumption:
    # Progress report generation should not do IO or wait on other resources.
    # Without waits, having multiple threads will not improve performance and
    # will only add complexity.
    self._progress_thread_pool = futures.ThreadPoolExecutor(max_workers=1)
    self._process_thread_pool = futures.ThreadPoolExecutor(
        max_workers=self._worker_count)
    self._instruction_id_vs_worker = {}
    self._fns = {}
    self._responses = queue.Queue()
    self._process_bundle_queue = queue.Queue()
    self._unscheduled_process_bundle = {}
    logging.info('Initializing SDKHarness with %s workers.', self._worker_count)
def run_sample(target_hz: int) -> synthesis_pb2.CompileResponse:
    logging.info(
        'Running with target frequency %0.3fGHz. Range: [%0.3fGHz, %0.3fGHz]',
        target_hz / 1e9, start_hz / 1e9, limit_hz / 1e9)
    grpc.channel_ready_future(grpc_channel).result()
    stub = synthesis_service_pb2_grpc.SynthesisServiceStub(grpc_channel)
    request = synthesis_pb2.CompileRequest()
    request.module_text = verilog_text
    request.top_module_name = top_module_name
    request.target_frequency_hz = target_hz
    response = stub.Compile(request)

    synthesis_result = sweep_result.results.add()
    synthesis_result.target_frequency_hz = target_hz
    synthesis_result.response.CopyFrom(response)

    if response.slack_ps >= 0:
        logging.info(' PASSED TIMING')
        sweep_result.max_frequency_hz = max(sweep_result.max_frequency_hz,
                                            target_hz)
    else:
        logging.info(' FAILED TIMING (slack %dps)', response.slack_ps)
    return response
def __init__(self, status_address, bundle_process_cache=None,
             enable_heap_dump=False):
    """Initialize FnApiWorkerStatusHandler.

    Args:
      status_address: The URL Runner uses to host the WorkerStatus server.
      bundle_process_cache: The BundleProcessor cache dict from sdk worker.
    """
    self._alive = True
    self._bundle_process_cache = bundle_process_cache
    ch = GRPCChannelFactory.insecure_channel(status_address)
    grpc.channel_ready_future(ch).result(timeout=60)
    self._status_channel = grpc.intercept_channel(ch, WorkerIdInterceptor())
    self._status_stub = beam_fn_api_pb2_grpc.BeamFnWorkerStatusStub(
        self._status_channel)
    self._responses = queue.Queue()
    self._server = threading.Thread(
        target=lambda: self._serve(), name='fn_api_status_handler')
    self._server.daemon = True
    self._enable_heap_dump = enable_heap_dump
    self._server.start()
def __init__(self, args: 'argparse.Namespace'):
    self.args = args
    if not args.proxy and os.name != 'nt':
        os.unsetenv('http_proxy')
        os.unsetenv('https_proxy')
    self.logger = get_logger(self.__class__.__name__, **vars(args))
    self.logger.debug('setting up grpc insecure channel...')
    # A gRPC channel provides a connection to a remote gRPC server.
    self._channel = grpc.insecure_channel(
        '%s:%d' % (args.host, args.port_grpc),
        options={
            'grpc.max_send_message_length': -1,
            'grpc.max_receive_message_length': -1,
        }.items(),
    )
    self.logger.debug('waiting channel to be ready...')
    try:
        grpc.channel_ready_future(self._channel).result(
            timeout=(args.timeout_ready / 1000) if args.timeout_ready > 0 else None)
    except grpc.FutureTimeoutError:
        self.logger.critical(
            'can not connect to the server at %s:%d after %d ms, please double check the '
            'ip and grpc port number of the server' %
            (args.host, args.port_grpc, args.timeout_ready))
        raise GRPCServerError('can not connect to the server at %s:%d' %
                              (args.host, args.port_grpc))

    # create new stub
    self.logger.debug('create new stub...')
    self._stub = jina_pb2_grpc.JinaRPCStub(self._channel)

    # attach the response handler
    self.logger.success('connected to the gateway at %s:%d!' %
                        (self.args.host, self.args.port_grpc))
    self.is_closed = False
def wait_for_rpc(self):
    chan = grpc.insecure_channel("127.0.0.1:{}".format(self._config.rpc_port))
    ready = grpc.channel_ready_future(chan)
    while not ready.done():
        def test(_):
            pass
        chan.subscribe(test, True)
        time.sleep(1)
    self._rpc = beaconrpc_pb2_grpc.BlockchainRPCStub(chan)
def ping(self, timeout=2):
    try:
        ft = grpc.channel_ready_future(self._channel)
        ft.result(timeout=timeout)
        return True
    except grpc.FutureTimeoutError:
        raise NotConnectError(
            'Fail connecting to server on {}. Timeout'.format(self._uri))
    except grpc.RpcError as e:
        raise NotConnectError("Connect error: <{}>".format(e))
    # Unexpected error
    except Exception as e:
        raise NotConnectError(
            "Error occurred when trying to connect server:\n"
            "\t<{}>".format(str(e)))
def _report_join(self, ips):
    for ip in ips:
        channel = grpc.insecure_channel('{ip}:{port}'.format(
            ip=ip, port=self._port))
        try:
            grpc.channel_ready_future(channel).result(timeout=2)
        except Exception as e:
            logging.warning(str(e))
            return False
        else:
            stub = communicate_pb2_grpc.ReportStub(channel)
            try:
                response = stub.GetGroupStatus(
                    communicate_pb2.GroupStatusRequest(), timeout=2)
            except Exception as e:
                logging.warning(str(e))
            else:
                logging.info('counter retrieved')
                self._count = response.counter
                self._io_tool.register_ip(ip_address, self._count)
                return True
    logging.warning('counter not retrieved')
    self._io_tool.register_ip(ip_address, self._count)
    return False
def init():
    global HalChannel
    if IsHalDisabled():
        return
    if 'MBT_GRPC_PORT' in os.environ:
        # If MBT toggle mode is enabled
        # assert GlobalOptions.mbt
        port = os.environ['MBT_GRPC_PORT']
    elif 'HAL_GRPC_PORT' in os.environ:
        port = os.environ['HAL_GRPC_PORT']
    else:
        port = '50054'
    logger.info("Creating GRPC channel to HAL on port %s" % (port))
    server = 'localhost:' + port
    if 'HAL_GRPC_IP' in os.environ:
        server = os.environ['HAL_GRPC_IP'] + ':' + port
    HalChannel = grpc.insecure_channel(server)
    logger.info("HAL Server IP and Port = %s" % server)
    logger.info("Waiting for HAL to be ready ...")
    grpc.channel_ready_future(HalChannel).result()
    logger.info("Connected to HAL!")
    if GlobalOptions.mbt:
        SignalingClient.Connect()
        logger.info("Connected to the Model based tester")
    return
def create_secure_channel_and_connect(
        server_address: str,
        credentials: grpc.ChannelCredentials = grpc.local_channel_credentials(),
        timeout: Optional[float] = None) -> Connection:
    """Creates a secure channel from server address and credentials and connects.

    We allow the created channel to have un-bounded message lengths, to support
    large observations.

    Args:
      server_address: URI server address to connect to.
      credentials: gRPC credentials necessary to connect to the server.
      timeout: Optional timeout in seconds to wait for channel to be ready.
        Defaults to waiting indefinitely.

    Returns:
      An instance of dm_env_rpc.Connection, where the channel is closed upon the
      connection being closed.
    """
    options = [('grpc.max_send_message_length', -1),
               ('grpc.max_receive_message_length', -1)]
    channel = grpc.secure_channel(server_address, credentials, options=options)
    grpc.channel_ready_future(channel).result(timeout)

    class _ConnectionWrapper(Connection):
        """Utility to ensure channel is closed when the connection is closed."""

        def __init__(self, channel):
            super().__init__(channel)
            self._channel = channel

        def close(self):
            super().close()
            self._channel.close()

    return _ConnectionWrapper(channel)
def user_authentication(session_id: bytes, cert: bytes, ip: str, name: str) -> str:
    # Pin the robot certificate for opening the channel.
    creds = grpc.ssl_channel_credentials(root_certificates=cert)
    print("Attempting to download guid from {} at {}:443...".format(
        colored(name, "cyan"), colored(ip, "cyan")), end="")
    sys.stdout.flush()
    channel = grpc.secure_channel(
        "{}:443".format(ip), creds,
        options=(("grpc.ssl_target_name_override", name,),))

    # Verify the connection to Vector is able to be established (client-side).
    try:
        # Wait for the channel to become ready before issuing the request.
        grpc.channel_ready_future(channel).result(timeout=15)
    except grpc.FutureTimeoutError:
        print(colored(" ERROR", "red"))
        sys.exit("\nUnable to connect to Vector\n"
                 "Please be sure to connect via the Vector companion app first, and connect your computer to the same network as your Vector.")

    try:
        interface = messaging.client.ExternalInterfaceStub(channel)
        request = messaging.protocol.UserAuthenticationRequest(
            user_session_id=session_id.encode('utf-8'),
            client_name=socket.gethostname().encode('utf-8'))
        response = interface.UserAuthentication(request)

        if response.code != messaging.protocol.UserAuthenticationResponse.AUTHORIZED:  # pylint: disable=no-member
            print(colored(" ERROR", "red"))
            sys.exit("\nFailed to authorize request:\n"
                     "Please be sure to first set up Vector using the companion app.")
    except grpc.RpcError as e:
        print(colored(" ERROR", "red"))
        sys.exit("\nFailed to authorize request:\n"
                 "An unknown error occurred '{}'".format(e))

    print(colored(" DONE\n", "green"))
    return response.client_token_guid
def IniciarCliente(self):
    id = uuid.uuid1()
    print(id)
    channel = grpc.insecure_channel('localhost:50051')
    try:
        grpc.channel_ready_future(channel).result(timeout=10)
    except grpc.FutureTimeoutError:
        sys.exit('Error connecting to server')
    else:
        conn = grpc_chat.ChatAdminStub(channel)
        # Build the subscription request and send it to the server.
        request = structure.Usuario(id=id.hex, usuario="Choco", activo=True)
        confirmacion = conn.Subscribirse(request)
        print(confirmacion)
def __init__(self, location: str) -> None:
    """Create an MMPClient.

    Args:
        location: A connection string of the form hostname:port
    """
    channel = grpc.insecure_channel(location)
    ready = grpc.channel_ready_future(channel)
    try:
        ready.result(timeout=CONNECTION_TIMEOUT)
    except grpc.FutureTimeoutError:
        ready.cancel()
        channel.close()
        raise RuntimeError('Failed to connect to the MUSCLE manager')
    self.__client = mmp_grpc.MuscleManagerStub(channel)
def func(address, process):
    try:
        if process.returncode is not None:
            raise ValueError('subprocess terminated')
        with grpc.insecure_channel(address) as channel:
            future = grpc.channel_ready_future(channel)
            future.result(timeout=20)
            yield address
    finally:
        process.send_signal(signal.SIGINT)
        try:
            stdout, _ = process.communicate(timeout=1)
            print("processor exited with code: ", process.returncode)
            print(stdout.decode('utf-8'))
        except subprocess.TimeoutExpired:
            print("timed out waiting for processor to terminate")
def make_ready_client(channel, stop_event=None):
    channel_ready = grpc.channel_ready_future(channel)
    wait_secs = 0.5
    start_time = time.time()
    while (stop_event is None) or (not stop_event.is_set()):
        try:
            channel_ready.result(timeout=wait_secs)
            break
        except grpc.FutureTimeoutError:
            logging.warning('Channel has not been ready for %.2f seconds',
                            time.time() - start_time)
            if wait_secs < 5.0:
                wait_secs *= 1.2
        except Exception as e:  # pylint: disable=broad-except
            logging.warning('Waiting channel ready: %s', repr(e))
    return tws_grpc.TrainerWorkerServiceStub(channel)
def get(self) -> NamedChannel[StubType]:
    """Returns a ready NamedChannel from the set, or a random channel if none are ready."""
    with self.lock:
        # Check each channel in round-robin order.
        channels = (self.named_channels[self.next_index:] +
                    self.named_channels[0:self.next_index])
        for i, named_channel in enumerate(channels):
            # Call a private grpc channel method to see if the channel is working.
            # TODO: use the public subscribe API, but that is more complicated
            try_to_connect = True
            state_code = named_channel.grpc_channel._channel.check_connectivity_state(
                try_to_connect)
            state = _connectivity_code_to_object(state_code)
            if state is grpc.ChannelConnectivity.READY:
                self.next_index = (self.next_index + 1 + i) % len(
                    self.named_channels)
                return named_channel

            # For idle channels: the call to check_connectivity_state(try_to_connect=True)
            # causes it to start connecting, so wait for IDLE or CONNECTING channels for a bit.
            if state in (grpc.ChannelConnectivity.IDLE,
                         grpc.ChannelConnectivity.CONNECTING):
                # Wait to see if it manages to connect.
                connected_future = grpc.channel_ready_future(
                    named_channel.grpc_channel)
                try:
                    connected_future.result(
                        timeout=RoundRobinNamedChannels._CONNECT_TIMEOUT_S)
                    # Connected!
                    self.next_index = (self.next_index + 1 + i) % len(
                        self.named_channels)
                    return named_channel
                except grpc.FutureTimeoutError:
                    logging.debug(
                        "failed to connect to channel=%s within timeout=%fs",
                        named_channel.addr,
                        RoundRobinNamedChannels._CONNECT_TIMEOUT_S,
                    )

            # Not ready: check the other channels.
            logging.debug("skipping channel=%s in state=%s",
                          named_channel.addr, state.name)

        # We did not find any ready channel. Select one at random.
        return random.choice(self.named_channels)
def __init__(self, **kwargs):
    super(OpenoltDevice, self).__init__()

    self.adapter_agent = kwargs['adapter_agent']
    device = kwargs['device']
    self.device_id = device.id
    self.host_and_port = device.host_and_port
    self.log = structlog.get_logger(id=self.device_id, ip=self.host_and_port)
    self.oper_state = 'unknown'
    self.nni_oper_state = dict()  # intf_id -> oper_state
    self.onus = {}  # Onu -> serial_number

    # Create logical device
    ld = LogicalDevice(
        desc=ofp_desc(
            hw_desc='FIXME',
            sw_desc='FIXME',
            serial_num='FIXME',
            dp_desc='n/a'),
        switch_features=ofp_switch_features(
            n_buffers=256,
            n_tables=2,
            capabilities=(
                OFPC_FLOW_STATS | OFPC_TABLE_STATS |
                OFPC_GROUP_STATS | OFPC_PORT_STATS)),
        root_device_id=self.device_id)  # FIXME
    ld_initialized = self.adapter_agent.create_logical_device(
        ld, dpid='de:ad:be:ef:fe:ed')  # FIXME
    self.logical_device_id = ld_initialized.id

    # Update device
    device.root = True
    device.vendor = 'Edgecore'
    device.model = 'ASFvOLT16'
    device.serial_number = self.host_and_port  # FIXME
    device.parent_id = self.logical_device_id
    device.connect_status = ConnectStatus.REACHABLE
    device.oper_status = OperStatus.ACTIVATING
    self.adapter_agent.update_device(device)

    # Initialize gRPC
    self.channel = grpc.insecure_channel(self.host_and_port)
    self.channel_ready_future = grpc.channel_ready_future(self.channel)

    # Start indications thread
    self.indications_thread = threading.Thread(target=self.process_indications)
    self.indications_thread.daemon = True
    self.indications_thread.start()
def test_lonely_channel_connectivity(self):
    channel = grpc.insecure_channel('localhost:12345')
    callback = _Callback()

    ready_future = grpc.channel_ready_future(channel)
    ready_future.add_done_callback(callback.accept_value)
    with self.assertRaises(grpc.FutureTimeoutError):
        ready_future.result(test_constants.SHORT_TIMEOUT)
    self.assertFalse(ready_future.cancelled())
    self.assertFalse(ready_future.done())
    self.assertTrue(ready_future.running())
    ready_future.cancel()
    value_passed_to_callback = callback.block_until_called()
    self.assertIs(ready_future, value_passed_to_callback)
    self.assertTrue(ready_future.cancelled())
    self.assertTrue(ready_future.done())
    self.assertFalse(ready_future.running())
def __init__(self):
    # TODO: Check connection health
    channel = grpc.insecure_channel(
        '%s:%d' % (Config.conf()['agentHost'], Config.conf()['agentPort']))
    logging.info("Agent Connecting...")
    f = grpc.channel_ready_future(channel)
    try:
        f.result(timeout=Config.conf()['agentConnectTimeout'])
        logging.info("Agent Connected.")
    except Exception:
        logging.error("Agent failed to connect.")
        return None

    # The gRPC channel
    self.channel = channel
    # The kofserver gRPC stub
    self.stub = kofserver_pb2_grpc.KOFServerStub(self.channel)
def register_with_remote_thread(details):
    logging.debug("Remote: Attempting to register %s (%s)"
                  % (details.hostname, details.ip_info.ip4_address))

    with grpc.insecure_channel("%s:%d" % (details.ip_info.ip4_address,
                                          details.auth_port)) as channel:
        future = grpc.channel_ready_future(channel)

        try:
            future.result(timeout=5)
            stub = warp_pb2_grpc.WarpRegistrationStub(channel)
            ret = stub.RequestCertificate(
                warp_pb2.RegRequest(ip=details.ip_info.ip4_address,
                                    hostname=util.get_hostname()),
                timeout=5)
            details.locked_cert = ret.locked_cert.encode("utf-8")
        except Exception as e:
            future.cancel()
            logging.critical(
                "Problem with remote registration thread: %s (%s:%d) - api version 2: %s"
                % (details.hostname, details.ip_info.ip4_address,
                   details.auth_port, e))
def _connect(self, timeout=5):
    """Connect to dpf service within a given timeout"""
    stub = base_pb2_grpc.BaseServiceStub(self._server().channel)

    # verify connected
    if timeout is not None:
        state = grpc.channel_ready_future(self._server().channel)
        tstart = time.time()
        while (time.time() - tstart) < timeout and not state._matured:
            time.sleep(0.005)

        if not state._matured:
            raise IOError(
                f"Unable to connect to DPF instance at {self._server()._input_ip} "
                f"{self._server()._input_port}"
            )

    return stub
def test_lonely_channel_connectivity(self):
    callback = _Callback()

    channel_config = grpc_gcp.api_config_from_text_pb('')
    channel = grpc_gcp.insecure_channel(
        'localhost:12345',
        options=((grpc_gcp.API_CONFIG_CHANNEL_ARG, channel_config),))

    ready_future = grpc.channel_ready_future(channel)
    ready_future.add_done_callback(callback.accept_value)
    with self.assertRaises(grpc.FutureTimeoutError):
        ready_future.result(timeout=test_constants.SHORT_TIMEOUT)
    self.assertFalse(ready_future.cancelled())
    self.assertFalse(ready_future.done())
    self.assertTrue(ready_future.running())
    ready_future.cancel()
    value_passed_to_callback = callback.block_until_called()
    self.assertIs(ready_future, value_passed_to_callback)
    self.assertTrue(ready_future.cancelled())
    self.assertTrue(ready_future.done())
    self.assertFalse(ready_future.running())
def test_immediately_connectable_channel_connectivity(self):
    server = _server.Server(futures.ThreadPoolExecutor(max_workers=0), ())
    port = server.add_insecure_port('[::]:0')
    server.start()
    channel = grpc.insecure_channel('localhost:{}'.format(port))
    callback = _Callback()

    ready_future = grpc.channel_ready_future(channel)
    ready_future.add_done_callback(callback.accept_value)
    self.assertIsNone(ready_future.result(test_constants.SHORT_TIMEOUT))
    value_passed_to_callback = callback.block_until_called()
    self.assertIs(ready_future, value_passed_to_callback)
    self.assertFalse(ready_future.cancelled())
    self.assertTrue(ready_future.done())
    self.assertFalse(ready_future.running())
    # Cancellation after maturity has no effect.
    ready_future.cancel()
    self.assertFalse(ready_future.cancelled())
    self.assertTrue(ready_future.done())
    self.assertFalse(ready_future.running())
def keep_channel():
    with grpc.insecure_channel("%s:%d" % (self.ip_address, self.port)) as channel:
        future = grpc.channel_ready_future(channel)
        connect_retries = 0

        while not self.need_shutdown:
            try:
                future.result(timeout=2)
                self.stub = warp_pb2_grpc.WarpStub(channel)
                break
            except grpc.FutureTimeoutError:
                if connect_retries < MAX_CONNECT_RETRIES:
                    print("channel ready timeout, waiting 10s")
                    time.sleep(PING_TIME)
                    connect_retries += 1
                    continue
                else:
                    self.set_remote_status(RemoteStatus.UNREACHABLE)
                    print("Trying to remake channel")
                    return True

        one_ping = False
        while not self.need_shutdown:
            try:
                self.stub.Ping(void, timeout=2)
                self.set_remote_status(RemoteStatus.ONLINE)
                if not one_ping:
                    self.update_remote_machine_info()
                    self.update_remote_machine_avatar()
                    one_ping = True
            except grpc.RpcError as e:
                if e.code() in (grpc.StatusCode.DEADLINE_EXCEEDED,
                                grpc.StatusCode.UNAVAILABLE):
                    one_ping = False
                    self.set_remote_status(RemoteStatus.UNREACHABLE)
            time.sleep(PING_TIME)
        return False
def run_pipeline(self, pipeline):
    portable_options = pipeline.options.view_as(PortableOptions)
    job_endpoint = portable_options.job_endpoint

    # TODO: https://issues.apache.org/jira/browse/BEAM-5525
    # portable runner specific default
    if pipeline.options.view_as(SetupOptions).sdk_location == 'default':
        pipeline.options.view_as(SetupOptions).sdk_location = 'container'

    if not job_endpoint:
        docker = DockerizedJobServer()
        job_endpoint = docker.start()

    proto_pipeline = pipeline.to_runner_api()

    # Some runners won't detect the GroupByKey transform unless it has no
    # subtransforms. Remove all sub-transforms until BEAM-4605 is resolved.
    for _, transform_proto in list(
            proto_pipeline.components.transforms.items()):
        if transform_proto.spec.urn == common_urns.primitives.GROUP_BY_KEY.urn:
            for sub_transform in transform_proto.subtransforms:
                del proto_pipeline.components.transforms[sub_transform]
            del transform_proto.subtransforms[:]

    # TODO: Define URNs for options.
    # convert int values: https://issues.apache.org/jira/browse/BEAM-5509
    options = {'beam:option:' + k + ':v1': (str(v) if type(v) == int else v)
               for k, v in pipeline._options.get_all_options().items()
               if v is not None}

    channel = grpc.insecure_channel(job_endpoint)
    grpc.channel_ready_future(channel).result()
    job_service = beam_job_api_pb2_grpc.JobServiceStub(channel)

    # Sends the PrepareRequest but retries in case the channel is not ready
    def send_prepare_request(max_retries=5):
        num_retries = 0
        while True:
            try:
                # This reports channel is READY but connections may fail
                # Seems to be only an issue on Mac with port forwardings
                grpc.channel_ready_future(channel).result()
                return job_service.Prepare(
                    beam_job_api_pb2.PrepareJobRequest(
                        job_name='job',
                        pipeline=proto_pipeline,
                        pipeline_options=job_utils.dict_to_struct(options)))
            except grpc._channel._Rendezvous as e:
                num_retries += 1
                if num_retries > max_retries:
                    raise e

    prepare_response = send_prepare_request()
    if prepare_response.artifact_staging_endpoint.url:
        stager = portable_stager.PortableStager(
            grpc.insecure_channel(
                prepare_response.artifact_staging_endpoint.url),
            prepare_response.staging_session_token)
        retrieval_token, _ = stager.stage_job_resources(
            pipeline._options, staging_location='')
    else:
        retrieval_token = None
    run_response = job_service.Run(
        beam_job_api_pb2.RunJobRequest(
            preparation_id=prepare_response.preparation_id,
            retrieval_token=retrieval_token))
    return PipelineResult(job_service, run_response.job_id)
def _setup_jasper(self):
    """Start up the jasper process manager."""
    root_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    proto_file = os.path.join(root_dir, "buildscripts", "resmokelib", "core",
                              "jasper.proto")
    try:
        well_known_protos_include = pkg_resources.resource_filename(
            "grpc_tools", "_proto")
    except ImportError:
        raise ImportError("You must run: sys.executable + '-m pip install grpcio grpcio-tools "
                          "googleapis-common-protos' to use --spawnUsing=jasper.")

    # We use the build/ directory as the output directory because the generated files aren't
    # meant to be tracked by git or linted.
    proto_out = os.path.join(root_dir, "build", "jasper")

    utils.rmtree(proto_out, ignore_errors=True)
    os.makedirs(proto_out)

    # We make 'proto_out' into a Python package so we can add it to 'sys.path' and import the
    # *pb2*.py modules from it.
    with open(os.path.join(proto_out, "__init__.py"), "w"):
        pass

    ret = grpc_tools.protoc.main([
        grpc_tools.protoc.__file__,
        "--grpc_python_out",
        proto_out,
        "--python_out",
        proto_out,
        "--proto_path",
        os.path.dirname(proto_file),
        "--proto_path",
        well_known_protos_include,
        os.path.basename(proto_file),
    ])

    if ret != 0:
        raise RuntimeError("Failed to generate gRPC files from the jasper.proto file")

    sys.path.append(os.path.dirname(proto_out))

    from jasper import jasper_pb2
    from jasper import jasper_pb2_grpc

    jasper_process.Process.jasper_pb2 = jasper_pb2
    jasper_process.Process.jasper_pb2_grpc = jasper_pb2_grpc

    curator_path = "build/curator"
    if sys.platform == "win32":
        curator_path += ".exe"
    git_hash = "d846f0c875716e9377044ab2a50542724369662a"
    curator_exists = os.path.isfile(curator_path)
    curator_same_version = False
    if curator_exists:
        curator_version = subprocess.check_output(
            [curator_path, "--version"]).decode('utf-8').split()
        curator_same_version = git_hash in curator_version

    if curator_exists and not curator_same_version:
        os.remove(curator_path)
        self._resmoke_logger.info(
            "Found a different version of curator. Downloading version %s of curator to enable "
            "process management using jasper.", git_hash)

    if not curator_exists or not curator_same_version:
        if sys.platform == "darwin":
            os_platform = "macos"
        elif sys.platform == "win32":
            os_platform = "windows-64"
        elif sys.platform.startswith("linux"):
            os_platform = "ubuntu1604"
        else:
            raise OSError("Unrecognized platform. "
                          "This program is meant to be run on MacOS, Windows, or Linux.")
        url = ("https://s3.amazonaws.com/boxes.10gen.com/build/curator/"
               "curator-dist-%s-%s.tar.gz") % (os_platform, git_hash)
        response = requests.get(url, stream=True)
        with tarfile.open(mode="r|gz", fileobj=response.raw) as tf:
            tf.extractall(path="./build/")

    jasper_port = config.BASE_PORT - 1
    jasper_conn_str = "localhost:%d" % jasper_port
    jasper_process.Process.connection_str = jasper_conn_str
    jasper_command = [curator_path, "jasper", "grpc", "--port", str(jasper_port)]
    self._jasper_server = process.Process(self._resmoke_logger, jasper_command)
    self._jasper_server.start()

    channel = grpc.insecure_channel(jasper_conn_str)
    grpc.channel_ready_future(channel).result()
def _stub(server_host, server_port):
    target = '{}:{}'.format(server_host, server_port)
    channel = grpc.insecure_channel(target)
    grpc.channel_ready_future(channel).result()
    return test_pb2_grpc.TestServiceStub(channel)
def _create_client_stub(channel, expect_success):
    if expect_success:
        # per Nathaniel: there's some robustness issue if we start
        # using a channel without waiting for it to be actually ready
        grpc.channel_ready_future(channel).result(timeout=10)
    return services_pb2_grpc.FirstServiceStub(channel)
def run_pipeline(self, pipeline, options):
    portable_options = options.view_as(PortableOptions)
    job_endpoint = portable_options.job_endpoint

    # TODO: https://issues.apache.org/jira/browse/BEAM-5525
    # portable runner specific default
    if options.view_as(SetupOptions).sdk_location == 'default':
        options.view_as(SetupOptions).sdk_location = 'container'

    if not job_endpoint:
        # TODO Provide a way to specify a container Docker URL
        # https://issues.apache.org/jira/browse/BEAM-6328
        docker = DockerizedJobServer()
        job_endpoint = docker.start()
        job_service = None
    elif job_endpoint == 'embed':
        job_service = local_job_service.LocalJobServicer()
    else:
        job_service = None

    # This is needed as we start a worker server if one is requested
    # but none is provided.
    if portable_options.environment_type == 'LOOPBACK':
        portable_options.environment_config, server = (
            BeamFnExternalWorkerPoolServicer.start(
                sdk_worker_main._get_worker_count(options)))
        globals()['x'] = server
        cleanup_callbacks = [functools.partial(server.stop, 1)]
    else:
        cleanup_callbacks = []

    proto_pipeline = pipeline.to_runner_api(
        default_environment=PortableRunner._create_environment(
            portable_options))

    # Some runners won't detect the GroupByKey transform unless it has no
    # subtransforms. Remove all sub-transforms until BEAM-4605 is resolved.
    for _, transform_proto in list(
            proto_pipeline.components.transforms.items()):
        if transform_proto.spec.urn == common_urns.primitives.GROUP_BY_KEY.urn:
            for sub_transform in transform_proto.subtransforms:
                del proto_pipeline.components.transforms[sub_transform]
            del transform_proto.subtransforms[:]

    # Preemptively apply combiner lifting, until all runners support it.
    # This optimization is idempotent.
    pre_optimize = options.view_as(DebugOptions).lookup_experiment(
        'pre_optimize', 'combine').lower()
    if not options.view_as(StandardOptions).streaming:
        flink_known_urns = frozenset([
            common_urns.composites.RESHUFFLE.urn,
            common_urns.primitives.IMPULSE.urn,
            common_urns.primitives.FLATTEN.urn,
            common_urns.primitives.GROUP_BY_KEY.urn])
        if pre_optimize == 'combine':
            proto_pipeline = fn_api_runner_transforms.optimize_pipeline(
                proto_pipeline,
                phases=[fn_api_runner_transforms.lift_combiners],
                known_runner_urns=flink_known_urns,
                partial=True)
        elif pre_optimize == 'all':
            proto_pipeline = fn_api_runner_transforms.optimize_pipeline(
                proto_pipeline,
                phases=[fn_api_runner_transforms.annotate_downstream_side_inputs,
                        fn_api_runner_transforms.annotate_stateful_dofns_as_roots,
                        fn_api_runner_transforms.fix_side_input_pcoll_coders,
                        fn_api_runner_transforms.lift_combiners,
                        fn_api_runner_transforms.fix_flatten_coders,
                        # fn_api_runner_transforms.sink_flattens,
                        fn_api_runner_transforms.greedily_fuse,
                        fn_api_runner_transforms.read_to_impulse,
                        fn_api_runner_transforms.extract_impulse_stages,
                        fn_api_runner_transforms.remove_data_plane_ops,
                        fn_api_runner_transforms.sort_stages],
                known_runner_urns=flink_known_urns)
        elif pre_optimize == 'none':
            pass
        else:
            raise ValueError('Unknown value for pre_optimize: %s' % pre_optimize)

    if not job_service:
        channel = grpc.insecure_channel(job_endpoint)
        grpc.channel_ready_future(channel).result()
        job_service = beam_job_api_pb2_grpc.JobServiceStub(channel)
    else:
        channel = None

    # fetch runner options from job service
    # retries in case the channel is not ready
    def send_options_request(max_retries=5):
        num_retries = 0
        while True:
            try:
                # This reports channel is READY but connections may fail
                # Seems to be only an issue on Mac with port forwardings
                if channel:
                    grpc.channel_ready_future(channel).result()
                return job_service.DescribePipelineOptions(
                    beam_job_api_pb2.DescribePipelineOptionsRequest())
            except grpc._channel._Rendezvous as e:
                num_retries += 1
                if num_retries > max_retries:
                    raise e
                time.sleep(1)

    options_response = send_options_request()

    def add_runner_options(parser):
        for option in options_response.options:
            try:
                # no default values - we don't want runner options
                # added unless they were specified by the user
                add_arg_args = {'action': 'store', 'help': option.description}
                if option.type == beam_job_api_pb2.PipelineOptionType.BOOLEAN:
                    add_arg_args['action'] = ('store_true'
                                              if option.default_value != 'true'
                                              else 'store_false')
                elif option.type == beam_job_api_pb2.PipelineOptionType.INTEGER:
                    add_arg_args['type'] = int
                elif option.type == beam_job_api_pb2.PipelineOptionType.ARRAY:
                    add_arg_args['action'] = 'append'
                parser.add_argument("--%s" % option.name, **add_arg_args)
            except Exception as e:
                # ignore runner options that are already present
                # only in this case is duplicate not treated as error
                if 'conflicting option string' not in str(e):
                    raise
                logging.debug("Runner option '%s' was already added" % option.name)

    all_options = options.get_all_options(add_extra_args_fn=add_runner_options)
    # TODO: Define URNs for options.
    # convert int values: https://issues.apache.org/jira/browse/BEAM-5509
    p_options = {'beam:option:' + k + ':v1': (str(v) if type(v) == int else v)
                 for k, v in all_options.items()
                 if v is not None}

    prepare_response = job_service.Prepare(
        beam_job_api_pb2.PrepareJobRequest(
            job_name='job',
            pipeline=proto_pipeline,
            pipeline_options=job_utils.dict_to_struct(p_options)))
    if prepare_response.artifact_staging_endpoint.url:
        stager = portable_stager.PortableStager(
            grpc.insecure_channel(
                prepare_response.artifact_staging_endpoint.url),
            prepare_response.staging_session_token)
        retrieval_token, _ = stager.stage_job_resources(
            options, staging_location='')
    else:
        retrieval_token = None

    try:
        state_stream = job_service.GetStateStream(
            beam_job_api_pb2.GetJobStateRequest(
                job_id=prepare_response.preparation_id))
        # If there's an error, we don't always get it until we try to read.
        # Fortunately, there's always an immediate current state published.
        state_stream = itertools.chain([next(state_stream)], state_stream)
        message_stream = job_service.GetMessageStream(
            beam_job_api_pb2.JobMessagesRequest(
                job_id=prepare_response.preparation_id))
    except Exception:
        # TODO(BEAM-6442): Unify preparation_id and job_id for all runners.
        state_stream = message_stream = None

    # Run the job and wait for a result.
    run_response = job_service.Run(
        beam_job_api_pb2.RunJobRequest(
            preparation_id=prepare_response.preparation_id,
            retrieval_token=retrieval_token))

    if state_stream is None:
        state_stream = job_service.GetStateStream(
            beam_job_api_pb2.GetJobStateRequest(
                job_id=run_response.job_id))
        message_stream = job_service.GetMessageStream(
            beam_job_api_pb2.JobMessagesRequest(
                job_id=run_response.job_id))

    return PipelineResult(job_service, run_response.job_id, message_stream,
                          state_stream, cleanup_callbacks)
def run_pipeline(self, pipeline):
    portable_options = pipeline.options.view_as(PortableOptions)
    job_endpoint = portable_options.job_endpoint

    # TODO: https://issues.apache.org/jira/browse/BEAM-5525
    # portable runner specific default
    if pipeline.options.view_as(SetupOptions).sdk_location == 'default':
        pipeline.options.view_as(SetupOptions).sdk_location = 'container'

    if not job_endpoint:
        docker = DockerizedJobServer()
        job_endpoint = docker.start()

    proto_context = pipeline_context.PipelineContext(
        default_environment=PortableRunner._create_environment(
            portable_options))
    proto_pipeline = pipeline.to_runner_api(context=proto_context)

    if not self.is_embedded_fnapi_runner:
        # Java has different expectations about coders
        # (windowed in Fn API, but *un*windowed in runner API), whereas the
        # embedded FnApiRunner treats them consistently, so we must guard this
        # for now, until FnApiRunner is fixed.
        # See also BEAM-2717.
        for pcoll in proto_pipeline.components.pcollections.values():
            if pcoll.coder_id not in proto_context.coders:
                # This is not really a coder id, but a pickled coder.
                coder = coders.registry.get_coder(pickler.loads(pcoll.coder_id))
                pcoll.coder_id = proto_context.coders.get_id(coder)
        proto_context.coders.populate_map(proto_pipeline.components.coders)

    # Some runners won't detect the GroupByKey transform unless it has no
    # subtransforms. Remove all sub-transforms until BEAM-4605 is resolved.
    for _, transform_proto in list(
            proto_pipeline.components.transforms.items()):
        if transform_proto.spec.urn == common_urns.primitives.GROUP_BY_KEY.urn:
            for sub_transform in transform_proto.subtransforms:
                del proto_pipeline.components.transforms[sub_transform]
            del transform_proto.subtransforms[:]

    # TODO: Define URNs for options.
    # convert int values: https://issues.apache.org/jira/browse/BEAM-5509
    options = {'beam:option:' + k + ':v1': (str(v) if type(v) == int else v)
               for k, v in pipeline._options.get_all_options().items()
               if v is not None}

    channel = grpc.insecure_channel(job_endpoint)
    grpc.channel_ready_future(channel).result()
    job_service = beam_job_api_pb2_grpc.JobServiceStub(channel)

    # Sends the PrepareRequest but retries in case the channel is not ready
    def send_prepare_request(max_retries=5):
        num_retries = 0
        while True:
            try:
                # This reports channel is READY but connections may fail
                # Seems to be only an issue on Mac with port forwardings
                grpc.channel_ready_future(channel).result()
                return job_service.Prepare(
                    beam_job_api_pb2.PrepareJobRequest(
                        job_name='job',
                        pipeline=proto_pipeline,
                        pipeline_options=job_utils.dict_to_struct(options)))
            except grpc._channel._Rendezvous as e:
                num_retries += 1
                if num_retries > max_retries:
                    raise e

    prepare_response = send_prepare_request()
    if prepare_response.artifact_staging_endpoint.url:
        stager = portable_stager.PortableStager(
            grpc.insecure_channel(
                prepare_response.artifact_staging_endpoint.url),
            prepare_response.staging_session_token)
        retrieval_token, _ = stager.stage_job_resources(
            pipeline._options, staging_location='')
    else:
        retrieval_token = None
    run_response = job_service.Run(
        beam_job_api_pb2.RunJobRequest(
            preparation_id=prepare_response.preparation_id,
            retrieval_token=retrieval_token))
    return PipelineResult(job_service, run_response.job_id)
def wait_for_ready():
    with grpc.insecure_channel(EXPANSION_SERVICE_ADDR) as channel:
        grpc.channel_ready_future(channel).result()