def route_cmd(self, command, context):
    """Relay *command* to the next hop in the relay tree.

    Prefers a child stub whose subtree contains ``command.destination``;
    if no child route is known, falls back to the parent stub.

    Args:
        command: agent_pb2.RouteMessage to forward.
        context: gRPC servicer context of the incoming RPC (used to
            propagate the remaining deadline and cancellation).

    Returns:
        agent_pb2.RouteMessageResult: the next hop's answer, or a result
        carrying a GenericError when no route exists or the relayed RPC
        fails (timeout, cancellation, transport error).
    """
    stub = None
    try:
        # Look for a child subtree containing the destination
        route = self._routes[command.destination]
        stub = self._children_stubs[route]
        logging.debug("Routing for node %d through child %d",
                      command.destination, route)
    except KeyError:
        # No child route known: route upwards through our parent
        logging.debug("Routing for node %d through parent",
                      command.destination)
        stub = self._parent_stub
    if stub is None:
        # No parent stub either (e.g. at the tree root with no
        # matching child): report the missing route as an error
        logging.info("Bad stub for %d from %d", command.destination,
                     self._vmid)
        return agent_pb2.RouteMessageResult(
            source=command.destination,
            error=agent_pb2.GenericError(
                kind=agent_pb2.GenericError.GenericError,
                description='No route for vm{}'.format(
                    command.destination)))
    try:
        # Route the command by executing a RPC on the next
        # hop. Cancel that RPC if the Route RPC from our client is
        # cancelled or timeouts
        future = stub.route_command.future(command,
                                           context.time_remaining())
        context.add_callback(future.cancel)
        return future.result()
    except grpc.RpcError as e:
        if e.code() == grpc.StatusCode.DEADLINE_EXCEEDED:
            # Timeouts should be coherent along a route so the parent should timeout
            # concurrently but we still return something just in case
            return agent_pb2.RouteMessageResult(
                source=command.destination,
                error=agent_pb2.GenericError(
                    kind=agent_pb2.GenericError.Timeout,
                    description="Agent did not answer before time limit"))
        else:
            return agent_pb2.RouteMessageResult(
                source=command.destination,
                error=agent_pb2.GenericError(
                    kind=agent_pb2.GenericError.GenericError,
                    description=str(e)))
    except grpc.FutureCancelledError as e:
        # Again should not be necessary as the parent should be
        # cancelled too but just in case
        return agent_pb2.RouteMessageResult(
            source=command.destination,
            error=agent_pb2.GenericError(
                kind=agent_pb2.GenericError.Cancelled,
                description="Route request cancelled"))
def get_output():
    # Wrap every message produced by the stream handler into a
    # RouteMessageResult tagged with our vmid; GenericError values
    # go into the error field, anything else is packed as a result.
    for produced in output_handler(context):
        if isinstance(produced, agent_pb2.GenericError):
            reply = agent_pb2.RouteMessageResult(source=self._vmid,
                                                 error=produced)
        else:
            reply = agent_pb2.RouteMessageResult(source=self._vmid)
            reply.result.Pack(produced)
        yield reply
def _process_local(self, command, context):
    """Execute *command* on this node and wrap its outcome.

    Rebuilds the concrete request message from the packed ``Any``
    payload, runs the registered handler, and returns a
    RouteMessageResult sourced from this vmid carrying either the
    packed result or a GenericError.
    """
    # Instantiate the concrete protobuf type named by the payload
    request_cls = getattr(agent_pb2, command.args.TypeName())
    request = request_cls()
    command.args.Unpack(request)

    outcome = self._handler(command.name, request, context)

    if isinstance(outcome, agent_pb2.GenericError):
        return agent_pb2.RouteMessageResult(source=self._vmid,
                                            error=outcome)

    reply = agent_pb2.RouteMessageResult(source=self._vmid)
    reply.result.Pack(outcome)
    return reply
def route_command(self, request, context):
    """Servicer entry point for a single routed command.

    Waits for the relay tree to be established, then either handles
    the command locally (when we are the destination) or forwards it
    through the relay.

    Args:
        request: agent_pb2.RouteMessage to handle or forward.
        context: gRPC servicer context of the incoming RPC.

    Returns:
        agent_pb2.RouteMessageResult from the local handler, the
        relay, or a Timeout error if the tree never came up.
    """
    # Do not route anything until the tree relay is connected
    if not self._ready.wait(60):
        logging.error("Timeout while establishing tree relay")
        # BUG FIX: the enum value is GenericError.Timeout (as used by
        # the other error paths in this file); GenericError.TimeoutError
        # does not exist and raised AttributeError on this path. Also
        # set source to our vmid for consistency with other results.
        return agent_pb2.RouteMessageResult(
            source=self._vmid,
            error=agent_pb2.GenericError(
                kind=agent_pb2.GenericError.Timeout,
                description="Timeout while establishing tree relay"))

    if request.destination == self._vmid:
        # We are the destination: execute the command locally
        resp = self._process_local(request, context)
    else:
        # Forward towards the destination through the relay tree
        resp = self._relay.route_cmd(request, context)
    return resp
def route_stream(self, rng, init_cmd, stream_cmd, msg_iterator,
                 cancel_cb=None):
    """Open a streaming multicast RPC towards the agents in *rng*.

    The first payload of *msg_iterator* is sent under the *init_cmd*
    name; all subsequent payloads are sent under *stream_cmd*.

    Args:
        rng: destination set; str(rng) fills the McastMessage
            destinations field (presumably a RangeSet -- TODO confirm).
        init_cmd: command name for the first (header) message.
        stream_cmd: command name for the following messages.
        msg_iterator: iterator of protobuf payloads to stream.
        cancel_cb: optional callback registered on the underlying
            gRPC call object.

    Returns:
        On success, a ``(result_iterator, call)`` tuple where the
        iterator yields ``(source vmid, unpacked result or
        PcoccError)`` pairs. On failure to establish the stream, a
        single-element list holding the handled error result.
        NOTE(review): callers must cope with both return shapes.
    """
    def route_iterator():
        # Pack each payload into a McastMessage; only the first one
        # carries init_cmd, the rest use stream_cmd
        cmd = init_cmd
        for msg in msg_iterator:
            grpc_message = agent_pb2.McastMessage(destinations=str(rng),
                                                  name=cmd)
            grpc_message.args.Pack(msg)
            yield grpc_message
            cmd = stream_cmd
    try:
        res = self._stub.route_stream(route_iterator())
        if cancel_cb:
            res.add_callback(cancel_cb)

        def result_unpacker():
            # Lazily unpack each reply; a per-agent error becomes a
            # PcoccError item without ending the whole stream
            try:
                for r in res:
                    try:
                        logging.debug(
                            "Stream client: unpacking a result %s", r)
                        yield r.source, self._handle_route_result(
                            init_cmd, r)
                    except PcoccError as e:
                        yield r.source, e
            except grpc.RpcError as e:
                # Transport failure: reported under pseudo-source -1
                logging.error(
                    "Stream client interrupted due to GRPC error")
                yield -1, self._handle_route_result(
                    init_cmd, self._handle_grpc_error(e, -1))
            logging.debug("Stream client: No more results to unpack")
        return result_unpacker(), res
    except Exception as e:
        #FIXME: we should probably generate a more informative error
        return [
            self._handle_route_result(
                init_cmd,
                agent_pb2.RouteMessageResult(
                    source=-1,
                    error=agent_pb2.GenericError(
                        kind=agent_pb2.GenericError.GenericError,
                        description="Unable to establish stream: {}".
                        format(e))))
        ]
def _handle_grpc_error(e, source):
    """Translate a grpc.RpcError into a RouteMessageResult.

    Maps DEADLINE_EXCEEDED and CANCELLED status codes to the matching
    GenericError kinds; any other code is logged and reported as a
    generic transport error attributed to *source*.
    """
    status = e.code()
    if status == grpc.StatusCode.DEADLINE_EXCEEDED:
        err_kind = agent_pb2.GenericError.Timeout
        err_text = "Timeout while waiting for agent to answer"
    elif status == grpc.StatusCode.CANCELLED:
        err_kind = agent_pb2.GenericError.Cancelled
        err_text = "RPC was cancelled"
    else:
        logging.warning("RPC request failed with: %s", e.details())
        err_kind = agent_pb2.GenericError.GenericError
        err_text = ("Transport error while "
                    "relaying command: {}".format(e.details()))
    return agent_pb2.RouteMessageResult(
        source=source,
        error=agent_pb2.GenericError(kind=err_kind,
                                     description=err_text))
def command(self, dest, cmd, data, timeout):
    """Send one command to agent *dest* and return the handled result.

    Packs *data* into a RouteMessage, issues the route_command RPC
    with *timeout*, and funnels both transport errors and replies
    through self._handle_route_result.
    """
    logging.info("sending %s to %d", cmd, dest)
    # Build the routed message; packing can fail on bad payloads
    try:
        message = agent_pb2.RouteMessage(destination=dest, name=cmd)
        message.args.Pack(data)
    except Exception as exc:
        payload_error = agent_pb2.GenericError(
            kind=agent_pb2.GenericError.PayloadError,
            description="Unable to create message "
            "with payload: {}".format(exc))
        return self._handle_route_result(
            cmd, agent_pb2.RouteMessageResult(source=dest,
                                              error=payload_error))
    # Issue the RPC; translate transport failures into error results
    try:
        reply = self._stub.route_command(message, timeout=timeout)
    except grpc.RpcError as exc:
        reply = self._handle_grpc_error(exc, dest)
    return self._handle_route_result(cmd, reply)
def route_stream(self, request_iterator, context):
    """Servicer for the streaming multicast RPC.

    Reads the stream's header message to decide whether this node is
    among the recipients. Recipients handle the stream locally through
    the callbacks returned by the stream init handler while also
    forwarding it down the tree; non-recipients forward the untouched
    stream. Every result (local or from the subtree) is yielded back.
    """
    #FIXME: Refactor this
    #First, see from the header message if we are part of the recipients
    init_msg = next(request_iterator)
    stream_local = False
    if self._vmid in RangeSet(
            init_msg.destinations.encode('ascii', 'ignore')):
        # If we are part of the recipients, use a tee to get a
        # local copy of the stream while forwarding it
        local_iter, forward_iter = mt_tee(request_iterator)
        # Unpack the header message to initialize the stream and
        # find out how to handle the next messages
        req = getattr(agent_pb2, init_msg.args.TypeName())()
        init_msg.args.Unpack(req)
        input_handler, output_handler, ret = self._stream_init_handler(
            init_msg.name, req, context)
        if isinstance(ret, agent_pb2.GenericError):
            ret_msg = agent_pb2.RouteMessageResult(source=self._vmid,
                                                   error=ret)
            # If the header handling resulted in error, we stop
            # the stream handling on this node
            # NOTE(review): in that case we fall through to the
            # relay-only branch below, which re-reads
            # request_iterator after it was tee'd -- verify this path
            stream_local = False
        else:
            stream_local = True
            ret_msg = agent_pb2.RouteMessageResult(source=self._vmid)
            ret_msg.result.Pack(ret)
        logging.debug("Tbon: %d returning first reply for stream",
                      self._vmid)
        yield ret_msg
    if stream_local:
        # Build input and output handlers for the following
        # messages based on the callbacks received from handling
        # the header message
        logging.debug("Tbon: %d continuing in local+relay mode",
                      self._vmid)

        def get_output():
            # Get messages from the generator for this stream
            for output_msg in output_handler(context):
                if isinstance(output_msg, agent_pb2.GenericError):
                    ret_msg = agent_pb2.RouteMessageResult(
                        source=self._vmid, error=output_msg)
                else:
                    ret_msg = agent_pb2.RouteMessageResult(
                        source=self._vmid)
                    ret_msg.result.Pack(output_msg)
                yield ret_msg

        def send_input():
            # Push everything from the local iter to the handler
            # for this stream
            for input_msg in local_iter:
                req = getattr(agent_pb2, input_msg.args.TypeName())()
                input_msg.args.Unpack(req)
                input_handler(input_msg.name, req, context)

        #Create a dedicated thread to block and push on the local iterator
        thread = threading.Thread(target=send_input)
        thread.start()

        # Forward the header message + following messages to the
        # next hops and yield everything they send us + what we
        # produce locally
        def new_iterin():
            yield init_msg
            for i in forward_iter:
                yield i

        for e in mt_chain(
                [get_output(), self._relay.route_stream(new_iterin())]):
            yield e
    else:
        # We are not part of the recipients so just forward the
        # whole stream to the next hops and yield everything they
        # send us
        def new_iterin():
            yield init_msg
            for i in request_iterator:
                yield i
        logging.debug("Tbon: %d continuing stream in relay mode",
                      self._vmid)
        for e in self._relay.route_stream(new_iterin()):
            logging.debug("Node %d ouputing message %s from children rpcs",
                          self._vmid, e)
            yield e
    logging.debug("Tbon: %d finished with stream", self._vmid)