def oursystem(ip, port, inputt):
    """Send *inputt* through the proxy's ``downstream`` RPC and return the
    response's ``status`` field.

    Parameters
    ----------
    ip, port : host and port of the ProxyServer gRPC endpoint.
    inputt : str payload forwarded as the request's ``inputStream``.
    """
    # Use the channel as a context manager so it is closed deterministically
    # instead of being leaked (grpc.Channel supports __enter__/__exit__).
    with grpc.insecure_channel('%s:%s' % (ip, port)) as channel:
        stub = prediction_pb2_grpc.ProxyServerStub(channel)
        response = stub.downstream(
            prediction_pb2.request(
                input_=model_pb2.input(inputType='string', inputStream=inputt)))
        return response.status
def produce(ip, port, seq_id):
    """Publish a fixed "Produced output" message via the proxy's
    ``outputstream`` RPC and return the response status.

    Parameters
    ----------
    ip, port : host and port of the ProxyServer gRPC endpoint.
    seq_id : sequence number attached to the outgoing request.
    """
    # Renamed from `time` — the old local shadowed the stdlib `time`
    # module, which this file uses elsewhere (time.sleep).
    now = Timestamp()
    now.GetCurrentTime()
    # Close the channel when done instead of leaking it.
    with grpc.insecure_channel('%s:%s' % (ip, port)) as channel:
        stub = prediction_pb2_grpc.ProxyServerStub(channel)
        response = stub.outputstream(
            prediction_pb2.request(input_=model_pb2.input(
                inputType='string', inputStream="Produced output"),
                                   src_uri="localhost",
                                   seq=seq_id,
                                   req_id=1,
                                   timestamp=now))
    print('Response\n{res}'.format(res=response.status))

    return response.status
def withoutproxy(ip, port, inputt):
    """Call the model's ``Predict`` RPC directly, then the
    ``PredictionServer``'s ``downstream`` RPC at the same address, and return
    the downstream response's ``outputStream``.

    NOTE(review): the ``Predict`` response is discarded (the original code
    immediately overwrote it with the downstream result) — presumably only
    its side effect matters; confirm with the service owners.

    Parameters
    ----------
    ip, port : host and port of the target gRPC endpoint.
    inputt : payload forwarded as the request's ``inputStream``.
    """
    # Direct model prediction; result intentionally unused (see NOTE above).
    with grpc.insecure_channel('%s:%s' % (ip, port)) as channel:
        stub = model_pb2_grpc.PredictServiceStub(channel)
        stub.Predict(
            model_pb2.input(inputStream=inputt, inputType="String"))

    # Downstream call whose result is actually returned. Channels are now
    # closed via the context manager instead of being leaked.
    with grpc.insecure_channel('%s:%s' % (ip, port)) as channel:
        stub = model_pb2_grpc.PredictionServerStub(channel)
        response = stub.downstream(
            prediction_pb2.request(
                input_=model_pb2.input(inputType='string', inputStream=inputt)))

    time.sleep(1)
    return response.outputStream
# Example #4  (scraper artifact — commented out; bare text is not valid Python)
# 0
def consume(ip, port, inputt):
    """Send *inputt* through the proxy's ``downstream`` RPC, print the
    round-trip latency in milliseconds, and return the response status.

    Parameters
    ----------
    ip, port : host and port of the ProxyServer gRPC endpoint.
    inputt : payload; coerced to str before sending.
    """
    inputt = str(inputt)

    # Close the channel when done instead of leaking it.
    with grpc.insecure_channel('%s:%s' % (ip, port)) as channel:
        stub = prediction_pb2_grpc.ProxyServerStub(channel)

        start = datetime.datetime.now()
        response = stub.downstream(
            prediction_pb2.request(
                input_=model_pb2.input(inputType='string', inputStream=inputt)))
        end = datetime.datetime.now()

    # Bug fix: the original used timedelta.microseconds, which is only the
    # sub-second component — any latency >= 1 s was wildly under-reported.
    # total_seconds() covers the full duration.
    print('latency', (end - start).total_seconds() * 1000, "ms")

    return response.status
# Example #5  (scraper artifact — commented out; bare text is not valid Python)
# 0
    def downstream(self, request, context):  ## Consumer
        """Handle one consumer request end-to-end.

        Allocates a fresh sequence id under ``self.seq_mutex``, forwards the
        request to the entry proxy, then blocks on a per-request
        ``threading.Event`` until another thread signals completion and fills
        ``self.output_dict``. Returns a status string containing the call
        time, the return time, and the produced output.

        NOTE(review): this method only *reads* ``self.output_dict`` and
        *deletes* ``self.timestamp_dict[thread_id]``; both entries must be
        written by the producing thread before the event is set, or the
        lookup/``del`` raises KeyError — confirm against the producer side.
        """

        #logging.info("Start processing request...")

        #### received a request
        ####  stub.downstream(prediction_pb2.request(input_ = model_pb2.input(inputType = 'string', inputStream = stock_name),src_uri = "localhost", seq = 1, req_id =1, timestamp = Timestamp().GetCurrentTime()))

        ## Sequence Lock - Guarantee that sequence are consistent
        ## (read-increment of seq_id must be atomic across handler threads)
        self.seq_mutex.acquire()
        temp_seq = self.seq_id
        self.seq_id = self.seq_id + 1
        self.seq_mutex.release()

        ## Stamp the request with the current wall-clock time.
        call_time = Timestamp()
        call_time.GetCurrentTime()
        ## Generate request forwarded to the entry proxy, tagged with the
        ## sequence id allocated above.
        temp_request = prediction_pb2.request(input_=request.input_,
                                              src_uri="front-end",
                                              seq=temp_seq,
                                              req_id=self.request_id,
                                              timestamp=call_time)

        ## Create conditional variable
        # Currently use seq id as thread id; the producing thread signals
        # completion by setting this Event.
        thread_id = temp_seq
        condition = threading.Event()  #Condition()
        self.event_dict[thread_id] = condition

        #logging.info(self.event_dict[thread_id])

        ## Send request to downstream proxy

        channel = grpc.insecure_channel(
            '%s:%s' % (self.entry_proxy_name, self.entry_proxy_port))
        stub = prediction_pb2_grpc.ProxyServerStub(channel)
        response = stub.downstream(temp_request)
        logging.info('Response\n{res}'.format(res=response.status))
        #
        logging.info("Received consume request%s" %
                     (request.input_.inputStream))

        #logging.info(type(self.event_dict.get(thread_id, None)))
        ## Wait for task finish — blocks until the producer sets the event.
        event = self.event_dict.get(thread_id, None)
        #event.acquire()
        event.wait()
        #event.release()

        ## Produce return

        output = self.output_dict[thread_id]

        #ret_time = self.timestamp_dict[thread_id]

        ## Return time is re-sampled here rather than taken from
        ## timestamp_dict (the read above is commented out).
        ret_time = Timestamp()
        ret_time.GetCurrentTime()

        ## Generate return string
        ret = "Call Time: %s Return Time: %s Output: %s" % (
            str(call_time), str(ret_time), str(output))

        ## Clean cache so per-request entries don't accumulate.
        del self.output_dict[thread_id]
        del self.event_dict[thread_id]
        del self.timestamp_dict[thread_id]

        logging.info("Finished processing request with ret: %s" % (ret))

        return model_pb2.response(status=ret)