Code example #1
    def test_threaded_streamer(self):
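        # Wrap the model's batch_prediction so individual predict() calls are grouped into batches of up to 8.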
        streamer = ThreadedStreamer(self.vision_model.batch_prediction, batch_size=8)
        single_predict = streamer.predict(self.input_batch)
        assert single_predict == self.single_output

        batch_predict = streamer.predict(self.input_batch * BATCH_SIZE)
        assert batch_predict == self.batch_output
Code example #2
def test_threaded_streamer():
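    # Collect individual predict() calls in a background worker thread and run them in batches of up to 16.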
    streamer = ThreadedStreamer(vision_model.batch_prediction, batch_size=16)

    output_raw = vision_model.batch_prediction(input_batch)
    output = streamer.predict(input_batch)
    assert output_raw == output

    outputs_raw = vision_model.batch_prediction(input_batch * 55)
    outputs = streamer.predict(input_batch * 55)
    assert outputs_raw == outputs
Code example #3
    def test_future_api(self):
        streamer = ThreadedStreamer(self.vision_model.batch_prediction, batch_size=8)

        xs = []
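        # Each submit() returns a Future immediately instead of blocking like predict().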
        for i in range(BATCH_SIZE):
            future = streamer.submit(self.input_batch)
            xs.append(future)
        batch_predict = []
        # Collect every Future and block on result() to gather the asynchronous responses.
        for future in xs:
            batch_predict.extend(future.result())
        assert batch_predict == self.batch_output
Code example #4
from flask import Flask, jsonify, request
from service_streamer import ThreadedStreamer

app = Flask(__name__)
model = None
streamer = None


@app.route("/naive", methods=["POST"])
def naive_predict():
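    # Naive path: run the model directly on this request's inputs, one request at a time.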
    inputs = request.form.getlist("s")
    outputs = model.predict(inputs)
    return jsonify(outputs)


@app.route("/stream", methods=["POST"])
def stream_predict():
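    # Streamed path: hand the inputs to the streamer, which merges concurrent requests into larger batches.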
    inputs = request.form.getlist("s")
    outputs = streamer.predict(inputs)
    return jsonify(outputs)


if __name__ == "__main__":
    model = Model()
    # start child thread as worker
    streamer = ThreadedStreamer(model.predict, batch_size=64, max_latency=0.1)

    # spawn child process as worker
    # streamer = Streamer(model.predict, batch_size=64, max_latency=0.1)

    app.run(port=5005, debug=False)
Code example #5
    return jsonify({'result': outputs[0][0].tolist()})


def batch_prediction(image_bytes_batch):
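    # Preprocess every image to the 416x416 input size, stack them into one batch tensor, and run a single forward pass.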
    image_tensors = [
        preprocess_image(image, target_size=(416, 416))
        for image in image_bytes_batch
    ]
    tensor = np.concatenate(image_tensors, axis=0)
    outputs = face_mask.forward([tensor])

    return [outputs[0][i] for i in range(len(outputs[0]))]


streamer = ThreadedStreamer(batch_prediction, batch_size=16)


@app.route('/stream_predict', methods=['POST'])
def stream_predict():
    message = request.get_json(force=True)
    encoded = message['image']
    decoded = base64.b64decode(encoded)
    image = Image.open(io.BytesIO(decoded))
    results = streamer.predict([image])[0]

    return jsonify({'result': results.tolist()})


@app.route('/', methods=['GET'])
def index():
Code example #6
    try:
        with open("config/jaeger-config.json", 'r') as f:
            jaeger_config_json = json.load(f)
    except Exception as e:
        logger.error(e)
        exit()

    port = int(config_json["translate-service"]["port"])
    model_dir = config_json["translate_model_dir"]
    logger.info("port:{}".format(port))
    logger.info("model:{}".format(model_dir))
    #model_dir = "/home/wzhang/lyx/DeathStarBench/socialNetwork/data/ende_ctranslate2"
    translate_func = TransFunc()
    jaeger_config = Config(config=jaeger_config_json, service_name="translate-service", validate=True)
    tracer = jaeger_config.initialize_tracer()
    tokenizer = pyonmttok.Tokenizer('conservative', joiner_annotate=True)
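    # Batch concurrent translation requests in a background worker thread before calling the model.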
    streamer = ThreadedStreamer(translate_func.predict, batch_size=batch_size, max_latency=wait_time)
    handler = TranslateHandler(tokenizer=tokenizer, streamer=streamer, tracer=tracer)
    processor = TranslateService.Processor(handler)
    transport = TSocket.TServerSocket("0.0.0.0", port)
    tfactory = TTransport.TFramedTransportFactory()
    pfactory = TBinaryProtocol.TBinaryProtocolFactory()

    server = TServer.TThreadedServer(
        processor, transport, tfactory, pfactory)
    
    logger.info("Starting the translate-service server...")
    server.serve()
    logger.info("translate-service server exit...")

Code example #7
File: app.py  Project: svishnu88/deploy-pytorch-model
    return [get_preds(outp)]
        

img_transforms = get_transforms
imagenete_id_cat = json.load(open('imagenet_class_index.json'))

def get_img(url):
    response = requests.get(url)
    img = Image.open(BytesIO(response.content))
    return img

def get_preds(preds):
    idx = preds.max(1)[1].item()
    return imagenete_id_cat[str(idx)][1]

@app.route('/predict', methods=['POST'])
def predict():
    img = get_img(request.args['url'])
    img_t = img_transforms()(img).unsqueeze(0).to(device)
    outp = model(img_t)
    return f'{get_preds(outp)}'

streamer = ThreadedStreamer(predict_batch, batch_size=64, max_latency=0.1)
@app.route("/stream", methods=["POST"])
def stream_predict():
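    # Concurrent /stream requests are merged by the streamer into a single predict_batch call.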
    url = request.args['url']
    outp = streamer.predict([url])[0]
    return str(outp)

if __name__ == '__main__':
    app.run(debug=False)
Code example #8
    if type(img) is list:
        img = img[0]
    result = inference_segmentor(model, img)
    return result


def get_random_img(n=1):
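    # Sample n random image paths from the annotation table.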
    img_table_file = base + 'img_anno.csv'
    # test_dir = 'output/ocr_parkinglot/'
    img_dir = base + 'images/'
    # anno_dir = base+'labels/'
    img_table = pd.read_csv(img_table_file)
    paths = img_table.dropna().sample(n).img.to_list()
    # img_name = path.split('/')[-1]
    # img_path = img_dir+img_name
    return paths


## Local test
# paths = get_random_img()
# img_path = paths[0]
# result = predict_save(model, img_path)
# polygons = polygonize(result, draw_img=img_path)

if __name__ == "__main__":
    # start child thread as worker
    streamer = ThreadedStreamer(predict, batch_size=4, max_latency=0.5)
    # spawn child process as worker for multiple GPU
    # streamer = Streamer(predict_save, batch_size=4, max_latency=0.1)
    print('Flask started')
    app.run(port=5005, debug=False, host='0.0.0.0')