def test_json_minimal_loading():
    """Build a server and client from a minimal JSON config, always stopping the server."""
    file_path = "yaml/konduit_minimal.json"
    server = None
    try:
        server = server_from_file(file_path, use_yaml=False, start_server=True)
        client = client_from_file(file_path, use_yaml=False)
    finally:
        # Guard: if server_from_file raised, ``server`` would be unbound here
        # and the resulting NameError would mask the original exception.
        if server is not None:
            server.stop()
Example #2
0
def test_yaml_server_python_prediction():
    """Start a TF-inference server from YAML, run one prediction, and check class 7's score."""
    server = None
    try:
        konduit_yaml_path = "yaml/konduit_tf_inference.yaml"
        img = np.load("../data/input_layer.npy")
        server = server_from_file(konduit_yaml_path, start_server=True)
        client = client_from_file(konduit_yaml_path)
        predicted = client.predict(data_input={"input_layer": img})
        result = dict(zip(np.arange(10), predicted[0].round(3)))
        # Expected distribution, e.g.:
        # {0: 0.0, 1: 0.0, 2: 0.001, 3: 0.001, 4: 0.0, 5: 0.0, 6: 0.0, 7: 0.998, 8: 0.0, 9: 0.0}
        assert round(result.get(7) * 1000) == 998
    finally:
        # Single stop in ``finally`` replaces the original's duplicate stop
        # (it called server.stop() both in the try body and here). Guard
        # against server_from_file itself having raised.
        if server is not None:
            server.stop()
Example #3
0
def predict_numpy(config, numpy_data, input_names):
    """Get predictions for your pipeline from numpy data and input names.

    :param config: path to a konduit configuration file
    :param numpy_data: comma-separated list of ``.npy`` file paths
    :param input_names: comma-separated list of input names, one per file
    :raises ValueError: if the number of files and names differ
    """
    from konduit.load import client_from_file

    numpy_files = numpy_data.split(",")
    names = input_names.split(",")
    # Raise a real error instead of ``assert`` (asserts are stripped under -O).
    if len(numpy_files) != len(names):
        raise ValueError(
            "Got %d numpy files but %d input names" % (len(numpy_files), len(names))
        )

    client = client_from_file(file_path=config)

    # Pair names with files directly instead of indexing via range(len(...)).
    input_dict = {name: np.load(path) for name, path in zip(names, numpy_files)}
    print(client.predict(input_dict))
Example #4
0
def test_model():
    """Send 10 prediction requests for a PNG image and report elapsed time / RPS."""
    client = client_from_file("konduit.yml")

    # ``with`` guarantees the file handle is closed; the original used
    # open(...).read() and leaked the handle.
    with open("1902_airplane.png", "rb") as f:
        image = f.read()

    responses = []

    start = time.time()
    for _ in range(10):
        response = client.predict({"default": image})
        print(response)
        responses.append(response)

    end = time.time()

    print("%f seconds elapsed for %d requests (%d RPS)" % (end - start, len(responses), (10.0 / (end - start))))
Example #5
0
def test_model():
    """Send 10 prediction requests for one CIFAR-10 image and report elapsed time / RPS."""
    client = client_from_file("konduit.yml")

    testset = torchvision.datasets.CIFAR10(root='./model/data',
                                           train=False,
                                           download=True)
    image, _ = testset[0]

    # Encode the image once; the original re-encoded the identical image
    # to PNG on every loop iteration (loop-invariant work).
    byte_buf = io.BytesIO()
    image.save(byte_buf, format='PNG')
    byte_array = byte_buf.getvalue()

    responses = []

    start = time.time()
    for _ in range(10):
        responses.append(client.predict({"default": byte_array}))
    print(len(responses))
    end = time.time()

    # The loop issues 10 requests; the original message wrongly said "1000 requests".
    print("%f seconds elapsed for %d requests (%d RPS)" %
          (end - start, len(responses), (10.0 / (end - start))))
# Copyright (c) 2020, codenamewei
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json
import time
import numpy as np
from konduit.client import Client
from konduit.load import client_from_file

def current_milli_time():
    """Return the current wall-clock time in whole milliseconds.

    A ``def`` instead of the original ``name = lambda: ...`` assignment
    (PEP 8 discourages binding lambdas to names); same callable interface.
    """
    return int(round(time.time() * 1000))


# Build a client from the YAML config and run a single prediction on the
# first declared input, echoing the result.
client = client_from_file("config.yaml")

prediction = client.predict({client.input_names[0]: np.array([5])})

print("Output: {}".format(prediction))
def test_yaml_minimal_loading():
    """Build a server and client from a minimal YAML config, always stopping the server."""
    file_path = "yaml/konduit_minimal.yaml"
    server = None
    try:
        server = server_from_file(file_path, use_yaml=True, start_server=True)
        client = client_from_file(file_path, use_yaml=True)
    finally:
        # The original started the server but never stopped it, leaking the
        # server process; mirror the JSON-variant test's try/finally cleanup.
        if server is not None:
            server.stop()
def test_yaml_client_loading():
    """Smoke test: a client can be constructed from the konduit YAML config."""
    yaml_config_path = "yaml/konduit.yaml"
    client = client_from_file(yaml_config_path)