Example #1
 def test_ONNXGraph(self):
     onnx_model = get_onnx_model()
     path = f'{time.time()}.onnx'
     save_onnx(onnx_model, path)
     assert os.path.exists(path)
     load_model(path)
     os.remove(path)
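`get_onnx_model()` is a helper defined elsewhere in the test suite; as a hedged sketch, a trivial stand-in can be built with the `onnx.helper` API (all node and tensor names below are hypothetical):

import onnx
from onnx import TensorProto, helper

# Hypothetical stand-in for get_onnx_model(): a one-node graph that squares its input.
inp = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1])
out = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1])
node = helper.make_node('Mul', ['x', 'x'], ['y'])
graph = helper.make_graph([node], 'square', [inp], [out])
onnx_model = helper.make_model(graph)
onnx.checker.check_model(onnx_model)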
Example #2
    def test_run_tf_model(self):
        model_path = os.path.join(MODEL_DIR, 'graph.pb')
        bad_model_path = os.path.join(MODEL_DIR, 'pt-minimal.pt')

        model_pb = load_model(model_path)
        wrong_model_pb = load_model(bad_model_path)

        con = self.get_client()
        con.modelset('m',
                     'tf',
                     'cpu',
                     model_pb,
                     inputs=['a', 'b'],
                     outputs=['mul'],
                     tag='v1.0')
        con.modeldel('m')
        self.assertRaises(ResponseError, con.modelget, 'm')
        con.modelset('m',
                     'tf',
                     'cpu',
                     model_pb,
                     inputs=['a', 'b'],
                     outputs='mul',
                     tag='v1.0')

        # wrong model
        self.assertRaises(ResponseError,
                          con.modelset,
                          'm',
                          'tf',
                          'cpu',
                          wrong_model_pb,
                          inputs=['a', 'b'],
                          outputs=['mul'])
        # missing inputs/outputs
        self.assertRaises(ValueError, con.modelset, 'm', 'tf', 'cpu',
                          wrong_model_pb)

        # wrong backend
        self.assertRaises(ResponseError,
                          con.modelset,
                          'm',
                          'torch',
                          'cpu',
                          model_pb,
                          inputs=['a', 'b'],
                          outputs=['mul'])

        con.tensorset('a', (2, 3), dtype='float')
        con.tensorset('b', (2, 3), dtype='float')
        con.modelrun('m', ['a', 'b'], ['c'])
        tensor = con.tensorget('c')
        self.assertTrue(np.allclose([4, 9], tensor))
        model_det = con.modelget('m')
        self.assertTrue(model_det['backend'] == 'TF')
        self.assertTrue(
            model_det['device'] == 'cpu')  # TODO: RedisAI returns the device name in lowercase
        self.assertTrue(model_det['tag'] == 'v1.0')
        con.modeldel('m')
        self.assertRaises(ResponseError, con.modelget, 'm')
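For reference, the `graph.pb` fixture multiplies its two inputs elementwise, which is why inputs `(2, 3)` and `(2, 3)` yield `[4, 9]`. A minimal sketch of how an equivalent frozen graph could be generated, assuming the TensorFlow 1.x compat API (an illustration, not necessarily how the fixture was built):

import tensorflow as tf

# Sketch: a graph with float placeholders 'a' and 'b' and an output node named 'mul'.
with tf.compat.v1.Graph().as_default() as g:
    a = tf.compat.v1.placeholder(tf.float32, name='a')
    b = tf.compat.v1.placeholder(tf.float32, name='b')
    tf.multiply(a, b, name='mul')

tf.io.write_graph(g.as_graph_def(), '.', 'graph.pb', as_text=False)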
Example #3
 def initiate(self):
     encoder_path = f'{dirname(dirname(dirname(__file__)))}/models/pytorch/chatbot/encoder.pt'
     decoder_path = f'{dirname(dirname(dirname(__file__)))}/models/pytorch/chatbot/decoder.pt'
     en_model = ml2rt.load_model(encoder_path)
     de_model = ml2rt.load_model(decoder_path)
     self.con.modelset('encoder', rai.Backend.torch, rai.Device.cpu,
                       en_model)
     self.con.modelset('decoder', rai.Backend.torch, rai.Device.cpu,
                       de_model)
Example #4
 def initiate(self):
     encoder_path = f'{dirname(__file__)}/assets/encoder.pt'
     decoder_path = f'{dirname(__file__)}/assets/decoder.pt'
     en_model = ml2rt.load_model(encoder_path)
     de_model = ml2rt.load_model(decoder_path)
     self.con.modelset('encoder', 'torch', 'cpu', en_model)
     self.con.modelset('decoder', 'torch', 'cpu', de_model)
     # 2 = n_layers * n_directions, 1 = batch size, 500 = hidden size
     dummy_hidden = np.zeros((2, 1, 500), dtype=np.float32)
     self.con.tensorset('hidden', dummy_hidden)
Example #5
def stonk_market():
    """
    Load Stonk models into redisai
    """
    rai = redisai.Client(host="redis", port=6379)
    current_stonks: List[str] = []
    for name, tag in cast(List[Tuple[str, str]], rai.modelscan()):
        if tag in [LOAD_MEME_CLF_VERSION, LOAD_STONK_VERSION]:
            current_stonks.append(name)
        else:
            _ = rai.modeldel(name)
    meme_clf_names = [
        os.path.splitext(file)[0]
        for file in os.listdir(LOAD_MEME_CLF_REPO.format("jit"))
        if "backup" not in file
    ]
    for name in (n for n in meme_clf_names if n not in current_stonks):
        model = ml2rt.load_model(LOAD_MEME_CLF_REPO.format("jit") + f"{name}.pt")
        _ = rai.modelset(
            name,
            backend,
            device,
            cast(Any, model),
            tag=LOAD_MEME_CLF_VERSION,
            inputs=cast(Any, None),
            outputs=cast(Any, None),
        )
        print(f"{name} Loaded")
    names_on_disk = [
        os.path.splitext(file)[0]
        for file in os.listdir(LOAD_STONK_REPO.format("jit"))
        if "backup" not in file
    ]
    names_to_load = [name for name in names_on_disk if name not in current_stonks]
    for idx, name in enumerate(names_to_load):
        print(f"{idx+1}/{len(names_to_load)} - {name}")
        model = ml2rt.load_model(LOAD_STONK_REPO.format("jit") + f"{name}.pt")
        _ = rai.modelset(
            name,
            backend,
            device,
            cast(Any, model),
            tag=LOAD_STONK_VERSION,
            inputs=cast(Any, None),
            outputs=cast(Any, None),
        )
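Note that `backend`, `device`, and the `LOAD_*` constants come from the surrounding project and are not shown here; a hedged sketch of plausible definitions (the values and path templates are assumptions):

import os
from typing import Any, List, Tuple, cast

import ml2rt
import redisai

# Assumed module-level configuration; the real values live in the project's settings.
backend = 'torch'
device = 'cpu'
LOAD_MEME_CLF_VERSION = '0.1'   # hypothetical version tags
LOAD_STONK_VERSION = '0.1'
LOAD_MEME_CLF_REPO = 'models/meme_clf/{}/'   # hypothetical path templates
LOAD_STONK_REPO = 'models/stonk/{}/'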
Example #6
    def test_run_tf_model(self):
        model_path = os.path.join(MODEL_DIR, 'graph.pb')
        bad_model_path = os.path.join(MODEL_DIR, 'pt-minimal.pt')

        model_pb = load_model(model_path)
        wrong_model_pb = load_model(bad_model_path)

        con = self.get_client()
        con.modelset('m',
                     Backend.tf,
                     Device.cpu,
                     model_pb,
                     inputs=['a', 'b'],
                     outputs='mul')

        # wrong model
        self.assertRaises(ResponseError,
                          con.modelset,
                          'm',
                          Backend.tf,
                          Device.cpu,
                          wrong_model_pb,
                          inputs=['a', 'b'],
                          outputs='mul')
        # missing inputs/outputs
        self.assertRaises(ValueError, con.modelset, 'm', Backend.tf,
                          Device.cpu, wrong_model_pb)

        # wrong backend
        self.assertRaises(ResponseError,
                          con.modelset,
                          'm',
                          Backend.torch,
                          Device.cpu,
                          model_pb,
                          inputs=['a', 'b'],
                          outputs='mul')

        con.tensorset('a', (2, 3), dtype=DType.float)
        con.tensorset('b', (2, 3), dtype=DType.float)
        con.modelrun('m', ['a', 'b'], 'c')
        tensor = con.tensorget('c')
        self.assertTrue(np.allclose([4, 9], tensor))
        model_det = con.modelget('m')
        self.assertTrue(model_det.backend == Backend.tf)
        self.assertTrue(model_det.device == Device.cpu)
        con.modeldel('m')
        self.assertRaises(ResponseError, con.modelget, 'm')
Example #7
 def test_info(self):
     model_path = os.path.join(MODEL_DIR, 'graph.pb')
     model_pb = load_model(model_path)
     con = self.get_client()
     con.modelset('m',
                  'tf',
                  'cpu',
                  model_pb,
                  inputs=['a', 'b'],
                  outputs=['mul'])
     first_info = con.infoget('m')
     expected = {
         'key': 'm',
         'type': 'MODEL',
         'backend': 'TF',
         'device': 'cpu',
         'tag': '',
         'duration': 0,
         'samples': 0,
         'calls': 0,
         'errors': 0
     }
     self.assertEqual(first_info, expected)
     con.tensorset('a', (2, 3), dtype='float')
     con.tensorset('b', (2, 3), dtype='float')
     con.modelrun('m', ['a', 'b'], ['c'])
     con.modelrun('m', ['a', 'b'], ['c'])
     second_info = con.infoget('m')
     self.assertEqual(second_info['calls'], 2)  # 2 model runs
     con.inforeset('m')
     third_info = con.infoget('m')
     self.assertEqual(first_info,
                      third_info)  # before modelrun and after reset
Example #8
 def test_model_scan(self):
     model_path = os.path.join(MODEL_DIR, 'graph.pb')
     model_pb = load_model(model_path)
     con = self.get_client()
     con.modelset('m',
                  'tf',
                  'cpu',
                  model_pb,
                  inputs=['a', 'b'],
                  outputs=['mul'],
                  tag='v1.2')
     model_path = os.path.join(MODEL_DIR, 'pt-minimal.pt')
     ptmodel = load_model(model_path)
     con = self.get_client()
     # TODO: RedisAI modelscan issue
     con.modelset("pt_model", 'torch', 'cpu', ptmodel)
     mlist = con.modelscan()
     self.assertEqual(mlist, [['pt_model', ''], ['m', 'v1.2']])
Example #9
 def test_run_tflite_model(self):
     model_path = os.path.join(MODEL_DIR, 'mnist_model_quant.tflite')
     tflmodel = load_model(model_path)
     con = self.get_client()
     con.modelset("tfl_model", 'tflite', 'cpu', tflmodel)
     img = np.random.random((1, 1, 28, 28)).astype(np.float32)
     con.tensorset('img', img)
     con.modelrun("tfl_model", ["img"], ["output1", "output2"])
     output = con.tensorget('output1')
     self.assertTrue(np.allclose(output, [8]))
Example #10
 def test_run_pytorch_model(self):
     model_path = os.path.join(MODEL_DIR, 'pt-minimal.pt')
     ptmodel = load_model(model_path)
     con = self.get_client()
     con.modelset("pt_model", 'torch', 'cpu', ptmodel, tag='v1.0')
     con.tensorset('a', [2, 3, 2, 3], shape=(2, 2), dtype='float')
     con.tensorset('b', [2, 3, 2, 3], shape=(2, 2), dtype='float')
     con.modelrun("pt_model", ["a", "b"], ["output"])
     output = con.tensorget('output', as_numpy=False)
     self.assertTrue(np.allclose(output['values'], [4, 6, 4, 6]))
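Given the expected output `[4, 6, 4, 6]` for inputs `[2, 3, 2, 3]` and `[2, 3, 2, 3]`, the `pt-minimal.pt` fixture evidently adds its two inputs. A sketch of how an equivalent TorchScript file could be produced (the module name is made up):

import torch

class Minimal(torch.nn.Module):
    def forward(self, a, b):
        # Elementwise sum: [2, 3, 2, 3] + [2, 3, 2, 3] -> [4, 6, 4, 6]
        return a + b

torch.jit.script(Minimal()).save('pt-minimal.pt')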
Example #11
 def test_run_pytorch_model(self):
     model_path = os.path.join(MODEL_DIR, 'pt-minimal.pt')
     ptmodel = load_model(model_path)
     con = self.get_client()
     con.modelset("pt_model", Backend.torch, Device.cpu, ptmodel)
     con.tensorset('a', [2, 3, 2, 3], shape=(2, 2), dtype=DType.float)
     con.tensorset('b', [2, 3, 2, 3], shape=(2, 2), dtype=DType.float)
     con.modelrun("pt_model", ["a", "b"], "output")
     output = con.tensorget('output', as_numpy=False)
     self.assertTrue(np.allclose(output.value, [4, 6, 4, 6]))
Example #12
 def test_run_onnxdl_model(self):
     # A PyTorch model that finds the square
     dlmodel_path = os.path.join(MODEL_DIR, 'findsquare.onnx')
     onnxdl_model = load_model(dlmodel_path)
     con = self.get_client()
     con.modelset("onnx_model", 'onnx', 'cpu', onnxdl_model)
     tensor = np.array((2, )).astype(np.float32)
     con.tensorset("input", tensor)
     con.modelrun("onnx_model", ["input"], ["output"])
     outtensor = con.tensorget("output")
     self.assertTrue(np.allclose(outtensor, [4.0]))
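The comment above says the ONNX file comes from a PyTorch model that finds the square of its input; a sketch of exporting such a model (module name and file layout assumed):

import torch

class FindSquare(torch.nn.Module):
    def forward(self, x):
        return x * x

dummy = torch.tensor([2.0])
torch.onnx.export(FindSquare(), dummy, 'findsquare.onnx')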
Example #13
 def test_run_onnxml_model(self):
     mlmodel_path = os.path.join(MODEL_DIR, 'boston.onnx')
     onnxml_model = load_model(mlmodel_path)
     con = self.get_client()
     con.modelset("onnx_model", 'onnx', 'cpu', onnxml_model)
     tensor = np.ones((1, 13)).astype(np.float32)
     con.tensorset("input", tensor)
     con.modelrun("onnx_model", ["input"], ["output"])
     # tests `convert_to_num`
     outtensor = con.tensorget("output", as_numpy=False)
     self.assertEqual(int(float(outtensor['values'][0])), 24)
Example #14
    def test_SparkMLGraph(self):
        spark_model, prototype = get_spark_model_and_prototype()

        # saving with prototype
        path = f'{time.time()}.onnx'
        save_sparkml(spark_model, path, prototype=prototype)
        load_model(path)
        assert os.path.exists(path)
        os.remove(path)

        # saving with shape and dtype
        shape = prototype.shape
        if prototype.dtype == np.float32:
            dtype = prototype.dtype
        else:
            raise RuntimeError(
                "Test is not configured to run with another type")
        path = f'{time.time()}.onnx'
        save_sparkml(spark_model, path, shape=shape, dtype=dtype)
        assert os.path.exists(path)
        load_model(path)
        os.remove(path)

        # saving with initial_types
        initial_types = utils.guess_onnx_tensortype(shape=shape, dtype=dtype)
        path = f'{time.time()}.onnx'
        save_sparkml(spark_model, path, initial_types=[initial_types])
        assert os.path.exists(path)
        load_model(path)
        os.remove(path)
Example #15
    def create_deployment(self, name, model_uri, flavor=None, config=None):
        device = config.get('device', 'CPU')
        autobatch_size = config.get('batchsize')
        tag = config.get('tag')
        path = Path(_download_artifact_from_uri(model_uri))
        model_config = path / 'MLmodel'
        if not model_config.exists():
            raise MlflowException(
                message=(
                    "Failed to find MLmodel configuration within the specified model's"
                    " root directory."),
                error_code=INVALID_PARAMETER_VALUE)
        model_config = Model.load(model_config)

        if flavor is None:
            flavor = get_preferred_deployment_flavor(model_config)
        else:
            validate_deployment_flavor(model_config, flavor)
        logger.info("Using the {} flavor for deployment!".format(flavor))

        if flavor == 'tensorflow':
            # TODO: test this for tf1.x and tf2.x
            tags = model_config.flavors[flavor]['meta_graph_tags']
            signaturedef = model_config.flavors[flavor]['signature_def_key']
            model_dir = path / model_config.flavors[flavor]['saved_model_dir']
            model, inputs, outputs = ml2rt.load_model(model_dir, tags, signaturedef)
        else:
            model_path = None
            for file in path.iterdir():
                if file.suffix == '.pt':
                    model_path = file
            if model_path is None:
                raise RuntimeError("Model file does not have a valid suffix. Expected ``.pt``")
            model = ml2rt.load_model(model_path)
            inputs = outputs = None
        backend = flavor2backend[flavor]
        self.con.modelset(name, backend, device, model, inputs=inputs, outputs=outputs, batch=autobatch_size, tag=tag)
        return {'name': name, 'flavor': flavor}
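Since `create_deployment` follows the MLflow deployment-plugin interface, it would normally be invoked through `mlflow.deployments` rather than called directly. A hedged usage sketch, assuming the mlflow-redisai plugin is installed (the target name and run id are placeholders):

from mlflow.deployments import get_deploy_client

# 'redisai' is the plugin's assumed target name; '<run_id>' is a placeholder.
client = get_deploy_client('redisai')
client.create_deployment('my_model',
                         'runs:/<run_id>/model',
                         config={'device': 'CPU', 'tag': 'v1'})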
Example #16
 def test_modelrun_non_list_input_output(self):
     model_path = os.path.join(MODEL_DIR, 'graph.pb')
     model_pb = load_model(model_path)
     con = self.get_client()
     con.modelset('m',
                  'tf',
                  'cpu',
                  model_pb,
                  inputs=['a', 'b'],
                  outputs=['mul'],
                  tag='v1.7')
     con.tensorset('a', (2, 3), dtype='float')
     con.tensorset('b', (2, 3), dtype='float')
     ret = con.modelrun('m', ['a', 'b'], 'out')
     self.assertEqual(ret, 'OK')
Example #17
 def test_nonasciichar(self):
     nonascii = 'ĉ'
     model_path = os.path.join(MODEL_DIR, 'graph.pb')
     model_pb = load_model(model_path)
     con = self.get_client()
     con.modelset('m' + nonascii,
                  'tf',
                  'cpu',
                  model_pb,
                  inputs=['a', 'b'],
                  outputs=['mul'],
                  tag='v1.0')
     con.tensorset('a' + nonascii, (2, 3), dtype='float')
     con.tensorset('b', (2, 3), dtype='float')
     con.modelrun('m' + nonascii, ['a' + nonascii, 'b'], ['c' + nonascii])
     tensor = con.tensorget('c' + nonascii)
     self.assertTrue((np.allclose(tensor, [4., 9.])))
Example #18
 def test_modelset_errors(self):
     model_path = os.path.join(MODEL_DIR, 'graph.pb')
     model_pb = load_model(model_path)
     con = self.get_client()
     with self.assertRaises(ValueError):
         con.modelset('m',
                      'tf',
                      'wrongdevice',
                      model_pb,
                      inputs=['a', 'b'],
                      outputs=['mul'],
                      tag='v1.0')
     with self.assertRaises(ValueError):
         con.modelset('m',
                      'wrongbackend',
                      'cpu',
                      model_pb,
                      inputs=['a', 'b'],
                      outputs=['mul'],
                      tag='v1.0')
Example #19
 def test_modelget_meta(self):
     model_path = os.path.join(MODEL_DIR, 'graph.pb')
     model_pb = load_model(model_path)
     con = self.get_client()
     con.modelset('m',
                  'tf',
                  'cpu',
                  model_pb,
                  inputs=['a', 'b'],
                  outputs=['mul'],
                  tag='v1.0')
     model = con.modelget('m', meta_only=True)
     self.assertEqual(
         model, {
             'backend': 'TF',
             'batchsize': 0,
             'device': 'cpu',
             'inputs': ['a', 'b'],
             'minbatchsize': 0,
             'outputs': ['mul'],
             'tag': 'v1.0'
         })
Example #20
def predict_object():
    if arguments.gpu:
        device = rai.Device.gpu
    else:
        device = rai.Device.cpu

    con = rai.Client(host=arguments.host, port=arguments.port)

    tf_model_path = 'models/tensorflow/imagenet/resnet50.pb'
    script_path = 'models/tensorflow/imagenet/data_processing_script.txt'
    img_path = 'images/x.png'

    class_idx = json.load(open("data/imagenet_classes.json"))

    image = io.imread(img_path)

    tf_model = load_model(tf_model_path)
    script = load_script(script_path)

    out1 = con.modelset('imagenet_model',
                        rai.Backend.tf,
                        device,
                        inputs=['images'],
                        outputs=['output'],
                        data=tf_model)
    out2 = con.scriptset('imagenet_script', device, script)
    a = time.time()
    tensor = rai.BlobTensor.from_numpy(image)
    con.tensorset('image', tensor)
    out4 = con.scriptrun('imagenet_script', 'pre_process_3ch', 'image',
                         'temp1')
    out5 = con.modelrun('imagenet_model', 'temp1', 'temp2')
    out6 = con.scriptrun('imagenet_script', 'post_process', 'temp2', 'out')
    final = con.tensorget('out', as_type=rai.BlobTensor)
    ind = final.to_numpy().item()

    return class_idx[str(ind)]
Example #21
# -*- coding: utf-8 -*-
import ml2rt
import redisai as rai

from src.utils.config import Config


if __name__ == '__main__':
    config = Config()
    if not Config.validate_configs():
        exit(1)

    connection_rai = rai.Client(host='localhost', port=6379)
    model = ml2rt.load_model('resources/bert.rai')
    connection_rai.modelset("bert", 'onnx', config.device, model)
Example #22
import json
import numpy as np
import redisai as rai
import ml2rt

con = rai.Client()
model = ml2rt.load_model('models/model1.pt')


con.modelset('model', rai.Backend.torch, rai.Device.cpu, model)


numpy_img = np.random.rand(1, 3, 32, 32).astype(np.float32)
print(numpy_img.shape)

con.tensorset('input', numpy_img)
con.modelrun(
    'model',
    inputs=['input'],
    outputs=['output']
)
ret = con.tensorget('output', as_type=rai.BlobTensor).to_numpy()
'''
classes_idx = json.load(open('data/imagenet_classes.json'))
ind = ret.argmax()
print(ind, ret.shape)
print(classes_idx[str(ind-1)])
'''
Example #23
if arguments.gpu:
    device = 'gpu'
else:
    device = 'cpu'

con = rai.Client(host=arguments.host, port=arguments.port)

pt_model_path = '../models/pytorch/imagenet/resnet50.pt'
script_path = '../models/pytorch/imagenet/data_processing_script.txt'
img_path = '../data/cat.jpg'

class_idx = json.load(open("../data/imagenet_classes.json"))

image = io.imread(img_path)

pt_model = ml2rt.load_model(pt_model_path)
script = ml2rt.load_script(script_path)

out1 = con.modelset('imagenet_model', 'torch', device, pt_model)
out2 = con.scriptset('imagenet_script', device, script)
a = time.time()
out3 = con.tensorset('image', image)
out4 = con.scriptrun('imagenet_script', 'pre_process_3ch', 'image', 'temp1')
out5 = con.modelrun('imagenet_model', 'temp1', 'temp2')
out6 = con.scriptrun('imagenet_script', 'post_process', 'temp2', 'out')
final = con.tensorget('out')
ind = final[0]
print(ind, class_idx[str(ind)])
print(time.time() - a)
    14: "motorbike",
    15: "person",
    16: "pottedplant",
    17: "sheep",
    18: "sofa",
    19: "train",
    20: "tvmonitor"
}

if arguments.gpu:
    device = 'gpu'
else:
    device = 'cpu'

con = rai.Client(host=arguments.host, port=arguments.port)
model = ml2rt.load_model('../models/tensorflow/tinyyolo/tinyyolo.pb')
script = ml2rt.load_script(
    '../models/tensorflow/tinyyolo/yolo_boxes_script.py')

con.modelset('yolo', 'tf', device, model, inputs=['input'], outputs=['output'])
con.scriptset('yolo-post', device, script)

img_jpg = Image.open('../data/sample_dog_416.jpg')
# normalize
img = np.array(img_jpg).astype(np.float32)
img = np.expand_dims(img, axis=0)
img /= 256.0

con.tensorset('in', img)
con.modelrun('yolo', 'in', 'out')
con.scriptrun('yolo-post', 'boxes_from_tf', inputs='out', outputs='boxes')
Example #25
import numpy as np
from redisai import Client, DType, Device, Backend
import ml2rt

client = Client()
client.tensorset('x', [2, 3], dtype=DType.float)
t = client.tensorget('x')
print(t.value)

model = ml2rt.load_model('test/testdata/graph.pb')
tensor1 = np.array([2, 3], dtype=np.float32)
client.tensorset('a', tensor1)
client.tensorset('b', (12, 10), dtype=DType.float)
client.modelset('m',
                Backend.tf,
                Device.cpu,
                inputs=['a', 'b'],
                outputs='mul',
                data=model)
client.modelrun('m', ['a', 'b'], ['mul'])
print(client.tensorget('mul'))

# Try with a script
script = ml2rt.load_script('test/testdata/script.txt')
client.scriptset('ket', Device.cpu, script)
client.scriptrun('ket', 'bar', inputs=['a', 'b'], outputs='c')
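The `script.txt` fixture exposes an entry point named `bar` over two tensors; a plausible minimal TorchScript script would be (the body is an assumption, not the repository's actual file):

def bar(a, b):
    return a + b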
Example #26
import numpy as np
import redisai as rai
from ml2rt import load_model
model = load_model("../models/spark/pca/spark.onnx")

con = rai.Client()
con.modelset("spark_model", rai.Backend.onnx, rai.Device.cpu, model)
dummydata = np.array([[2.0, 0.0, 3.0, 4.0, 5.0], [4.0, 0.0, 0.0, 6.0, 7.0]],
                     dtype=np.float32)
tensor = rai.BlobTensor.from_numpy(dummydata)
con.tensorset("input", tensor)
con.modelrun("spark_model", ["input"], ["output"])
outtensor = con.tensorget("output", as_type=rai.BlobTensor)
print(outtensor.to_numpy())
Example #27
if arguments.gpu:
    device = rai.Device.gpu
else:
    device = rai.Device.cpu

con = rai.Client(host=arguments.host, port=arguments.port)

tf_model_path = '../models/tensorflow/imagenet/resnet50.pb'
script_path = '../models/tensorflow/imagenet/data_processing_script.txt'
img_path = '../data/cat.jpg'

class_idx = json.load(open("../data/imagenet_classes.json"))

image = io.imread(img_path)

tf_model = load_model(tf_model_path)
script = load_script(script_path)

out1 = con.modelset('imagenet_model',
                    rai.Backend.tf,
                    device,
                    inputs=['images'],
                    outputs=['output'],
                    data=tf_model)
out2 = con.scriptset('imagenet_script', device, script)
a = time.time()
tensor = rai.BlobTensor.from_numpy(image)
con.tensorset('image', tensor)
out4 = con.scriptrun('imagenet_script', 'pre_process_3ch', 'image', 'temp1')
out5 = con.modelrun('imagenet_model', 'temp1', 'temp2')
out6 = con.scriptrun('imagenet_script', 'post_process', 'temp2', 'out')
Example #28
import string

import ml2rt
import redisai as rai
import numpy as np

all_characters = string.printable

con = rai.Client(host='localhost', port=6379, db=0)
hidden_size = 300
n_layers = 2
batch_size = 1

filepath = '../models/pytorch/charrnn/charrnn_pipeline.pt'


def int2str(int_data):
    return ''.join([all_characters[i] for i in int_data])


model = ml2rt.load_model(filepath)

out1 = con.modelset('charRnn', rai.Backend.torch, rai.Device.cpu, model)
hidden = np.zeros((n_layers, batch_size, hidden_size), dtype=np.float32)
hidden_tensor = rai.BlobTensor.from_numpy(hidden)
out2 = con.tensorset('hidden', hidden_tensor)
prime_tensor = rai.Tensor(rai.DType.int64, shape=(1, ), value=5)
out3 = con.tensorset('prime', prime_tensor)
out4 = con.modelrun('charRnn', ['prime', 'hidden'], ['out'])
out5 = con.tensorget('out')
para = int2str(out5.value)
print(para)
Example #29
import numpy as np
import redisai as rai
from ml2rt import load_model

model = load_model("../models/sklearn/logistic_regression/logistic.onnx")

con = rai.Client()
con.modelset("sklearn_model", rai.Backend.onnx, rai.Device.cpu, model)

dummydata = np.array([[6.9, 3.1, 5.4, 2.1]], dtype=np.float32)
tensor = rai.BlobTensor.from_numpy(dummydata)
con.tensorset("input", tensor)

# dummy output because by default sklearn logistic regression outputs
# value and probability. Since RedisAI doesn't support specifying required
# outputs now, we need to keep placeholders for all the default outputs.
con.modelrun("sklearn_model", ["input"], ["output", "dummy"])
outtensor = con.tensorget("output", as_type=rai.BlobTensor)
print(f" Output class: {outtensor.to_numpy().item()}")
Example #30
import redisai as rai
from ml2rt import load_model

model = load_model(
    "../models/sklearn/linear_regression/linear_regression.onnx")

con = rai.Client()
con.modelset("sklearn_model", rai.Backend.onnx, rai.Device.cpu, model)

# dummydata taken from sklearn.datasets.load_boston().data[0]
dummydata = [
    0.00632, 18.0, 2.31, 0.0, 0.538, 6.575, 65.2, 4.09, 1.0, 296.0, 15.3,
    396.9, 4.98
]
tensor = rai.Tensor.scalar(rai.DType.float, *dummydata)
con.tensorset("input", tensor)

con.modelrun("sklearn_model", ["input"], ["output"])
outtensor = con.tensorget("output", as_type=rai.BlobTensor)
print(
    f"House cost predicted by model is ${outtensor.to_numpy().item() * 1000}")