def predict_from_files(filenames, key, img_size=512, names_file='data/coco.names', conf_thresh=0.3, iou_thresh=0.6, host=DEFAULT_HOST, port=DEFAULT_PORT):
    """Run the RedisAI detector stored under *key* on every image file.

    Args:
        filenames: iterable of image paths readable by ``cv2.imread``.
        key: RedisAI model key to run.
        img_size: square letterbox size fed to the model.
        names_file: path to the class-names file.
        conf_thresh / iou_thresh: detection thresholds forwarded to ``predict``.
        host / port: RedisAI server address.

    Returns:
        A list with one ``predict`` result per input file, in input order.
    """
    class_names = load_classes(names_file)
    client = redisai.Client(host=host, port=port)

    def _predict_one(path):
        # Load from disk, letterbox to the model's square input size,
        # and pass the original shape so boxes can be mapped back.
        raw = cv2.imread(path)
        boxed = letterbox_image(raw, (img_size, img_size))
        return predict(client, key, boxed, orig_shape=raw.shape[:2],
                       names=class_names, conf_thresh=conf_thresh,
                       iou_thresh=iou_thresh)

    return [_predict_one(path) for path in filenames]
def init(config):
    """Load the Torch model and sample image, stashing state on ``init`` itself.

    Side effects (function-attribute pattern — state lives on the function):
        init.con       -- RedisAI client connected to config['server'].
        init.img_class -- class label of the sample image.
        init.image     -- sample image as a BlobTensor (CHW via transpose).
    """
    model = raimodel.Model.load(config['modelpath'])
    # Split "host:port" once instead of calling split(':') twice.
    server_parts = config['server'].split(':')
    host, port = server_parts[0], server_parts[1]
    # NOTE(review): port stays a string as in the original — rai.Client
    # presumably coerces it; confirm against the client's signature.
    init.con = rai.Client(host=host, port=port)
    init.con.modelset('model', rai.Backend.torch, rai.Device.cpu, model)
    image, init.img_class = get_one_image(transpose=(2, 0, 1))
    init.image = rai.BlobTensor.from_numpy(image)
def __init__(self, uri):
    """Open a RedisAI connection, either from a URI path or from Config defaults.

    Args:
        uri: connection string; only its path component is inspected here.
    """
    super().__init__(uri)
    server_config = Config()
    path = urlparse(uri).path
    if path:
        # NOTE(review): relies on `path` keeping its leading "/" (urlparse
        # does), so f"redis:/{path}" yields "redis://...". The original
        # scheme and netloc of *uri* are discarded here — confirm intended.
        uri = f"redis:/{path}"
        self.con = redisai.Client.from_url(uri)
    else:
        # assumes Config() behaves like a mapping of redis client kwargs
        # (host/port/...) — TODO confirm against Config's definition.
        self.con = redisai.Client(**server_config)
def stonk_market():
    """Load Stonk models into redisai.

    Scans the server, keeps models tagged with a current version, deletes the
    rest, then loads any on-disk jit models that are not already present.
    """
    rai = redisai.Client(host="redis", port=6379)
    # Partition server models: keep current-version tags, delete stale ones.
    current_stonks: List[str] = []
    for name, tag in cast(Tuple[str, str], rai.modelscan()):
        if tag in [LOAD_MEME_CLF_VERSION, LOAD_STONK_VERSION]:
            current_stonks.append(name)
        else:
            _ = rai.modeldel(name)
    # MEME_CLF models — restructured to mirror the STONK section below;
    # the original enumerate() index here was never used.
    meme_clf_names_on_disk = [
        os.path.splitext(file)[0]
        for file in os.listdir(LOAD_MEME_CLF_REPO.format("jit"))
        if "backup" not in file
    ]
    for name in meme_clf_names_on_disk:
        if name in current_stonks:
            continue
        model = ml2rt.load_model(LOAD_MEME_CLF_REPO.format("jit") + f"{name}.pt")
        _ = rai.modelset(
            name,
            backend,
            device,
            cast(Any, model),
            tag=LOAD_MEME_CLF_VERSION,
            inputs=cast(Any, None),
            outputs=cast(Any, None),
        )
        print(f"{name} Loaded")
    # STONK models — same filtering, with a progress counter in the log line.
    names_on_disk = [
        os.path.splitext(file)[0]
        for file in os.listdir(LOAD_STONK_REPO.format("jit"))
        if "backup" not in file
    ]
    names_to_load = [name for name in names_on_disk if name not in current_stonks]
    for idx, name in enumerate(names_to_load):
        print(f"{idx+1}/{len(names_to_load)} - {name}")
        model = ml2rt.load_model(LOAD_STONK_REPO.format("jit") + f"{name}.pt")
        _ = rai.modelset(
            name,
            backend,
            device,
            cast(Any, model),
            tag=LOAD_STONK_VERSION,
            inputs=cast(Any, None),
            outputs=cast(Any, None),
        )
def init(config):
    """Load the TF graph and sample image, stashing state on ``init`` itself.

    Side effects (function-attribute pattern — state lives on the function):
        init.con       -- RedisAI client connected to config['server'].
        init.img_class -- class label of the sample image.
        init.image     -- sample image as a BlobTensor.
    """
    # Split "host:port" once instead of calling split(':') twice.
    server_parts = config['server'].split(':')
    host, port = server_parts[0], server_parts[1]
    # NOTE(review): port stays a string as in the original — rai.Client
    # presumably coerces it; confirm against the client's signature.
    init.con = rai.Client(host=host, port=port)
    graph = raimodel.Model.load(config['modelpath'])
    inputs = ['images']
    outputs = ['output']
    init.con.modelset('graph', rai.Backend.tf, rai.Device.cpu, graph,
                      input=inputs, output=outputs)
    image, init.img_class = get_one_image()
    init.image = rai.BlobTensor.from_numpy(image)
def dump_articles(articles):
    """Embed each article with the RedisAI-hosted BERT model and persist it.

    Args:
        articles: iterable of dicts with 'clean_content', 'raw_content',
            'link' and 'title' keys.

    Articles whose save violates a uniqueness constraint are skipped.
    """
    client = rai.Client(host='localhost', port='6379')
    tok = Tokenizer()
    for item in articles:
        # Tokenise, push through the model, and pull the embedding back out.
        client.tensorset("input", tok.run([item['clean_content']]))
        client.modelrun("bert", ["input"], ["output"])
        vector = client.tensorget("output")[0].tolist()
        record = Article(
            link=item['link'],
            title=item['title'],
            content=item['raw_content'],
            timestamp=datetime.utcnow(),
            embedding=vector,
        )
        try:
            record.save()
        except IntegrityError:
            # Duplicate row — skip this article and keep going.
            continue
def __init__(self, network: nn.Module, dataset: KubeDataset, gpu=False):
    """Create a KubeModel bound to *network* and *dataset*.

    Args:
        network: the torch module to train/serve.
        dataset: the KubeDataset supplying the data.
        gpu: when True the platform string is 'gpu', otherwise 'cpu'.
    """
    self._network = network
    self._dataset = dataset
    if gpu:
        self.platform = 'gpu'
    else:
        self.platform = 'cpu'
    # Filled in later during setup.
    self.device = self.args = self.logger = None
    # Per-iteration training options, populated when the parameters are
    # read in each iteration.
    self.lr = self.batch_size = self.task = self.optimizer = None
    # RedisAI connection used by this model instance.
    self._redis_client = rai.Client(host=REDIS_URL, port=REDIS_PORT)
def predict_object():
    """Classify 'images/x.png' with the ImageNet ResNet50 model via RedisAI.

    Loads the TF model and pre/post-processing script onto the server, runs
    the full pipeline, and returns the predicted class label.
    """
    # Device selection follows the CLI flag.
    device = rai.Device.gpu if arguments.gpu else rai.Device.cpu
    con = rai.Client(host=arguments.host, port=arguments.port)
    tf_model_path = 'models/tensorflow/imagenet/resnet50.pb'
    script_path = 'models/tensorflow/imagenet/data_processing_script.txt'
    img_path = 'images/x.png'
    # Close the file handle instead of leaking it via json.load(open(...)).
    with open("data/imagenet_classes.json") as f:
        class_idx = json.load(f)
    image = io.imread(img_path)
    tf_model = load_model(tf_model_path)
    script = load_script(script_path)
    con.modelset('imagenet_model', rai.Backend.tf, device,
                 inputs=['images'], outputs=['output'], data=tf_model)
    con.scriptset('imagenet_script', device, script)
    # Pipeline: preprocess -> model -> postprocess, via intermediate keys.
    tensor = rai.BlobTensor.from_numpy(image)
    con.tensorset('image', tensor)
    con.scriptrun('imagenet_script', 'pre_process_3ch', 'image', 'temp1')
    con.modelrun('imagenet_model', 'temp1', 'temp2')
    con.scriptrun('imagenet_script', 'post_process', 'temp2', 'out')
    final = con.tensorget('out', as_type=rai.BlobTensor)
    ind = final.to_numpy().item()
    return class_idx[str(ind)]
# -*- coding: utf-8 -*-
import ml2rt
import redisai as rai

from src.utils.config import Config

if __name__ == '__main__':
    config = Config()
    # Abort early if the configuration is invalid.
    if not Config.validate_configs():
        exit(1)
    # port is an integer, matching redis-py's Redis(...) signature
    # (the original passed the string '6379').
    connection_rai = rai.Client(host='localhost', port=6379)
    model = ml2rt.load_model('resources/bert.rai')
    connection_rai.modelset("bert", 'onnx', config.device, model)
import json
import time

import redisai as rai
import ml2rt
from skimage import io

from cli import arguments

# Device string follows the CLI flag.
if arguments.gpu:
    device = 'gpu'
else:
    device = 'cpu'

con = rai.Client(host=arguments.host, port=arguments.port)

pt_model_path = '../models/pytorch/imagenet/resnet50.pt'
script_path = '../models/pytorch/imagenet/data_processing_script.txt'
img_path = '../data/cat.jpg'

# Close the file handle instead of leaking it via json.load(open(...)).
with open("../data/imagenet_classes.json") as f:
    class_idx = json.load(f)

image = io.imread(img_path)
pt_model = ml2rt.load_model(pt_model_path)
script = ml2rt.load_script(script_path)

# Register model + script, then run the preprocessing/model stages.
# (outN / a are kept: this chunk may be truncated and later code may use them.)
out1 = con.modelset('imagenet_model', 'torch', device, pt_model)
out2 = con.scriptset('imagenet_script', device, script)
a = time.time()
out3 = con.tensorset('image', image)
out4 = con.scriptrun('imagenet_script', 'pre_process_3ch', 'image', 'temp1')
out5 = con.modelrun('imagenet_model', 'temp1', 'temp2')
import json
import time

import redisai as rai
import ml2rt
from skimage import io

con = rai.Client(host='localhost', port=6379, db=0)

pt_model_path = '../models/pytorch/imagenet/resnet50.pt'
script_path = '../models/pytorch/imagenet/data_processing_script.txt'
img_path = '../data/cat.jpg'

# Close the file handle instead of leaking it via json.load(open(...)).
with open("../data/imagenet_classes.json") as f:
    class_idx = json.load(f)

image = io.imread(img_path)
pt_model = ml2rt.load_model(pt_model_path)
script = ml2rt.load_script(script_path)

# Register model + script on the server, then run the full pipeline:
# preprocess -> model -> postprocess, via intermediate tensor keys.
out1 = con.modelset('imagenet_model', rai.Backend.torch, rai.Device.cpu, pt_model)
out2 = con.scriptset('imagenet_script', rai.Device.cpu, script)
a = time.time()
tensor = rai.BlobTensor.from_numpy(image)
out3 = con.tensorset('image', tensor)
out4 = con.scriptrun('imagenet_script', 'pre_process_3ch', 'image', 'temp1')
out5 = con.modelrun('imagenet_model', 'temp1', 'temp2')
out6 = con.scriptrun('imagenet_script', 'post_process', 'temp2', 'out')
final = con.tensorget('out')
ind = final.value[0]
print(ind, class_idx[str(ind)])
def __init__(self, host='localhost', port=6379, db=0):
    """Open a raw redis command handle and a RedisAI client to the same server.

    Args:
        host: Redis server hostname.
        port: Redis server port.
        db: Redis logical database index.
    """
    # NOTE(review): presumably a cap used elsewhere — confirm at call sites.
    self.max_len = 10
    # Keep only the bound execute_command method; the underlying Redis
    # object itself is not retained.
    self.exec = redis.Redis(host=host, port=port, db=db).execute_command
    self.con = rai.Client(host=host, port=port, db=db)
from flask import Flask
from flask import request, send_from_directory
from utils import init_context, init_conversation, process_output, ids2text
import numpy as np
import redisai

# Serve the bundled frontend from the /frontend static path.
app = Flask(__name__, static_url_path='/frontend')
# Module-level RedisAI connection (default host/port) shared by all requests.
con = redisai.Client()
context = init_context()


@app.route('/')
def frontend():
    """Serve the single-page frontend entry point."""
    return send_from_directory('frontend', 'index.html')


@app.route('/<path:path>')
def frontend_assets(path):
    """Serve static frontend assets (js/css/images) by path."""
    return send_from_directory('frontend', path)


@app.route('/next')
def next_():
    # Exactly one of ``lastid`` / ``premise`` must be supplied.
    last = request.args.get('lastid')
    premise = request.args.get('premise')
    if last and premise:
        return {"error": "You shouldn't send both ``last`` and ``context``"}
    elif not any([last, premise]):
        return {"error": "You must send something"}
    elif last:
        # Wrap the token id as a (1, 1) int array for the model.
        # NOTE(review): handler appears truncated here — the premise branch
        # and the response construction are outside this view.
        last = np.array([[int(last)]])
import redisai as rai

# Remote RedisAI server that will host the ImageNet model.
con = rai.Client(host='159.65.150.75', port=6379, db=0)

pt_model_path = '../models/imagenet/pytorch/resnet50.pt'
script_path = '../models/imagenet/pytorch/data_processing_script.txt'

# NOTE(review): rai.load_model/rai.load_script suggest an old redisai-py API;
# newer releases moved these helpers to ml2rt — confirm the pinned version.
pt_model = rai.load_model(pt_model_path)
script = rai.load_script(script_path)

# Register the model and its processing script under well-known keys.
# (out1/out2 kept: this chunk may be truncated and later code may use them.)
out1 = con.modelset('imagenet_model', rai.Backend.torch, rai.Device.cpu, pt_model)
out2 = con.scriptset('imagenet_script', rai.Device.cpu, script)
import onnxruntime as rt

# Sanity-check the ONNX model locally with onnxruntime before loading it
# into RedisAI, so both predictions can be compared.
# NOTE(review): `sample`, `np`, `redis` and `redisai` are defined outside
# this view — presumably earlier in the file; confirm.
sess = rt.InferenceSession('rfc_onx.onnx')
input_name = sess.get_inputs()[0].name
label_name = sess.get_outputs()[0].name
pred_onx = sess.run(
    [label_name], {input_name: sample.astype(np.float32)})[0]
print('onnx prediction...')
print(pred_onx)

conn = redis.Redis(host='localhost', port=6379, db=0)
with open("rfc_onx.onnx", "rb") as f:
    model = f.read()
# Register the model via the raw AI.MODELSET command interface.
res = conn.execute_command('AI.MODELSET', 'sklmodel', 'ONNX', 'CPU', model)

# Run the same sample through RedisAI and print its prediction.
rai = redisai.Client()
tensor = redisai.BlobTensor.from_numpy(
    sample.astype(np.float32))
rai.tensorset('tensor', tensor)
rai.modelrun('sklmodel', inputs=['tensor'], outputs=['out_label', 'out_probs'])
out = rai.tensorget('out_label')
print('RedisAI prediction...')
print(out)

print('Load gear')
with open('rfc_gear.py', 'rb') as f:
    gear = f.read()
# Register the RedisGears script that consumes the input stream.
res = conn.execute_command('RG.PYEXECUTE', gear)
print(res)

# Seed the input stream the gear reads from.
conn.xadd('xIn', {'text':'an apple a day'}, maxlen=None)
import redisai as rai
import ml2rt
import numpy as np
import tensorflow as tf
from skimage import io
import json

con = rai.Client()

#model = ml2rt.load_model('reference_model/3/resnet50.pb')

filepath = 'data/guitar.jpg'
# Scale pixel values into [0, 1] and add a batch dimension.
numpy_img = io.imread(filepath).astype(dtype=np.float32)
numpy_img = np.expand_dims(numpy_img, axis=0) / 255
print(numpy_img)

#con.modelset('model',rai.Backend.tf, rai.Device.cpu, input=['images'], output=['output'], data=model)
con.tensorset('images', numpy_img)
con.modelrun('model', input=['images'], output=['output'])
ret = con.tensorget('output', as_type=rai.BlobTensor).to_numpy()

# Close the file handle instead of leaking it via json.load(open(...)).
with open('data/imagenet_classes.json') as f:
    classes_idx = json.load(f)

ind = ret.argmax()
print(ind, ret.shape)
# NOTE(review): the `- 1` offset implies the class index file is 1-based
# relative to the model output — confirm.
print(classes_idx[str(ind - 1)])
                    type=str, default='redis://127.0.0.1:6379')
args = parser.parse_args()

# NOTE(review): this chunk begins mid-way through a parser.add_argument(...)
# call — the opening of that call is outside this view.

# Set up some vars
# input_stream_key = '{}:{}'.format(args.camera_prefix, args.camera_id) # Input video stream key name
# initialized_key = '{}:initialized'.format(input_stream_key)
# NOTE(review): hard-coded to GPU regardless of CLI args — confirm intended.
device = rai.Device.gpu
pt_model_path = 'canny.pt'
# script_path = '../models/pytorch/imagenet/data_processing_script.txt'

# Set up Redis connection
url = urlparse(args.url)
# conn = redis.Redis(host=url.hostname, port=url.port)
conn = rai.Client(host=url.hostname, port=url.port)
if not conn.ping():
    raise Exception('Redis unavailable')

# Load the RedisAI model
print('Loading model - ', end='')
pt_model = ml2rt.load_model(pt_model_path)
# script = ml2rt.load_script(script_path)
out1 = conn.modelset('canny_model', rai.Backend.torch, device, pt_model)
# out2 = conn.scriptset('canny_script', device, script)

# Load the gear
print('Loading gear - ', end='')
with open('canny_gear.py', 'rb') as f:
    gear = f.read()
def __init__(self, host='localhost', port=6379, db=0):
    """Connect a RedisAI client for this object.

    Args:
        host: Redis server hostname.
        port: Redis server port.
        db: Redis logical database index.
    """
    self.con = rai.Client(host=host, port=port, db=db)
    # NOTE(review): presumably a cap used elsewhere — confirm at call sites.
    self.max_len = 10