async def addToGraphRunner(x):
    x = x['value']
    try:
        xlog('addToGraphRunner:', 'count=', x['count'])
        # Convert the image to a matrix of colors
        data = io.BytesIO(x['img'])
        dataM = imageio.imread(data).astype(dtype='float32')
        newImg = (cv2.resize(dataM, (224, 224)) / 128) - 1
        l = numpy.asarray(newImg, dtype=numpy.float32)
        img_ba = bytearray(l.tobytes())
        # Convert the color matrix to a tensor
        v1 = redisAI.createTensorFromBlob('FLOAT', [1, 224, 224, 3], img_ba)
        # Create the model runner; 'mobilenet:model' is the Redis key under which the model is stored
        graphRunner = redisAI.createModelRunner('mobilenet:model')
        redisAI.modelRunnerAddInput(graphRunner, 'input', v1)
        redisAI.modelRunnerAddOutput(graphRunner, 'MobilenetV2/Predictions/Reshape_1')
        # Run the model ASYNCHRONOUSLY so the server is not blocked,
        # then translate the result to a Python list
        res = await redisAI.modelRunnerRunAsync(graphRunner)
        res = redisAI.tensorToFlatList(res[0])
        # Extract the animal name
        res1 = sorted(res, reverse=True)
        animal = index[str(res.index(res1[0]) - 1)][1]
        xlog('addToGraphRunner:', 'animal=', animal)
        return (animal, x['img'])
    except Exception:
        xlog('addToGraphRunner: error:', sys.exc_info()[0])
def addToGraphRunner(x):
    # Convert the image to a matrix of colors
    data = io.BytesIO(x['img'])
    dataM = imageio.imread(data).astype(dtype='float32')
    newImg = (cv2.resize(dataM, (224, 224)) / 128) - 1
    l = numpy.asarray(newImg, dtype=numpy.float32)
    img_ba = bytearray(l.tobytes())
    # Convert the color matrix to a tensor
    v1 = redisAI.createTensorFromBlob('FLOAT', [1, 224, 224, 3], img_ba)
    # Create the model runner; 'mobilenet:model' is the Redis key under which the model is stored
    graphRunner = redisAI.createModelRunner('mobilenet:model')
    redisAI.modelRunnerAddInput(graphRunner, 'input', v1)
    redisAI.modelRunnerAddOutput(graphRunner, 'MobilenetV2/Predictions/Reshape_1')
    # Run the model and translate the result to a Python list
    res = redisAI.tensorToFlatList(redisAI.modelRunnerRun(graphRunner)[0])
    # Extract the animal name
    res1 = sorted(res, reverse=True)
    return (index[str(res.index(res1[0]) - 1)][1], x['img'])
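# Both addToGraphRunner variants rely on an `index` mapping and an `xlog` helper
# defined elsewhere. A minimal sketch, assuming the RedisGears built-in log() and
# a local copy of the standard ImageNet class-index JSON (the file name here is
# an assumption, not confirmed by the snippets above):
import json

def xlog(*args):
    # Forward a space-joined message to the RedisGears log
    log(' '.join(str(a) for a in args))

# index maps a class id (as a string) to a [wnid, label] pair, so
# index['285'][1] yields a human-readable label such as 'Egyptian_cat'
with open('imagenet_class_index.json') as f:
    index = json.load(f)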
def execute_model(i, transaction_tensor, reference_tensor):
    ''' Runs the model stored under 'model_<i>' on a transaction and a reference tensor '''
    modelRunner = redisAI.createModelRunner('model_' + str(i))
    redisAI.modelRunnerAddInput(modelRunner, 'transaction', transaction_tensor)
    redisAI.modelRunnerAddInput(modelRunner, 'reference', reference_tensor)
    redisAI.modelRunnerAddOutput(modelRunner, 'output')
    model_replies = redisAI.modelRunnerRun(modelRunner)
    return model_replies[0]
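# A hedged usage sketch: with execute_model, the duplicated model runs in
# is_fraud (further below) can collapse into a loop. n_models and this helper's
# name are illustrations mirroring that function's two-model setup:
def run_all_models(transaction_tensor, reference_tensor, n_models=2):
    # Run model_1 .. model_<n_models> and return their output tensors
    return [execute_model(i, transaction_tensor, reference_tensor)
            for i in range(1, n_models + 1)]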
async def qa(record):
    log("Called with " + str(record))
    log("Key " + str(record[1]))
    log("Question " + str(record[2]))
    global tokenizer
    import redisAI
    import numpy as np

    sentence_key = record[1]
    question = record[2]
    hash_tag = "{%s}" % hashtag()
    log("Shard_id " + hash_tag)
    if not tokenizer:
        tokenizer = loadTokeniser()
    token_key = f"tokenized:bert:qa:{sentence_key}"

    # Tokenize the question and fetch the pre-tokenized context from the keyspace
    input_ids_question = tokenizer.encode(question, add_special_tokens=True,
                                          truncation=True, return_tensors="np")
    t = redisAI.getTensorFromKey(token_key)
    input_ids_context = to_np(t, np.int64)

    # Build the combined BERT inputs: token ids, attention mask and segment ids
    input_ids = np.append(input_ids_question, input_ids_context)
    attention_mask = np.array([[1] * len(input_ids)])
    input_idss = np.array([input_ids])
    num_seg_a = input_ids_question.shape[1]
    num_seg_b = input_ids_context.shape[0]
    token_type_ids = np.array([0] * num_seg_a + [1] * num_seg_b)

    modelRunner = redisAI.createModelRunner(f'bert-qa{hash_tag}')
    input_idss_ts = redisAI.createTensorFromBlob('INT64', input_idss.shape, input_idss.tobytes())
    attention_mask_ts = redisAI.createTensorFromBlob('INT64', attention_mask.shape, attention_mask.tobytes())
    token_type_ids_ts = redisAI.createTensorFromBlob('INT64', token_type_ids.shape, token_type_ids.tobytes())
    redisAI.modelRunnerAddInput(modelRunner, 'input_ids', input_idss_ts)
    redisAI.modelRunnerAddInput(modelRunner, 'attention_mask', attention_mask_ts)
    redisAI.modelRunnerAddInput(modelRunner, 'token_type_ids', token_type_ids_ts)
    redisAI.modelRunnerAddOutput(modelRunner, 'answer_start_scores')
    redisAI.modelRunnerAddOutput(modelRunner, 'answer_end_scores')

    # Run asynchronously so the server is not blocked
    res = await redisAI.modelRunnerRunAsync(modelRunner)
    answer_start_scores = to_np(res[0], np.float32)
    answer_end_scores = to_np(res[1], np.float32)
    answer_start = np.argmax(answer_start_scores)
    answer_end = np.argmax(answer_end_scores) + 1
    answer = tokenizer.convert_tokens_to_string(
        tokenizer.convert_ids_to_tokens(input_ids[answer_start:answer_end],
                                        skip_special_tokens=True))
    log("Answer " + str(answer))
    return answer
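# qa() relies on a to_np helper and a loadTokeniser factory defined elsewhere.
# to_np can be reconstructed from the inline equivalent in the verbose qa()
# variant further below; loadTokeniser is a sketch assuming the Hugging Face
# transformers tokenizer (the exact pretrained model name is an assumption):
def to_np(t, dtype):
    import redisAI
    import numpy as np
    # Reinterpret the RedisAI tensor's raw blob as a numpy array with its dims
    return np.frombuffer(redisAI.tensorGetDataAsBlob(t), dtype=dtype).reshape(
        redisAI.tensorGetDims(t))

def loadTokeniser():
    from transformers import BertTokenizerFast
    return BertTokenizerFast.from_pretrained(
        "bert-large-uncased-whole-word-masking-finetuned-squad")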
def runModel(x):
    print(x['text'])
    # Vectorize the input text into a dense float32 array
    sample = vectorizer.transform([x['text']]).toarray()
    ba = np.asarray(sample, dtype=np.float32)
    modelRunner = rai.createModelRunner('sklmodel')
    # The original passed the numpy array straight to modelRunnerAddInput and hit
    # "ERROR: type 0 is not supported in this function", leaving model_replies = None.
    # Wrapping the array in a RedisAI tensor first (as the other runners here do)
    # is the likely fix:
    input_tensor = rai.createTensorFromBlob('FLOAT', ba.shape, bytearray(ba.tobytes()))
    rai.modelRunnerAddInput(modelRunner, 'float_input', input_tensor)
    rai.modelRunnerAddOutput(modelRunner, 'output_label')
    rai.modelRunnerAddOutput(modelRunner, 'output_probability')
    model_replies = rai.modelRunnerRun(modelRunner)
    print("runModel output...")
    print(str(model_replies))
def is_fraud(record):
    # Retrieve the reference tensor keys from the keyspace.
    # Range query with LIMIT 100 (without the limit it can return 100-150K
    # results, which hurts performance)
    ref_data_keys = execute("ZRANGEBYSCORE", "references", record[1], record[2],
                            "LIMIT", "0", "100")
    # Add the "_tensor" suffix to every returned key
    keys = [x + "_tensor" for x in ref_data_keys]
    # Append the new transaction tensor key
    keys.append(record[3])
    # Fetch all tensors from the keyspace in one call
    tensors = redisAI.mgetTensorsFromKeyspace(keys)
    # Split off the reference tensors from the new sample (the last key appended)
    ref_data = tensors[:-1]
    new_sample = tensors[-1]
    # Concatenate the reference tensors into a single reference tensor
    # with a TorchScript function
    scriptRunner = redisAI.createScriptRunner('concat_script', 'concat_tensors')
    redisAI.scriptRunnerAddInputList(scriptRunner, ref_data)
    redisAI.scriptRunnerAddOutput(scriptRunner)
    ref_data = redisAI.scriptRunnerRun(scriptRunner)[0]
    # Run two models over the reference data and the transaction
    modelRunner = redisAI.createModelRunner('model_1')
    redisAI.modelRunnerAddInput(modelRunner, 'transaction', new_sample)
    redisAI.modelRunnerAddInput(modelRunner, 'reference', ref_data)
    redisAI.modelRunnerAddOutput(modelRunner, 'output')
    output_1 = redisAI.modelRunnerRun(modelRunner)[0]
    modelRunner = redisAI.createModelRunner('model_2')
    redisAI.modelRunnerAddInput(modelRunner, 'transaction', new_sample)
    redisAI.modelRunnerAddInput(modelRunner, 'reference', ref_data)
    redisAI.modelRunnerAddOutput(modelRunner, 'output')
    output_2 = redisAI.modelRunnerRun(modelRunner)[0]
    # Average the results with numpy and set the result in the keyspace
    shape = redisAI.tensorGetDims(output_1)
    reply_ndarray_0 = np.frombuffer(redisAI.tensorGetDataAsBlob(output_1), dtype=np.float32).reshape(shape)
    reply_ndarray_1 = np.frombuffer(redisAI.tensorGetDataAsBlob(output_2), dtype=np.float32).reshape(shape)
    res = (reply_ndarray_0 + reply_ndarray_1) / 2.0
    output = redisAI.createTensorFromBlob('FLOAT', res.shape, res.tobytes())
    redisAI.setTensorInKey('model_result', output)
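# A hedged sketch of how is_fraud might be registered as a RedisGears
# CommandReader function; the trigger name is an assumption. With this
# registration, record[0] is the trigger and record[1..3] carry the score
# range and the transaction tensor key, matching the indexing above:
gb = GB('CommandReader')
gb.map(is_fraud)
gb.register(trigger='is_fraud')
# Clients would then invoke it with:
#   RG.TRIGGER is_fraud <min_score> <max_score> <transaction_tensor_key>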
def predictImage(x):
    try:
        detectedProbability = np.array([])
        detectedClasses = np.array([])
        bloburl = ''
        if x['value']['image']:
            # Decode the image and resize it to the model's 320x320 input
            image_data = io.BytesIO(x['value']['image'])
            image = Image.open(image_data)
            numpy_img = np.array(image)
            resize_img = cv2.resize(numpy_img, (320, 320), interpolation=cv2.INTER_LINEAR)
            inputs = np.array(resize_img, dtype=np.float32)[np.newaxis, :, :, :]
            img_ba = bytearray(inputs.tobytes())
            v1 = redisAI.createTensorFromBlob('FLOAT', [1, 320, 320, 3], img_ba)
            # Run the Custom Vision model with three outputs: boxes, scores and classes
            graphRunner = redisAI.createModelRunner('customvisionmodel')
            redisAI.modelRunnerAddInput(graphRunner, 'image_tensor', v1)
            redisAI.modelRunnerAddOutput(graphRunner, 'detected_boxes')
            redisAI.modelRunnerAddOutput(graphRunner, 'detected_scores')
            redisAI.modelRunnerAddOutput(graphRunner, 'detected_classes')
            res = redisAI.modelRunnerRun(graphRunner)
            res1 = redisAI.tensorToFlatList(res[0])
            res2 = redisAI.tensorToFlatList(res[1])
            res3 = redisAI.tensorToFlatList(res[2])
            redisgears.executeCommand('xadd', 'result1', '*', 'text', res1)
            redisgears.executeCommand('xadd', 'result2', '*', 'text', res2)
            redisgears.executeCommand('xadd', 'result3', '*', 'text', res3)
            # Drop detections below the 0.5 probability threshold
            deleteLowProbResult = []
            for idx, prediction in enumerate(res2):
                if prediction < 0.5:
                    deleteLowProbResult.append(idx)
            # Reshape the flat box list into rows of four coordinates
            array_2d_rowcount = int(len(res1) / 4)
            arr_2d = np.reshape(res1, (array_2d_rowcount, 4))
            detectedBoxes = np.delete(arr_2d, deleteLowProbResult, axis=0)
            detectedProbability = np.delete(res2, deleteLowProbResult)
            detectedClasses = np.delete(res3, deleteLowProbResult)
            # Draw the boxes on the image and upload it to Azure Blob Storage
            imagename = x['value']['imagename']
            connectionString = getSecret("azure_blob_secret")
            blob = BlobClient.from_connection_string(
                conn_str=connectionString, container_name=ContainerName,
                blob_name=imagename)
            add_boxes_to_images(image, detectedBoxes, detectedClasses, blob)
            bloburl = getBlobUrl(imagename, connectionString)
        weatherCondition = x['value']['weather']
        windSpeed = x['value']['windSpeed']
        isDone = x['value']['isDone']
        return detectedProbability, detectedClasses, bloburl, weatherCondition, windSpeed, isDone
    except Exception:
        xlog('Predict_image: error:', sys.exc_info())
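# predictImage calls getSecret, add_boxes_to_images and getBlobUrl, which are
# defined elsewhere. A minimal sketch of getBlobUrl assuming the
# azure-storage-blob SDK, whose BlobClient.url property is the blob's full URL;
# ContainerName is the same module-level constant the function above uses:
def getBlobUrl(imagename, connectionString):
    blob = BlobClient.from_connection_string(
        conn_str=connectionString, container_name=ContainerName,
        blob_name=imagename)
    return blob.url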
def runYolo(x):
    ''' Runs the model on an input image using RedisAI '''
    IMG_SIZE = 736  # Model's input size

    # Read the image from the stream's message
    buf = io.BytesIO(x['image'])
    pil_image = Image.open(buf)
    numpy_img = np.array(pil_image)
    image = process_image(numpy_img, IMG_SIZE)

    # Prepare the image and shape tensors as model inputs
    image_tensor = redisAI.createTensorFromBlob('FLOAT', [1, IMG_SIZE, IMG_SIZE, 3], image.tobytes())
    shape_tensor = redisAI.createTensorFromValues('FLOAT', [2], [IMG_SIZE, IMG_SIZE])

    # Create yolo's RedisAI model runner and run it
    modelRunner = redisAI.createModelRunner('yolo:model')
    redisAI.modelRunnerAddInput(modelRunner, 'input_1', image_tensor)
    redisAI.modelRunnerAddInput(modelRunner, 'input_image_shape', shape_tensor)
    redisAI.modelRunnerAddOutput(modelRunner, 'concat_13')
    redisAI.modelRunnerAddOutput(modelRunner, 'concat_12')
    redisAI.modelRunnerAddOutput(modelRunner, 'concat_11')
    model_reply = redisAI.modelRunnerRun(modelRunner)

    # Get the model's outputs
    classes_tensor = model_reply[0]
    shape = redisAI.tensorGetDims(classes_tensor)
    buf = redisAI.tensorGetDataAsBlob(classes_tensor)
    classes = np.frombuffer(buf, dtype=np.float32).reshape(shape)
    boxes_tensor = model_reply[2]
    shape = redisAI.tensorGetDims(boxes_tensor)
    buf = redisAI.tensorGetDataAsBlob(boxes_tensor)
    boxes = np.frombuffer(buf, dtype=np.float32).reshape(shape)

    # Extract the people boxes
    boxes_out = []
    people_count = 0
    ratio = float(IMG_SIZE) / max(pil_image.width, pil_image.height)  # ratio = resized / original
    pad_x = (IMG_SIZE - pil_image.width * ratio) / 2   # Width padding
    pad_y = (IMG_SIZE - pil_image.height * ratio) / 2  # Height padding
    for ind, class_val in enumerate(classes):
        if class_val == 0:  # Class 0 is 'person'
            people_count += 1
            # Descale coordinates back to the original image size
            top, left, bottom, right = boxes[ind]
            x1 = (left - pad_x) / ratio
            x2 = (right - pad_x) / ratio
            y1 = (top - pad_y) / ratio
            y2 = (bottom - pad_y) / ratio
            # Store boxes as a flat list
            boxes_out += [x1, y1, x2, y2]
    return x['streamId'], people_count, boxes_out
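# process_image is defined elsewhere. A hedged sketch consistent with the
# letterbox math in the runYolo variants (scale by IMG_SIZE / max(w, h), center
# with padding, normalize to [0, 1], add a batch dimension); the gray padding
# value of 128 is an assumption:
def process_image(img, size):
    h, w = img.shape[:2]
    ratio = float(size) / max(w, h)
    new_w, new_h = int(w * ratio), int(h * ratio)
    resized = cv2.resize(img, (new_w, new_h))
    # Paste the resized image onto a square gray canvas, centered
    canvas = np.full((size, size, 3), 128, dtype=np.float32)
    pad_x, pad_y = (size - new_w) // 2, (size - new_h) // 2
    canvas[pad_y:pad_y + new_h, pad_x:pad_x + new_w] = resized
    return (canvas / 255.0).astype(np.float32)[np.newaxis, :, :, :]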
async def qa(record):
    log("Called with " + str(record))
    log("Trigger " + str(record[0]))
    log("Key " + str(record[1]))
    log("Question " + str(record[2]))
    global tokenizer
    import redisAI
    import numpy as np

    sentence_key = record[1]
    question = record[2]
    hash_tag = "{%s}" % hashtag()
    log("Shard_id " + hash_tag)
    if not tokenizer:
        tokenizer = loadTokeniser()
    token_key = f"tokenized:bert:qa:{sentence_key}"

    input_ids_question = tokenizer.encode(question, add_special_tokens=True,
                                          truncation=True, return_tensors="np")
    log("Input ids question " + str(input_ids_question))
    log("Input ids question shape " + str(input_ids_question.shape))
    log("Input ids question dtype " + str(input_ids_question.dtype))
    t = redisAI.getTensorFromKey(token_key)
    input_ids_context = np.frombuffer(redisAI.tensorGetDataAsBlob(t), dtype=np.int64).reshape(
        redisAI.tensorGetDims(t))
    log("Input ids context " + str(input_ids_context))
    log("Input ids context shape " + str(input_ids_context.shape))
    log("Input ids context dtype " + str(input_ids_context.dtype))

    input_ids = np.append(input_ids_question, input_ids_context)
    log("Combined input_ids shape " + str(input_ids.shape))
    attention_mask = np.array([[1] * len(input_ids)])
    input_idss = np.array([input_ids])
    log("Input ids shape " + str(input_idss.shape))
    log("Attention mask shape " + str(attention_mask.shape))
    num_seg_a = input_ids_question.shape[1]
    log(str(num_seg_a))
    num_seg_b = input_ids_context.shape[0]
    # num_seg_b = redisAI.tensorGetDims(input_ids_context)[0]
    log("Tensor get dims " + str(num_seg_b))
    token_type_ids = np.array([0] * num_seg_a + [1] * num_seg_b)
    log("Segment ids shape " + str(token_type_ids.shape))

    modelRunner = redisAI.createModelRunner(f'bert-qa{hash_tag}')
    input_idss_ts = redisAI.createTensorFromBlob('INT64', input_idss.shape, input_idss.tobytes())
    attention_mask_ts = redisAI.createTensorFromBlob('INT64', attention_mask.shape, attention_mask.tobytes())
    token_type_ids_ts = redisAI.createTensorFromBlob('INT64', token_type_ids.shape, token_type_ids.tobytes())
    redisAI.modelRunnerAddInput(modelRunner, 'input_ids', input_idss_ts)
    redisAI.modelRunnerAddInput(modelRunner, 'attention_mask', attention_mask_ts)
    redisAI.modelRunnerAddInput(modelRunner, 'token_type_ids', token_type_ids_ts)
    redisAI.modelRunnerAddOutput(modelRunner, 'answer_start_scores')
    redisAI.modelRunnerAddOutput(modelRunner, 'answer_end_scores')
    res = await redisAI.modelRunnerRunAsync(modelRunner)
    # redisAI.setTensorInKey('c{1}', res[0])
    log(str(res[0]))
    log("Answer end " + str(res[1]))
    log(f"Model run on {hash_tag}")

    answer_start_scores = np.frombuffer(redisAI.tensorGetDataAsBlob(res[0]), dtype=np.float32).reshape(
        redisAI.tensorGetDims(res[0]))
    answer_end_scores = np.frombuffer(redisAI.tensorGetDataAsBlob(res[1]), dtype=np.float32).reshape(
        redisAI.tensorGetDims(res[1]))
    log("Answer start scores type " + str(type(answer_start_scores)))
    answer_start = np.argmax(answer_start_scores)
    answer_end = np.argmax(answer_end_scores) + 1
    log("Answer start " + str(answer_start))
    log("Answer end " + str(answer_end))
    answer = tokenizer.convert_tokens_to_string(
        tokenizer.convert_ids_to_tokens(input_ids[answer_start:answer_end],
                                        skip_special_tokens=True))
    return answer
def runCanny(x):
    ''' Runs the model on an input image from the stream '''
    # Read the image from the stream's message
    buf = io.BytesIO(x['image'])
    pil_image = Image.open(buf)
    # Normalize to [0, 1] and reorder to channels-first; cast to float32 so the
    # buffer size matches the FLOAT tensor declared below
    image = (np.array(pil_image).transpose((2, 0, 1)) / 255.).astype(np.float32)

    # Tensorize the image for the model
    img_ba = bytearray(image.tobytes())
    image_tensor = redisAI.createTensorFromBlob('FLOAT', [1, 480, 640, 3], img_ba)

    # Create the RedisAI model runner and run it
    modelRunner = redisAI.createModelRunner('canny:model')
    redisAI.modelRunnerAddInput(modelRunner, 'input', image_tensor)
    redisAI.modelRunnerAddOutput(modelRunner, 'output')
    model_replies = redisAI.modelRunnerRun(modelRunner)
    # modelRunnerRun returns a list of output tensors; read the first one
    model_output = model_replies[0]
    shape = redisAI.tensorGetDims(model_output)
    buf = redisAI.tensorGetDataAsBlob(model_output)
    edges = np.frombuffer(buf, dtype=np.float32).reshape(shape)

    # The non-maxima-suppression script and people-box extraction used by
    # runYolo (below) are not needed for edge detection, so the output tensor
    # is returned directly
    return x['streamId'], edges
def runYolo(x):
    ''' Runs the model on an input image from the stream '''
    global prf
    IMG_SIZE = 416   # Model's input image size
    prf.start()      # Start a new profiler iteration

    # Read the image from the stream's message
    buf = io.BytesIO(x['image'])
    pil_image = Image.open(buf)
    numpy_img = np.array(pil_image)
    prf.add('read')

    # Resize, normalize and tensorize the image for the model
    # (number of images, width, height, channels)
    image = process_image(numpy_img, IMG_SIZE)
    img_ba = bytearray(image.tobytes())
    image_tensor = redisAI.createTensorFromBlob('FLOAT', [1, IMG_SIZE, IMG_SIZE, 3], img_ba)
    prf.add('resize')

    # Create the RedisAI model runner and run it
    modelRunner = redisAI.createModelRunner('yolo:model')
    redisAI.modelRunnerAddInput(modelRunner, 'input', image_tensor)
    redisAI.modelRunnerAddOutput(modelRunner, 'output')
    model_replies = redisAI.modelRunnerRun(modelRunner)
    model_output = model_replies[0]
    prf.add('model')

    # The model's output is processed with a PyTorch script for non-maxima suppression
    scriptRunner = redisAI.createScriptRunner('yolo:script', 'boxes_from_tf')
    redisAI.scriptRunnerAddInput(scriptRunner, model_output)
    redisAI.scriptRunnerAddOutput(scriptRunner)
    script_reply = redisAI.scriptRunnerRun(scriptRunner)
    prf.add('script')

    # The script outputs bounding boxes
    shape = redisAI.tensorGetDims(script_reply)
    buf = redisAI.tensorGetDataAsBlob(script_reply)
    boxes = np.frombuffer(buf, dtype=np.float32).reshape(shape)

    # Iterate the boxes to extract the people
    ratio = float(IMG_SIZE) / max(pil_image.width, pil_image.height)  # ratio = resized / original
    pad_x = (IMG_SIZE - pil_image.width * ratio) / 2   # Width padding
    pad_y = (IMG_SIZE - pil_image.height * ratio) / 2  # Height padding
    boxes_out = []
    people_count = 0
    for box in boxes[0]:
        if box[4] == 0.0:   # Skip zero-confidence detections
            continue
        if box[-1] != 14:   # Ignore detections that aren't people
            continue
        people_count += 1
        # Descale bounding box coordinates back to the original image size
        x1 = (IMG_SIZE * (box[0] - 0.5 * box[2]) - pad_x) / ratio
        y1 = (IMG_SIZE * (box[1] - 0.5 * box[3]) - pad_y) / ratio
        x2 = (IMG_SIZE * (box[0] + 0.5 * box[2]) - pad_x) / ratio
        y2 = (IMG_SIZE * (box[1] + 0.5 * box[3]) - pad_y) / ratio
        # Store boxes as a flat list
        boxes_out += [x1, y1, x2, y2]
    prf.add('boxes')
    return x['streamId'], people_count, boxes_out
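# runYolo references a module-level profiler `prf` with start() and add()
# methods. A minimal sketch of such a profiler, assuming add(name) records the
# time elapsed since the previous checkpoint (this is an illustration, not the
# original implementation):
import time

class Profiler:
    def __init__(self):
        self.times = {}
        self.last = None

    def start(self):
        # Begin a new iteration
        self.last = time.time()

    def add(self, name):
        # Record the time since the previous checkpoint under `name`
        now = time.time()
        self.times.setdefault(name, []).append(now - self.last)
        self.last = now

prf = Profiler()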