def addTensors(x):
    # Fetch the first input tensor from the keyspace
    tensors = redisAI.mgetTensorsFromKeyspace(['tensor_a'])
    # Fetch the second input tensor and append it to the list
    tensors.append(redisAI.getTensorFromKey('tensor_b'))
    # Log the dimensions of both tensors
    log(str(redisAI.tensorGetDims(tensors[0])))
    log(str(redisAI.tensorGetDims(tensors[1])))
    # Run the 'concat_tensors' entry point of the TorchScript stored at 'my_script'
    scriptRunner = redisAI.createScriptRunner('my_script', 'concat_tensors')
    redisAI.scriptRunnerAddInputList(scriptRunner, tensors)
    redisAI.scriptRunnerAddOutput(scriptRunner)
    script_reply = redisAI.scriptRunnerRun(scriptRunner)
    # Store the resulting tensor back in the keyspace under two keys
    redisAI.setTensorInKey('script_reply', script_reply[0])
    redisAI.msetTensorsInKeyspace({'script_reply_1': script_reply[0]})
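A function like this only runs once it is wired into a RedisGears pipeline; a minimal registration sketch, assuming a hypothetical stream key that is not part of the example above:

gb = GearsBuilder('StreamReader')  # consume records from a Redis stream
gb.map(addTensors)                 # run addTensors on every incoming record
gb.register('tensors:stream')      # hypothetical stream key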
Example #2
def is_fraud(record):
    # Retrieve tensors from keyspace
    # Range query with a limit of 100 (without the limit it can return 100-150K results, which hurts performance)
    ref_data_keys = execute("ZRANGEBYSCORE", "references", record[1],
                            record[2], "LIMIT", "0", "100")
    # Set "_tensor" suffix for every returned key
    keys = [x + "_tensor" for x in ref_data_keys]
    # Append the new transaction tensor key
    keys.append(record[3])
    # Fetch all of those tensors from the keyspace in a single mget
    tensors = redisAI.mgetTensorsFromKeyspace(keys)

    # Split into the reference data tensors and the new sample (the last tensor)
    ref_data = tensors[:len(tensors) - 1]
    new_sample = tensors[len(tensors) - 1]

    # Create a single reference tensor out of the reference data from the keyspace, using a TorchScript
    scriptRunner = redisAI.createScriptRunner('concat_script',
                                              'concat_tensors')
    redisAI.scriptRunnerAddInputList(scriptRunner, ref_data)
    redisAI.scriptRunnerAddOutput(scriptRunner)

    # Run two models over the reference data and the transaction
    ref_data = redisAI.scriptRunnerRun(scriptRunner)[0]
    modelRunner = redisAI.createModelRunner('model_1')
    redisAI.modelRunnerAddInput(modelRunner, 'transaction', new_sample)
    redisAI.modelRunnerAddInput(modelRunner, 'reference', ref_data)
    redisAI.modelRunnerAddOutput(modelRunner, 'output')
    output_1 = redisAI.modelRunnerRun(modelRunner)[0]
    modelRunner = redisAI.createModelRunner('model_2')
    redisAI.modelRunnerAddInput(modelRunner, 'transaction', new_sample)
    redisAI.modelRunnerAddInput(modelRunner, 'reference', ref_data)
    redisAI.modelRunnerAddOutput(modelRunner, 'output')
    output_2 = redisAI.modelRunnerRun(modelRunner)[0]

    # Average the two model outputs with NumPy and store the result in the keyspace
    shape = redisAI.tensorGetDims(output_1)
    reply_ndarray_0 = np.frombuffer(redisAI.tensorGetDataAsBlob(output_1),
                                    dtype=np.float32).reshape(shape)
    reply_ndarray_1 = np.frombuffer(redisAI.tensorGetDataAsBlob(output_2),
                                    dtype=np.float32).reshape(shape)
    res = (reply_ndarray_0 + reply_ndarray_1) / 2.0
    output = redisAI.createTensorFromBlob('FLOAT', res.shape, res.tobytes())
    redisAI.setTensorInKey('model_result', output)
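Both of the examples above call a TorchScript entry point named 'concat_tensors' that must already be stored in RedisAI (e.g. with AI.SCRIPTSET) under the key passed to createScriptRunner. The script itself is not shown in these examples; a minimal sketch of what such a function might look like:

import torch
from typing import List
from torch import Tensor

def concat_tensors(tensors: List[Tensor]) -> Tensor:
    # Concatenate the input tensors along the first (batch) dimension
    return torch.cat(tensors, dim=0)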
Example #3
def runYolo(x):
    ''' Runs the model on an input image from the stream '''
    global prf
    IMG_SIZE = 416  # Model's input image size
    prf.start()  # Start a new profiler iteration

    # log('read')

    # Read the image from the stream's message
    buf = io.BytesIO(x['image'])
    pil_image = Image.open(buf)
    numpy_img = np.array(pil_image)
    prf.add('read')

    # log('resize')
    # Resize, normalize and tensorize the image for the model (batch, height, width, channels)
    image = process_image(numpy_img, IMG_SIZE)
    # log('tensor')
    img_ba = bytearray(image.tobytes())
    image_tensor = redisAI.createTensorFromBlob('FLOAT',
                                                [1, IMG_SIZE, IMG_SIZE, 3],
                                                img_ba)
    prf.add('resize')

    # log('model')
    # Create the RedisAI model runner and run it
    modelRunner = redisAI.createModelRunner('yolo:model')
    redisAI.modelRunnerAddInput(modelRunner, 'input', image_tensor)
    redisAI.modelRunnerAddOutput(modelRunner, 'output')
    model_replies = redisAI.modelRunnerRun(modelRunner)
    model_output = model_replies[0]
    prf.add('model')

    # log('script')
    # The model's output is processed with a PyTorch script for non-maximum suppression
    scriptRunner = redisAI.createScriptRunner('yolo:script', 'boxes_from_tf')
    redisAI.scriptRunnerAddInput(scriptRunner, model_output)
    redisAI.scriptRunnerAddOutput(scriptRunner)
    script_reply = redisAI.scriptRunnerRun(scriptRunner)
    prf.add('script')

    # log('boxes')
    # The script outputs bounding boxes
    shape = redisAI.tensorGetDims(script_reply)
    buf = redisAI.tensorGetDataAsBlob(script_reply)
    boxes = np.frombuffer(buf, dtype=np.float32).reshape(shape)

    # Iterate boxes to extract the people
    ratio = float(IMG_SIZE) / max(pil_image.width,
                                  pil_image.height)  # scale factor: model input size / original size
    pad_x = (IMG_SIZE - pil_image.width * ratio) / 2  # Width padding
    pad_y = (IMG_SIZE - pil_image.height * ratio) / 2  # Height padding
    boxes_out = []
    people_count = 0
    for box in boxes[0]:
        if box[4] == 0.0:  # Skip zero-confidence detections
            continue
        if box[-1] != 14:  # Ignore detections that aren't people
            continue
        people_count += 1

        # Descale bounding box coordinates back to original image size
        x1 = (IMG_SIZE * (box[0] - 0.5 * box[2]) - pad_x) / ratio
        y1 = (IMG_SIZE * (box[1] - 0.5 * box[3]) - pad_y) / ratio
        x2 = (IMG_SIZE * (box[0] + 0.5 * box[2]) - pad_x) / ratio
        y2 = (IMG_SIZE * (box[1] + 0.5 * box[3]) - pad_y) / ratio

        # Store boxes as a flat list
        boxes_out += [x1, y1, x2, y2]
    prf.add('boxes')

    return x['streamId'], people_count, boxes_out
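The helper process_image() used above is defined elsewhere in the original script. A rough sketch of the preprocessing it implies (letterbox resize to the model's input size, normalization to [0, 1], float32, with a leading batch dimension), consistent with the descaling math in the loop above but otherwise an assumption:

import numpy as np
from PIL import Image

def process_image(numpy_img, img_size):
    # Letterbox: scale the longest side to img_size, pad the rest with grey
    h, w = numpy_img.shape[:2]
    ratio = float(img_size) / max(w, h)
    new_w, new_h = int(w * ratio), int(h * ratio)
    resized = np.array(Image.fromarray(numpy_img).resize((new_w, new_h)))
    canvas = np.full((img_size, img_size, 3), 128, dtype=np.uint8)
    pad_x, pad_y = (img_size - new_w) // 2, (img_size - new_h) // 2
    canvas[pad_y:pad_y + new_h, pad_x:pad_x + new_w] = resized
    # Normalize to [0, 1] and add a batch dimension: (1, img_size, img_size, 3)
    return np.expand_dims(canvas.astype(np.float32) / 255.0, axis=0)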