Example #1
def _main():
    context = PredictContext(FLAGS.model_name,
                             url=FLAGS.url,
                             protocol=FLAGS.protocol,
                             verbose=True)
    model_config = context.model_config

    input_name, output_name, c, h, w, data_format, dtype = _parse_model(
        model_config, FLAGS.model_name, FLAGS.batch_size)

    file_names, image_data = _gen_input_data(data_format, dtype, c, h, w)

    cur_idx = 0
    num_of_images = len(image_data)

    def _next_batch(batch_size):
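        # Serve the next `batch_size` samples, wrapping around to the start
        # of the data set when the end is reached, so more batches than the
        # data set holds can be requested.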
        nonlocal cur_idx
        if cur_idx + batch_size <= num_of_images:
            inputs = image_data[cur_idx:cur_idx + batch_size]
            outputs = file_names[cur_idx:cur_idx + batch_size]
            cur_idx = (cur_idx + batch_size) % num_of_images
        else:
            image_idx = cur_idx
            cur_idx = 0
            next_inputs, next_outputs = _next_batch(batch_size - (num_of_images - image_idx))
            inputs = image_data[image_idx:] + next_inputs
            outputs = file_names[image_idx:] + next_outputs

        return inputs, outputs

    num_of_batches = 99
    for _ in range(num_of_batches):
        i_inputs, i_outputs = _next_batch(FLAGS.batch_size)
        context.run(inputs={input_name: i_inputs},
                    outputs={output_name: FLAGS.classes},
                    batch_size=FLAGS.batch_size)
Example #2
def _main():
    context = PredictContext(FLAGS.model_name,
                             url=FLAGS.url,
                             protocol=FLAGS.protocol,
                             verbose=True)
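    # Load a single pre-processed input tensor from disk as the only sample.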
    image_data = []
    image = np.load('./input_0.npy')
    image_data.append(image)

    input_name = context.model_config.input[0].name
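    # Hard-coded output tensor name; the naming scheme follows PaddlePaddle's
    # save_inference_model export.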
    output_name = 'save_infer_model/scale_0.tmp_1'

    cur_idx = 0
    num_of_images = len(image_data)

    def _next_batch(batch_size):
        nonlocal cur_idx
        if cur_idx + batch_size <= num_of_images:
            inputs = image_data[cur_idx:cur_idx + batch_size]
            cur_idx = (cur_idx + batch_size) % num_of_images
        else:
            image_idx = cur_idx
            cur_idx = 0
            next_inputs = _next_batch(batch_size - (num_of_images - image_idx))
            inputs = image_data[image_idx:] + next_inputs

        return inputs

    num_of_batches = 100

    for _ in range(num_of_batches):
        i_inputs = _next_batch(FLAGS.batch_size)
        context.run(inputs={input_name: i_inputs},
                    outputs=[output_name],
                    batch_size=FLAGS.batch_size)
Example #3
def _main():
    context = PredictContext(FLAGS.model_name,
                             url=FLAGS.url,
                             protocol=FLAGS.protocol,
                             verbose=True)
    model_config = context.model_config

    input_name, output_name, c, h, w, data_format, dtype = _parse_model(
        model_config, FLAGS.model_name, FLAGS.batch_size)

    file_names, image_data = _gen_input_data(data_format, dtype, c, h, w)

    cur_idx = 0
    num_of_images = len(image_data)

    def _next_batch(batch_size):
        nonlocal cur_idx
        if cur_idx + batch_size <= num_of_images:
            inputs = image_data[cur_idx:cur_idx + batch_size]
            outputs = file_names[cur_idx:cur_idx + batch_size]
            cur_idx = (cur_idx + batch_size) % num_of_images
        else:
            image_idx = cur_idx
            cur_idx = 0
            next_inputs, next_outputs = _next_batch(batch_size -
                                                    (num_of_images -
                                                     image_idx))
            inputs = image_data[image_idx:] + next_inputs
            outputs = file_names[image_idx:] + next_outputs

        return inputs, outputs

    num_of_batches = 99
    if num_of_images % FLAGS.batch_size != 0:
        num_of_batches += 1
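    # Append the latency of every predict call to client_time.log.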
    logging.basicConfig(
        level=logging.DEBUG,
        filename=os.path.join(FLAGS.log_path, 'client_time.log'),
        filemode='a',
        format='%(asctime)s - %(pathname)s[line:%(lineno)d] - '
               '%(levelname)s: %(message)s')
    for batch_idx in range(num_of_batches):
        i_inputs, i_outputs = _next_batch(FLAGS.batch_size)
        time1 = time.time()
        context.run(inputs={input_name: i_inputs},
                    outputs={output_name: FLAGS.classes},
                    batch_size=FLAGS.batch_size)
        logging.info(f'The time of predict: {time.time() - time1}')
        print(f'{batch_idx} / {num_of_batches}')
Example #4
def _test_connect_of_serving(args):
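    """Return True when a PredictContext can be created, i.e. the serving
    endpoint is reachable; grpc raises _InactiveRpcError otherwise."""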
    try:
        PredictContext(os.getenv('MODEL_NAME'),
                       url=args.url,
                       protocol=args.protocol,
                       verbose=True)
        return True
    except _InactiveRpcError:
        return False
Example #5
def _main():
    context = PredictContext(FLAGS.model_name,
                             url=FLAGS.url,
                             protocol=FLAGS.protocol,
                             verbose=True)
    model_config = context.model_config

    input_name, output_name = _parse_model(model_config, FLAGS.model_name,
                                           FLAGS.batch_size)

    file_names, image_data = _gen_input_data()

    cur_idx = 0
    num_of_images = len(image_data)

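    # Read the class labels, one label per line.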
    labels = open(FLAGS.label_file).read().strip().split('\n')

    def _next_batch(batch_size):
        nonlocal cur_idx
        if cur_idx + batch_size <= num_of_images:
            inputs = image_data[cur_idx:cur_idx + batch_size]
            outputs = file_names[cur_idx:cur_idx + batch_size]
            cur_idx = (cur_idx + batch_size) % num_of_images
        else:
            image_idx = cur_idx
            cur_idx = 0
            next_inputs, next_outputs = _next_batch(batch_size -
                                                    (num_of_images -
                                                     image_idx))
            inputs = image_data[image_idx:] + next_inputs
            outputs = file_names[image_idx:] + next_outputs

        return inputs, outputs

    num_of_batches = num_of_images // FLAGS.batch_size
    if num_of_images % FLAGS.batch_size != 0:
        num_of_batches += 1

    for _ in range(num_of_batches):
        i_inputs, i_outputs = _next_batch(FLAGS.batch_size)
        result = context.run(inputs={input_name: i_inputs},
                             outputs={output_name: FLAGS.classes},
                             batch_size=FLAGS.batch_size)
        _postprocess(result[output_name], i_outputs, FLAGS.batch_size, labels)
Example #6
def _main():
    context = PredictContext(FLAGS.model_name,
                             url=FLAGS.url,
                             protocol=FLAGS.protocol,
                             verbose=True)
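    # The exported PP-YOLO model takes two inputs: the image batch (input_0)
    # and the corresponding original image sizes (input_1).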
    image_data = np.load('./ppyolo_inputs/input_0.npy')
    input_size = np.load('./ppyolo_inputs/input_1.npy')

    input_name1 = context.model_config.input[0].name
    input_name2 = context.model_config.input[1].name
    output_name1 = context.model_config.output[0].name
    output_name2 = context.model_config.output[1].name

    cur_idx = 0
    num_of_images = len(image_data)

    def _next_batch(batch_size):
        nonlocal cur_idx
        if cur_idx + batch_size <= num_of_images:
            inputs_1 = image_data[cur_idx:cur_idx + batch_size]
            inputs_2 = input_size[cur_idx:cur_idx + batch_size]
            cur_idx = (cur_idx + batch_size) % num_of_images
        else:
            image_idx = cur_idx
            cur_idx = 0
            next_inputs_1, next_inputs_2 = _next_batch(
                batch_size - (num_of_images - image_idx))
            # image_data and input_size are numpy arrays, so wrap-around
            # pieces must be concatenated explicitly ('+' would add them
            # element-wise instead of joining them).
            inputs_1 = np.concatenate((image_data[image_idx:], next_inputs_1))
            inputs_2 = np.concatenate((input_size[image_idx:], next_inputs_2))

        return inputs_1, inputs_2

    num_of_batches = 1
    if num_of_images % FLAGS.batch_size != 0:
        num_of_batches += 1

    for _ in range(num_of_batches):
        i_inputs_1, i_inputs_2 = _next_batch(FLAGS.batch_size)
        context.run(inputs={input_name1: i_inputs_1, input_name2: i_inputs_2},
                    outputs=[output_name1, output_name2],
                    batch_size=FLAGS.batch_size)
Example #7
    FLAGS = parser.parse_args()
    protocol = FLAGS.protocol

    # We use a simple model that takes 2 input tensors of 16 float16
    # values each and returns 2 output tensors of 16 float32 values
    # each. One output tensor is the element-wise sum of the inputs and
    # one output is the element-wise difference.
    model_name = "plan_float16_float32_float32"
    model_version = 1
    batch_size = 1

    input0_data = np.arange(start=0, stop=16, dtype=np.float16)
    input1_data = np.ones(shape=16, dtype=np.float16)

    context = PredictContext(model_name,
                             model_version=model_version,
                             url=FLAGS.url,
                             protocol=FLAGS.protocol)

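    # Each input name maps to a tuple with one tensor per batch item
    # (a single tensor here, since batch_size is 1).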
    result = context.run(inputs={
        'INPUT0': (input0_data, ),
        'INPUT1': (input1_data, )
    },
                         outputs=['OUTPUT0', 'OUTPUT1'],
                         batch_size=batch_size)
    output0_data = result['OUTPUT0'].tensor[0]
    output1_data = result['OUTPUT1'].tensor[0]

    for i in range(16):
        print(
            str(input0_data[i]) + " + " + str(input1_data[i]) + " = " +
            str(output0_data[i]))
        print(
            str(input0_data[i]) + " - " + str(input1_data[i]) + " = " +
            str(output1_data[i]))
Example #8
def _main():
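    # Read raw credential bytes (e.g. a certificate) to secure the gRPC channel.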
    credentials = None
    if FLAGS.protocol == 'grpc' and FLAGS.credentials is not None:
        with open(FLAGS.credentials, 'rb') as f:
            credentials = f.read()

    context = PredictContext(FLAGS.model_name, model_version=FLAGS.model_version, signature=FLAGS.signature,
                             url=FLAGS.url, protocol=FLAGS.protocol, grpc_domain=FLAGS.grpc_domain,
                             credentials=credentials, verbose=FLAGS.verbose)

    input_name, output_name, c, h, w, data_format, dtype, output_size = parse_model(
        context.model_config, FLAGS.model_name, FLAGS.batch_size)
    print('Input name: {}, output name: {}, (c, h, w)=({}, {}, {}), data_format: {}, dtype: {}, output size: {}'.format(
        input_name, output_name, c, h, w, data_format, dtype, output_size))

    if os.path.isdir(FLAGS.image_filename):
        filenames = [os.path.join(FLAGS.image_filename, f)
                     for f in os.listdir(FLAGS.image_filename)
                     if os.path.isfile(os.path.join(FLAGS.image_filename, f))]
    else:
        filenames = [FLAGS.image_filename, ]

    filenames.sort()

    image_data = []
    for filename in filenames:
        img = Image.open(filename)
        array = preprocess(img, data_format, dtype, c, h, w, FLAGS.scaling)
        image_data.append(array)

    cur_idx = 0
    num_of_images = len(image_data)

    def _next_batch(batch_size):
        nonlocal cur_idx
        if cur_idx + batch_size <= num_of_images:
            inputs = image_data[cur_idx:cur_idx + batch_size]
            outputs = filenames[cur_idx:cur_idx + batch_size]
            cur_idx = (cur_idx + batch_size) % num_of_images
        else:
            image_idx = cur_idx
            cur_idx = 0
            next_inputs, next_outputs = _next_batch(batch_size - (num_of_images - image_idx))
            inputs = image_data[image_idx:] + next_inputs
            outputs = filenames[image_idx:] + next_outputs

        return inputs, outputs

    num_of_batches = num_of_images // FLAGS.batch_size
    if num_of_images % FLAGS.batch_size != 0:
        num_of_batches += 1

    if FLAGS.protocol == 'grpc' and FLAGS.is_async:
        print("async inference")
        requests = []
        batch_outputs = []
        # Asynchronously submit the input data over the gRPC interface.
        for _ in range(num_of_batches):
            i_inputs, i_outputs = _next_batch(FLAGS.batch_size)
            batch_outputs.append(i_outputs)
            requests.append(context.async_run(inputs={input_name: i_inputs},
                                              outputs={output_name: FLAGS.classes},
                                              batch_size=FLAGS.batch_size))
        # Collect the responses in submission order and post-process each batch.
        for i in range(len(requests)):
            response = requests[i].result()
            result = context.process_response({output_name: FLAGS.classes}, response)
            postprocess(result[output_name], batch_outputs[i], FLAGS.batch_size)

    else:
        for _ in range(num_of_batches):
            i_inputs, i_outputs = _next_batch(FLAGS.batch_size)
            result = context.run(inputs={input_name: i_inputs},
                                 outputs={output_name: FLAGS.classes},
                                 batch_size=FLAGS.batch_size)
            postprocess(result[output_name], i_outputs, FLAGS.batch_size)