Example #1
def main(FLAGS):
    if FLAGS.enable_client:
        print("Using client")
    else:
        print("Not using client")

    imagenet_inference_labels = get_imagenet_inference_labels()
    imagenet_training_labels = get_imagenet_training_labels()

    util.VAL_IMAGE_FLAGS = FLAGS

    assert sorted(imagenet_training_labels) == sorted(
        imagenet_inference_labels)

    validation_nums = get_validation_labels(FLAGS)
    validation_labels = imagenet_inference_labels[validation_nums]

    if FLAGS.enable_client:
        # Server input is dummy
        x_test = np.random.rand(FLAGS.batch_size, FLAGS.image_size,
                                FLAGS.image_size, 3)
    else:
        x_test = get_validation_images(FLAGS)

    config = server_config_from_flags(FLAGS, "input")

    sess = tf.compat.v1.Session(config=config)
    graph_def = load_model(FLAGS.model)

    tf.import_graph_def(graph_def, name="")

    input_tensor = sess.graph.get_tensor_by_name("input:0")
    output_tensor = sess.graph.get_tensor_by_name(
        "MobilenetV2/Logits/Conv2d_1c_1x1/BiasAdd:0")

    print("performing inference")
    start_time = time.time()
    y_pred = sess.run(output_tensor, {input_tensor: x_test})
    end_time = time.time()
    runtime = end_time - start_time
    per_image_runtime = runtime / float(FLAGS.batch_size)
    print("performed inference, runtime (s):", np.round(runtime, 2))
    print("runtime per image (s)", np.round(per_image_runtime, 2))
    y_pred = np.squeeze(y_pred)

    if FLAGS.batch_size == 1:
        top5 = y_pred.argsort()[-5:]
    else:
        top5 = np.flip(y_pred.argsort()[:, -5:], axis=1)

    if not FLAGS.enable_client:
        preds = imagenet_training_labels[top5]

        if FLAGS.batch_size < 10:
            print("validation_labels", validation_labels)
            print("validation_labels shape", validation_labels.shape)
            print("preds", preds)
            print("preds shape", preds.shape)

        util.accuracy(preds, validation_labels)
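
A minimal sketch, assuming argparse-style flags, of how main() in Example #1 might be invoked; the flag names (--batch_size, --image_size, --model, --enable_client) are inferred from the attributes the example accesses, and the defaults are placeholders rather than the project's actual CLI:

import argparse

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--batch_size", type=int, default=1)
    parser.add_argument("--image_size", type=int, default=224)
    parser.add_argument("--model", type=str, default="./model/mobilenet_v2.pb")
    parser.add_argument("--enable_client", action="store_true")
    FLAGS, unparsed = parser.parse_known_args()
    main(FLAGS)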
Example #2
def main(FLAGS):
    util.VAL_IMAGE_FLAGS = FLAGS

    imagenet_inference_labels = get_imagenet_inference_labels()
    imagenet_training_labels = get_imagenet_training_labels()
    assert (
        sorted(imagenet_training_labels) == sorted(imagenet_inference_labels))
    validation_nums = get_validation_labels(FLAGS)
    x_test = get_validation_images(FLAGS)
    validation_labels = imagenet_inference_labels[validation_nums]

    if FLAGS.batch_size < 10:
        print('validation_labels', validation_labels)

    (batch_size, width, height, channels) = x_test.shape
    print('batch_size', batch_size)
    print('width', width)
    print('height', height)
    print('channels', channels)

    x_test_flat = x_test.flatten(order='C')
    hostname = 'localhost'
    port = 34000

    if 'NGRAPH_COMPLEX_PACK' in os.environ:
        complex_packing = str2bool(os.environ['NGRAPH_COMPLEX_PACK'])
    else:
        complex_packing = False

    client = pyhe_client.HESealClient(FLAGS.hostname, port, batch_size,
                                      x_test_flat, complex_packing)

    while not client.is_done():
        time.sleep(1)
    results = client.get_results()

    imagenet_labels = get_imagenet_labels()
    results = np.array(results)

    if (FLAGS.batch_size == 1):
        top5 = results.argsort()[-5:]
    else:
        results = np.reshape(results, (FLAGS.batch_size, 1001))
        top5 = np.flip(results.argsort()[:, -5:], axis=1)

    preds = imagenet_labels[top5]
    print('validation_labels', validation_labels)
    print('top5', preds)

    util.accuracy(preds, validation_labels)
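
Example #2 calls a str2bool helper that is not shown; a minimal sketch of what such a helper might look like (hypothetical, not necessarily the project's actual implementation):

def str2bool(value):
    # Treat common truthy spellings as True; everything else as False.
    return str(value).strip().lower() in ('yes', 'true', 't', 'y', '1')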
Example #3
def main(FLAGS):
    imagenet_inference_labels = get_imagenet_inference_labels()
    imagenet_training_labels = get_imagenet_training_labels()
    assert (
        sorted(imagenet_training_labels) == sorted(imagenet_inference_labels))
    validation_nums = get_validation_labels(FLAGS)
    x_test = get_validation_images(FLAGS)
    validation_labels = imagenet_inference_labels[validation_nums]

    if FLAGS.batch_size < 10:
        print('validation_labels', validation_labels)

    (batch_size, width, height, channels) = x_test.shape
    print('batch_size', batch_size)
    print('width', width)
    print('height', height)
    print('channels', channels)

    x_test_flat = x_test.flatten(order='C')
    port = 34000

    client = pyhe_client.HESealClient(FLAGS.hostname, port, batch_size,
                                      {'input': ('encrypt', x_test_flat)})

    results = client.get_results()

    imagenet_labels = get_imagenet_labels()
    results = np.array(results)

    if (FLAGS.batch_size == 1):
        top5 = results.argsort()[-5:]
    else:
        results = np.reshape(results, (FLAGS.batch_size, 1001))
        top5 = np.flip(results.argsort()[:, -5:], axis=1)

    preds = imagenet_labels[top5]
    print('validation_labels', validation_labels)
    print('top5', preds)

    util.accuracy(preds, validation_labels)
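
The batched top-5 extraction above (ascending argsort, keep the last five columns, flip) is equivalent to sorting the negated logits directly; a small self-contained numpy check, independent of the example's helpers (ties aside):

import numpy as np

rng = np.random.default_rng(0)
results = rng.random((4, 1001))  # toy logits: 4 images, 1001 classes

# As in the example: ascending argsort, keep the last 5 per row, flip to descending.
top5_a = np.flip(results.argsort()[:, -5:], axis=1)
# Equivalent: argsort the negated scores and take the first 5 per row.
top5_b = np.argsort(-results, axis=1)[:, :5]

assert np.array_equal(top5_a, top5_b)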
Example #4
def main(FLAGS):

    if FLAGS.enable_client:
        print('Using client')
    else:
        print('Not using client')

    imagenet_inference_labels = get_imagenet_inference_labels()
    imagenet_training_labels = get_imagenet_training_labels()

    util.VAL_IMAGE_FLAGS = FLAGS

    assert (
        sorted(imagenet_training_labels) == sorted(imagenet_inference_labels))

    validation_nums = get_validation_labels(FLAGS)
    validation_labels = imagenet_inference_labels[validation_nums]

    if FLAGS.enable_client:
        # Server input is dummy
        x_test = np.random.rand(FLAGS.batch_size, FLAGS.image_size,
                                FLAGS.image_size, 3)
    else:
        x_test = get_validation_images(FLAGS)

    config = server_config_from_flags(FLAGS, 'input')

    sess = tf.compat.v1.Session(config=config)
    graph_def = load_model(FLAGS.model)

    for node in graph_def.node:
        if 'FusedBatchNorm' in node.name or 'Pow' in node.name:
            print(node)

    #print('node names', [n.name for n in graph_def.node])

    tf.import_graph_def(graph_def, name='')

    input_tensor = sess.graph.get_tensor_by_name('input:0')
    output_tensor = sess.graph.get_tensor_by_name(
        'MobilenetV2/Logits/Conv2d_1c_1x1/BiasAdd:0')

    print('performing inference')
    start_time = time.time()
    y_pred = sess.run(output_tensor, {input_tensor: x_test})
    end_time = time.time()
    runtime = end_time - start_time
    per_image_runtime = runtime / float(FLAGS.batch_size)
    print('performed inference, runtime (s):', np.round(runtime, 2))
    print('runtime per image (s)', np.round(per_image_runtime, 2))
    y_pred = np.squeeze(y_pred)

    if (FLAGS.batch_size == 1):
        top5 = y_pred.argsort()[-5:]
    else:
        top5 = np.flip(y_pred.argsort()[:, -5:], axis=1)

    if not FLAGS.enable_client:
        preds = imagenet_training_labels[top5]

        if FLAGS.batch_size < 10:
            print('validation_labels', validation_labels)
            print('validation_labels shape', validation_labels.shape)
            print('preds', preds)
            print('preds shape', preds.shape)

        util.accuracy(preds, validation_labels)
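
The node loop and the commented-out name dump in Example #4 are useful for locating tensor names in a frozen graph; a minimal sketch of that idea (the Placeholder/BiasAdd heuristic is an assumption for illustration, not part of the example):

def list_candidate_tensors(graph_def):
    # Print node names that commonly correspond to the input placeholder
    # and to logits-producing BiasAdd ops in an image classifier.
    for node in graph_def.node:
        if node.op == 'Placeholder':
            print('possible input tensor:', node.name + ':0')
        elif node.op == 'BiasAdd' and 'Logits' in node.name:
            print('possible output tensor:', node.name + ':0')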
Example #5
def main(FLAGS):
    using_client = (os.environ.get('NGRAPH_ENABLE_CLIENT') is not None)

    if using_client:
        print('Using client')
    else:
        print('Not using client')

    imagenet_inference_labels = get_imagenet_inference_labels()
    imagenet_training_labels = get_imagenet_training_labels()

    util.VAL_IMAGE_FLAGS = FLAGS

    assert (
        sorted(imagenet_training_labels) == sorted(imagenet_inference_labels))

    if not using_client:
        validation_nums = get_validation_labels(FLAGS)
        x_test = get_validation_images(FLAGS)
        validation_labels = imagenet_inference_labels[validation_nums]
    else:
        x_test = np.random.rand(FLAGS.batch_size, FLAGS.image_size,
                                FLAGS.image_size, 3)

    if FLAGS.ngraph:
        import ngraph_bridge
        print(ngraph_bridge.__version__)

    config = tf.compat.v1.ConfigProto()
    config.intra_op_parallelism_threads = 44
    config.inter_op_parallelism_threads = 44
    if FLAGS.ngraph:
        config = ngraph_bridge.update_config(config)
    sess = tf.compat.v1.Session(config=config)
    graph_def = load_model(FLAGS.model)
    tf.import_graph_def(graph_def, name='')

    input_tensor = sess.graph.get_tensor_by_name('input:0')
    output_tensor = sess.graph.get_tensor_by_name(
        'MobilenetV2/Logits/Conv2d_1c_1x1/BiasAdd:0')

    print('performing inference')
    start_time = time.time()
    y_pred = sess.run(output_tensor, {input_tensor: x_test})
    end_time = time.time()
    runtime = end_time - start_time
    per_image_runtime = runtime / float(FLAGS.batch_size)
    print('performed inference, runtime (s):', np.round(runtime, 2))
    print('runtime per image (s)', np.round(per_image_runtime, 2))
    y_pred = np.squeeze(y_pred)

    if (FLAGS.batch_size == 1):
        top5 = y_pred.argsort()[-5:]
    else:
        top5 = np.flip(y_pred.argsort()[:, -5:], axis=1)

    if not using_client:
        preds = imagenet_training_labels[top5]

        if FLAGS.batch_size < 10:
            print('validation_labels', validation_labels)
            print('validation_labels shape', validation_labels.shape)
            print('preds', preds)
            print('preds shape', preds.shape)

        util.accuracy(preds, validation_labels)
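
util.accuracy itself is not shown in these examples; a minimal sketch of what a top-1/top-5 check over label strings might look like (a hypothetical stand-in, not the project's actual util module, and it assumes the best prediction comes first in each row of preds):

import numpy as np

def accuracy(preds, labels):
    # preds: (batch, k) array of predicted label strings, best prediction first.
    # labels: (batch,) array of ground-truth label strings.
    labels = np.asarray(labels).reshape(-1)
    preds = np.asarray(preds).reshape(len(labels), -1)
    top1 = np.mean(preds[:, 0] == labels)
    topk = np.mean([label in row for row, label in zip(preds, labels)])
    print('top-1 accuracy:', top1, 'top-k accuracy:', topk)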
Example #6
def main(FLAGS):
    util.VAL_IMAGE_FLAGS = FLAGS

    imagenet_inference_labels = get_imagenet_inference_labels()
    imagenet_training_labels = get_imagenet_training_labels()
    assert (
        sorted(imagenet_training_labels) == sorted(imagenet_inference_labels))
    validation_nums = get_validation_labels(FLAGS)
    x_test = get_validation_images(FLAGS)
    validation_labels = imagenet_inference_labels[validation_nums]

    if FLAGS.batch_size < 10:
        print('validation_labels', validation_labels)

    (batch_size, width, height, channels) = x_test.shape
    print('batch_size', batch_size)
    print('width', width)
    print('height', height)
    print('channels', channels)

    # Move the batch axis to the innermost (last) position (the expected format)
    x_test = np.moveaxis(x_test, 0, -1)
    x_test_flat = x_test.flatten(order='C')
    hostname = 'localhost'
    port = 34000

    complex_scale_factor = 1
    if ('NGRAPH_COMPLEX_PACK' in os.environ):
        complex_scale_factor = 2
    print('complex_scale_factor', complex_scale_factor)

    # TODO: support batch sizes that are not a multiple of complex_scale_factor
    assert (batch_size % complex_scale_factor == 0)
    new_batch_size = batch_size // complex_scale_factor
    client = he_seal_client.HESealClient(FLAGS.hostname, port, new_batch_size,
                                         x_test_flat)

    while not client.is_done():
        time.sleep(1)
    results = client.get_results()

    imagenet_labels = get_imagenet_labels()
    results = np.array(results)

    if (FLAGS.batch_size == 1):
        top5 = results.argsort()[-5:]
    else:
        print('results shape', results.shape)
        results = np.reshape(results, (
            1001,
            FLAGS.batch_size,
        ))
        print('results.shape', results.shape)

        try:
            # Sort along the class axis, keep the five largest per image,
            # flip to descending order, then transpose to (batch_size, 5).
            res_sort = results.argsort(axis=0)
            res_top5 = res_sort[-5:, :]
            top5 = np.flip(res_top5, axis=0).T
            print('top5.shape', top5.shape)
        except Exception as e:
            print('e', e)
    preds = imagenet_labels[top5]
    print('validation_labels', validation_labels)
    print('top5', preds)

    util.accuracy(preds, validation_labels)
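
In the batched branch of Example #6 the logits arrive with the class axis first, shape (1001, batch_size); sorting along axis 0 and transposing gives the same per-image top-5 as transposing first (as the other examples do), which this small numpy check illustrates (ties aside):

import numpy as np

rng = np.random.default_rng(0)
results = rng.random((1001, 4))  # toy logits: class axis first, 4 images

# As in the example: sort along the class axis, keep the five largest,
# flip to descending order, transpose to (batch_size, 5).
top5_a = np.flip(results.argsort(axis=0)[-5:, :], axis=0).T
# Equivalent: transpose to (batch_size, num_classes) first, then sort each row.
top5_b = np.flip(results.T.argsort(axis=1)[:, -5:], axis=1)

assert np.array_equal(top5_a, top5_b)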