def main(opset=13):
    url = "https://tfhub.dev/google/seefood/segmenter/mobile_food_segmenter_V1/1?tf-hub-format=compressed"
    dest = "tf-mobile_food_segmenter_V1"
    name = "mobile_food_segmenter_V1"
    onnx_name = os.path.join(dest, "%s-%d.onnx" % (name, opset))

    imgs = generate_random_images(shape=(1, 513, 513, 3), scale=1.)

    if True:
        benchmark(url, dest, onnx_name, opset, imgs, tag='')
        # The conversion works but tensorflow fails with
        # TypeError: 'AutoTrackable' object is not callable

    if True:
        import tensorflow.compat.v2 as tf
        import tensorflow_hub as hub

        m = hub.KerasLayer(
            'https://tfhub.dev/google/seefood/segmenter/mobile_food_segmenter_V1/1'
        )
        inputs = {
            "X": tf.keras.Input(shape=[1, 513, 513, 3],
                                dtype="float32",
                                name="X"),
        }
        outputs = m(inputs)["default"]
        # TypeError: pruned(images) missing required arguments: images
        print(outputs)
        model = tf.keras.Model(inputs, outputs)

        if not os.path.exists(dest):
            os.makedirs(dest)

        # This is a large model.
        tf2onnx.convert.from_keras(model, opset=opset, output_path=onnx_name)
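
All of these snippets assume module-level imports (os, pickle, numpy, random as rnd, OrderedDict, tensorflow, tensorflow_hub, tf2onnx) and a few shared helpers (generate_random_images, generate_text_inputs, benchmark, measure_time) that are not shown on this page. The following is a minimal, hypothetical sketch of the two input generators only, assuming they return lists of plain NumPy arrays; the download/convert/run logic of benchmark is not reproduced here and may differ in the real scripts.

# Hypothetical sketch of the input generators assumed by the examples on this
# page; the real helpers (and the benchmark/measure_time functions that
# download the model, convert it with tf2onnx and time ONNX Runtime) may differ.
from collections import OrderedDict
import random as rnd
import numpy


def generate_random_images(shape=(1, 224, 224, 3), scale=255., n=10,
                           dtype=numpy.float32):
    "Returns n random tensors of the given shape, with values in [0, scale)."
    return [(numpy.random.rand(*shape) * scale).astype(dtype)
            for _ in range(n)]


def generate_text_inputs(seq_len=128, n=10):
    "Returns n BERT-style input dictionaries filled with random token ids."
    return [OrderedDict([
        ('input_word_ids',
         numpy.array([rnd.randint(0, 1000) for _ in range(seq_len)],
                     dtype=numpy.int32).reshape((1, -1))),
        ('input_mask',
         numpy.array([rnd.randint(0, 1) for _ in range(seq_len)],
                     dtype=numpy.int32).reshape((1, -1))),
        ('input_type_ids',
         numpy.array([i // 5 for i in range(seq_len)],
                     dtype=numpy.int32).reshape((1, -1)))
    ]) for _ in range(n)]
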
Example #2
def main(opset=13):

    if False:
        import tensorflow as tf
        import tensorflow_text
        import tensorflow_hub as hub
        sentences = tf.constant(["Hi I'm some text"])
        text_input = tf.keras.layers.Input(shape=(), dtype=tf.string)
        preprocessor = hub.KerasLayer(
            "https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3")
        encoder_inputs = preprocessor(text_input)
        embedded_inputs = {
            k: v.numpy()
            for k, v in preprocessor(sentences).items()
        }
        for k, v in embedded_inputs.items():
            print(k, v.dtype, v.shape)

    url = "https://tfhub.dev/tensorflow/mobilebert_en_uncased_L-24_H-128_B-512_A-4_F-4_OPT/1?tf-hub-format=compressed"
    dest = "tf-mobilebert_en_uncased_L-24_H-128_B-512_A-4_F-4_OPT"
    name = "mobilebert_en_uncased_L-24_H-128_B-512_A-4_F-4_OPT"
    onnx_name = os.path.join(dest, "%s-%d.onnx" % (name, opset))

    inputs = generate_text_inputs()
    benchmark(
        url, dest, onnx_name, opset, inputs,
        output_name="attention_scores")  #, ort_name="mobile_bert_encoder_50")
Example #3
def main(opset=13):
    
    if False:
        import tensorflow as tf
        import tensorflow_text
        import tensorflow_hub as hub
        sentences = tf.constant(["Hi I'm some text"])
        text_input = tf.keras.layers.Input(shape=(), dtype=tf.string)
        encoder = hub.KerasLayer(
            "https://tfhub.dev/tensorflow/lambert_en_uncased_L-24_H-1024_A-16/2", trainable=True)
        preprocessor = hub.KerasLayer(
            "https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3")
        encoder_inputs = preprocessor(text_input)
        embedded_inputs = {k: v.numpy() for k, v in preprocessor(sentences).items()}
        for k, v in embedded_inputs.items():
            print(k, v.dtype, v.shape)
    
    url = "https://tfhub.dev/tensorflow/lambert_en_uncased_L-24_H-1024_A-16/2?tf-hub-format=compressed"
    dest = "tf-lambert_en_uncased_L-24_H-1024_A-16"
    name = "lambert_en_uncased_L-24_H-1024_A-16"
    onnx_name = os.path.join(dest, "%s-%d.onnx" % (name, opset))

    inputs = [
        OrderedDict([
            ('input_word_ids',
             numpy.array([rnd.randint(0, 1000) for i in range(0, 128)],
                         dtype=numpy.int32).reshape((1, -1))),
            ('input_mask',
             numpy.array([rnd.randint(0, 1) for i in range(0, 128)],
                         dtype=numpy.int32).reshape((1, -1))),
            ('input_type_ids',
             numpy.array([i // 5 for i in range(0, 128)],
                         dtype=numpy.int32).reshape((1, -1)))
        ]) for i in range(0, 10)
    ]

    benchmark(url, dest, onnx_name, opset, inputs, output_name="pooled_output")
Example #4
def main(opset=13):
    url = "https://tfhub.dev/google/movenet/singlepose/thunder/3?tf-hub-format=compressed"
    dest = "tf-thunder"
    name = "thunder"
    onnx_name = os.path.join(dest, "%s-%d.onnx" % (name, opset))

    imgs = generate_random_images(shape=(1, 256, 256, 3), dtype=numpy.int32)

    benchmark(url, dest, onnx_name, opset, imgs, signature='serving_default')
Example #5
def main(opset=13):
    url = "https://tfhub.dev/google/inaturalist/inception_v3/feature_vector/5?tf-hub-format=compressed"
    dest = "tf-inception_v3"
    name = "inception_v3"
    onnx_name = os.path.join(dest, "%s-%d.onnx" % (name, opset))

    imgs = generate_random_images(shape=(1, 299, 299, 3))

    benchmark(url, dest, onnx_name, opset, imgs)
Example #6
def main(opset=13):
    url = "https://tfhub.dev/captain-pool/esrgan-tf2/1?tf-hub-format=compressed"
    dest = "tf-esrgan-tf2"
    name = "esrgan-tf2"
    onnx_name = os.path.join(dest, "%s-%d.onnx" % (name, opset))

    imgs = generate_random_images()

    benchmark(url, dest, onnx_name, opset, imgs)
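
Once benchmark has written the ONNX file, the converted model can be sanity-checked independently with onnxruntime. A minimal sketch for the ESRGAN example above, assuming a CPU-only session and an arbitrary test image size (the exact input shape depends on the exported signature):

# Hypothetical check of the converted ESRGAN model with onnxruntime.
import numpy
import onnxruntime

sess = onnxruntime.InferenceSession(
    "tf-esrgan-tf2/esrgan-tf2-13.onnx",            # file written by the example above
    providers=["CPUExecutionProvider"])
inp = sess.get_inputs()[0]
print(inp.name, inp.shape, inp.type)               # inspect the expected input
img = numpy.random.rand(1, 128, 128, 3).astype(numpy.float32)  # assumed test shape
out = sess.run(None, {inp.name: img})
print([o.shape for o in out])
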
Example #7
def main(opset=13):
    url = "https://tfhub.dev/google/seefood/segmenter/mobile_food_segmenter_V1/1?tf-hub-format=compressed"
    dest = "tf-mobile_food_segmenter_V1"
    name = "mobile_food_segmenter_V1"
    onnx_name = os.path.join(dest, "%s-%d.onnx" % (name, opset))

    imgs = generate_random_images(shape=(1, 513, 513, 3), scale=1.)

    benchmark(url, dest, onnx_name, opset, imgs, tag='')
Example #8
def main(opset=13):
    url = "https://tfhub.dev/google/imagenet/resnet_v1_101/feature_vector/5?tf-hub-format=compressed"
    dest = "tf-resnet_v1_101"
    name = "resnet_v1_101"
    onnx_name = os.path.join(dest, "%s-%d.onnx" % (name, opset))

    imgs = generate_random_images(shape=(1, 224, 224, 3), scale=1.)

    benchmark(url, dest, onnx_name, opset, imgs)
Example #9
def main(opset=13):
    url = "https://tfhub.dev/google/imagenet/resnet_v2_152/classification/5?tf-hub-format=compressed"
    dest = "tf-resnet_v2_152"
    name = "resnet_v2_152"
    onnx_name = os.path.join(dest, "%s-%d.onnx" % (name, opset))

    imgs = generate_random_images(shape=(1, 224, 224, 3))

    benchmark(url, dest, onnx_name, opset, imgs)
Example #10
def main(opset=13):
    url = "https://tfhub.dev/google/imagenet/mobilenet_v3_large_075_224/classification/5?tf-hub-format=compressed"
    dest = "tf-mobilenet-v3-large-075-224"
    name = "mobilenet-v3-large-075-224"
    onnx_name = os.path.join(dest, "%s-%d.onnx" % (name, opset))

    imgs = generate_random_images(shape=(1, 224, 224, 3), scale=1.)

    benchmark(url, dest, onnx_name, opset, imgs)
Example #11
def main(opset=13):
    url = "https://tfhub.dev/deepmind/enformer/1?tf-hub-format=compressed"
    dest = "tf-enformer"
    name = "enformer"
    onnx_name = os.path.join(dest, "%s-%d.zip" % (name, opset))

    imgs = generate_random_images(shape=(1, 224, 224, 3))

    benchmark(url, dest, onnx_name, opset, imgs)
Example #12
def main(opset=13):
    url = "https://tfhub.dev/mediapipe/tfjs-model/blazeposedetector/1/default/1?tfjs-format=compressed"
    dest = "tf-blazeposedetector"
    name = "blazeposedetector"
    onnx_name = os.path.join(dest, "%s-%d.onnx" % (name, opset))

    imgs = generate_random_images(shape=(1, 513, 513, 3), scale=1.)

    benchmark(url, dest, onnx_name, opset, imgs)
Example #13
def main(opset=13):
    url = "https://tfhub.dev/tensorflow/tutorials/spam-detection/1?tf-hub-format=compressed"
    dest = "tf-spam-detection"
    name = "spam-detection"
    onnx_name = os.path.join(dest, "%s-%d.onnx" % (name, opset))

    imgs = generate_random_images((1, 20), dtype=numpy.int32)

    benchmark(url, dest, onnx_name, opset, imgs)
Example #14
def main(opset=13):
    url = "https://tfhub.dev/google/imagenet/nasnet_large/feature_vector/5?tf-hub-format=compressed"
    dest = "tf-nasnet-large"
    name = "nasnet-large"
    onnx_name = os.path.join(dest, "%s-%d.onnx" % (name, opset))

    imgs = generate_random_images(shape=(1, 331, 331, 3))

    benchmark(url, dest, onnx_name, opset, imgs)
Example #15
def main(opset=13):
    print("[main]")
    url = "https://tfhub.dev/google/imagenet/mobilenet_v3_small_075_224/feature_vector/5?tf-hub-format=compressed"
    dest = "tf-mobilenet-v3-small-075-224"
    name = "mobilenet-v3-small-075-224"
    onnx_name = os.path.join(dest, "%s-%d.onnx" % (name, opset))

    imgs = generate_random_images(shape=(1, 224, 224, 3), scale=1.)

    print("[benchmark]")
    benchmark(url, dest, onnx_name, opset, imgs)
    print("[end]")
Example #16
def main(opset=13):
    url = "https://tfhub.dev/google/humpback_whale/1?tf-hub-format=compressed"
    dest = "tf-humpback-whale"
    name = "humpback-whale"
    onnx_name = os.path.join(dest, "%s-%d.onnx" % (name, opset))
    print("[download data]")
    FILENAME = 'gs://bioacoustics-www1/sounds/Cross_02_060203_071428.d20_7.wav'
    pkl_name = os.path.join(dest, "data.pkl")
    if not os.path.exists(dest):
        os.makedirs(dest)
    if not os.path.exists(pkl_name):
        with open(pkl_name, "wb") as f:
            waveform, sample_rate = tf.audio.decode_wav(
                tf.io.read_file(FILENAME))
            waveform = tf.expand_dims(waveform, 0)  # makes a batch of size 1
            context_step_samples = tf.cast(sample_rate, tf.int64)
            data = dict(waveform=waveform,
                        context_step_samples=context_step_samples)
            pickle.dump(data, f)
    else:
        with open(pkl_name, "rb") as f:
            data = pickle.load(f)
        waveform = data["waveform"]
        context_step_samples = data["context_step_samples"]
    print("[data] done. context_step_samples=", context_step_samples.numpy())

    def benchmark_custom(local_name):
        model = hub.load(local_name)
        score_fn = model.signatures['score']
        scores = score_fn(waveform=waveform,
                          context_step_samples=context_step_samples)
        imgs_tf = [
            dict(waveform=waveform, context_step_samples=context_step_samples)
        ]
        results_tf, duration_tf = measure_time(
            lambda inputs: score_fn(**inputs), imgs_tf)
        return scores, results_tf, duration_tf

    imgs = generate_random_images(shape=(1, 750000, 1), scale=1., n=2)
    inputs = [
        dict(waveform=waveform.numpy(),
             context_step_samples=numpy.array(context_step_samples.numpy(),
                                              dtype=numpy.int64))
    ]
    benchmark(url,
              dest,
              onnx_name,
              opset,
              inputs,
              optimize=False,
              signature='score',
              custom_tf=benchmark_custom)
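
The benchmark_custom callback above relies on a measure_time helper that is not shown on this page either. A minimal, hypothetical version, assuming it applies the callable to every input and returns the last result together with the per-call durations:

# Hypothetical sketch of the measure_time helper used by benchmark_custom.
import time


def measure_time(fn, inputs):
    "Calls fn on every element of inputs and records the elapsed times."
    durations, results = [], None
    for inp in inputs:
        begin = time.perf_counter()
        results = fn(inp)
        durations.append(time.perf_counter() - begin)
    return results, durations
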
Example #17
def main(opset=13):
    url = "https://tfhub.dev/google/humpback_whale/1?tf-hub-format=compressed"
    dest = "tf-humpback-whale"
    name = "humpback-whale"
    onnx_name = os.path.join(dest, "%s-%d.onnx" % (name, opset))

    imgs = generate_random_images(shape=(1, 1024, 1))
    inputs = [
        dict(waveform=img,
             context_step_samples=numpy.array(512, dtype=numpy.int64))
        for img in imgs
    ]

    benchmark(url, dest, onnx_name, opset, inputs, optimize=False)
Example #18
def main(opset=13):
    url = "https://tfhub.dev/tensorflow/bert_en_wwm_uncased_L-24_H-1024_A-16/4?tf-hub-format=compressed"
    dest = "tf-bert-en-wwm-uncased-L-24-H-1024-A-16"
    name = "bert-en-wwm-uncased-L-24-H-1024-A-16"
    onnx_name = os.path.join(dest, "%s-%d.onnx" % (name, opset))

    inputs = [
        OrderedDict([('input_word_ids',
                      numpy.array([rnd.randint(0, 1000) for i in range(0, 32)],
                                  dtype=numpy.int32).reshape((1, -1))),
                     ('input_mask',
                      numpy.array([rnd.randint(0, 1) for i in range(0, 32)],
                                  dtype=numpy.int32).reshape((1, -1))),
                     ('input_type_ids',
                      numpy.array([i // 5 for i in range(0, 32)],
                                  dtype=numpy.int32).reshape((1, -1)))])
        for i in range(0, 10)
    ]

    benchmark(url, dest, onnx_name, opset, inputs, output_name="pooled_output")
Example #19
def main(opset=13):
    url = "https://tfhub.dev/tensorflow/talkheads_ggelu_bert_en_large/2?tf-hub-format=compressed"
    dest = "tf-talkheads_ggelu_bert_en_large"
    name = "talkheads_ggelu_bert_en_large"
    onnx_name = os.path.join(dest, "%s-%d.onnx" % (name, opset))

    inputs = [
        OrderedDict([
            ('input_word_ids',
             numpy.array([rnd.randint(0, 1000) for i in range(0, 128)],
                         dtype=numpy.int32).reshape((1, -1))),
            ('input_mask',
             numpy.array([rnd.randint(0, 1) for i in range(0, 128)],
                         dtype=numpy.int32).reshape((1, -1))),
            ('input_type_ids',
             numpy.array([i // 5 for i in range(0, 128)],
                         dtype=numpy.int32).reshape((1, -1)))
        ]) for i in range(0, 10)
    ]

    benchmark(url, dest, onnx_name, opset, inputs, output_name="pooled_output")