Example #1
import json
import os
import shutil


def convert_to_tfjs(exported_graph_path, output_names, output_path):
    from tensorflowjs.converters import convert_tf_saved_model

    saved_model_path = os.path.join(exported_graph_path, 'saved_model')
    output_names_str = ','.join(output_names)

    try:
        convert_tf_saved_model(saved_model_path,
                               output_names_str,
                               output_path,
                               saved_model_tags='serve',
                               quantization_dtype=None,
                               skip_op_check=False,
                               strip_debug_ops=True)
    except Exception as err:
        print('Error: {}'.format(err))

    # Copy the labels into the model directory, converting labels.txt
    # to labels.json if no JSON file is present.
    json_labels = os.path.join(exported_graph_path, 'labels.json')
    text_labels = os.path.join(exported_graph_path, 'labels.txt')
    if not os.path.isfile(json_labels):
        with open(text_labels, 'r') as f:
            labels = f.read()
        labels = list(filter(bool, [s.strip() for s in labels.splitlines()]))
        with open(os.path.join(output_path, 'labels.json'), 'w') as f:
            json.dump(labels, f)
    else:
        shutil.copy2(json_labels, output_path)
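A hypothetical invocation of this helper (the paths and output node names below are placeholders, not from the original):

convert_to_tfjs(
    exported_graph_path='exported_graph',  # contains saved_model/ and a labels file
    output_names=['Postprocessor/ExpandDims_1', 'Postprocessor/Slice'],  # hypothetical node names
    output_path='model_web')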
Example #2
def convert_to_tfjs():
    from tensorflowjs.converters import convert_tf_saved_model

    # `args` is expected to be a module-level argparse.Namespace
    # (see the sketch after this example).
    output_node_names = ','.join(args.output_names)

    convert_tf_saved_model(
        args.tf_model_path,
        output_node_names,
        args.tfjs_path,
        saved_model_tags='serve',
        quantization_dtype=None,
        skip_op_check=False,
        strip_debug_ops=True)
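This snippet reads a module-level `args`; a minimal argparse setup that could supply it (flag names are assumptions inferred from the attributes used above) might look like:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--tf_model_path', required=True,
                    help='Directory containing the TF SavedModel.')
parser.add_argument('--tfjs_path', required=True,
                    help='Output directory for the TF.js web model.')
parser.add_argument('--output_names', nargs='+', required=True,
                    help='Names of the graph output nodes.')
args = parser.parse_args()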
Example #3
import os

import tensorflow as tf
from tensorflowjs.converters import convert_tf_saved_model
# `utils`, `saved_model_lib`, `train`, `predict`, and NB_CLASSES come from
# the surrounding jax2tf quickdraw example project.

def main(*args):
  base_model_path = "/tmp/jax2tf/tf_js_quickdraw"
  dataset_path = os.path.join(base_model_path, "data")
  classes = utils.download_dataset(dataset_path, NB_CLASSES)
  assert len(classes) == NB_CLASSES, classes
  print(f"Classes are: {classes}")
  train_ds, test_ds = utils.load_classes(dataset_path, classes)
  flax_params = train(train_ds, test_ds, classes)

  model_dir = os.path.join(base_model_path, "saved_models")
  # The model must be exported with with_gradient=True to be convertible to
  # TF.js: without it the SavedModel contains a "PreventGradient" op, which
  # TF.js does not support.
  saved_model_lib.convert_and_save_model(
      predict, flax_params, model_dir,
      input_signatures=[tf.TensorSpec([1, 28, 28, 1])],
      with_gradient=True, compile_model=False,
      enable_xla=False)
  conversion_dir = os.path.join(base_model_path, 'tfjs_models')
  convert_tf_saved_model(model_dir, conversion_dir)
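A quick sanity check that could be appended to main() after the conversion, assuming the converter writes a model.json plus binary weight shards into conversion_dir:

  print(sorted(os.listdir(conversion_dir)))
  # Expect something like ['group1-shard1of1.bin', 'model.json'].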
Example #4
def to_tfjs():
    import tensorflow as tf
    from jax.experimental.jax2tf import convert
    from tensorflowjs.converters import convert_tf_saved_model
    import os
    # tf_areas = convert(areas, polymorphic_shapes=["(a, 2), (b, 2), (c, 2)"])
    # tf_areas = convert(areas, polymorphic_shapes=['(a, 2)', '(b, 2)', '(c, 2)'], with_gradient=False)
    # f_tf_areas = tf.function(tf_areas, autograph=False)

    # `areas` is a JAX function defined elsewhere in the script (see the
    # sketch after this example).
    tf_areas = convert(areas, with_gradient=True, enable_xla=False)
    f_tf_areas = tf.function(tf_areas,
                             autograph=False,
                             input_signature=[
                                 tf.TensorSpec([1, 2]),
                                 tf.TensorSpec([1, 2]),
                                 tf.TensorSpec([1, 2]),
                             ])

    # tf_areas = convert(areas, polymorphic_shapes=['(a, d)', '(b, d)', '(c, d)'], with_gradient=True, enable_xla=False)
    # f_tf_areas = tf.function(tf_areas, autograph=False,
    #                          input_signature=[tf.TensorSpec([None, 2]), tf.TensorSpec([None, 2]),
    #                                           tf.TensorSpec([None, 2]), ])

    # f_tf_areas.get_concrete_function(tf.TensorSpec([None, 2]), tf.TensorSpec([None, 2]), tf.TensorSpec([None, 2]))

    model = tf.Module()
    model.f = f_tf_areas
    tf.saved_model.save(
        model,
        './scripts/simplex_volume_tf',
        options=tf.saved_model.SaveOptions(experimental_custom_gradients=True))
    restored_model = tf.saved_model.load('./scripts/simplex_volume_tf')
    restored_model.f(tf.convert_to_tensor([[0., 0.]]),
                     tf.convert_to_tensor([[1., 0.]]),
                     tf.convert_to_tensor([[0., 1.]]))
    convert_tf_saved_model(
        './scripts/simplex_volume_tf',
        os.path.expanduser(
            '~/Work/Projects/JavaScript/understanding-modules/model'))
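The `areas` function converted above is defined elsewhere in the script; a minimal JAX sketch consistent with the three (1, 2)-shaped inputs (one triangle vertex per argument) would be the shoelace formula, though the original implementation is an assumption here:

import jax.numpy as jnp

def areas(a, b, c):
    # Hypothetical stand-in: area of the triangle (a, b, c) per row, via
    # the 2-D cross product (shoelace formula); each input has shape (N, 2).
    ab = b - a
    ac = c - a
    return 0.5 * jnp.abs(ab[:, 0] * ac[:, 1] - ab[:, 1] * ac[:, 0])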
Example #5
import json

import tensorflow as tf
from tensorflowjs.converters import convert_tf_saved_model
# TFDistilBertForFakeNewsClassification is a project-specific huggingface
# model subclass imported elsewhere.

def main():
    # load huggingface model
    model = TFDistilBertForFakeNewsClassification.from_pretrained(
        './model', num_labels_aggregate=3, num_labels_category=8)

    # define tensorflow function for faster inference
    callable = tf.function(model.call)
    max_sequence_length = 512
    concrete_function = callable.get_concrete_function([
        tf.TensorSpec([None, max_sequence_length], tf.int32, name="input_ids"),
        tf.TensorSpec([None, max_sequence_length],
                      tf.int32,
                      name="attention_mask")
    ])
    print(concrete_function)

    # save huggingface model as tf saved_model
    tf.saved_model.save(model, 'distilbert_nela', signatures=concrete_function)

    # convert saved_model to tensorflow.js web format
    convert_tf_saved_model('distilbert_nela', 'distilbert_nela_js')

    # convert tokenizer vocab
    vocab = []
    with open('model/vocab.txt', 'r', encoding='utf-8') as f:
        for line in f:
            line = line.rstrip()
            # The tokenizer library we're using marks tokens the opposite
            # way from huggingface: continuations are bare and word starts
            # carry a U+2581 prefix.
            if line.startswith('##'):
                # Drop the '##' continuation prefix (str.strip would also
                # eat any trailing '#' characters).
                vocab.append(line[2:])
            elif line.startswith('['):
                # Special tokens like [CLS] and [SEP] pass through unchanged.
                vocab.append(line)
            else:
                vocab.append(u'\u2581' + line)

    with open('../src/detection/tokenizer/vocab.json', 'w+',
              encoding='utf-8') as f:
        json.dump(vocab, f, ensure_ascii=False)
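For reference, U+2581 ('▁') is the SentencePiece-style word-boundary marker, so the loop above maps WordPiece conventions ('##' marks continuations, bare tokens start words) onto that scheme. A small self-check of the mapping (the sample tokens are hypothetical):

def to_metaspace(line):
    # Mirrors the vocab loop above.
    if line.startswith('##'):
        return line[2:]
    if line.startswith('['):
        return line
    return u'\u2581' + line

assert to_metaspace('##ing') == 'ing'
assert to_metaspace('hello') == u'\u2581hello'
assert to_metaspace('[CLS]') == '[CLS]'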