Code Example #1
import tensorflow as tf

from tflite_model_maker import model_spec
from tflite_model_maker import question_answer


def run(train_data_path,
        validation_data_path,
        export_dir,
        spec='bert_qa',
        **kwargs):
    """Runs demo."""
    # Chooses model specification that represents model.
    spec = model_spec.get(spec)

    # Gets training data and validation data.
    train_data = question_answer.DataLoader.from_squad(train_data_path,
                                                       spec,
                                                       is_training=True)
    validation_data = question_answer.DataLoader.from_squad(
        validation_data_path, spec, is_training=False)

    # Fine-tunes the model.
    model = question_answer.create(train_data, model_spec=spec, **kwargs)

    # Gets evaluation results.
    metric = model.evaluate(validation_data)
    tf.compat.v1.logging.info('Eval F1 score: %f' % metric['final_f1'])

    # Exports to TFLite format.
    model.export(export_dir)
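
A minimal invocation sketch for the demo above; the SQuAD-format paths, export directory, and epoch count are placeholders rather than values from the original:

# Hypothetical call to run(); point the paths at your own SQuAD-format JSON files.
run(train_data_path='train-v1.1.json',
    validation_data_path='dev-v1.1.json',
    export_dir='/tmp/qa_model',
    epochs=2)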
Code Example #2
from tflite_model_maker import audio_classifier
from tflite_model_maker import model_spec


def run(spec, data_dir, dataset_type, export_dir, **kwargs):
    """Runs demo."""
    spec = model_spec.get(spec)

    if dataset_type == 'esc50':
        # Limit to 2 categories to speed up the demo
        categories = ['dog', 'cat']
        train_data = audio_classifier.DataLoader.from_esc50(
            spec, data_dir, folds=[0, 1, 2, 3], categories=categories)
        validation_data = audio_classifier.DataLoader.from_esc50(
            spec, data_dir, folds=[4], categories=categories)
        test_data = audio_classifier.DataLoader.from_esc50(
            spec, data_dir, folds=[5], categories=categories)

    else:
        data = audio_classifier.DataLoader.from_folder(spec, data_dir)
        train_data, rest_data = data.split(0.8)
        validation_data, test_data = rest_data.split(0.5)

    print('Training the model')
    model = audio_classifier.create(train_data, spec, validation_data,
                                    **kwargs)

    print('Evaluating the model')
    _, acc = model.evaluate(test_data)
    print('Test accuracy: %f' % acc)

    print('Confusion matrix: ')
    print(model.confusion_matrix(test_data))

    model.export(export_dir)
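
A hedged usage sketch for the audio demo; 'audio_browser_fft' is one of the Model Maker audio spec names, and the paths are placeholders:

# Hypothetical call to run() with the ESC-50 dataset layout.
run(spec='audio_browser_fft',
    data_dir='/tmp/esc50',
    dataset_type='esc50',
    export_dir='/tmp/audio_model',
    epochs=5)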
Code Example #3
import os

from tflite_model_maker import model_spec as ms
from tflite_model_maker import recommendation


def run(data_dir, export_dir, batch_size=16, epochs=5, encoder_type='bow'):
    """Runs demo."""
    meta = recommendation.DataLoader.generate_movielens_dataset(data_dir)
    num_classes = recommendation.DataLoader.get_num_classes(meta)

    input_spec = get_input_spec(encoder_type, num_classes)
    train_data = recommendation.DataLoader.from_movielens(
        data_dir, 'train', input_spec)
    test_data = recommendation.DataLoader.from_movielens(
        data_dir, 'test', input_spec)

    model_spec = ms.get('recommendation',
                        input_spec=input_spec,
                        model_hparams=get_model_hparams())
    # Create a model and train.
    model = recommendation.create(train_data,
                                  model_spec=model_spec,
                                  model_dir=export_dir,
                                  validation_data=test_data,
                                  batch_size=batch_size,
                                  epochs=epochs)

    # Evaluate with test_data.
    history = model.evaluate(test_data)
    print('Test metrics from Keras model: %s' % history)

    # Export tflite model.
    model.export(export_dir)

    # Evaluate tflite model.
    tflite_model = os.path.join(export_dir, 'model.tflite')
    history = model.evaluate_tflite(tflite_model, test_data)
    print('Test metrics from TFLite model: %s' % history)
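
Note that get_input_spec and get_model_hparams are helper functions defined elsewhere in the demo script, so they are not shown here. A hedged invocation sketch with placeholder paths:

# Hypothetical call to run(); data_dir should contain the generated MovieLens files.
run(data_dir='/tmp/movielens',
    export_dir='/tmp/recommendation_model',
    batch_size=16,
    epochs=5,
    encoder_type='bow')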
Code Example #4
import os

from tflite_model_maker import audio_classifier
from tflite_model_maker import model_spec


def run(spec, data_dir, dataset_type, export_dir, **kwargs):
    """Runs demo."""
    spec = model_spec.get(spec)

    if dataset_type == 'esc50':
        # Limit to 2 categories to speed up the demo
        categories = ['dog', 'cat']
        train_data = audio_classifier.DataLoader.from_esc50(
            spec, data_dir, folds=[0, 1, 2, 3], categories=categories)
        validation_data = audio_classifier.DataLoader.from_esc50(
            spec, data_dir, folds=[4], categories=categories)
        test_data = audio_classifier.DataLoader.from_esc50(
            spec, data_dir, folds=[5], categories=categories)
    elif dataset_type == 'bird':
        if isinstance(spec, audio_classifier.YamNetSpec):
            # In some files, two consecutive bird sounds might be too far apart, so
            # increase the window size to have a higher probability of capturing the
            # bird sound.
            spec = audio_classifier.YamNetSpec(
                keep_yamnet_and_custom_heads=True,
                frame_length=6 * audio_classifier.YamNetSpec.EXPECTED_WAVEFORM_LENGTH)
        else:
            raise ValueError(
                'Bird dataset can only be used with YAMNet model.')
        train_data = audio_classifier.DataLoader.from_folder(
            spec, os.path.join(data_dir, 'train'), cache=True)
        train_data, validation_data = train_data.split(0.8)
        test_data = audio_classifier.DataLoader.from_folder(
            spec, os.path.join(data_dir, 'test'), cache=True)

    else:
        data = audio_classifier.DataLoader.from_folder(spec, data_dir)
        train_data, rest_data = data.split(0.8)
        validation_data, test_data = rest_data.split(0.5)

    print('\nTraining the model')
    model = audio_classifier.create(train_data, spec, validation_data,
                                    **kwargs)

    print('\nEvaluating the model')
    model.evaluate(test_data)

    print('\nConfusion matrix: ')
    print(model.confusion_matrix(test_data))
    print('labels: ', test_data.index_to_label)

    model.export(export_dir)
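
Once exported, the model can be sanity-checked with the standard TensorFlow Lite interpreter. A minimal sketch, assuming the default exported filename model.tflite under a hypothetical export directory; the zero tensor is only a shape/dtype check, not a real waveform:

import numpy as np
import tensorflow as tf

# Load the exported TFLite model (path is hypothetical).
interpreter = tf.lite.Interpreter(model_path='/tmp/audio_model/model.tflite')
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()[0]
output_details = interpreter.get_output_details()[0]

# Feed a zero tensor of the expected shape and dtype, then read the scores.
dummy = np.zeros(input_details['shape'], dtype=input_details['dtype'])
interpreter.set_tensor(input_details['index'], dummy)
interpreter.invoke()
scores = interpreter.get_tensor(output_details['index'])
print('Output shape:', scores.shape)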
Code Example #5
from tflite_model_maker import image_classifier
from tflite_model_maker import model_spec
from tflite_model_maker.config import ExportFormat


def run(data_dir, export_dir, spec='efficientnet_lite0', **kwargs):
  """Runs demo."""
  spec = model_spec.get(spec)
  data = image_classifier.DataLoader.from_folder(data_dir)
  train_data, rest_data = data.split(0.8)
  validation_data, test_data = rest_data.split(0.5)

  model = image_classifier.create(
      train_data, model_spec=spec, validation_data=validation_data, **kwargs)

  _, acc = model.evaluate(test_data)
  print('Test accuracy: %f' % acc)

  # Exports to TFLite and SavedModel, with label file.
  export_format = [
      ExportFormat.TFLITE,
      ExportFormat.SAVED_MODEL,
  ]
  model.export(export_dir, export_format=export_format)
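
A hedged usage sketch; the flower_photos archive below is the dataset commonly paired with this demo in TensorFlow tutorials, and the export directory is a placeholder:

import os
import tensorflow as tf

# Download and extract a small public image dataset, then run the demo on it.
image_path = tf.keras.utils.get_file(
    'flower_photos.tgz',
    'https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz',
    extract=True)
run(data_dir=os.path.join(os.path.dirname(image_path), 'flower_photos'),
    export_dir='/tmp/image_model',
    epochs=5)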
Code Example #6
import os

from tflite_model_maker import model_spec
from tflite_model_maker import text_classifier
from tflite_model_maker.config import ExportFormat


def run(data_dir, export_dir, spec='bert_classifier', **kwargs):
    """Runs demo."""
    # Chooses model specification that represents model.
    spec = model_spec.get(spec)

    # Gets training data and validation data.
    train_data = text_classifier.DataLoader.from_csv(
        filename=os.path.join(data_dir, 'train.tsv'),
        text_column='sentence',
        label_column='label',
        model_spec=spec,
        delimiter='\t',
        is_training=True)
    validation_data = text_classifier.DataLoader.from_csv(
        filename=os.path.join(data_dir, 'dev.tsv'),
        text_column='sentence',
        label_column='label',
        model_spec=spec,
        delimiter='\t',
        is_training=False)

    # Fine-tunes the model.
    model = text_classifier.create(train_data,
                                   model_spec=spec,
                                   validation_data=validation_data,
                                   **kwargs)

    # Gets evaluation results.
    _, acc = model.evaluate(validation_data)
    print('Eval accuracy: %f' % acc)

    # Exports to TFLite and SavedModel, with label and vocab files.
    export_format = [
        ExportFormat.TFLITE,
        ExportFormat.SAVED_MODEL,
    ]
    model.export(export_dir, export_format=export_format)
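
A hedged invocation sketch; the data directory is assumed to contain train.tsv and dev.tsv with 'sentence' and 'label' columns (the SST-2 layout), and all paths are placeholders:

# Hypothetical call to run() on an SST-2-style dataset.
run(data_dir='/tmp/SST-2',
    export_dir='/tmp/text_model',
    spec='average_word_vec',
    epochs=3)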
Code Example #7
File: medleaf_test.py  Project: Dinusbank/ML
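# Excerpt: the indented plotting calls below run inside a loop over
# sample (image, label) pairs drawn from the dataset.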
  plt.yticks([])
  plt.grid(False)
  plt.imshow(image.numpy(), cmap=plt.cm.gray)
  plt.xlabel(data.index_to_label[label.numpy()])

plt.show()

"""### **7). Use Pre-trained Model**
Here, we use EfficientNet Lite1 as a base model for image classification and build custom tflite model

More info, click here:
<br>
https://blog.tensorflow.org/2020/03/higher-accuracy-on-vision-models-with-efficientnet-lite.html
"""

efficientnet_model = model_spec.get("efficientnet_lite1")

"""### **8). Training and Creating**
Here, we begin train the entire datasets and also create custom model based on pre-trained model
"""

model = image_classifier.create(train_data,
                                epochs=10,
                                validation_data=validation_data,
                                use_augmentation=True,
                                shuffle=True,
                                model_spec=efficientnet_model)

"""### **9). Display Model Summary**"""

model.summary()
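
The excerpt stops at the summary; a typical next step in this kind of notebook is evaluation and TFLite export. A hedged sketch, reusing validation_data from the create() call above:

# Hypothetical continuation, not part of the original excerpt.
loss, accuracy = model.evaluate(validation_data)
print('Validation accuracy: %f' % accuracy)
model.export(export_dir='.')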
Code Example #8
import tensorflow as tf
import numpy as np
import os

from tflite_model_maker.config import ExportFormat
from tflite_model_maker import model_spec
from tflite_model_maker import object_detector

spec = model_spec.get('efficientdet_lite0')

train_data = object_detector.DataLoader.from_csv(
    "/data/datasets/traffic_sign/train.csv")
model = object_detector.create(train_data,
                               model_spec=spec,
                               batch_size=8,
                               train_whole_model=True)
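
The snippet ends right after create(); a hedged sketch of the usual follow-up is exporting the trained detector, using the ExportFormat import already present above:

# Hypothetical continuation, not in the original snippet.
model.export(export_dir='.', export_format=ExportFormat.TFLITE)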