Example No. 1
def test():
    """Calculate accuracy and confusion matrices on validation and test sets.

    Model is created and weights loaded from supplied command line arguments.
    """
    model_settings = models.prepare_model_settings(
        len(data.prepare_words_list(FLAGS.wanted_words.split(','))),
        FLAGS.sample_rate, FLAGS.clip_duration_ms, FLAGS.window_size_ms,
        FLAGS.window_stride_ms, FLAGS.dct_coefficient_count)

    model = models.create_model(model_settings, FLAGS.model_architecture,
                                FLAGS.model_size_info)

    audio_processor = data.AudioProcessor(
        data_url=FLAGS.data_url,
        data_dir=FLAGS.data_dir,
        silence_percentage=FLAGS.silence_percentage,
        unknown_percentage=FLAGS.unknown_percentage,
        wanted_words=FLAGS.wanted_words.split(','),
        validation_percentage=FLAGS.validation_percentage,
        testing_percentage=FLAGS.testing_percentage,
        model_settings=model_settings)

    model.load_weights(FLAGS.checkpoint).expect_partial()

    # Evaluate on validation set.
    print("Running testing on validation set...")
    val_data = audio_processor.get_data(
        audio_processor.Modes.VALIDATION).batch(FLAGS.batch_size)
    expected_indices = np.concatenate([y for x, y in val_data])

    predictions = model.predict(val_data)
    predicted_indices = tf.argmax(predictions, axis=1)

    val_accuracy = calculate_accuracy(predicted_indices, expected_indices)
    confusion_matrix = tf.math.confusion_matrix(
        expected_indices,
        predicted_indices,
        num_classes=model_settings['label_count'])
    print(confusion_matrix.numpy())
    print(f'Validation accuracy = {val_accuracy * 100:.2f}% '
          f'(N={audio_processor.set_size(audio_processor.Modes.VALIDATION)})')

    # Evaluate on testing set.
    print("Running testing on test set...")
    test_data = audio_processor.get_data(audio_processor.Modes.TESTING).batch(
        FLAGS.batch_size)
    expected_indices = np.concatenate([y for x, y in test_data])

    predictions = model.predict(test_data)
    predicted_indices = tf.argmax(predictions, axis=1)

    test_accuracy = calculate_accuracy(predicted_indices, expected_indices)
    confusion_matrix = tf.math.confusion_matrix(
        expected_indices,
        predicted_indices,
        num_classes=model_settings['label_count'])
    print(confusion_matrix.numpy())
    print(f'Test accuracy = {test_accuracy * 100:.2f}% '
          f'(N={audio_processor.set_size(audio_processor.Modes.TESTING)})')
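
Both evaluation passes above call a `calculate_accuracy` helper that is not shown in this example. A minimal sketch of what it could look like, assuming both arguments are 1-D integer tensors/arrays of equal length:

def calculate_accuracy(predicted_indices, expected_indices):
    # Hypothetical helper: fraction of predictions matching the labels.
    correct = tf.equal(predicted_indices,
                       tf.cast(expected_indices, tf.int64))
    return tf.reduce_mean(tf.cast(correct, tf.float32)).numpy()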
Example No. 2
def main():
    model_settings = models.prepare_model_settings(
        len(data.prepare_words_list(FLAGS.wanted_words.split(','))),
        FLAGS.sample_rate, FLAGS.clip_duration_ms, FLAGS.window_size_ms,
        FLAGS.window_stride_ms, FLAGS.dct_coefficient_count)

    audio_processor = data.AudioProcessor(
        data_url=FLAGS.data_url,
        data_dir=FLAGS.data_dir,
        silence_percentage=FLAGS.silence_percentage,
        unknown_percentage=FLAGS.unknown_percentage,
        wanted_words=FLAGS.wanted_words.split(','),
        validation_percentage=FLAGS.validation_percentage,
        testing_percentage=FLAGS.testing_percentage,
        model_settings=model_settings)

    if FLAGS.quantize:
        tflite_path = f'{FLAGS.model_architecture}_quantized.tflite'
    else:
        tflite_path = f'{FLAGS.model_architecture}.tflite'

    # Load floating point model from checkpoint and convert it.
    convert(model_settings, audio_processor, FLAGS.checkpoint, FLAGS.quantize,
            FLAGS.inference_type, tflite_path)

    # Test the newly converted model on the test set.
    tflite_test(model_settings, audio_processor, tflite_path)
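
The `convert` helper referenced here is defined elsewhere. A hedged sketch of what it plausibly does with `tf.lite.TFLiteConverter`; the `representative_dataset` name and the use of `FLAGS` inside it are our own assumptions, and `inference_type` (which would select float vs. integer input/output in the full helper) is ignored:

def convert(model_settings, audio_processor, checkpoint, quantize,
            inference_type, tflite_path):
    # Sketch only: rebuild the model, load the checkpoint, convert.
    model = models.create_model(model_settings, FLAGS.model_architecture,
                                FLAGS.model_size_info)
    model.load_weights(checkpoint).expect_partial()

    converter = tf.lite.TFLiteConverter.from_keras_model(model)
    if quantize:
        # Post-training quantization calibrates activation ranges on a
        # handful of real inputs.
        def representative_dataset():
            val_data = audio_processor.get_data(
                audio_processor.Modes.VALIDATION).batch(1)
            for mfcc, _ in val_data.take(100):
                yield [mfcc]

        converter.optimizations = [tf.lite.Optimize.DEFAULT]
        converter.representative_dataset = representative_dataset
    with open(tflite_path, 'wb') as f:
        f.write(converter.convert())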
Example No. 3
def get_weight():
    model_settings = models.prepare_model_settings(
        len(data.prepare_words_list(FLAGS.wanted_words.split(','))),
        FLAGS.sample_rate, FLAGS.clip_duration_ms, FLAGS.window_size_ms,
        FLAGS.window_stride_ms, FLAGS.dct_coefficient_count)

    model = models.create_model(model_settings, FLAGS.model_architecture,
                                FLAGS.model_size_info)
    print(len(data.prepare_words_list(FLAGS.wanted_words.split(','))),
          data.prepare_words_list(FLAGS.wanted_words.split(',')))
    model.load_weights(FLAGS.checkpoint).expect_partial()
    model.summary()
    model_weights = model.get_weights()
    # Weight tensors have differing shapes, so an object array is needed.
    arr = np.array(model_weights, dtype=object)

    #np.set_printoptions(threshold=sys.maxsize)
    #np.set_printoptions(precision=14, suppress=True)
    write_txt(arr, FLAGS.output_file)
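
`write_txt` is not shown in this example. A minimal sketch, assuming it simply dumps every weight tensor to a text file (the 14-digit format mirrors the commented-out `precision=14` hint above):

def write_txt(arr, output_file):
    # Hypothetical helper: one flattened tensor per block, tagged with
    # its index and shape.
    with open(output_file, 'w') as f:
        for i, tensor in enumerate(arr):
            f.write(f'# weight tensor {i}, shape {np.shape(tensor)}\n')
            np.savetxt(f, np.ravel(tensor), fmt='%.14f')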
Example No. 4
def main():
    model_settings = models.prepare_model_settings(
        len(data.prepare_words_list(FLAGS.wanted_words.split(','))),
        FLAGS.sample_rate, FLAGS.clip_duration_ms, FLAGS.window_size_ms,
        FLAGS.window_stride_ms, FLAGS.dct_coefficient_count)

    audio_processor = data.AudioProcessor(
        data_url=FLAGS.data_url,
        data_dir=FLAGS.data_dir,
        silence_percentage=FLAGS.silence_percentage,
        unknown_percentage=FLAGS.unknown_percentage,
        wanted_words=FLAGS.wanted_words.split(','),
        validation_percentage=FLAGS.validation_percentage,
        testing_percentage=FLAGS.testing_percentage,
        model_settings=model_settings)

    tflite_test(model_settings, audio_processor, FLAGS.tflite_path)
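
`tflite_test` is shared by several of these examples but never shown. A minimal sketch using `tf.lite.Interpreter`, assuming a float-input model (a fully quantized model would also need input/output scaling from the interpreter's quantization parameters):

def tflite_test(model_settings, audio_processor, tflite_path):
    interpreter = tf.lite.Interpreter(model_path=tflite_path)
    interpreter.allocate_tensors()
    input_details = interpreter.get_input_details()[0]
    output_details = interpreter.get_output_details()[0]

    test_data = audio_processor.get_data(
        audio_processor.Modes.TESTING).batch(1)
    correct = total = 0
    for mfcc, label in test_data:
        interpreter.set_tensor(
            input_details['index'],
            np.asarray(mfcc, dtype=input_details['dtype']))
        interpreter.invoke()
        output = interpreter.get_tensor(output_details['index'])
        correct += int(np.argmax(output) == int(label.numpy()[0]))
        total += 1
    print(f'TFLite test accuracy = {correct / total * 100:.2f}% (N={total})')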
Example No. 5
def train():
    model_settings = models.prepare_model_settings(len(data.prepare_words_list(FLAGS.wanted_words.split(','))),
                                                   FLAGS.sample_rate, FLAGS.clip_duration_ms, FLAGS.window_size_ms,
                                                   FLAGS.window_stride_ms, FLAGS.dct_coefficient_count)

    # Create the model.
    model = models.create_model(model_settings, FLAGS.model_architecture, FLAGS.model_size_info)

    audio_processor = data.AudioProcessor(data_url=FLAGS.data_url,
                                          data_dir=FLAGS.data_dir,
                                          silence_percentage=FLAGS.silence_percentage,
                                          unknown_percentage=FLAGS.unknown_percentage,
                                          wanted_words=FLAGS.wanted_words.split(','),
                                          validation_percentage=FLAGS.validation_percentage,
                                          testing_percentage=FLAGS.testing_percentage,
                                          model_settings=model_settings)

    # We decay the learning rate in a piecewise-constant fashion to help learning.
    training_steps_list = list(map(int, FLAGS.how_many_training_steps.split(',')))
    learning_rates_list = list(map(float, FLAGS.learning_rate.split(',')))
    lr_boundary_list = training_steps_list[:-1]  # Only need the values at which to change lr.
    lr_schedule = tf.keras.optimizers.schedules.PiecewiseConstantDecay(boundaries=lr_boundary_list,
                                                                       values=learning_rates_list)

    # Specify the optimizer configurations.
    optimizer = tf.keras.optimizers.Adam(learning_rate=lr_schedule)
    model.compile(optimizer=optimizer,
                  loss=tf.keras.losses.SparseCategoricalCrossentropy(),
                  metrics=['accuracy'])

    train_data = audio_processor.get_data(audio_processor.Modes.TRAINING,
                                          FLAGS.background_frequency, FLAGS.background_volume,
                                          int((FLAGS.time_shift_ms * FLAGS.sample_rate) / 1000))
    train_data = train_data.repeat().batch(FLAGS.batch_size).prefetch(1)
    val_data = audio_processor.get_data(audio_processor.Modes.VALIDATION)
    val_data = val_data.batch(FLAGS.batch_size).prefetch(1)

    # We train for a maximum number of iterations, so we need to calculate how many 'epochs' that corresponds to.
    training_steps_max = np.sum(training_steps_list)
    training_epoch_max = int(np.ceil(training_steps_max / FLAGS.eval_step_interval))

    train_dir = Path(FLAGS.train_dir) / "best"
    train_dir.mkdir(parents=True, exist_ok=True)
    model_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
        filepath=(train_dir / (FLAGS.model_architecture + "_{val_accuracy:.3f}_ckpt")),
        save_weights_only=True,
        monitor='val_accuracy',
        mode='max',
        save_best_only=True)

    # Train the model
    model.fit(x=train_data,
              steps_per_epoch=FLAGS.eval_step_interval,
              epochs=training_epoch_max,
              validation_data=val_data,
              callbacks=[model_checkpoint_callback])

    # Test and save the model.
    test_data = audio_processor.get_data(audio_processor.Modes.TESTING)
    test_data = test_data.batch(FLAGS.batch_size)

    test_loss, test_acc = model.evaluate(x=test_data)
    print(f'Final test accuracy: {test_acc*100:.2f}%')
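
For intuition, `PiecewiseConstantDecay` holds each learning rate constant until its boundary step is crossed. With assumed flag values `--how_many_training_steps 10000,10000` and `--learning_rate 0.001,0.0001`, the schedule built above behaves like this:

lr_schedule = tf.keras.optimizers.schedules.PiecewiseConstantDecay(
    boundaries=[10000], values=[0.001, 0.0001])
print(lr_schedule(0).numpy())      # 0.001, used up to step 10000
print(lr_schedule(15000).numpy())  # 0.0001, used afterwards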
Example No. 6
def train():
    model_settings = models.prepare_model_settings(
        len(data.prepare_words_list(FLAGS.wanted_words.split(','))),
        FLAGS.sample_rate, FLAGS.clip_duration_ms, FLAGS.window_size_ms,
        FLAGS.window_stride_ms, FLAGS.dct_coefficient_count)

    # Create the model.
    model = models.create_model(model_settings, FLAGS.model_architecture,
                                FLAGS.model_size_info)

    audio_processor = data.AudioProcessor(
        data_url=FLAGS.data_url,
        data_dir=FLAGS.data_dir,
        silence_percentage=FLAGS.silence_percentage,
        unknown_percentage=FLAGS.unknown_percentage,
        wanted_words=FLAGS.wanted_words.split(','),
        validation_percentage=FLAGS.validation_percentage,
        testing_percentage=FLAGS.testing_percentage,
        model_settings=model_settings)

    # We decay the learning rate in a piecewise-constant fashion to help learning.
    training_steps_list = list(
        map(int, FLAGS.how_many_training_steps.split(',')))
    learning_rates_list = list(map(float, FLAGS.learning_rate.split(',')))
    lr_boundary_list = training_steps_list[:-1]  # Only need the values at which to change lr.
    lr_schedule = tf.keras.optimizers.schedules.PiecewiseConstantDecay(
        boundaries=lr_boundary_list, values=learning_rates_list)

    # Specify the optimizer configurations.
    optimizer = tf.keras.optimizers.Adam(learning_rate=lr_schedule)
    model.compile(optimizer=optimizer,
                  loss=tf.keras.losses.SparseCategoricalCrossentropy(),
                  metrics=['accuracy'])

    log_dir = FLAGS.summaries_dir
    tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir)

    train_data = audio_processor.get_data(
        audio_processor.Modes.TRAINING, FLAGS.background_frequency,
        FLAGS.background_volume,
        int((FLAGS.time_shift_ms * FLAGS.sample_rate) / 1000))
    train_data = train_data.repeat().batch(FLAGS.batch_size).prefetch(1)
    val_data = audio_processor.get_data(audio_processor.Modes.VALIDATION)
    val_data = val_data.batch(FLAGS.batch_size).prefetch(1)

    # We train for a maximum number of iterations, so we need to calculate how many 'epochs' that corresponds to.
    training_steps_max = np.sum(training_steps_list)
    training_epoch_max = int(
        np.ceil(training_steps_max / FLAGS.eval_step_interval))

    train_dir = Path(FLAGS.train_dir) / "best"
    train_dir.mkdir(parents=True, exist_ok=True)
    model_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
        filepath=(train_dir /
                  (FLAGS.model_architecture + "_{val_accuracy:.3f}_ckpt")),
        save_weights_only=True,
        monitor='val_accuracy',
        mode='max',
        save_best_only=True)

    # Train the model
    history = model.fit(
        x=train_data,
        steps_per_epoch=FLAGS.eval_step_interval,
        epochs=training_epoch_max,
        validation_data=val_data,
        callbacks=[model_checkpoint_callback, tensorboard_callback])

    # Plot training and validation curves with pyplot.

    # Retrieve a list of accuracy results on training and validation data
    # sets for each training epoch
    acc = history.history['accuracy']
    val_acc = history.history['val_accuracy']

    # Retrieve a list of loss results on training and validation data
    # sets for each training epoch
    loss = history.history['loss']
    val_loss = history.history['val_loss']

    # Epoch indices for the x-axis
    epochs = range(len(acc))

    # Plot training and validation accuracy per epoch
    plt.plot(epochs, acc, label="accuracy")
    plt.plot(epochs, val_acc, label="val_accuracy")
    plt.title('Training and validation accuracy')
    plt.ylabel('accuracy/val_accuracy')
    plt.xlabel('epoch')
    plt.legend()
    plt.savefig(
        f'./{FLAGS.model_architecture}_{FLAGS.model_size_info[0]}_acc_val_acc_batch_{FLAGS.batch_size}.png'
    )
    plt.figure()

    # Plot training and validation loss per epoch
    plt.plot(epochs, loss, label="loss")
    plt.plot(epochs, val_loss, label="val_loss")
    plt.title('Training and validation loss')
    plt.ylabel('loss/val_loss')
    plt.xlabel('epoch')
    plt.legend()
    plt.savefig(
        f'./{FLAGS.model_architecture}_{FLAGS.model_size_info[0]}_loss_val_loss_batch_{FLAGS.batch_size}.png'
    )

    # Test and save the model.
    test_data = audio_processor.get_data(audio_processor.Modes.TESTING)
    test_data = test_data.batch(FLAGS.batch_size)

    test_loss, test_acc = model.evaluate(x=test_data)
    print(f'Final test accuracy: {test_acc*100:.2f}%')
Example No. 7
def optimize():
    model_settings = models.prepare_model_settings(
        len(data.prepare_words_list(FLAGS.wanted_words.split(','))),
        FLAGS.sample_rate, FLAGS.clip_duration_ms, FLAGS.window_size_ms,
        FLAGS.window_stride_ms, FLAGS.dct_coefficient_count)

    # Create the model to optimize from checkpoint.
    model = models.create_model(model_settings, FLAGS.model_architecture,
                                FLAGS.model_size_info)
    model.load_weights(FLAGS.checkpoint).expect_partial()

    audio_processor = data.AudioProcessor(
        data_url=FLAGS.data_url,
        data_dir=FLAGS.data_dir,
        silence_percentage=FLAGS.silence_percentage,
        unknown_percentage=FLAGS.unknown_percentage,
        wanted_words=FLAGS.wanted_words.split(','),
        validation_percentage=FLAGS.validation_percentage,
        testing_percentage=FLAGS.testing_percentage,
        model_settings=model_settings)

    # We decay the learning rate in a piecewise-constant fashion to help learning.
    training_steps_list = list(
        map(int, FLAGS.how_many_training_steps.split(',')))
    learning_rates_list = list(map(float, FLAGS.learning_rate.split(',')))
    lr_boundary_list = training_steps_list[:-1]  # Only need the values at which to change lr.
    lr_schedule = tf.keras.optimizers.schedules.PiecewiseConstantDecay(
        boundaries=lr_boundary_list, values=learning_rates_list)

    cluster_weights = tfmot.clustering.keras.cluster_weights
    CentroidInitialization = tfmot.clustering.keras.CentroidInitialization

    clustering_params = {
        'number_of_clusters': 32,
        'cluster_centroids_init': CentroidInitialization.KMEANS_PLUS_PLUS
    }

    clustered_model = cluster_weights(model, **clustering_params)

    # Specify the optimizer configurations.
    optimizer = tf.keras.optimizers.Adam(learning_rate=lr_schedule)
    clustered_model.compile(
        optimizer=optimizer,
        loss=tf.keras.losses.SparseCategoricalCrossentropy(),
        metrics=['accuracy'])

    train_data = audio_processor.get_data(
        audio_processor.Modes.TRAINING, FLAGS.background_frequency,
        FLAGS.background_volume,
        int((FLAGS.time_shift_ms * FLAGS.sample_rate) / 1000))
    train_data = train_data.repeat().batch(FLAGS.batch_size).prefetch(
        tf.data.AUTOTUNE)
    val_data = audio_processor.get_data(audio_processor.Modes.VALIDATION)
    val_data = val_data.batch(FLAGS.batch_size).prefetch(tf.data.AUTOTUNE)

    # We train for a maximum number of iterations, so we need to calculate how many 'epochs' that corresponds to.
    training_steps_max = np.sum(training_steps_list)
    training_epoch_max = int(
        np.ceil(training_steps_max / FLAGS.eval_step_interval))

    # Train the model with clustering applied.
    clustered_model.fit(x=train_data,
                        steps_per_epoch=FLAGS.eval_step_interval,
                        epochs=training_epoch_max,
                        validation_data=val_data)

    stripped_clustered_model = tfmot.clustering.keras.strip_clustering(
        clustered_model)

    print_model_weight_clusters(stripped_clustered_model)

    # Save the clustered model weights
    train_dir = Path(FLAGS.train_dir) / "optimized"
    train_dir.mkdir(parents=True, exist_ok=True)

    stripped_clustered_model.save_weights(
        (train_dir / (FLAGS.model_architecture + "_clustered_ckpt")))

    # Test the model.
    test_data = audio_processor.get_data(audio_processor.Modes.TESTING)
    test_data = test_data.batch(FLAGS.batch_size)

    stripped_clustered_model.compile(
        optimizer=optimizer,
        loss=tf.keras.losses.SparseCategoricalCrossentropy(),
        metrics=['accuracy'])

    test_loss, test_acc = stripped_clustered_model.evaluate(x=test_data)
    print(f'Final test accuracy: {test_acc*100:.2f}%')
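
`print_model_weight_clusters` is not defined in this example. A sketch along the lines of the TensorFlow Model Optimization clustering tutorial, counting unique kernel values per layer to confirm that clustering took effect:

def print_model_weight_clusters(model):
    # After clustering, each kernel should contain at most
    # 'number_of_clusters' (32 above) distinct values.
    for layer in model.layers:
        for weight in layer.weights:
            if 'kernel' in weight.name:
                unique_count = len(np.unique(weight.numpy()))
                print(f'{layer.name}/{weight.name}: {unique_count} clusters')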