Example #1
def lstm_example():
    to_flatten = False
    x_train, x_test, y_train, y_test, num_labels = extract_data(
        flatten=to_flatten)
    y_train = np_utils.to_categorical(y_train)
    y_test_train = np_utils.to_categorical(y_test)
    # train-from-scratch block (commented out; a saved model is loaded below instead)
    #    print('Starting LSTM')
    #    model = LSTM(input_shape=x_train[0].shape,
    #                 num_classes=num_labels)
    #    print('x shape', x_train[0].shape)
    #    print('num labels',num_labels)
    #
    #    model.train(x_train, y_train, x_test, y_test_train, n_epochs=50)
    #    model.evaluate(x_test, y_test)
    #    model.save_model()

    newmodel = LSTM(input_shape=x_train[0].shape, num_classes=num_labels)
    newmodel.load_model(to_load="./LSTM_best_model.h5")
    newmodel.train(x_train, y_train, x_test, y_test_train, n_epochs=0)

    audio_file = "../dataset/union-interview.wav"

    section_by_section_analysis(model=newmodel,
                                audio_file=audio_file,
                                to_flatten=to_flatten)
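
The n_epochs=0 call above presumably only runs the restored model through its training setup without changing the loaded weights. As a minimal sketch (illustrative file path, reusing the helpers seen in the other examples), a single clip could also be scored directly at the end of lstm_example:

    # Sketch: predict one clip with the restored weights; the path is illustrative.
    sample_file = '../dataset/Sad/09b03Ta.wav'
    features = get_feature_vector_from_mfcc(sample_file, flatten=to_flatten)
    print('prediction', newmodel.predict_one(features))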
Example #2
def ml_example():
    to_flatten = True
    x_train, x_test, y_train, y_test, _ = extract_data(flatten=to_flatten)
    model = NN()
    print('Starting', model.name)
    model.train(x_train, y_train)
    model.evaluate(x_test, y_test)
    filename = '../dataset/Sad/09b03Ta.wav'
    print('prediction', model.predict_one(
        get_feature_vector_from_mfcc(filename, flatten=to_flatten)),
          'Actual 3')
Example #3
def ml_example():
    to_flatten = True
    x_train, x_test, y_train, y_test, _ = extract_data(flatten=to_flatten)
    model = RF()
    print('Starting', model.name)
    model.train(x_train, y_train)
    filename = 'rf_finalized_model.sav'
    pickle.dump(model, open(filename, 'wb'))
    model.evaluate(x_test, y_test)
    filename = '../AudioData/test--anger.wav'
    print(
        'prediction',
        model.predict_one(
            get_feature_vector_from_mfcc(filename, flatten=to_flatten)),
        'Actual: Anger')
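
Since the trained RF model is pickled to rf_finalized_model.sav, it can be restored later without retraining. A minimal stand-alone sketch of the loading side, mirroring Example #10 below:

# Sketch: reload the pickled RF model in a later session and reuse it.
import pickle
from common import extract_data

with open('rf_finalized_model.sav', 'rb') as f:
    loaded_model = pickle.load(f)
_, x_test, _, y_test, _ = extract_data(flatten=True)
loaded_model.evaluate(x_test, y_test)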
Example #4
def lstm_example():
    to_flatten = False
    x_train, x_test, y_train, y_test, num_labels = extract_data(
        flatten=to_flatten)
    y_train = np_utils.to_categorical(y_train)
    y_test_train = np_utils.to_categorical(y_test)
    print('Starting LSTM')
    model = LSTM(input_shape=x_train[0].shape, num_classes=num_labels)
    # model.train(x_train, y_train, x_test, y_test_train, n_epochs=50)
    # model.evaluate(x_test, y_test)
    filename = 'dataset/Sad/09b03Ta.wav'
    print(
        'prediction',
        model.predict_one(
            get_feature_vector_from_mfcc(filename, flatten=to_flatten)),
        'Actual 3')
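
With the training call commented out, the prediction above comes from freshly initialized weights. A hedged alternative, mirroring the checkpoint handling in Examples #1 and #5, is to restore saved weights first (the .h5 path is illustrative):

    # Sketch: load previously saved weights before predicting; the checkpoint
    # path is illustrative and must already exist.
    model.load_model(to_load='./LSTM_best_model.h5')
    print('prediction with restored weights',
          model.predict_one(
              get_feature_vector_from_mfcc(filename, flatten=to_flatten)),
          'Actual 3')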
Example #5
def lstm_saved_model():
    to_flatten = False
    x_train, x_test, y_train, y_test, num_labels = extract_data(
        flatten=to_flatten)
    y_train = np_utils.to_categorical(y_train)
    y_test_train = np_utils.to_categorical(y_test)
    print('Starting LSTM')
    model = LSTM(input_shape=x_train[0].shape,
                 num_classes=num_labels, save_path="./lstm.h5")
    print("Loading")
    model.restore_model()
    # filename = '../dataset/03a01Wa.wav'
    filename = '../dataset/sad.wav'
    print("\nPredicted: {}\nActual: {}".format(
        get_class_name(
            model.predict_one(get_feature_vector_from_mfcc(filename, flatten=to_flatten))),
        get_class_name(3)))
Example #6
def ml_example():
    to_flatten = True
    x_train, x_test, y_train, y_test, _ = extract_data(flatten=to_flatten)
    model = NN()
    print('Starting', model.name)
    model.train(x_train, y_train)
    model.evaluate(x_test, y_test)
    filename = '../dataset/Sad/09b03Ta.wav'
    i = model.predict_one(
        get_feature_vector_from_mfcc(filename, flatten=to_flatten))
    if i == 0:
        print("Prediction: Angry")
    elif i == 1:
        print("Prediction: Happy")
    elif i == 2:
        print("Prediction: Neutral")
    else:
        print("Prediction: Sad")
Example #7
def cnn_example():
    to_flatten = False
    x_train, x_test, y_train, y_test, num_labels = extract_data(
        flatten=to_flatten)
    y_train = np_utils.to_categorical(y_train)
    y_test_train = np_utils.to_categorical(y_test)
    in_shape = x_train[0].shape
    x_train = x_train.reshape(x_train.shape[0], in_shape[0], in_shape[1], 1)
    x_test = x_test.reshape(x_test.shape[0], in_shape[0], in_shape[1], 1)
    model = CNN(input_shape=x_train[0].shape,
                num_classes=num_labels)
    model.train(x_train, y_train, x_test, y_test_train)
    model.evaluate(x_test, y_test)
    filename = '../dataset/Sad/09b03Ta.wav'
    print('prediction', model.predict_one(
        get_feature_vector_from_mfcc(filename, flatten=to_flatten)),
          'Actual 3')
    print('CNN Done')
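
The two reshape calls in cnn_example add a trailing channel dimension so the MFCC matrices match the CNN's expected (height, width, 1) input. An equivalent sketch using NumPy (assuming numpy is imported as np):

    # Sketch: same channel-dimension trick as the reshape calls above.
    x_train = np.expand_dims(x_train, axis=-1)  # (n, h, w) -> (n, h, w, 1)
    x_test = np.expand_dims(x_test, axis=-1)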
Example #8
def lstm_example():
    to_flatten = False
    x_train, x_test, y_train, y_test, num_labels = extract_data(
        flatten=to_flatten)
    y_train = np_utils.to_categorical(y_train)
    y_test_train = np_utils.to_categorical(y_test)
    print('Starting LSTM')
    model = LSTM(input_shape=x_train[0].shape, num_classes=num_labels)
    print('Model Object Created, Going to train Model')
    model.train(x_train, y_train, x_test, y_test_train, n_epochs=50)
    print('Model Object trained, Going to evaluate Model')
    model.evaluate(x_test, y_test)
    print('Model Object Evaluated, Going to import testing audio')
    filename = './dataset/Sad/09b03Ta.wav'
    print(
        'prediction',
        model.predict_one(
            get_feature_vector_from_mfcc(filename, flatten=to_flatten)),
        'Actual 3')
    print('Finishing LSTM')
Example #9
def lstm_example():
    to_flatten = False
    x_train, x_test, y_train, y_test, num_labels = extract_data(
        flatten=to_flatten)
    y_train = np_utils.to_categorical(y_train)
    y_test_train = np_utils.to_categorical(y_test)
    print('Starting LSTM')
    model = LSTM(input_shape=x_train[0].shape,
                 num_classes=num_labels, save_path="./lstm.h5")
    print("Training")
    model.train(x_train, y_train, x_test, y_test_train, n_epochs=10)
    print("Evaluating")
    model.evaluate(x_test, y_test)
    model.save_model()
    filename = '../dataset/Sad/09b03Ta.wav'
    # filename = './laugh.wav'
    print("\nPredicted: {}\nActual: {}".format(
        get_class_name(
            model.predict_one(
                get_feature_vector_from_mfcc(filename, flatten=to_flatten))),
        get_class_name(2)))
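
get_class_name comes from the project's utility module and is not shown in these snippets. Judging from the index-to-label mapping in Example #6, a plausible sketch (not the actual implementation) is:

# Hypothetical sketch of get_class_name, consistent with Example #6's mapping
# (0: Angry, 1: Happy, 2: Neutral, 3: Sad); the real helper may differ.
CLASS_NAMES = ['Angry', 'Happy', 'Neutral', 'Sad']

def get_class_name(index):
    return CLASS_NAMES[index]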
Example #10
import sys
sys.path.insert(1, '../main')

import pickle
import utility
from common import extract_data
from mlmodel import NN
from utility import get_feature_vector_from_mfcc

filename = 'finalized_model.sav'
loaded_model = pickle.load(open(filename, 'rb'))
x_train, x_test, y_train, y_test, _ = extract_data(flatten=True)
loaded_model.evaluate(x_test, y_test)
filename = "../AudioData/testcaleb.wav"
print(
    loaded_model.predict_one(
        get_feature_vector_from_mfcc(filename, flatten=True)), "actual:Angry")
                        required=True,
                        type=int)
    parser.add_argument('-r',
                        '--LEARNING_RATE',
                        help='learning rate',
                        required=True,
                        type=float)
    parser.add_argument('-a',
                        '--ALPHA',
                        help='regularization coefficient',
                        required=True,
                        type=float)

    args = parser.parse_args()

    commondir = args.scriptdir
    sys.path.append(commondir)
    from common.utils import *
    from common.extract_data import *

    # load data into Pandas dataframe
    data_dir = args.datadir
    if not os.path.exists(os.path.join(data_dir, 'energy.csv')):
        extract_data(data_dir)
    energy = load_data(data_dir)

    out_file = os.path.join(args.outdir, 'output.txt')

    run(energy, args.T, args.LATENT_DIM_1, args.LATENT_DIM_2, args.BATCH_SIZE,
        args.LEARNING_RATE, args.ALPHA, out_file)
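
The opening of this snippet (the ArgumentParser construction and the earlier add_argument calls) was cut off; only the tail of one integer argument plus the LEARNING_RATE and ALPHA flags survive. A minimal self-contained sketch of how the two fully visible flags would parse, assuming a standard argparse.ArgumentParser:

# Sketch: stand-alone parse of the two flags whose definitions are fully
# visible above; the original script defines several more arguments.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('-r', '--LEARNING_RATE', help='learning rate',
                    required=True, type=float)
parser.add_argument('-a', '--ALPHA', help='regularization coefficient',
                    required=True, type=float)
args = parser.parse_args(['-r', '0.005', '-a', '0.1'])  # illustrative values
print(args.LEARNING_RATE, args.ALPHA)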
def perceptron_example():
    """Perceptron example demonstrating online learning, and also evaluation
       separate from training."""
    tf.logging.set_verbosity(FLAGS.verbosity)

    train_raw, test_raw, classes = get_data(FLAGS.data_dir)

    # Set the output dimension according to the number of classes
    FLAGS.output_dim = len(classes)

    print("\nSplitting the training and test data into two pieces...")
    # Seeding necessary for reproducibility.
    np.random.seed(FLAGS.np_seed)

    # Shuffle data to make the distribution of classes roughly stratified after splitting.
    train_raw = shuffle(train_raw)
    test_raw = shuffle(test_raw)

    train1_raw, train2_raw = np.split(train_raw, 2)
    test1_raw, test2_raw = np.split(test_raw, 2)

    print("First split:")
    x_train1_sentences, y_train1, x_test1_sentences, y_test1 = extract_data(
        train1_raw, test1_raw)

    print("\nProcessing the vocabulary...")
    tic()
    x_train1, x_test1, _, _, vocab_processor, n_words = process_vocabulary(
        x_train1_sentences, x_test1_sentences, FLAGS, reuse=False)
    toc()

    # Train the model on the first split.
    tic()
    run_experiment(x_train1, y_train1, x_test1, y_test1,
                   bag_of_words_perceptron_model, 'train_and_evaluate', FLAGS)
    toc()

    # Next we perform incremental training with the 2nd half of the split data.
    print("\nSecond split extends the vocabulary.")
    x_train2_sentences, y_train2, x_test2_sentences, y_test2 = extract_data(
        train2_raw, test2_raw)

    # Extend vocab_processor with the newly added training vocabulary, and save the vocabulary processor for later use.
    tic()
    x_train2, x_test2, _, _, vocab_processor, n_words = process_vocabulary(
        x_train2_sentences,
        x_test2_sentences,
        FLAGS,
        reuse=False,
        vocabulary_processor=vocab_processor,
        extend=True)
    toc()

    # Train the model on the second split.
    tic()
    run_experiment(x_train2, y_train2, x_test2, y_test2,
                   bag_of_words_perceptron_model, 'train_and_evaluate', FLAGS)
    toc()

    # We may be interested in the model performance on the training data
    # (e.g. to evaluate removable bias).
    print("\nEvaluation of the model performance on the training data.:")
    run_experiment(None, None, x_train1, y_train1,
                   bag_of_words_perceptron_model, 'evaluate', FLAGS)
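
tic() and toc() are timing helpers defined elsewhere in the project and not shown here. A minimal sketch, assuming they simply wrap time.time(), might look like:

# Hypothetical sketch of tic()/toc(); the project's real helpers may differ.
import time

_start = None

def tic():
    global _start
    _start = time.time()

def toc():
    print('Elapsed: {:.3f}s'.format(time.time() - _start))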