Example #1
    def test_ignore_failure(self):
        with TempDirectory() as temp_dir:
            file1, file2 = self._write_audio_files_in_dir(temp_dir)
            with open(temp_dir + '/junk.wav', 'w') as f:
                f.write('junk, junk, junk. Not audio data!')

            with self.assertRaises(ToolkitError):
                tc.load_audio(temp_dir, ignore_failure=False)
            sf = tc.load_audio(temp_dir)

            self._assert_audio_sframe_correct(sf, file1, file2)
Example #2
    def test_simple_case(self, random_order=False):
        with TempDirectory() as temp_dir:
            file1, file2 = self._write_audio_files_in_dir(temp_dir)

            sf = tc.load_audio(temp_dir, recursive=False, random_order=random_order)

            self._assert_audio_sframe_correct(sf, file1, file2)
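The tests in Examples #1 and #2 rely on two helpers that are not shown in these snippets: _write_audio_files_in_dir and _assert_audio_sframe_correct. A minimal sketch of what they might look like, assuming the test class defines self.noise1, self.noise2, self.sample_rate1 and self.sample_rate2 as used in Examples #4 and #5 (a sketch, not the original test code):

    def _write_audio_files_in_dir(self, dir_path):
        # Hypothetical helper: write the two noise buffers as WAV files
        # and return their paths.
        from scipy.io import wavfile
        file1 = dir_path + '/1.wav'
        file2 = dir_path + '/2.wav'
        wavfile.write(file1, self.sample_rate1, self.noise1)
        wavfile.write(file2, self.sample_rate2, self.noise2)
        return file1, file2

    def _assert_audio_sframe_correct(self, sf, file1, file2):
        # Hypothetical helper: check that both files were loaded with the
        # expected sample rates and raw data.
        self.assertEqual(len(sf), 2)
        audio1 = sf.filter_by([file1], 'path')['audio'][0]
        audio2 = sf.filter_by([file2], 'path')['audio'][0]
        self.assertEqual(audio1['sample_rate'], self.sample_rate1)
        self.assertEqual(audio2['sample_rate'], self.sample_rate2)
        self.assertTrue(all(audio1['data'] == self.noise1))
        self.assertTrue(all(audio2['data'] == self.noise2))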
Example #3
    def test_no_path(self):
        with TempDirectory() as temp_dir:
            file1, file2 = self._write_audio_files_in_dir(temp_dir)

            sf = tc.load_audio(temp_dir, with_path=False)

            self.assertEqual(len(sf), 2)
            self.assertEqual(sorted(sf.column_names()), ['audio'])
Example #4
    def test_recursive_dir(self):
        with TempDirectory() as temp_dir:
            file1 = temp_dir + '/1.wav'
            mkdir(temp_dir + '/foo')
            file2 = temp_dir + '/foo/2.wav'
            wavfile.write(file1, self.sample_rate1, self.noise1)
            wavfile.write(file2, self.sample_rate2, self.noise2)

            sf = tc.load_audio(temp_dir)

            self._assert_audio_sframe_correct(sf, file1, file2)
Example #5
    def test_single_file(self):
        with TempDirectory() as temp_dir:
            file1, _ = self._write_audio_files_in_dir(temp_dir)

            sf = tc.load_audio(file1)

            self.assertEqual(len(sf), 1)
            self.assertEqual(sorted(sf.column_names()), ['audio', 'path'])

            # Check the audio file
            audio1 = sf.filter_by([file1], 'path')['audio'][0]
            self.assertEqual(audio1['sample_rate'], self.sample_rate1)
            self.assertTrue(all(audio1['data'] == self.noise1))
Example #6
    def test_recursive_dir(self):
        from scipy.io import wavfile

        with TempDirectory() as temp_dir:
            file1 = temp_dir + "/1.wav"
            mkdir(temp_dir + "/foo")
            file2 = temp_dir + "/foo/2.wav"
            wavfile.write(file1, self.sample_rate1, self.noise1)
            wavfile.write(file2, self.sample_rate2, self.noise2)

            sf = tc.load_audio(temp_dir)

            self._assert_audio_sframe_correct(sf, file1, file2)
Example #7
    async def _load_files(self):
        # load the audio data
        audio_data = tc.load_audio('../data/audio')

        audio_file_names = []
        audio_labels = []

        # load the audio file names and labels from MongoDB
        for a in self.db.labeledinstances.find({}):
            audio_file_names.append(a['filename'])
            audio_labels.append(a['label'])

        return audio_data, audio_file_names, audio_labels
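The coroutine above returns the audio SFrame and the MongoDB file names and labels as separate values. One way to attach the labels to the loaded audio, assuming the 'filename' values stored in MongoDB match the basenames of the files under ../data/audio (a sketch, not part of the original handler):

from os.path import basename

# Build a label SFrame from the MongoDB results and join it to the audio
# SFrame on the file basename.
labels = tc.SFrame({'filename': audio_file_names, 'label': audio_labels})
audio_data['filename'] = audio_data['path'].apply(basename)
labeled_audio = audio_data.join(labels, on='filename')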
Example #8
def train():
    # Load the audio data and meta data.
    data = tc.load_audio('../AudioData')

    # Calculate the deep features just once.
    data['deep_features'] = tc.sound_classifier.get_deep_features(
        data['audio'])

    # Join the audio data and the meta data.
    emotions = 'neutral calm happy sad angry fearful disgust surprised'.split()
    data['label'] = data['path'].apply(
        lambda p: emotions[int(basename(p).split('-')[2]) - 1])

    # Make a train-test split: 80% train, 20% test.
    train_set, test_set = data.random_split(0.8)

    # Create the model.
    batch_size = 128
    max_iterations = 100
    model = tc.sound_classifier.create(train_set,
                                       target='label',
                                       feature='deep_features',
                                       custom_layer_sizes=[512, 256, 128],
                                       batch_size=batch_size,
                                       max_iterations=max_iterations)

    # Evaluate the model and print the results
    metrics = model.evaluate(test_set)
    print(metrics)

    format_string = "%Y-%m-%d %H:%M:%S"
    time_stamp = int(time.time())
    time_array = time.localtime(time_stamp)
    str_date = time.strftime(format_string, time_array)
    fo = open(
        f'../Output/logs/batch_size_{batch_size}-max_iterations_{max_iterations}-{str_date}.txt',
        'x')
    fo.write(str(metrics))
    fo.close()

    # Save the model for later use in Turi Create
    model.save(MODEL_PATH)

    # Export for use in Core ML
    model.export_coreml(f'../Output/mlmodels/AudioEmotion_{str_date}.mlmodel')
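The dictionary returned by model.evaluate can also be inspected field by field before it is written to the log file; for example, assuming it contains 'accuracy' and 'confusion_matrix' entries, as Turi Create classifiers typically report:

# Print the headline accuracy and the per-class confusion matrix.
print('Test accuracy:', metrics['accuracy'])
print(metrics['confusion_matrix'])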
Example #9
    def post(self):
        '''Predict the class of a submitted feature vector.'''
        # get data from POST body
        data = json.loads(self.request.body.decode("utf-8"))

        ml_model_type = data['ml_model_type']
        vals = data['feature']
        fvals = [float(val) for val in vals]

        # save audio sample to disk as a wav file
        wf.write('../data/temp.wav', 44100, np.array(fvals))

        # load the model from the disk
        loaded_model = tc.load_model(
            f'../data/models/SoundClassification-{ml_model_type}.model')

        # load the audio data
        audio_data = tc.load_audio('../data/temp.wav')

        # predict the audio label
        predictions = loaded_model.predict(audio_data)
        self.write_json({"prediction": predictions[0]})
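write_json is not a standard handler method; it is presumably defined elsewhere on the handler class (the code reads like a Tornado RequestHandler). A minimal sketch of such a helper, assuming Tornado, not the original implementation:

    def write_json(self, obj):
        # Serialize the response dict as JSON and set the content type.
        self.set_header("Content-Type", "application/json")
        self.write(json.dumps(obj))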
Example #10
def predict(audio):
    model = tc.load_model(MODEL_PATH)
    data = tc.load_audio(audio)
    predictions = model.predict(data, 'probability_vector')
    return predictions
Example #11
def predict(audio):
    model = tc.load_model(MODEL_PATH)
    data = tc.load_audio(audio)
    data['deep_features'] = tc.sound_classifier.get_deep_features(data['audio'])
    predictions = model.predict(data, 'probability_vector')
    return predictions
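If class labels are wanted instead of probability vectors, the same pipeline works with a different output type; a sketch reusing the same MODEL_PATH:

def predict_label(audio):
    # Same steps as above, but ask the model for the class label directly.
    model = tc.load_model(MODEL_PATH)
    data = tc.load_audio(audio)
    data['deep_features'] = tc.sound_classifier.get_deep_features(data['audio'])
    return model.predict(data, 'class')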
Example #12
    def test_audio_classifier(self):
        with pytest.raises(
                ImportError,
                match=r".*pip install --force-reinstall turicreate==.*"):
            _tc.load_audio("./dummy/audio")
Example #13
import turicreate as tc
from os.path import basename

# Set the number of GPUs to use (in this case all):
#   -1 -> use every available GPU
#    1 -> use exactly one GPU
#    0 -> use the CPU only
tc.config.set_num_gpus(-1)

# Load the audio data and meta data.
data = tc.load_audio('/content/ESC-50/ESC-50-master/audio/')
meta_data = tc.SFrame.read_csv('/content/ESC-50/ESC-50-master/meta/esc50.csv')

# Join the audio data and the meta data.
data['filename'] = data['path'].apply(lambda p: basename(p))
data = data.join(meta_data)

# Drop all records which are not part of the ESC-10.
data = data.filter_by('True', 'esc10')

# Make a train-test split, just use the first fold as our test set.
test_set = data.filter_by(1, 'fold')
train_set = data.filter_by(1, 'fold', exclude=True)

# Create the model.
model = tc.sound_classifier.create(train_set,
                                   target='category',
                                   feature='audio',
                                   max_iterations=100,
                                   custom_layer_sizes=[200, 200])
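The snippet ends after training; a natural continuation, mirroring Example #14, would be to evaluate on the held-out fold:

# Evaluate the model on the held-out fold and print the results.
metrics = model.evaluate(test_set)
print(metrics)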
Example #14
import turicreate as tc
from os.path import basename

# Load the audio data and meta data.
data = tc.load_audio('audio-normalized/')
meta_data = tc.SFrame.read_csv('metadata.csv')

# Join the audio data and the meta data.
data['track-id'] = data['path'].apply(lambda p: basename(p))
data = data.join(meta_data)

# Keep only the records whose 'data' column is 'TRUE'.
data = data.filter_by('TRUE', 'data')

# Make a train-test split: use fold 5 as the test set.
test_set = data.filter_by(5, 'fold')
train_set = data.filter_by(5, 'fold', exclude=True)

# Create the model.
model = tc.sound_classifier.create(train_set,
                                   target='floor',
                                   feature='audio',
                                   max_iterations=400,
                                   batch_size=64)

# Generate an SArray of predictions from the test set.
predictions = model.predict(test_set)

# Evaluate the model and print the results
metrics = model.evaluate(test_set)
print(metrics)
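As in Example #8, the trained model can then be saved for reuse in Turi Create and exported to Core ML for use on-device; the file names below are placeholders:

# Save the model and export it for use in Core ML.
model.save('FloorClassifier.model')
model.export_coreml('FloorClassifier.mlmodel')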