Example no. 1
def main():
    block_blob_service = BlockBlobService(account_name=azureStorgeAccountName,
                                          account_key=azureStorageKeyName)
    if not block_blob_service.exists(
            container_name=azureStorageContainer):
        block_blob_service.create_container(
            container_name=azureStorageContainer)

    audio_path_label_pairs = load_path_labels(str(args.data_dir))
    print('loaded: ' + str(len(audio_path_label_pairs)) + ' files')

    classifier = Cifar10AudioClassifier()
    batch_size = 8
    epochs = 50
    classifier.fit(audio_path_label_pairs=audio_path_label_pairs,
                   model_dir_path='model',
                   batch_size=batch_size,
                   epochs=epochs)

    # zip the trained model and upload it to blob storage
    shutil.make_archive(base_name='model', format='zip', base_dir='model')
    block_blob_service.create_blob_from_path(
        azureStorageContainer,
        str(int(time.time() * 1000)) + 'model.zip', 'model.zip')

    # copying the model contents to the output directory
    os.system('cp -r ./model outputs')
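
Note: Example no. 1 uses BlockBlobService, which belongs to the legacy azure-storage-blob 2.x SDK. As a rough sketch only, the same "create the container if missing, then upload the zipped model" step written against the current v12 SDK could look like the following; the connection string argument and container name here are placeholders, not values taken from the example above.

import time
from azure.storage.blob import BlobServiceClient
from azure.core.exceptions import ResourceExistsError

def upload_model_zip(connection_string, container_name, zip_path='model.zip'):
    service = BlobServiceClient.from_connection_string(connection_string)
    container = service.get_container_client(container_name)
    try:
        # create the container; ignore the error if it already exists
        container.create_container()
    except ResourceExistsError:
        pass
    blob_name = str(int(time.time() * 1000)) + 'model.zip'
    with open(zip_path, 'rb') as f:
        container.upload_blob(name=blob_name, data=f)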
Example no. 2
def predict(audio_path, model_dir_path):
    classifier = Cifar10AudioClassifier()
    classifier.load_model(model_dir_path)

    predicted_label_id = classifier.predict_class(audio_path)

    print(predicted_label_id)
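
Example no. 2 prints only the raw label id. Mapping it back to a readable genre name needs the same category list that Example no. 6 uses; a small sketch is shown below. The import path and the alphabetical GTZAN ordering are assumptions and must match whatever ordering was used at training time.

from keras_audio.library.cifar10 import Cifar10AudioClassifier  # assumed import path

gtzan_categories = ['blues', 'classical', 'country', 'disco', 'hiphop',
                    'jazz', 'metal', 'pop', 'reggae', 'rock']  # assumed label order

def predict_label(audio_path, model_dir_path):
    classifier = Cifar10AudioClassifier()
    classifier.load_model(model_dir_path)
    label_id = classifier.predict_class(audio_path)
    return gtzan_categories[label_id]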
Example no. 3
def main():

    classifier = Cifar10AudioClassifier()
    classifier.load_model(model_dir_path='./models')

    classifier.export_tensorflow_model(
        output_fld='./models/tensorflow_models/cifar10')
Example no. 4
def main():
    audio_path_label_pairs = load_audio_path_label_pairs()
    print('loaded: ', len(audio_path_label_pairs))

    classifier = Cifar10AudioClassifier()
    batch_size = 8
    epochs = 100
    history = classifier.fit(audio_path_label_pairs,
                             model_dir_path='./models_music_speech',
                             batch_size=batch_size,
                             epochs=epochs)
Example no. 5
def main():
    audio_path_label_pairs = load_audio_path_label_pairs()
    print('loaded: ', len(audio_path_label_pairs))

    classifier = Cifar10AudioClassifier()
    batch_size = 128
    epochs = 200
    history = classifier.fit(
        audio_path_label_pairs,
        model_dir_path='/newvolume/keras-audio-master/demo/models',
        batch_size=batch_size,
        epochs=epochs)
Example no. 6
def main():
    audio_path_label_pairs = load_audio_path_label_pairs()
    shuffle(audio_path_label_pairs)
    print('loaded: ', len(audio_path_label_pairs))

    classifier = Cifar10AudioClassifier()
    classifier.load_model(model_dir_path='./models_music_speech')

    for i in range(0, 20):
        audio_path, actual_label_id = audio_path_label_pairs[i]
        predicted_label_id = classifier.predict_class(audio_path)
        print(audio_path)
        predicted_label = gtzan_categories[predicted_label_id]
        actual_label = gtzan_categories[actual_label_id]

        print('predicted: ', predicted_label, 'actual: ', actual_label)
Example no. 7
def processFile(filename):
    classifier = Cifar10AudioClassifier()
    classifier.load_model(model_dir_path='./models')
    predicted_label_id = classifier.predict_class(filename)
    predicted_label = labels[predicted_label_id]
    predicted_scores = classifier.predict(filename)
    predicted_score = predicted_scores[predicted_label_id]

    # assemble the prediction result as a JSON-serialisable payload
    data = {}
    data['label'] = predicted_label
    data['score'] = float(predicted_score)
    data['dateTime'] = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    data['modelDate'] = time.ctime(os.path.getmtime('models/cifar10-weights.h5'))
    data['fileProcessed'] = filename

    # remove the cached feature (.npy) file and the processed audio file
    os.remove(filename + "-mg.npy")
    os.remove(filename)
    # clear the Keras session so repeated calls do not accumulate graph state
    K.clear_session()
    return json.dumps(data)
Example no. 8
def main():
    audio_path_label_pairs = load_path_labels(str(args.data_dir))
    print('loaded: ' + str(len(audio_path_label_pairs)) + ' files')

    classifier = Cifar10AudioClassifier()
    batch_size = 8
    epochs = 50000
    classifier.fit(audio_path_label_pairs=audio_path_label_pairs,
                   model_dir_path='model',
                   batch_size=batch_size,
                   epochs=epochs)

    # zip the trained model and copy it back to the mounted blob storage
    shutil.make_archive(base_name='model', format='zip', base_dir='model')
    os.system('cp -r model.zip ' + args.data_dir + '/' +
              str(int(time.time() * 1000)) + 'model.zip')

    # copying the model contents to the output directory
    os.system('cp -r ./model outputs')
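
All of the snippets above assume a common setup that is not shown here: the Cifar10AudioClassifier import and a helper that pairs audio file paths with integer label ids. The sketch below illustrates one plausible version of that setup; the import path, directory layout and file extensions are assumptions rather than part of the original examples.

import os

from keras_audio.library.cifar10 import Cifar10AudioClassifier  # assumed import path

def load_audio_path_label_pairs(data_dir='./gtzan/genres'):
    # assumes one sub-directory per class: <data_dir>/<class name>/<audio files>
    pairs = []
    for label_id, class_name in enumerate(sorted(os.listdir(data_dir))):
        class_dir = os.path.join(data_dir, class_name)
        if not os.path.isdir(class_dir):
            continue
        for file_name in sorted(os.listdir(class_dir)):
            if file_name.endswith(('.au', '.wav')):
                pairs.append((os.path.join(class_dir, file_name), label_id))
    return pairs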