def __init__(self,
             keys,
             dir_name='/home/brihi16142/work2/processed_emovdb_disgust'):
    self.keys = keys
    self.path = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                             dir_name)
    self.fnames, self.text_lengths, self.texts = read_metadata(
        os.path.join(self.path, 'transcript_bea.csv'))
    preprocess(dir_name, self)
    print('Generated mels and mags')
Example #2
def find_matches(samples, sr=RATE):
    """Finds fingerprint matches for the samples."""
    print('Finding matching fingerprints...')

    samples = preprocess(samples, sr)
    fingerprints = generate_fingerprints('Microphone',
                                         samples,
                                         sr=sr,
                                         plot=False)

    print('Looking up matches in database...')
    matches = lookup_fingerprints(fingerprints)
    print('{} Matches'.format(len(matches)))

    if len(matches) == 0:
        return None

    mapper = {}
    for f in fingerprints:
        mapper[f.hash] = f.time

    diffed_matches = []
    for f in matches:
        diffed_matches.append((f.song_id, f.time - mapper[f.hash]))

    return diffed_matches
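find_matches returns raw (song_id, time-offset) pairs; nothing in the example shows how they are reduced to a single answer. A minimal sketch of the usual offset-voting step, assuming a true match yields many pairs that share one offset (best_match is an illustrative name, not part of the original code):

from collections import Counter

def best_match(diffed_matches):
    """Pick the most likely song by voting on (song_id, offset) pairs."""
    if not diffed_matches:
        return None
    # Fingerprints from the correct song line up at one consistent offset,
    # so the largest bucket identifies both the song and its alignment.
    (song_id, offset), votes = Counter(diffed_matches).most_common(1)[0]
    return song_id, offset, votes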
Example #3
def read_audio_file(filename):
    """Read MP3 file into a Song object."""
    if not os.path.isfile(filename):
        print('{} does not exist'.format(filename))
        exit(1)

    # Read metadata from song
    meta = TinyTag.get(filename)

    # Read the mp3 and save it as a temporary WAV file
    ext = os.path.splitext(filename)[1].replace('.', '')
    song = AudioSegment.from_file(filename, ext)
    tmp_path = './tmp_{}'.format(os.path.basename(filename))
    song.export(tmp_path, format='wav')

    # Read and delete the temporary WAV file
    samplerate, samples = wav.read(tmp_path)
    os.remove(tmp_path)

    samples = preprocess(samples, samplerate)

    s = Song(filename, meta, samples, samplerate)
    return s
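read_audio_file writes its temporary WAV under a predictable name in the current directory, so two concurrent conversions of the same file would collide. A sketch of the same decode step using the tempfile module, assuming pydub's AudioSegment and scipy.io.wavfile as in the example (decode_to_samples is an illustrative name):

import os
import tempfile

import scipy.io.wavfile as wav
from pydub import AudioSegment

def decode_to_samples(filename):
    """Decode an audio file to (samplerate, samples) via a unique temp WAV."""
    ext = os.path.splitext(filename)[1].lstrip('.')
    segment = AudioSegment.from_file(filename, ext)
    # NamedTemporaryFile picks a collision-free path; close it before export
    # so it can be reopened on platforms that lock open files.
    with tempfile.NamedTemporaryFile(suffix='.wav', delete=False) as tmp:
        tmp_path = tmp.name
    try:
        segment.export(tmp_path, format='wav')
        samplerate, samples = wav.read(tmp_path)
    finally:
        os.remove(tmp_path)
    return samplerate, samples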
Example #4
        sys.exit(0)
    else:
        dataset_file_path = os.path.join(datasets_path, dataset_file_name)
        if not os.path.isfile(dataset_file_path):
            url = "http://data.keithito.com/data/speech/%s" % dataset_file_name
            download_file(url, dataset_file_path)
        else:
            print("'%s' already exists" % dataset_file_name)

        print("extracting '%s'..." % dataset_file_name)
        os.system('cd %s; tar xvjf %s' % (datasets_path, dataset_file_name))

        # pre process
        print("pre processing...")
        lj_speech = LJSpeech([])
        preprocess(dataset_path, lj_speech)
elif args.dataset == 'mbspeech':
    dataset_name = 'MBSpeech-1.0'
    datasets_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                 'datasets')
    dataset_path = os.path.join(datasets_path, dataset_name)

    if os.path.isdir(dataset_path) and False:
        print("MBSpeech dataset folder already exists")
        sys.exit(0)
    else:
        bible_books = ['01_Genesis', '02_Exodus', '03_Leviticus']
        for bible_book_name in bible_books:
            bible_book_file_name = '%s.zip' % bible_book_name
            bible_book_file_path = os.path.join(datasets_path,
                                                bible_book_file_name)
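These dataset-preparation fragments call download_file, which the final example imports from utils but whose body is never shown. A minimal stand-in using only the standard library (the real helper may add a progress bar or retries):

import os
import urllib.request

def download_file(url, file_path):
    """Stream url to file_path in 64 KiB chunks."""
    os.makedirs(os.path.dirname(file_path) or '.', exist_ok=True)
    with urllib.request.urlopen(url) as response, open(file_path, 'wb') as out:
        while True:
            chunk = response.read(1 << 16)
            if not chunk:
                break
            out.write(chunk)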
Example #5
    """ Gives an answer. """
    box = driver.find_element_by_id("qpAnswerInput")
    box.send_keys(ans)
    box.send_keys(Keys.RETURN)


if __name__ == "__main__":
    driver = login()
    enter_game(driver, input("Room name? "), int(input("Room number? ")),
               input("Room password? "))
    while True:
        try:
            block_recording(driver)
            print("starting recording...")
            data = audio.record(LEN)
            audio.sd.wait()  # block on the recording
            print("processing...")
            vol1, clip = audio.preprocess(data)
            ans = main.find_song(vol1, clip, VERBOSE)
            if audio.np.max(clip) == 128:  # 0 is at 128 because of the scaling
                print("Clip is silent. Are you sure loopback is working?")
            answer(driver, ans)
        except KeyboardInterrupt:
            driver.quit()
            exit()
        except Exception as e:
            print(e)
            ans = input("quit driver?\n")
            if len(ans) > 0 and ans[0] == "y":
                driver.quit()
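The answer helper above uses the legacy find_element_by_id call, which current Selenium 4 releases no longer provide. An equivalent sketch with the By locator API (the function name and qpAnswerInput id are taken from the snippet):

from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys

def answer(driver, ans):
    """Type an answer into the qpAnswerInput box and submit it."""
    box = driver.find_element(By.ID, "qpAnswerInput")
    box.send_keys(ans)
    box.send_keys(Keys.RETURN)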
Example #6
args = parser.parse_args()

if args.dataset == 'ljspeech':
    dataset_file_name = 'LJSpeech-1.1.tar.bz2'
    datasets_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                 'datasets')
    dataset_path = os.path.join(datasets_path, 'LJSpeech-1.1')

    if os.path.isdir(dataset_path) and False:
        print("LJSpeech dataset folder already exists")
        sys.exit(0)
    else:
        dataset_file_path = os.path.join(datasets_path, dataset_file_name)
        if not os.path.isfile(dataset_file_path):
            url = "http://data.keithito.com/data/speech/%s" % dataset_file_name
            download_file(url, dataset_file_path)
        else:
            #print(os.system('pwd'))
            #os.system('ls -al')
            print("'%s' already exists" % dataset_file_name)

        print("extracting '%s'..." % dataset_file_name)
        # each os.system() call runs in its own shell, so a separate 'cd' would
        # not affect the next call; keep the cd and tar in one command
        os.system('cd %s && tar xvjf %s' % (datasets_path, dataset_file_name))

        # pre process
        print("pre processing...")
        lj_speech = LJSpeech([])
        preprocess(dataset_path, lj_speech)
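Extraction here shells out to tar; a sketch of the same step done in-process with the standard tarfile module, which removes the dependency on a tar binary (extract_bz2_tarball is an illustrative name):

import tarfile

def extract_bz2_tarball(archive_path, out_dir):
    """Unpack a .tar.bz2 archive such as LJSpeech-1.1.tar.bz2 into out_dir."""
    with tarfile.open(archive_path, 'r:bz2') as archive:
        archive.extractall(out_dir)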
Example #7
from audio import preprocess

voice = 'LJ'
preprocess(f'datasets/{voice}')
print("done")
Example #8
        sys.exit(0)
    else:
        dataset_file_path = os.path.join(datasets_path, dataset_file_name)
        if not os.path.isfile(dataset_file_path):
            url = "http://data.keithito.com/data/speech/%s" % dataset_file_name
            download_file(url, dataset_file_path)
        else:
            print("'%s' already exists" % dataset_file_name)

        print("extracting '%s'..." % dataset_file_name)
        os.system('cd %s; tar xvjf %s' % (datasets_path, dataset_file_name))

        # pre process
        print("pre processing...")
        lj_speech = LJSpeech([])
        preprocess(dataset_path, lj_speech)
elif args.dataset == 'en_uk':
    dataset_file_name = 'en_UK.tgz'
    datasets_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '')
    dataset_path = os.path.join(datasets_path, 'en_UK')

    if os.path.isdir(dataset_path) and False:
        print("en_UK dataset folder already exists")
        sys.exit(0)
    else:
        dataset_file_path = os.path.join(datasets_path, dataset_file_name)
        if not os.path.isfile(dataset_file_path):
            url = "http://data.m-ailabs.bayern/data/Training/stt_tts/%s" % dataset_file_name
            download_file(url, dataset_file_path)
        else:
            print("'%s' already exists" % dataset_file_name)
Example #9
import os
import sys
import csv
import time
import argparse
import fnmatch
import librosa
import pandas as pd

from hparams import HParams as hp
from zipfile import ZipFile
from audio import preprocess
from utils import download_file
from datasets.np_speech import NPSpeech

parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--dataset", required=True, choices=['NPSpeech'], help='dataset name')
args = parser.parse_args()

if args.dataset == 'NPSpeech':
    dataset_name = 'NPSpeech-1.0'
    datasets_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'datasets')
    dataset_path = os.path.join(datasets_path, dataset_name)

    # pre process
    print("pre processing...")
    np_speech = NPSpeech([])
    preprocess(dataset_path, np_speech)
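Assuming the NPSpeech script above is saved as, say, prepare_data.py (its filename is not shown), the argparse setup means it would be invoked as:

python prepare_data.py --dataset NPSpeech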