Example #1
def read_phones(phone_file_path):
    """
    Read .PHN file and return a compressed sequence of phones
    :param phone_file_path: path of .PHN file
    :param replacement: phones which are to be collapsed
    :return: a list of (phone, start_frame, end_frame)
    """
    labels = []

    with open(phone_file_path, 'r') as f:
        a = f.readlines()

    for phone in a:
        s_e_i = phone[:-1].split(' ')  # start, end, phoneme_name e.g. 0 5432 'aa'
        _, _, ph = int(s_e_i[0]), int(s_e_i[1]), s_e_i[2]
        # Collapse
        for father, son in utils.replacement_dict().items():
            if ph in son:
                ph = father
                break
        # Append to list
        labels.append(ph)

    return labels
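
A minimal usage sketch (the path below is hypothetical; it assumes utils.replacement_dict() returns a mapping of parent phone -> list of phones collapsed into it):

import utils

# hypothetical .PHN file path; replace with a real TIMIT transcription file
phones = read_phones('TIMIT/TRAIN/DR1/FCJF0/SA1.PHN')
print(len(phones), phones[:5])  # number of phones and the first few collapsed labels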
Example #2
    def __init__(self, type_, config_file):

        self.config = config_file
        self.mode = type_
        self.db_path = config_file['dir']['dataset']

        # collapse each phone in the value list into its key phone, e.g. 'ao' is collapsed into 'aa'
        self.replacement = utils.replacement_dict()

        feature_dim = self.config['n_fbank'] + self.config['n_mfcc']
        self.pkl_name = self.db_path + self.mode + '_rnn_ctc_' + str(
            feature_dim) + '.pkl'

        self.win_len, self.win_step = config_file['window_size'], config_file[
            'window_step']
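
For reference, an illustrative config dict covering only the keys this constructor reads (all values below are made up, not taken from the original project):

example_config = {
    'dir': {'dataset': 'data/timit/'},  # hypothetical dataset directory
    'n_fbank': 40,          # number of filterbank features (illustrative)
    'n_mfcc': 13,           # number of MFCC features (illustrative)
    'window_size': 0.025,   # analysis window length (illustrative)
    'window_step': 0.010,   # analysis window step (illustrative)
}
# presumably passed as timit_loader('train', example_config); see Example #5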
Example #3
def read_grtruth(filepath):
    # phones to be collapsed
    replacement = utils.replacement_dict()

    gr_phones = []
    with open(filepath, 'r') as f:
        a = f.readlines()
    for phoneme in a:
        s_e_i = phoneme[:-1].split(' ')  # start, end, phoneme_name e.g. 0 5432 'aa'
        start, end, ph = int(s_e_i[0]), int(s_e_i[1]), s_e_i[2]

        # collapse into father phone
        for father, list_of_sons in replacement.items():
            if ph in list_of_sons:
                ph = father
                break
        gr_phones.append(ph)

    return gr_phones
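
Examples #1 and #3 repeat the same collapse loop; a small helper (a sketch, not present in the original code) could factor it out:

def collapse_phone(ph, replacement):
    # Return the parent phone if ph appears in any parent's collapse list,
    # otherwise return ph unchanged
    for father, sons in replacement.items():
        if ph in sons:
            return father
    return ph

# e.g. inside read_grtruth: gr_phones.append(collapse_phone(ph, replacement))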
Example #4
import json
from read_yaml import read_yaml
import utils
import torch
from extract_q_values import find_batch_q
import scipy.io.wavfile as wav
import pickle
import os
import time
import numpy as np
from dl_model import dl_model
from hypo_search import generate_lattice, traverse_best_lattice, find_q_values

replacement = utils.replacement_dict()


# Ignore DS_Store files found on Mac
def listdir(pth):
    return [x for x in os.listdir(pth) if x != '.DS_Store']


def word_distribution(base_pth):

    words = {}

    for dialect in sorted(listdir(base_pth)):

        for speaker_id in sorted(listdir(os.path.join(base_pth, dialect))):

            data = sorted(
Example #5
    def __init__(self, mode):

        # Read config file which contains parameters
        self.config = read_yaml()
        self.mode = mode

        if self.config['rnn'] == 'liGRU':
            from ligru import liGRU as Model
        elif self.config['rnn'] == 'GRU' or self.config['rnn'] == 'LSTM':
            from rnn import RNN as Model
        elif self.config['rnn'] == 'TCN':
            from tcnn import TCN as Model
        elif self.config['rnn'] == 'BTCN':
            from tcnn import bidirectional_TCN as Model
        elif 'custom' in self.config['rnn']:
            from rnn import customRNN as Model
        else:
            print("Model import failed")
            exit(1)  # exit with a non-zero status since no model could be selected

        # Architecture name decides prefix for storing models and plots
        feature_dim = self.config['n_fbank'] + self.config['n_mfcc']
        self.arch_name = '_'.join([
            self.config['rnn'],
            str(self.config['num_layers']),
            str(self.config['hidden_dim']),
            str(feature_dim)
        ])

        print("Architecture:", self.arch_name)
        # Change paths for storing models
        self.config['dir']['models'] = self.config['dir']['models'].split(
            '/')[0] + '_' + self.arch_name + '/'
        self.config['dir']['plots'] = self.config['dir']['plots'].split(
            '/')[0] + '_' + self.arch_name + '/'

        # Make folders if DNE
        if not os.path.exists(self.config['dir']['models']):
            os.mkdir(self.config['dir']['models'])
        if not os.path.exists(self.config['dir']['plots']):
            os.mkdir(self.config['dir']['plots'])
        if not os.path.exists(self.config['dir']['pickle']):
            os.mkdir(self.config['dir']['pickle'])

        self.cuda = (self.config['cuda'] and torch.cuda.is_available())

        # load/initialise metrics to be stored and load model
        if mode == 'train' or mode == 'test':

            self.plots_dir = self.config['dir']['plots']
            # store hyperparameters
            self.total_epochs = self.config['train']['epochs']
            self.test_every = self.config['train']['test_every_epoch']
            self.test_per = self.config['train']['test_per_epoch']
            self.print_per = self.config['train']['print_per_epoch']
            self.save_every = self.config['train']['save_every']
            self.plot_every = self.config['train']['plot_every']

            # dataloaders which return batches of data
            self.train_loader = timit_loader('train', self.config)
            self.test_loader = timit_loader('test', self.config)

            # declare model
            self.model = Model(self.config, mode)

            self.start_epoch = 1
            self.edit_dist = []
            self.train_losses, self.test_losses = [], []

        else:

            self.model = Model(self.config, mode)

        if self.cuda:
            self.model.cuda()

        # resume training from some stored model
        if self.mode == 'train' and self.config['train']['resume']:
            self.start_epoch, self.train_losses, self.test_losses, self.edit_dist = self.model.load_model(
                mode, self.model.rnn_name, self.model.num_layers,
                self.model.hidden_dim)
            self.start_epoch += 1

        # load best model for testing/feature extraction
        elif self.mode == 'test' or mode == 'test_one':
            self.model.load_model(mode, self.config['rnn'],
                                  self.model.num_layers, self.model.hidden_dim)

        # Replacement phones
        self.replacement = utils.replacement_dict()
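
A usage sketch for this constructor (the training entry point is assumed and is not part of these excerpts):

from dl_model import dl_model

trainer = dl_model('train')  # builds the model, dataloaders and output folders
# trainer.train()            # hypothetical method name; the training loop is not shown above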