Example #1
0
    def __init__(self, input_stream, pdf_stream, phone_stream, exp_dir,
                 phone_map_fname, pfeats_lang, tmp_dir, batch_size):
        """Build a multi-output data loader from Kaldi streams.

        Features are streamed lazily; the pdf and phone alignments are
        fully materialized into dicts keyed by utterance id.
        """
        self.pfeats_map = PFeatsMap(phone_map_fname, pfeats_lang)
        self.exp_dir = exp_dir

        feats_iter = kio.read_mat_ark(input_stream)
        pdf_ali = dict(kio.read_ali_ark(pdf_stream))
        phone_ali = dict(kio.read_ali_ark(phone_stream))

        print("Loading data ", tmp_dir)
        gi.MultiOutDataLoader.__init__(self, feats_iter, (pdf_ali, phone_ali),
                                       tmp_dir, batch_size)
        print("Data loaded", tmp_dir)
Example #2
0
    def __init__(self, input_stream, output_stream, exp_dir, tmp_dir,
                 batch_size):
        """Wire a single-output loader from Kaldi feature/alignment streams."""
        self.exp_dir = exp_dir

        # Stream the feature matrices; load all alignments up front.
        feats_iter = kio.read_mat_ark(input_stream)
        targets = dict(kio.read_ali_ark(output_stream))

        gi.DataLoader.__init__(self, feats_iter, targets, tmp_dir,
                               batch_size)
    def __init__(self, input_stream, output_stream, exp_dir, phone_map_fname,
                 pfeats_lang, tmp_dir, batch_size):
        """Single-output loader that also builds a phonological-features map."""
        self.pfeats_map = PFeatsMap(phone_map_fname, pfeats_lang)

        # Stream the feature matrices; load all alignments up front.
        feats_iter = kio.read_mat_ark(input_stream)
        targets = dict(kio.read_ali_ark(output_stream))

        gi.DataLoader.__init__(self, feats_iter, targets, tmp_dir,
                               batch_size)
    def __init__(self, input_stream, output_stream, flags_stream, phone_map_fname,
                 tmp_dir, batch_size):
        """Loader that additionally keeps per-utterance flag vectors.

        The flag vectors are stored on the instance (not handed to the
        base loader); self.flags starts as None — presumably filled in
        later by the iteration machinery (verify against callers).
        """
        self.phone_map = read_phone_map(phone_map_fname)

        feats_iter = kio.read_mat_ark(input_stream)
        targets = dict(kio.read_ali_ark(output_stream))
        self.flags_dict = dict(kio.read_vec_int_ark(flags_stream))

        self.flags = None

        gi.DataLoader.__init__(self, feats_iter, targets, tmp_dir, batch_size)
Example #5
0
    def get_kaldi_data(self):
        """Load Kaldi tags, feature matrices and (optionally) alignments.

        Populates self.tags, self.mats and self.l_dict.  Outside of
        forward-pass mode, utterances without labels are skipped and a
        length mismatch between features and labels raises ValueError.
        With resequencing enabled, utterances are split via
        self.split_sequence instead of being stored whole.
        """
        tags = []
        mats = []
        sizes = []
        l_dict = {}
        skipped = 0
        missing_labels = []

        # .scp files are indices into archives; anything else is a raw ark.
        if self.options.features.endswith(".scp"):
            f_reader = kaldi_io.read_mat_scp
        else:
            f_reader = kaldi_io.read_mat_ark

        if self.options.labels:
            for tag, l in kaldi_io.read_ali_ark(self.options.labels):
                if tag in l_dict:
                    # Duplicate utterance ids in the label ark are a data error.
                    # (Fixed missing space before "is" in the message.)
                    raise KeyError("Tag is: " + tag + " is already present.")
                l_dict[tag] = l

        for tag, mat in f_reader(self.options.features):
            # In training mode only keep utterances that have labels.
            if not self.options.forward_pass and tag not in l_dict:
                skipped += 1
                missing_labels.append(tag)
                continue
            tags.append(tag)
            mats.append(mat)
            sizes.append(len(mat))

            if not self.options.forward_pass:
                if len(mat) != len(l_dict[tag]):
                    raise ValueError("Label lengths do not match data length.")

        if self.options.resequence:
            # Re-sequencing should be used only in training mode, not in a
            # forward pass.  Consume the lists from the back so the original
            # data is released as we go (avoids holding two full copies).
            # Replaces the py2-only `xrange` loop whose index was unused.
            self.tags = []
            self.mats = []
            self.l_dict = {}
            del sizes
            while mats:
                mat = mats.pop()
                tag = tags.pop()
                self.split_sequence(mat, tag, l_dict[tag])
                del l_dict[tag]
        else:
            self.tags = tags
            self.mats = mats
            self.l_dict = l_dict

        if skipped != 0:
            print("Missing labels: " + str(skipped))
            print(missing_labels)
Example #6
0
def compute_priors(feats_stream, exp_dir) -> np.ndarray:
    """Compute prior probabilities of pdf states.

    Args:
        feats_stream: Kaldi stream/file of per-utterance pdf alignment vectors.
        exp_dir: experiment directory, used to look up the output dimension.

    Returns:
        1-D float array of length ``output_feat_dim(exp_dir)`` holding state
        priors; zero counts are floored to 1e-5 so downstream log() is finite.
    """
    # Note: the annotation was np.array (a function); np.ndarray is the type.
    feats_dim = output_feat_dim(exp_dir)
    counts = np.zeros(feats_dim)

    # Count pdf states; bincount replaces the per-frame Python loop.
    for utt, feats in kio.read_ali_ark(feats_stream):
        counts += np.bincount(np.asarray(feats, dtype=int), minlength=feats_dim)

    # Normalize counts into a probability distribution.
    priors = counts / np.sum(counts)

    # Floor zero values
    priors[priors == 0] = 1e-5

    return priors
Example #7
0
    def extract_alignments(self, mode='train'):
        """Convert alignments for *mode* and dump each utterance as a .npy file.

        Exits the process on an unknown mode, matching the original behavior.
        """
        # Guard clause replaces the if/elif/else ladder.
        if mode not in ('train', 'test'):
            print('Unknown mode')
            sys.exit()

        self.convert_alignments(mode=mode)
        root = self.align_train_root if mode == 'train' else self.align_test_root
        all_aligns = sorted(glob.glob(root + '/*_ali.txt'))

        out_dir = os.path.join(self.store_aligns, mode)
        if not os.path.exists(out_dir):
            os.makedirs(out_dir)

        for align_file in all_aligns:
            for key, mat in kaldi_io.read_ali_ark(align_file):
                save_path = os.path.join(out_dir, key + '.npy')
                np.save(save_path, mat)
                print('Saving alignments for {}'.format(save_path))
# NOTE(review): `parser` and args such as exp_dir/flags/phones/feats are
# defined above this excerpt; this chunk only adds the output directory arg.
parser.add_argument('eval_dir')
args = parser.parse_args()

os.makedirs(args.eval_dir, exist_ok=True)
# Paths to the trained model, phone map, and output score files.
model_fname = os.path.join(args.exp_dir, 'final.h5')
phn_map_fname = os.path.join(args.exp_dir, 'phn_sil_to_idx.int')
score_fname = os.path.join(args.eval_dir, 'score.ark')
score_txt_fname = os.path.join(args.eval_dir, 'score.txt')
score_txt = open(score_txt_fname, 'w')

# Load phone map
phn_map = read_phone_map(phn_map_fname)

# Load kaldi files: flag vectors and phone alignments in full,
# feature matrices as a lazy stream.
flags_dict = {k: v for k, v in kio.read_vec_int_ark(args.flags)}
ali_dict = {k: v for k, v in kio.read_ali_ark(args.phones)}
feats_it = kio.read_mat_ark(args.feats)

# Load classifier model
model = MispModel.load(model_fname)

# The .ark score file is written in binary mode.
with open(score_fname, 'wb') as f:
    for utt, feats in feats_it:
        # Workaround to deal with missing alignments
        if utt not in ali_dict:
            continue

        print(utt, end=' ', file=score_txt)
        flags = flags_dict[utt]
        ali = ali_dict[utt]
        # Per-frame scores, same length as the utterance's flag vector.
        scores = np.zeros_like(flags, dtype=float)
        # NOTE(review): loop body continues beyond this excerpt.
def kaldi_alignment_to_numpy_dataset(
        featark=None, featnj=1,
        aliark=None, alinj=1,
        corpus=None, split_ratio=None, split_name=None,
        left_context=5, right_context=5
):
    """Convert split Kaldi feature/alignment archives into shuffled .npy files.

    Reads ``featnj`` feature jobs and ``alinj`` alignment jobs ('JOB'/'NJ'
    placeholders in the ark paths), joins features and labels by utterance
    id, splits the dataset per ``split_ratio`` and writes one feature and
    one label .npy per split under ./<corpus>/.
    """
    # Mutable default arguments are shared across calls; use None sentinels.
    if split_ratio is None:
        split_ratio = [8, 2]
    if split_name is None:
        split_name = ['train', 'valid']

    keys = []
    feats = []
    for i in range(1, featnj + 1):
        curfeatark = featark.replace('JOB', str(i)).replace('NJ', str(featnj))
        for key, feat in kaldi_io.read_mat_ark(curfeatark):
            keys.append(key)
            feats.append(feat)
        cur_dataset_feats = pd.DataFrame(data=feats, index=keys, columns=['feats'])
        keys = []
        feats = []
        if i == 1:
            dataset_feats = cur_dataset_feats
        else:
            # DataFrame.append is deprecated/removed; pd.concat is equivalent.
            dataset_feats = pd.concat([dataset_feats, cur_dataset_feats])
            print(dataset_feats.shape)

    dataset_feats = dataset_feats.sort_index()

    keys = []
    labs = []
    for i in range(1, alinj + 1):
        curaliark = aliark.replace('JOB', str(i))
        for key, lab in kaldi_io.read_ali_ark(curaliark):
            keys.append(key)
            # Column vector so the label frame lines up with the features.
            labs.append(lab.reshape((lab.shape[0], 1)))
        cur_dataset_labs = pd.DataFrame(data=labs, index=keys, columns=['labs'])
        keys = []
        labs = []
        if i == 1:
            dataset_labs = cur_dataset_labs
        else:
            dataset_labs = pd.concat([dataset_labs, cur_dataset_labs])
            print(dataset_labs.shape)

    dataset_labs = dataset_labs.sort_index()

    print(dataset_feats.shape)
    print(dataset_labs.shape)
    # Align features and labels side by side on the utterance index.
    dataset = pd.concat([dataset_feats, dataset_labs], axis=1)

    df_list = df_data_split(dataset=dataset, split_ratio=split_ratio, shuffle=False)

    if os.path.isdir('./%s' % corpus):
        print('Corpus dir exists..')
    else:
        print('Create new corpus dir..')
        os.makedirs('./%s' % corpus)

    for i in range(len(df_list)):
        cur_feat_npy, cur_lab_npy = df_to_npy(df=df_list[i], left_context=left_context, right_context=right_context)
        shu_feat_npy, shu_lab_npy = npy_shuffle(feats_npy=cur_feat_npy, labs_npy=cur_lab_npy)

        print('Saving npys...')
        f_feat = './%s/%s_feat_l%d_r%d' % (corpus, split_name[i], left_context, right_context)
        f_lab = './%s/%s_lab_l%d_r%d' % (corpus, split_name[i], left_context, right_context)
        np.save(file=f_feat, arr=shu_feat_npy)
        np.save(file=f_lab, arr=shu_lab_npy)
# Prepare output files; features arrive on stdin as a binary Kaldi stream.
os.makedirs(args.res_dir, exist_ok=True)
feats_fd = sys.stdin.buffer
flags_fname = os.path.join(args.data_dir, 'text_ext_flags')
ali_force_frame_fname = os.path.join(args.cbps_dir,
                                     'force_ali_test/ali_frames.gz')
ali_force_fname = os.path.join(args.cbps_dir, 'force_ali_test/ali_pdf.gz')
score_fname = os.path.join(args.res_dir, 'score.ark')
score_txt_fname = os.path.join(args.res_dir, 'score.txt')
score_txt = open(score_txt_fname, 'w')
cmp_fd = open('test/cmp_gop.txt', 'w')

# Load kaldi files (all four are lazy generators over (utt_id, data) pairs)
flags_it = kio.read_vec_int_ark(flags_fname)
ali_force_it = kio.read_ali_ark(ali_force_fname)
ali_force_frm_it = kio.read_ali_ark(ali_force_frame_fname)
feats_it = kio.read_mat_ark(feats_fd)

# NOTE(review): zip assumes all four streams are sorted identically by
# utterance and 1:1 aligned — confirm upstream ordering guarantees.
with open(score_fname, 'wb') as f:
    for flags_t, ali_force_t, ali_force_frm_t, feats_t in zip(
            flags_it, ali_force_it, ali_force_frm_it, feats_it):
        # Unpack each tuple
        utt, flags = flags_t
        _, ali_force = ali_force_t
        _, ali_force_frm = ali_force_frm_t
        _, feats = feats_t

        # Get only features for corresponding states in alignments
        probs_force = hlp.np_pick(feats, ali_force)
        # NOTE(review): loop body continues beyond this excerpt.
Example #11
0
import kaldi_io
import numpy as np

# Command-line layout: posterior ark, likelihood ark, pdf-alignment ark,
# phone-alignment ark, highest silence phone id, then three output paths.
# NOTE(review): `sys` is used here but not imported in this excerpt —
# presumably imported earlier in the full file.
post_path = sys.argv[1]
like_path = sys.argv[2]
pdfali_path = sys.argv[3]
phoneali_path = sys.argv[4]
sil_id_not_bigger_than = int(sys.argv[5])
frame_score_dest = sys.argv[6]
phone_score_dest = sys.argv[7]
score_dest = sys.argv[8]

# read in: materialize every archive as {utt_id: data}
posts = {key: mat for key, mat in kaldi_io.read_mat_ark(post_path)}
likes = {key: mat for key, mat in kaldi_io.read_mat_ark(like_path)}
pdfalis = {key: vec for key, vec in kaldi_io.read_ali_ark(pdfali_path)}
phonealis = {key: vec for key, vec in kaldi_io.read_ali_ark(phoneali_path)}

f_f = open(frame_score_dest, 'w')
f_p = open(phone_score_dest, 'w')
f = open(score_dest, 'w')
#f.write('wav_id    gop_posterior    gop_likelihood    gop_likelihood_ratio\n')

# NOTE(review): assumes every key in pdfalis also exists in posts, likes
# and phonealis — a missing key would raise KeyError.
for key in pdfalis.keys():
    post = posts[key]
    like = likes[key]
    pdfali = pdfalis[key]
    phoneali = phonealis[key]

    # gop based on log phone posterior
    # Selects, per frame, the posterior of the aligned pdf state.
    post_best = list(post[np.arange(len(pdfali)), pdfali])
    # NOTE(review): loop body continues beyond this excerpt.
 def read_ali(ali):
     """Read a Kaldi alignment ark into a {utt_id: alignment} dict.

     Removed the redundant `filename` local and its commented-out
     hard-coded path; dict() over a (key, value) iterator is equivalent
     to the original dict comprehension.
     """
     return dict(kaldi_io.read_ali_ark(ali))