Example #1
def export2kaldi(databyid):
    kaldidata = databyid
    filter_field = ('Click1', 'Hush', 'Click2', 'Click3')  #, 'LongPress')
    #exclude_field = ('LongPress')
    test_size = 0.2
    trainset = []
    testset = []
    filtered_kaldidata = {}
    for k, v in kaldidata.items():
        label = k.split('_')[0]
        if label in filter_field:
            #if label not in exclude_field:
            if label not in filtered_kaldidata:
                filtered_kaldidata[label] = []
            filtered_kaldidata[label].append((k, v))
    for label in filtered_kaldidata:
        length = len(filtered_kaldidata[label])
        random.shuffle(filtered_kaldidata[label])
        testset.extend(filtered_kaldidata[label][:int(length * test_size)])
        trainset.extend(filtered_kaldidata[label][int(length * test_size):])
    with kaldi_io.open_or_fd('feats_train.ark', 'wb') as f:
        for k, m in trainset:
            kaldi_io.write_mat(f, m, k)
    with kaldi_io.open_or_fd('feats_test.ark', 'wb') as f:
        for k, m in testset:
            kaldi_io.write_mat(f, m, k)
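A minimal usage sketch for the function above; the keys and feature shapes are hypothetical stand-ins that follow the '<label>_<id>' convention the splitter expects (the function itself also needs `random` and `kaldi_io` imported):

import random

import numpy as np
import kaldi_io

databyid = {
    'Click1_0001': np.random.rand(120, 13).astype(np.float32),  # stand-in features
    'Hush_0002': np.random.rand(95, 13).astype(np.float32),
}
export2kaldi(databyid)  # writes feats_train.ark and feats_test.ark to the cwd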
Example #2
def make_feature(wav_path_list,
                 noise_wav_list,
                 feat_dir,
                 thread_num,
                 argument=False,
                 repeat_num=1):
    mag_ark_scp_output = 'ark:| copy-feats --compress=true ark:- ark,scp:{0}/feats{1}.ark,{0}/feats{1}.scp'.format(
        feat_dir, thread_num)
    ang_ark_scp_output = 'ark:| copy-feats --compress=true ark:- ark,scp:{0}/angles{1}.ark,{0}/angles{1}.scp'.format(
        feat_dir, thread_num)
    if argument:
        fwrite = open(os.path.join(feat_dir, 'db' + str(thread_num)), 'a')
    f_mag = kaldi_io.open_or_fd(mag_ark_scp_output, 'wb')
    f_ang = kaldi_io.open_or_fd(ang_ark_scp_output, 'wb')
    print("进入num循环")
    for num in range(repeat_num):
        for tmp in wav_path_list:
            uttid, wav_path = tmp
            clean = load_audio(wav_path)
            y = None
            print("argument = ", argument)
            while y is None:
                if argument:
                    print("argument=True")
                    noise_path = choice(noise_wav_list)
                    n = load_audio(noise_path[0])
                    db = np.random.uniform(low=0, high=20)
                    y = MakeMixture(clean, n, db)
                    uttid_new = uttid + '__mix{}'.format(num)
                    print(uttid_new + ' ' + str(db) + '\n')
                    fwrite.write(uttid_new + ' ' + str(db) + '\n')
                else:
                    y = clean
                    uttid_new = uttid
            # STFT
            print("y = ", y)
            if y is not None:
                D = librosa.stft(y,
                                 n_fft=512,
                                 hop_length=256,
                                 win_length=512,
                                 window=scipy.signal.hamming)
                spect = np.abs(D)
                angle = np.angle(D)
                print("创建STFT")
                ##feat = np.concatenate((spect, angle), axis=1)
                ##feat = feat.transpose((1, 0))
                kaldi_io.write_mat(f_mag,
                                   spect.transpose((1, 0)),
                                   key=uttid_new)
                kaldi_io.write_mat(f_ang,
                                   angle.transpose((1, 0)),
                                   key=uttid_new)
            else:
                print(noise_path, tmp, 'error')

    if argument:
        fwrite.close()
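Since the magnitude and phase are written to separate arks (transposed to frames x bins), the waveform can later be reassembled; a sketch assuming the thread-0 output paths from above, and noting that --compress=true is lossy, so the reconstruction is approximate:

import numpy as np
import librosa
import kaldi_io

mags = dict(kaldi_io.read_mat_ark('feats0.ark'))    # assumed {feat_dir}/feats0.ark
angs = dict(kaldi_io.read_mat_ark('angles0.ark'))   # assumed {feat_dir}/angles0.ark
for uttid, mag in mags.items():
    D = (mag * np.exp(1j * angs[uttid])).T          # back to (bins, frames)
    y = librosa.istft(D, hop_length=256, win_length=512, window='hamming')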
Example #3
    def testFloatVectorReadWrite(self):
        """
        Test read/write for float vectors.
        """
        # read,
        flt_vec = {
            k: v
            for k, v in kaldi_io.read_vec_flt_scp('tests/data/conf.scp')
        }  # scp,

        flt_vec2 = {
            k: v
            for k, v in kaldi_io.read_vec_flt_ark('tests/data/conf.ark')
        }  # binary-ark,
        flt_vec3 = {
            k: v
            for k, v in kaldi_io.read_vec_flt_ark('tests/data/conf_ascii.ark')
        }  # ascii-ark,
        # store,
        with kaldi_io.open_or_fd('tests/data_re-saved/conf.ark', 'wb') as f:
            for k, v in flt_vec.items():
                kaldi_io.write_vec_flt(f, v, k)
        # read and compare,
        for k, v in kaldi_io.read_vec_flt_ark('tests/data_re-saved/conf.ark'):
            self.assertTrue(np.array_equal(v, flt_vec[k]),
                            msg="flt. vector same after re-saving")
Example #4
def scp2dict(ipath2scp):
    fd = kaldi_io.open_or_fd(ipath2scp)  # ipath2scp can be a pipeline
    id2path = {}
    for line in fd:
        (id, path) = line.decode("utf-8").rstrip().split(' ', 1)
        id2path[id] = path
    return id2path
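Because open_or_fd treats a string ending in '|' as a read pipe, the same helper also works on command output; the paths below are hypothetical:

id2path = scp2dict('data/train/wav.scp')
first_ten = scp2dict('head -n 10 data/train/wav.scp |')  # pipeline input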
Example #5
 def testMatrixReadWrite(self):
     """
     Test read/write for float matrices.
     """
     # read,
     flt_mat = {
         k: m
         for k, m in kaldi_io.read_mat_scp('tests/data/feats_ascii.scp')
     }  # ascii-scp,
     flt_mat2 = {
         k: m
         for k, m in kaldi_io.read_mat_ark('tests/data/feats_ascii.ark')
     }  # ascii-ark,
     flt_mat3 = {
         k: m
         for k, m in kaldi_io.read_mat_ark('tests/data/feats.ark')
     }  # binary-ark,
     # store,
     with kaldi_io.open_or_fd('tests/data_re-saved/mat.ark', 'wb') as f:
         for k, m in flt_mat3.items():
             kaldi_io.write_mat(f, m, k)
     # read and compare,
     for k, m in kaldi_io.read_mat_ark('tests/data_re-saved/mat.ark'):
         self.assertTrue(np.array_equal(m, flt_mat3[k]),
                         msg="flt. matrix same after re-saving")
Example #6
def read_plda(file_or_fd):
    """ Loads PLDA from a file in kaldi format (binary or text).
    Input:
        file_or_fd - file name or file handle with kaldi PLDA model.
    Output:
        Tuple (mu, tr, psi) defining a PLDA model using the kaldi parametrization:
        mu  - mean vector
        tr  - transformation that whitens the within-class and diagonalizes the across-class covariance
        psi - diagonal of the across-class covariance in the transformed space
    """
    fd = open_or_fd(file_or_fd)
    try:
        binary = fd.read(2)
        if binary == b'\x00B':
            assert (fd.read(7) == b'<Plda> ')
            plda_mean = _read_vec_binary(fd)
            plda_trans = _read_mat_binary(fd)
            plda_psi = _read_vec_binary(fd)
        else:
            assert (binary + fd.read(5) == b'<Plda> ')
            plda_mean = np.array(fd.readline().strip(' \n[]').split(),
                                 dtype=float)
            assert (fd.read(2) == b' [')
            plda_trans = _read_mat_ascii(fd)
            plda_psi = np.array(fd.readline().strip(' \n[]').split(),
                                dtype=float)
        assert (fd.read(8) == b'</Plda> ')
    finally:
        if fd is not file_or_fd:
            fd.close()
    return plda_mean, plda_trans, plda_psi
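A sketch of how the returned triple is typically used: tr maps a centered vector into a space where the within-class covariance is the identity and the across-class covariance is diag(psi). The model path and embedding below are hypothetical stand-ins:

import numpy as np

mu, tr, psi = read_plda('exp/plda')  # hypothetical kaldi PLDA model path
x = np.zeros(mu.shape[0])            # stand-in embedding
u = tr.dot(x - mu)                   # whitened, diagonalized representation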
Example #7
def main():
    print("start time:",
          time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())))
    model = load_model()
    ext = ExtractEmbedding(model, "layer4")
    dir = "/home/work_nfs4_ssd/hzhao/feature/voxceleb1/test/feats.scp"
    dataset = getTestData(dir)
    batch_size = 256
    test_loader = DataLoader(dataset=dataset,
                             batch_size=batch_size,
                             shuffle=False)
    for i, trainset in enumerate(test_loader):
        (input_x, target_var) = trainset
        input_x = input_x.cuda()
        #print(input_x.size(),type(input_x))
        output = ext(input_x)
        output = output.squeeze()
        output = output.cpu()
        #tensor to numpy
        output = output.detach().numpy()
        #print(":::",len(target_var))
        target_var = np.squeeze(target_var)
        #tmp="/home/work_nfs/lizhang/node6/kaldi02/kaldi02/egs/voxceleb/v2/tmp"
        filename = "./test/99_test/enroll/xvector." + str(i) + ".ark"

        f = kaldi_io.open_or_fd(filename, "wb")

        for i, uttid in enumerate(target_var):

            kaldi_io.write_vec_flt(f, output[i], key=uttid)

    print("end time:",
          time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())))
Example #8
def fetch_audio_segment(wavid, ipath2wav_scp, seg=(0.0, math.inf), fs=16000):
    """
    given wavid, return an audio segment from ipath2wav_scp

    args:  wavid       -- string, id of an audio file
           ipath2wav_scp -- the path to wav.scp 
                            (wav.scp has the same format as kaldi's:
                              each row of the file is <wavid> <wavpath>)
           seg          -- a tuple of float, (start_time, end_time)
           fs           -- sampling frequency

    return: y -- audio samples as a numpy array
            fs -- sampling frequency
    """
    fd = kaldi_io.open_or_fd(ipath2wav_scp)
    for line in fd:
        (wid, path) = line.decode("utf-8").rstrip().split(' ', 1)
        if wavid == wid:
            y, fs = kaldi_read_wav(path)
            start_t, end_t = seg
            end_t = min(end_t,
                        y.shape[0] / fs)  # the second term is float by default
            assert start_t < end_t and start_t >= 0.0, "InputArg: seg {0} invalid".format(
                str(seg))
            # return the segment and fs
            return y[int(start_t * fs):int(end_t * fs)], fs
    #  wavid not found
    raise Exception("wavid: {0} not found in file {1}".format(
        wavid, ipath2wav_scp))
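Usage sketch with a hypothetical id and scp path, grabbing seconds 1.5 to 3.0 of one recording:

y, fs = fetch_audio_segment('utt0001', 'data/train/wav.scp', seg=(1.5, 3.0))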
Example #9
def forward_dct(args,
                cpc_model,
                device,
                data_loader,
                output_ark,
                output_scp,
                dct_dim=24):
    ''' forward with dct '''

    logger.info("Starting Forward Passing")
    cpc_model.eval()  # not training the cpc model

    ark_scp_output = 'ark:| copy-feats --compress=true ark:- ark,scp:' + output_ark + ',' + output_scp
    with torch.no_grad():
        with ko.open_or_fd(ark_scp_output, 'wb') as f:
            for [utt_id, data] in data_loader:
                data = data.float().unsqueeze(1).to(
                    device)  # add channel dimension
                data = data.contiguous()
                hidden = cpc_model.init_hidden(len(data))
                output, hidden = cpc_model.predict(data, hidden)
                mat = output.squeeze(
                    0).cpu().numpy()  # kaldi io does not accept torch tensor
                dct_mat = fft.dct(mat, type=2, n=dct_dim)  # apply dct
                ko.write_mat(f, dct_mat, key=utt_id[0])
Example #10
def fetch_llkprob_segment(wavid,
                          ipath2prob_scp,
                          seg=(0.0, math.inf),
                          win_len=0.025,
                          hop_len=0.010):
    """
    given wavid, return an loglikehood probability segment from ipath2prob_scp

    args: wavid       -- string, id of a audio file
          ipath2prob_scp -- the path to llk_prob.scp
                            each wavid corresponds to a float vector of llk_prob
                            llk_prob: the prob of a specific GMM generating a frame
          seg            -- a tuple of (start_time, end_time)
          win_len -- window length in second
          hop_len -- window shift in second

    return: vec        -- llk_prob curve with numpy format
  """
    fd = kaldi_io.open_or_fd(ipath2prob_scp)
    for line in fd:
        (wid, path) = line.decode("utf-8").rstrip().split(' ', 1)
        if wavid == wid:
            vec = kaldi_io.read_vec_flt(path)  # np.array
            start_t, end_t = seg
            end_t = min(end_t, vec.shape[0] *
                        hop_len)  # the second term is float by default
            assert start_t < end_t and start_t >= 0.0, "InputArg: seg {0} invalid".format(
                str(seg))
            start_f = int(start_t / hop_len)
            end_f = int(end_t / hop_len)
            return vec[start_f:end_f]
    #  wavid not found
    raise Exception("wavid: {0} not found in file {1}".format(
        wavid, ipath2prob_scp))
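Usage sketch (id and path hypothetical); with the default 10 ms hop, seg=(2.0, 4.0) selects frames int(2.0 / 0.010) = 200 through int(4.0 / 0.010) = 400:

probs = fetch_llkprob_segment('utt0001', 'exp/vad/llk_prob.scp', seg=(2.0, 4.0))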
Example #11
def dvec_compute(generator,
                 ds_eval,
                 device,
                 num_jobs=20,
                 outfolder='./exp/example_dvecs'):
    # naively compute the embeddings for each window
    # ds_len = len(ds_feats)
    all_utts = ds_eval.all_utts
    ds_len = len(all_utts)
    indices = np.arange(ds_len)
    job_split = np.array_split(indices, num_jobs)
    generator.eval().to(device)
    for job_num, job in enumerate(tqdm(job_split)):
        print('Starting job {}'.format(job_num))
        ark_scp_output = 'ark:| copy-vector ark:- ark,scp:{0}/xvector.{1}.ark,{0}/xvector.{1}.scp'.format(
            outfolder, job_num + 1)
        job_utts = all_utts[job]
        job_feats = ds_eval.get_batches(job_utts)
        job_feats = mtd(job_feats, device)
        with torch.no_grad():
            job_embeds = torch.cat([
                generator(x.unsqueeze(0)) for x in tqdm(job_feats)
            ]).cpu().numpy()
        with kaldi_io.open_or_fd(ark_scp_output, 'wb') as f:
            for xvec, key in zip(job_embeds, job_utts):
                kaldi_io.write_vec_flt(f, xvec, key=key)
Example #12
def main():
    logging.basicConfig(format='[%(filename)s:%(lineno)d] %(message)s',
                        level=logging.WARN)
    parser = argparse.ArgumentParser(
        description="load .rec rawfile from first argument SCPPATH, \
            and convert all rec to scpfile-contained-utterence-id-indexed ark file to ARKPATH"
    )
    parser.add_argument("SCPPATH", help="scp file path")
    parser.add_argument("ARKPATH", help="ark file path")
    parser.add_argument("OUTPUTTYPE", help="raw | diff | baseline")
    args = parser.parse_args()
    logging.debug(args)
    SCP_FILEPATH = args.SCPPATH
    ARK_FILEPATH = args.ARKPATH
    OUTPUT_TYPE = args.OUTPUTTYPE
    if not ARK_FILEPATH.endswith('.ark') or not SCP_FILEPATH.endswith('.scp'):
        logging.error('extension error')
        exit()
    logging.debug(SCP_FILEPATH)
    uttid2recpath = io_helper.loadscp(SCP_FILEPATH)
    logging.debug(uttid2recpath)
    dataset = loaddata(uttid2recpath, OUTPUT_TYPE)
    with kaldi_io.open_or_fd(ARK_FILEPATH, 'wb') as f:
        for k, m in dataset.items():
            kaldi_io.write_mat(f, m, k)
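A command-line sketch; the script name is hypothetical, and the three positionals follow the argparse spec above:

python rec2ark.py data/train/rec.scp data/train/rec.ark raw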
Example #13
def compute_plda_score(enr_embeds,
                       test_embeds,
                       plda_dir,
                       all_pair=True,
                       mean=False):
    os.environ["KALDI_ROOT"] = "/host/projects/kaldi"
    if mean:
        enr_keys = ['enr']
        n_uttrs = len(enr_embeds)
        enr_embeds = enr_embeds.mean(0, keepdims=True)
    else:
        enr_keys = ['enr_{}'.format(i) for i in range(len(enr_embeds))]
    test_keys = ['test_{}'.format(i) for i in range(len(test_embeds))]
    keys = enr_keys + test_keys
    embeds = np.concatenate([enr_embeds, test_embeds])

    # write trials
    score_dir = TemporaryDirectory()
    with open("{}/num_utts.ark".format(score_dir.name), "w") as f:
        if mean:
            f.write("enr {}\n".format(n_uttrs))
        else:
            for key in enr_keys:
                f.write("{} {}\n".format(key, 1))

    with open("{}/kaldi_trial".format(score_dir.name), "w") as f:
        if all_pair:
            trial_pairs = itertools.product(enr_keys, test_keys)
        else:
            trial_pairs = zip(enr_keys, test_keys)
        for pair in trial_pairs:
            f.write(" ".join(pair))
            f.write("\n")

    # write feat
    ark_scp_output = 'ark:| copy-vector ark:- ' + \
        'ark,scp:{output}/feats.ark,{output}/feats.scp'.format(output=score_dir.name)
    with kaldi_io.open_or_fd(ark_scp_output, "wb") as f:
        for key, vec in zip(keys, embeds):
            kaldi_io.write_vec_flt(f, vec.squeeze(), key=str(key))

    # call scoring
    ret = subprocess.call(["./plda_score.sh", plda_dir, score_dir.name])
    if ret != 0:
        raise ValueError("plda scoring failed")

    # read plda scores
    plda_scores = pd.read_csv("{}/plda_scores".format(score_dir.name),
                              delimiter=" ",
                              names=["enroll", "test", "score"]).score
    plda_scores = np.array(plda_scores)
    plda_scores = plda_scores.reshape(len(enr_keys), -1)
    if all_pair:
        assert plda_scores.shape[1] == len(test_embeds)

    score_dir.cleanup()

    return plda_scores
Example #14
def scp2dict(ipath2scp):
    fd = kaldi_io.open_or_fd(ipath2scp)  # ipath2scp can be a pipeline
    id2path = {}
    for line in fd:
        items = line.decode("utf-8").rstrip().split(' ')
        id = items[0]
        id2path[id] = items[1:]
    return id2path
Example #15
def read_wav_scp(ipath2wav_scp):
    fd = kaldi_io.open_or_fd(ipath2wav_scp)
    try:
        for line in fd:
            uttid, path2wav = line.rstrip().decode().split(' ', 1)
            yield uttid, path2wav
    finally:
        if fd is not ipath2wav_scp: fd.close()
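Consuming the generator (path hypothetical); the try/finally closes the descriptor once iteration finishes:

for uttid, path2wav in read_wav_scp('data/test/wav.scp'):
    print(uttid, path2wav)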
Example #16
def kaldi_get_datas(file_path: str, targets: List[Tuple[str]],
                    dep: bool = False) -> Tuple[Callable, Callable, Callable, int]:

    def get_src_data_factory(raw_src_datas):
        def func(utt_ids):
            utt_datas = []
            for utt_id in utt_ids:
                rx_file = raw_src_datas[utt_id]
                utt_datas.append(kaldi_io.read_mat(rx_file))
            source_data = np.concatenate(utt_datas, axis=0)
            return source_data
        return func

    raw_src_datas: Dict[str, str] = {}
    for line in kaldi_io.open_or_fd(file_path):
        key, rxfile = line.decode().rstrip().split(' ', 1)
        raw_src_datas[key] = rxfile

    get_src_data = get_src_data_factory(raw_src_datas)

    targets = list(targets)
    instances_before_filtering = len(targets)

    targets = list(filter(lambda fields: fields[2].strip(), targets))
    logger.warning(f"number of empty targets: {instances_before_filtering - len(targets)}")

    orders = [i for i, fields in enumerate(targets) if
        all(utt_id in raw_src_datas for utt_id in fields[0])]
    ignored = [fields[0] for i, fields in enumerate(targets) if
        not all(utt_id in raw_src_datas for utt_id in fields[0])]

    print(ignored)
    dropped_instances = len(targets) - len(orders)
    if not dropped_instances:
        logger.info("No instances dropped from {}.".format(file_path))
    else:
        logger.warning("Dropped {} instances from {}.".format(dropped_instances,
                                                              file_path))

    # for utt_ids, src_trns, *tgt_trns in tqdm(targets):
    #     utt_datas = []
    #     source_data = get_src_data(utt_ids)
    #     source_datas.append(source_data)
    #     target_datas.append(tgt_trns[0])

    # src_lens = [src.shape[0] for src in source_datas]
    # source_orders = np.argsort(src_lens)
    src_data_func = data_func_factory(lambda idx: get_src_data(targets[idx][0]), orders)
    tgt_data_func = data_func_factory(lambda idx: targets[idx][2], orders)

    annotations = None
    if dep:
        annotations = read_dependencies(os.path.dirname(file_path))

    anno_data_func = data_func_factory(lambda idx: annotations[idx] if annotations else None,
                                       orders)

    return src_data_func, tgt_data_func, anno_data_func, len(orders)
Example #17
 def __execute_command__(self, datain, cmd):
     fin, fout = kio.open_or_fd(cmd, 'wb')
     kio.write_wav(fin, datain, self.sr, key='utt')
     fin.close()  # so it's clear nothing new arrives
     feats_ark = kio.read_mat_ark(fout)
     for _, feats in feats_ark:
         fout.close()
         return feats.T  # there is only one matrix to read
Example #18
def construct_tensor(orig_feat_scp, ark_scp_output, truncate_len):
    with ko.open_or_fd(ark_scp_output, 'wb') as f:
        for key, mat in ko.read_mat_scp(orig_feat_scp):
            tensor = tensor_cnn_utt(mat, truncate_len)
            repetition = int(tensor.shape[1] / truncate_len)
            for i in range(repetition):
                sub_tensor = tensor[:, i * truncate_len:(i + 1) * truncate_len]
                new_key = key + '-' + str(i)
                ko.write_mat(f, sub_tensor, key=new_key)
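Usage sketch with hypothetical paths and a 400-frame truncation length; note the function also relies on a project-local tensor_cnn_utt helper not shown here:

construct_tensor(
    'data/train/feats.scp',
    'ark:| copy-feats ark:- ark,scp:exp/cnn/feats.ark,exp/cnn/feats.scp',
    400)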
Example #19
def decode(args, dataset, model, priors, device='cpu'):
    '''
        Produce lattices from the input utterances.
    '''
    # This is all of the kaldi code we are calling. We are just piping our
    # features to latgen-faster-mapped, which does all of the lattice
    # generation.
    lat_output = '''ark:| copy-feats ark:- ark:- |\
    latgen-faster-mapped --min-active={} --max-active={} \
    --max-mem={} \
    --lattice-beam={} --beam={} \
    --acoustic-scale={} --allow-partial=true \
    --word-symbol-table={} \
    {} {} ark:- ark:- | lattice-scale --acoustic-scale={} ark:- ark:- |\
    gzip -c > {}/lat.{}.gz'''.format(args.min_active, args.max_active,
                                     args.max_mem, args.lattice_beam,
                                     args.beam, args.acoustic_scale,
                                     args.words_file, args.trans_mdl,
                                     args.hclg, args.post_decode_acwt,
                                     args.dumpdir, args.job)

    # Do the decoding (dumping senone posteriors)
    model.eval()
    with torch.no_grad():
        with kaldi_io.open_or_fd(lat_output, 'wb') as f:
            utt_mat = []
            prev_key = b''
            generator = evaluation_batches(dataset)
            # Each minibatch is guaranteed to have at most 1 utterance. We need
            # to append the output of subsequent minibatches corresponding to
            # the same utterances. These are stored in ``utt_mat'', which is
            # just a buffer to accumulate the posterior outputs of minibatches
            # corresponding to the same utterance. The posterior state
            # probabilities are normalized (subtraction in log space) by the
            # log priors in order to produce pseudo-likelihoods usable for
            # lattice generation with latgen-faster-mapped.
            for key, mat in decode_dataset(args,
                                           generator,
                                           model,
                                           device='cpu',
                                           output_idx=args.output_idx):
                if len(utt_mat) > 0 and key != prev_key:
                    kaldi_io.write_mat(f,
                                       np.concatenate(utt_mat,
                                                      axis=0)[:utt_length, :],
                                       key=prev_key.decode('utf-8'))
                    utt_mat = []
                utt_mat.append(mat - args.prior_scale * priors)
                prev_key = key
                utt_length = dataset.utt_lengths[key] // dataset.subsample

            # Flush utt_mat buffer at the end
            if len(utt_mat) > 0:
                kaldi_io.write_mat(f,
                                   np.concatenate(utt_mat,
                                                  axis=0)[:utt_length, :],
                                   key=prev_key.decode('utf-8'))
Example #20
def write_kaldi(orig_feat_scp, ark_scp_output, max_len):
    """Write the sliced feature matrices to ark_scp_output
    """
    with ko.open_or_fd(ark_scp_output, 'wb') as f:
        for key, mat in ko.read_mat_scp(orig_feat_scp):
            tensor = tensor_cnn_utt(mat, max_len)
            if tensor.shape[1] != max_len:
                print(tensor.shape)
            ko.write_mat(f, tensor, key=key)
Example #21
    def plda_write(self, plda):

        with kaldi_io.open_or_fd(plda, 'wb') as f:
            kaldi_io.write_vec_flt(f, self.mean, key='mean')
            kaldi_io.write_vec_flt(f,
                                   self.within_var.reshape(-1, 1),
                                   key='within_var')
            kaldi_io.write_vec_flt(f,
                                   self.between_var.reshape(-1, 1),
                                   key='between_var')
Example #22
def extract_file(wav_lines, wfilename, winstep, winlen, mode):

    ark_scp_output = 'ark:| copy-feats ark:- ark,scp:%s.ark,%s.scp' % (wfilename, wfilename)
    with kaldi_io.open_or_fd(ark_scp_output, 'wb') as wf:
        for line in wav_lines:
            items = line.split()
            key = items[0]
            wav_ = items[5]
            mat = extract(wav_, winstep, winlen, mode)
            kaldi_io.write_mat(wf, mat, key=key)
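Usage sketch: items[5] implies each input line has at least six columns, so wav_lines presumably comes from an extended wav listing. The path, window settings, and mode value below are hypothetical:

with open('data/train/wav_ext.scp') as f:
    extract_file(f.readlines(), 'exp/feats/raw', 0.01, 0.025, 'fbank')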
Example #23
def outputfile_mat2ark(mat, filepath):
    logging.debug(filepath)
    if not filepath.endswith('.ark'):
        logging.error('extension error')
        return
    with kaldi_io.open_or_fd(filepath, 'wb') as f:
        for k, m in mat.items():
            if 0 in m.shape:
                continue
            kaldi_io.write_mat(f, m, k)
Example #24
def eval_dnn(args):
    """ The main function for doing evaluation on a trained network.

    Args:
        args: a Namespace object with the required parameters
            obtained from the function process_args()
    """

    model_dir = args.model_dir
    use_gpu = args.use_gpu == 'yes'
    min_chunk_size = args.min_chunk_size
    chunk_size = args.chunk_size
    # First write the outputs to temp files and rename them at the end
    wspecifier, ark, scp = process_wspecifier(args.vector_wspecifier)

    if ark is not None and os.path.exists(
            ark) and scp is not None and os.path.exists(scp):
        logger.info(
            'Both output ark and scp files exist. Return from this call.')
        return
    model = Model()
    print(args.feature_rspecifier)
    with kaldi_io.open_or_fd(args.feature_rspecifier) as input_fid:
        with kaldi_io.open_or_fd(wspecifier) as output_fid:
            model.make_embedding(input_fid, output_fid, model_dir,
                                 min_chunk_size, chunk_size, use_gpu, logger)

    # rename output files
    if ark is not None:
        os.rename(ark + '.tmp.ark', ark)
    # First load the scp and fix it to point to the renamed ark file.
    if scp is not None:
        with open(scp + '.tmp.scp', 'rt') as fid_in:
        with open(scp + '.tmp', 'wt') as fid_out:
                text = fid_in.read()
                text = text.replace('ark.tmp.ark', 'ark')
                # Sometimes there is no \n at the end of the file, which causes a Kaldi error.
                # To prevent this, just check the last char and append '\n' if missing.
                if text[-1] != '\n':
                    text += '\n'
                fid_out.write(text)
        os.rename(scp + '.tmp', scp)
Example #25
def main():
    args = get_args()
    inp_feats_scp = args.inp_feats_scp
    out_feats_ark = args.out_feats_ark
    ark_scp_output = 'ark:| copy-feats --compress=true ark:- ark,scp:{p}.ark,{p}.scp'.format(
        p=out_feats_ark)
    with kaldi_io.open_or_fd(ark_scp_output, 'wb') as f:
        for utt, feats in kaldi_io.read_mat_scp(inp_feats_scp):
            mfcc = convert_mfcc_to_fbank(feats)
            np.save('ark_check4/{u}.npy'.format(u=utt), mfcc)
            kaldi_io.write_mat(f, mfcc, key=utt)
Example #26
def read_mat_key(file, target_key):
    """read the matrix of the target key/utterance from a kaldi scp file
    """
    fd = ko.open_or_fd(file)
    try:
        for line in fd:
            (key, rxfile) = line.decode().rstrip().split(' ', 1)
            if key == target_key:
                return ko.read_mat(rxfile)
    finally:
        if fd is not file: fd.close()
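Usage sketch (scp path and key hypothetical); the linear scan returns the matrix of the first match, or None when the key is absent:

mat = read_mat_key('data/train/feats.scp', 'utt0001')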
Example #27
 def plda_read(self, plda):

     with kaldi_io.open_or_fd(plda, 'rb') as f:
         for key, vec in kaldi_io.read_vec_flt_ark(f):
             if key == 'mean':
                 self.mean = vec.reshape(-1, 1)
                 self.dim = self.mean.shape[0]
             elif key == 'within_var':
                 self.within_var = vec.reshape(self.dim, self.dim)
             else:
                 self.between_var = vec.reshape(self.dim, self.dim)
Example #28
def read_all_key(file):
    """return all keys/utterances of a kaldi scp file
    """
    key_list = []
    fd = ko.open_or_fd(file)
    try:
        for line in fd:
            (key, _) = line.decode().rstrip().split(' ', 1)
            key_list.append(key)
    finally:
        if fd is not file: fd.close()
    return key_list
Example #29
def write_vectors(utt_list, csvector_list, data_path):
    size = len(utt_list)
    csvector_dict = {}
    for i in range(size):
        sub_list = utt_list[i]
        sub_vector = csvector_list[i]
        for j in range(len(sub_list)):
            csvector_dict[sub_list[j]] = sub_vector[j]

    with kaldi_io.open_or_fd(data_path,'wb') as f:
        for k,v in csvector_dict.items():
            kaldi_io.write_vec_flt(f, v, k)
Example #30
 def testInt32VectorReadWrite(self):
     """
     Test read/write for int32 vectors.
     """
     # read,
     i32_vec = { k:v for k,v in kaldi_io.read_vec_int_ark('tests/data/ali.ark') } # binary,
     i32_vec2 = { k:v for k,v in kaldi_io.read_vec_int_ark('tests/data/ali_ascii.ark') } # ascii,
     # re-save the data,
     with kaldi_io.open_or_fd('tests/data_re-saved/ali.ark','wb') as f:
         for k,v in i32_vec.items(): kaldi_io.write_vec_int(f, v, k)
     # read and make sure it is the same,
     for k,v in kaldi_io.read_vec_int_ark('tests/data_re-saved/ali.ark'):
         self.assertTrue(np.array_equal(v,i32_vec[k]), msg="int32 vector same after re-saving")
Example #31
    def testWriteReadPosteriors(self):
        data = [[(0, 0.0), (1, 0.1), (2, 0.2)],
                [(0, 0.00), (1, 0.11), (2, 0.22)],
                [(0, 0.000), (1, 0.111), (3, 0.333)]]
        key = 'posterior_test1'
        with kaldi_io.open_or_fd('tests/data_re-saved/posterior_tests.ark',
                                 'wb') as w:
            kaldi_io.write_post(w, data, key=key)

        with kaldi_io.open_or_fd('tests/data_re-saved/posterior_tests.ark',
                                 'rb') as r:
            posts = [(k, posteriors)
                     for k, posteriors in kaldi_io.read_post_ark(r)]
            self.assertEqual(len(posts), 1)
            self.assertEqual(posts[0][0], key)
            rdata = posts[0][1]
            self.assertEqual(len(rdata), len(data))
            for a1, a2 in zip(rdata, data):
                self.assertEqual(len(a1), len(a2))
                for ((idx1, p1), (idx, p)) in zip(a1, a2):
                    self.assertEqual(idx1, idx)
                    self.assertAlmostEqual(p1, p)
Example #32
 def testMatrixReadWrite(self):
     """
     Test read/write for float matrices.
     """
     # read,
     flt_mat = { k:m for k,m in kaldi_io.read_mat_scp('tests/data/feats_ascii.scp') } # ascii-scp,
     flt_mat2 = { k:m for k,m in kaldi_io.read_mat_ark('tests/data/feats_ascii.ark') } # ascii-ark,
     flt_mat3 = { k:m for k,m in kaldi_io.read_mat_ark('tests/data/feats.ark') } # binary-ark,
     # store,
     with kaldi_io.open_or_fd('tests/data_re-saved/mat.ark','wb') as f:
         for k,m in flt_mat3.items(): kaldi_io.write_mat(f, m, k)
     # read and compare,
     for k,m in kaldi_io.read_mat_ark('tests/data_re-saved/mat.ark'):
         self.assertTrue(np.array_equal(m, flt_mat3[k]), msg="flt. matrix same after re-saving")
Example #33
    def testFloatVectorReadWrite(self):
        """
        Test read/write for float vectors.
        """
        # read,
        flt_vec = { k:v for k,v in kaldi_io.read_vec_flt_scp('tests/data/conf.scp') } # scp,

        flt_vec2 = { k:v for k,v in kaldi_io.read_vec_flt_ark('tests/data/conf.ark') } # binary-ark,
        flt_vec3 = { k:v for k,v in kaldi_io.read_vec_flt_ark('tests/data/conf_ascii.ark') } # ascii-ark,
        # store,
        with kaldi_io.open_or_fd('tests/data_re-saved/conf.ark','wb') as f:
            for k,v in flt_vec.items(): kaldi_io.write_vec_flt(f, v, k)
        # read and compare,
        for k,v in kaldi_io.read_vec_flt_ark('tests/data_re-saved/conf.ark'):
            self.assertTrue(np.array_equal(v,flt_vec[k]), msg="flt. vector same after re-saving")
Example #34
    def testPipeReadWrite(self):
        """
        Test read/write for pipes.

        Note: make sure the "os.environ['KALDI_ROOT']" in "kaldi_io/kaldi_io.py" is correct.
        """
        # the following line disables 'stderr' forwarding, comment it for DEBUG,
        with open("/dev/null","w") as sys.stderr:
            # read,
            flt_mat4 = { k:m for k,m in kaldi_io.read_mat_ark('ark:copy-feats ark:tests/data/feats.ark ark:- |') }
            # write to pipe,
            with kaldi_io.open_or_fd('ark:| copy-feats ark:- ark:tests/data_re-saved/mat_pipe.ark','wb') as f:
                for k,m in flt_mat4.items(): kaldi_io.write_mat(f, m, k)
            # read it again and compare,
            for k,m in kaldi_io.read_mat_ark('tests/data_re-saved/mat_pipe.ark'):
                self.assertTrue(np.array_equal(m, flt_mat4[k]),"flt. matrix same after read/write via pipe")

            # read some other formats from pipe,
            i32_vec3 = { k:v for k,v in kaldi_io.read_vec_int_ark('ark:copy-int-vector ark:tests/data/ali.ark ark:- |') }
            flt_vec4 = { k:v for k,v in kaldi_io.read_vec_flt_ark('ark:copy-vector ark:tests/data/conf.ark ark:- |') }
Example #35
if do_training:
    net.train()
    test_flag = 0
    N_batches = int(N_snt / batch_size)
    if rnn == 0:
        N_ex_tr = data_set.shape[0]
        N_batches = int(N_ex_tr / batch_size)

if do_eval:
    N_batches = N_snt
    net.eval()
    test_flag = 1
    batch_size = 1

    if do_forward:
        post_file = kaldi_io.open_or_fd(out_file, 'wb')
        counts = load_counts(count_file)

beg_batch = 0
end_batch = batch_size

snt_index = 0
beg_snt = 0

loss_sum = 0
err_sum = 0

for i in range(N_batches):