Example #1
0
 def __getitem__(self, i):
     """Return example *i* as a float32 tensor.

     The feature matrix is read from the Kaldi rxfile for index *i*.
     When ``specaugment_config`` is a non-empty string, SpecAugment is
     applied under a deterministic numpy seed derived from
     ``(seed, epoch, i)`` so augmentation is reproducible.
     """
     self.check_index(i)
     features = kaldi_io.read_mat(self.rxfiles[i])
     config = self.specaugment_config
     if config is not None and config != "":
         # Seed numpy per (seed, epoch, index) for reproducible augmentation.
         with data_utils.numpy_seed(self.seed, self.epoch, i):
             # NOTE(review): eval() on a config string — safe only for
             # trusted configs; consider ast.literal_eval instead.
             features = specaug(features, **eval(config))
     return torch.from_numpy(features).float()
Example #2
0
 def __getitem__(self, i):
     """Return example *i*, sliced from the in-memory buffer, as a float32 tensor.

     When ``specaugment_config`` is a non-empty string, SpecAugment is
     applied under a deterministic numpy seed derived from
     ``(seed, epoch, i)``.
     """
     self.check_index(i)
     start = self.data_offsets[i]
     end = start + self.sizes[i]
     # Copy so augmentation below cannot mutate the shared buffer.
     features = self.buffer[start:end].copy()
     config = self.specaugment_config
     if config is not None and config != "":
         with data_utils.numpy_seed(self.seed, self.epoch, i):
             # NOTE(review): eval() on a config string — trusted configs only.
             features = specaug(features, **eval(config))
     return torch.from_numpy(features).float()
Example #3
0
 def _get_features(self, i):
     """Compute the feature matrix for example *i*.

     Supported input formats:
       * ``"feat"``    — pre-computed features read from a Kaldi rxfile;
       * ``"command"`` — raw audio produced by running a shell command
         (the stored rxfile string, minus its trailing pipe character);
       * otherwise     — raw audio read directly from the rxfile path.

     Raw audio is converted to fbank/MFCC features and passed through
     ``feature_transforms`` when configured. Finally, SpecAugment is
     applied (deterministically seeded) when ``specaugment_config`` is a
     non-empty string.
     """
     if self.input_format == "feat":
         # Pre-computed features: nothing to extract.
         feat = kaldi_io.read_mat(self.rxfiles[i])
     else:
         if self.input_format == "command":
             # NOTE(review): shell=True on a stored command string —
             # safe only if the manifest is trusted.
             audio_bytes = run(self.rxfiles[i][:-1], shell=True, stdout=PIPE).stdout
             source = BytesIO(audio_bytes)
         else:
             source = self.rxfiles[i]
         waveform, sample_rate = get_waveform(
             source, normalization=False, always_2d=True
         )
         feat = get_torchaudio_fbank_or_mfcc(
             waveform,
             sample_rate,
             n_bins=self.feat_dim,
             feature_type=self.feature_type,
         )
         if self.feature_transforms is not None:
             feat = self.feature_transforms(feat)
     config = self.specaugment_config
     if config is not None and config != "":
         with data_utils.numpy_seed(self.seed, self.epoch, i):
             # NOTE(review): eval() on a config string — trusted configs only.
             feat = specaug(feat, **eval(config))
     return feat
Example #4
0
    def __getitem__(self, i):
        """Return example *i* as a float32 tensor, reading through a block cache.

        Before :meth:`prefetch` has been called, features are read directly
        from disk. Afterwards, a cache miss triggers a bulk read of the next
        ``cache_size`` examples (in ``ordered_indices`` order) into
        ``self.cache``, replacing the previous cache contents.

        Raises:
            ValueError: if *i* is not found in ``ordered_indices`` at or
                after ``start_pos_for_next_cache``.
        """
        self.check_index(i)
        if not self.prefetch_called:  # no caching
            feat = kaldi_io.read_mat(self.rxfiles[i])
            # Fix: apply SpecAugment on the direct-read path as well, for
            # consistency with the cached path below — previously
            # augmentation was silently skipped when prefetch() had not
            # been called, even with a non-empty specaugment_config.
            if self.specaugment_config is not None and self.specaugment_config != "":
                with data_utils.numpy_seed(self.seed, self.epoch, i):
                    # NOTE(review): eval() on a config string — trusted configs only.
                    feat = specaug(feat, **eval(self.specaugment_config))
            return torch.from_numpy(feat).float()
        if i not in self.cache_index:
            assert (
                self.start_pos_for_next_cache < len(self.ordered_indices)
            ), "Position for next cache starting beyond the end of ordered_indices."
            try:
                # Find where i sits in the prefetch order; the cache window
                # starts there.
                pos_start = self.ordered_indices.index(
                    i,
                    self.start_pos_for_next_cache,
                )
            except ValueError:
                raise ValueError(
                    "index {} not found in self.ordered_indices. Set "
                    "self.ordered_prefetch to False, and/or call self.prefetch() "
                    "with the full list of indices, and then try again.".
                    format(i))
            pos_end = min(
                pos_start + self.cache_size,
                len(self.ordered_indices),
            )
            # With ordered prefetch, the next miss resumes the search after
            # this window; otherwise searches restart from the beginning.
            self.start_pos_for_next_cache = pos_end if self.ordered_prefetch else 0
            # Size the cache to hold every example in the window.
            total_size = 0
            for idx in self.ordered_indices[pos_start:pos_end]:
                total_size += self.sizes[idx]
            self.cache = np.empty((total_size, self.feat_dim),
                                  dtype=self.dtype)
            ptx = 0
            self.cache_index.clear()
            for idx in self.ordered_indices[pos_start:pos_end]:
                self.cache_index[idx] = ptx
                length = self.sizes[idx]
                dst = self.cache[ptx:ptx + length]
                feat = kaldi_io.read_mat(self.rxfiles[idx])
                if self.specaugment_config is not None and self.specaugment_config != "":
                    # Seed per (seed, epoch, idx) so augmentation is
                    # reproducible regardless of cache layout.
                    with data_utils.numpy_seed(self.seed, self.epoch, idx):
                        feat = specaug(feat, **eval(self.specaugment_config))
                np.copyto(dst, feat)
                ptx += length

        ptx = self.cache_index[i]
        # Copy so callers cannot mutate (or be affected by refills of) the cache.
        a = self.cache[ptx:ptx + self.sizes[i]].copy()
        return torch.from_numpy(a).float()