Code Example #1
File: raw_audio_dataset.py  Project: Liam0620/fairseq
    def __getitem__(self, index):
        import soundfile as sf
        path_or_fp = os.path.join(self.root_dir, str(self.fnames[index]))
        _path, slice_ptr = parse_path(path_or_fp)
        if len(slice_ptr) == 2:
            # Manifest entry points into a stored zip: read the raw byte slice
            # and wrap it so soundfile can decode it in memory.
            byte_data = read_from_stored_zip(_path, slice_ptr[0], slice_ptr[1])
            assert is_sf_audio_data(byte_data)
            path_or_fp = io.BytesIO(byte_data)

        # Noise / room-impulse-response augmentation, applied only during training.
        if random.random() < self.noise_rir_prob and self.is_training:
            wav = self.noise_rir_dataset.add_noise_rir(path_or_fp)
            curr_sample_rate = self.sample_rate
        else:
            wav, curr_sample_rate = sf.read(path_or_fp, dtype="float32")

        feats = torch.from_numpy(wav).float()
        feats = self.postprocess(feats, curr_sample_rate)

        # Speed and volume perturbation, likewise training-only.
        if random.random() < self.speed_perturb_prob and self.is_training:
            feats = self.sp(feats)

        if random.random() < self.volume_perturb_prob and self.is_training:
            feats = volume_perturb(feats)

        # Optionally dump the augmented waveform to disk for inspection.
        if self.is_save:
            save_path = os.path.join(
                self.is_save_path,
                _path.split('/')[-1].split('.')[0] + '_augment.wav',
            )
            self.save_to_wav(feats, save_path)

        return {"id": index, "source": feats}
Code Example #2
File: data_utils.py  Project: scheiblr/fairseq
def get_feature_value_min_max(feature_paths: List[str]):
    v_min, v_max = 1e-8, -1e-8
    for p in tqdm(feature_paths):
        # Each entry has the form "archive.zip:offset:size"; read the byte slice.
        _path, slice_ptr = parse_path(p)
        assert len(slice_ptr) == 2
        byte_data = read_from_stored_zip(_path, slice_ptr[0], slice_ptr[1])
        assert is_npy_data(byte_data)
        path_or_fp = io.BytesIO(byte_data)
        features = np.load(path_or_fp).squeeze()
        v_min = min(v_min, features.min().item())
        v_max = max(v_max, features.max().item())
    return v_min, v_max
Code Example #3
def get_features_or_waveform_from_stored_zip(
    path, byte_offset, byte_size, need_waveform=False
):
    assert path.endswith(".zip")
    data = read_from_stored_zip(path, byte_offset, byte_size)
    f = io.BytesIO(data)
    if is_npy_data(data):
        # Pre-extracted features stored as a .npy blob.
        features_or_waveform = np.load(f)
    elif is_sf_audio_data(data):
        # Raw audio: return the waveform itself or compute filterbank features.
        features_or_waveform = (
            get_waveform(f, always_2d=False)[0] if need_waveform else get_fbank(f)
        )
    else:
        raise ValueError(f'Unknown file format for "{path}"')
    return features_or_waveform
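
All five examples rely on the same convention: a manifest entry of the form "archive.zip:byte_offset:byte_size" is split by parse_path, and read_from_stored_zip returns the raw byte slice at that position, which is then decoded in memory (np.load or soundfile on a BytesIO). The sketch below illustrates that convention using only the standard library; the helper names and the sample entry are hypothetical stand-ins, not fairseq's actual implementation.

import io

import numpy as np


def parse_path_sketch(entry: str):
    """Split "archive.zip:offset:size" into (archive, [offset, size]).

    Stand-in for parse_path; a plain file path yields an empty slice pointer.
    """
    parts = entry.split(":")
    if len(parts) == 1:
        return entry, []
    archive, offset, size = parts
    return archive, [int(offset), int(size)]


def read_slice_sketch(zip_path: str, offset: int, size: int) -> bytes:
    """Read `size` raw bytes starting at `offset` of the archive file.

    Mirrors what read_from_stored_zip does for entries stored without
    compression, where the byte range maps directly onto the .zip file.
    """
    with open(zip_path, "rb") as f:
        f.seek(offset)
        return f.read(size)


# Hypothetical manifest entry pointing at a .npy blob inside feats.zip.
entry = "feats.zip:2048:4096"
archive, slice_ptr = parse_path_sketch(entry)
if len(slice_ptr) == 2:
    features = np.load(io.BytesIO(read_slice_sketch(archive, *slice_ptr)))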
Code Example #4
    def __getitem__(self, index):
        import soundfile as sf

        path_or_fp = os.path.join(self.root_dir, str(self.fnames[index]))
        _path, slice_ptr = parse_path(path_or_fp)
        if len(slice_ptr) == 2:
            # Same stored-zip handling as above: decode the byte slice in memory.
            byte_data = read_from_stored_zip(_path, slice_ptr[0], slice_ptr[1])
            assert is_sf_audio_data(byte_data)
            path_or_fp = io.BytesIO(byte_data)

        wav, curr_sample_rate = sf.read(path_or_fp, dtype="float32")

        feats = torch.from_numpy(wav).float()
        feats = self.postprocess(feats, curr_sample_rate)
        return {"id": index, "source": feats}
Code Example #5
File: hubert_dataset.py  Project: ishine/fairseq
    def get_audio(self, index):
        import soundfile as sf

        wav_path = os.path.join(self.audio_root, self.audio_names[index])
        _path, slice_ptr = parse_path(wav_path)
        if len(slice_ptr) == 0:
            # Plain audio file on disk.
            wav, cur_sample_rate = sf.read(_path)
        else:
            # "archive.zip:offset:size" entry: read and decode the byte slice.
            assert _path.endswith(".zip")
            data = read_from_stored_zip(_path, slice_ptr[0], slice_ptr[1])
            f = io.BytesIO(data)
            wav, cur_sample_rate = sf.read(f)
        wav = torch.from_numpy(wav).float()
        wav = self.postprocess(wav, cur_sample_rate)
        return wav
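
The __getitem__ / get_audio methods above all finish by passing the decoded waveform through self.postprocess. In fairseq-style raw-audio datasets this step typically collapses multi-channel audio to mono, checks that the sample rate matches the expected one, and optionally normalizes the waveform. The function below is an illustrative sketch of that kind of postprocessing, not a verbatim copy from any of the projects listed above.

import torch
import torch.nn.functional as F


def postprocess_sketch(
    feats: torch.Tensor,
    curr_sample_rate: int,
    target_sample_rate: int = 16000,
    normalize: bool = False,
) -> torch.Tensor:
    """Typical post-decode cleanup for raw-audio datasets (illustrative only)."""
    # Collapse multi-channel audio (frames, channels) to mono by averaging.
    if feats.dim() == 2:
        feats = feats.mean(-1)
    # Refuse unexpected sample rates rather than resampling silently.
    if curr_sample_rate != target_sample_rate:
        raise ValueError(
            f"sample rate: {curr_sample_rate}, expected {target_sample_rate}"
        )
    # Optional per-utterance normalization of the 1-D waveform.
    if normalize:
        with torch.no_grad():
            feats = F.layer_norm(feats, feats.shape)
    return feats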