コード例 #1
0
def test_multiple_mics(c, fs, s, L, n, reverberation_time, beta, mtype, order,
                       dim, orientation, hp_filter):
    """A single-mic simulation must match the corresponding channel of a
    multi-mic simulation run with the same configuration."""
    common = dict(
        reverberation_time=reverberation_time,
        beta=beta,
        nsample=n,
        mtype=mtype,
        order=order,
        dim=dim,
        orientation=orientation,
        hp_filter=hp_filter,
    )
    single = rir_generator.generate(c, fs, [2, 1.5, 2], s, L, **common)
    multi = rir_generator.generate(
        c, fs, [[2, 1.5, 2], [1, 1.5, 2]], s, L, **common)

    # Channel 0 of the two-mic run is at the same position as the
    # single-mic run, so the impulse responses must coincide.
    assert np.allclose(single[:, 0], multi[:, 0])
コード例 #2
0
def test_outside_room(r, s):
    """Positions outside the 5 x 4 x 6 room must raise ValueError."""
    room = [5, 4, 6]
    with pytest.raises(ValueError):
        rir_generator.generate(
            340,
            16000,
            r,
            s,
            L=room,
            reverberation_time=0.4,
        )
コード例 #3
0
def test_beta_shape(r, s, beta):
    """A malformed ``beta`` (wrong shape/length) must trip the library's
    internal assertion.

    Fixed: removed the stray debug ``print(beta)`` — pytest already shows
    the parametrized value on failure.
    """
    with pytest.raises(AssertionError):
        rir_generator.generate(
            340,
            16000,
            r,
            s,
            L=[5, 4, 6],
            beta=beta,
        )
コード例 #4
0
def test_parameters(
    c,
    fs,
    r,
    nMics,
    s,
    L,
    n,
    reverberation_time,
    beta,
    mtype,
    order,
    dim,
    orientation,
    hp_filter,
):
    """Smoke-test the full parameter surface: the output must have shape
    (nsample, nMics) and must not be identically zero."""
    h = rir_generator.generate(
        c,
        fs,
        r,
        s,
        L,
        reverberation_time=reverberation_time,
        beta=beta,
        nsample=n,
        mtype=mtype,
        order=order,
        dim=dim,
        orientation=orientation,
        hp_filter=hp_filter,
    )

    assert h.shape == (n, nMics)
    # A valid simulation produces at least one non-negligible sample.
    assert not np.all(np.isclose(h, 0))
コード例 #5
0
    def run_210313(self):
        """Simulate anechoic 4-mic RIRs for the SSLR-style NS dataset and
        save one .npz file per source position.

        Output filenames encode the source's spherical coordinates
        (azimuth, elevation, range) relative to the room centre.
        """
        save_path = '/media/jeonghwan/HDD2/IS2021/dataset/SSLR/simulated_RIR/anechoic/tr/'
        room_center = (np.array(self.params['L']) / 2).tolist()
        # Fixed rectangular 4-mic geometry (offsets in the horizontal plane,
        # z = 0), shifted so the array sits at the room centre.
        mic_pos = np.array([[-0.0267, 0.0343, 0], [-0.0267, -0.0343, 0],
                            [0.0313, 0.0343, 0], [0.0313, -0.0343, 0]])
        print(mic_pos)
        self.params['r'] = mic_pos + room_center

        # Sources distributed on a sphere around the room centre,
        # 5-degree resolution in both angles.
        src_pos_list = self.get_uniform_dist_circ_pos(center=room_center,
                                                      radius=self.radius_src,
                                                      resolution=[5, 5],
                                                      dim='3D')

        for src_pos in src_pos_list:
            self.params['s'] = src_pos
            azi, ele, dist = self.cart2sph(src_pos - room_center)
            rir_data = rir.generate(**self.params)
            np.savez(save_path +
                     'az{}_el{}_r{}.npz'.format(int(azi), int(ele), dist),
                     rir=rir_data,
                     params=self.params)
コード例 #6
0
    def run_210313_ULA_sim(self):
        """Simulate anechoic RIRs for a 4-mic ULA over rings of source
        positions (radii 1–3 m, 1-degree azimuth steps) and save one .npz
        per position.

        When ``_use_par`` is set, per-source work is fanned out via
        ``parmap.map`` onto ``self.generate_rir`` (which then reads
        ``self.save_path``); otherwise positions are processed serially.
        """
        _use_par = True
        save_path = '/media/jeonghwan/HDD2/IS2021/dataset/Simul_DB_ULA4/simulated_RIR/anechoic/tr/'
        Path(save_path).mkdir(parents=True, exist_ok=True)

        center = (np.array(self.params['L']) / 2).tolist()
        # 4-element uniform linear array, 5 cm inter-mic spacing, centred
        # in the room.
        mic_pos = self.get_ULA_array(center, 0.05, 4)
        print(mic_pos)
        self.params['r'] = mic_pos

        # Source positions: 21 concentric rings between 1 m and 3 m around
        # the room centre, in the horizontal plane.
        src_pos_list = []
        for radius in np.linspace(1, 3, 21):
            src_pos_list += self.get_uniform_dist_circ_pos(center=center,
                                                           radius=radius,
                                                           resolution=1,
                                                           dim='2D')

        if _use_par:  # fixed: was `if _use_par == True:` — test truthiness directly
            _ = parmap.map(self.generate_rir, src_pos_list)
        else:
            for src_pos in src_pos_list:
                self.params['s'] = src_pos
                azi, ele, r = self.cart2sph(src_pos - center)
                h = rir.generate(**self.params)
                np.savez(save_path +
                         'az{}_el{}_r{}.npz'.format(int(azi), int(ele), r),
                         rir=h,
                         params=self.params)
コード例 #7
0
 def example(self):
     """Generate a single RIR in a 1 x 1 x 1 m room and plot it."""
     # Example of loading a reference signal, currently unused:
     # signal, fs = sf.read("bark.wav", always_2d=True)
     room_params = self.generate_room_params([1, 1, 1])
     print(room_params)
     impulse_response = rir.generate(**room_params)
     plt.figure(1)
     plt.plot(impulse_response)
     plt.show()
コード例 #8
0
 def generate_rir(self, src_pos):
     """Simulate the RIR for one source position and save it as .npz.

     The filename encodes the source's spherical coordinates (azimuth,
     elevation, range) relative to the room centre.
     """
     self.params['s'] = src_pos
     room_center = (np.array(self.params['L']) / 2).tolist()
     azi, ele, r = self.cart2sph(src_pos - room_center)
     filename = 'az{}_el{}_r{}.npz'.format(int(azi), int(ele), r)
     np.savez(self.save_path + filename,
              rir=rir.generate(**self.params),
              params=self.params)
コード例 #9
0
    def run(self):
        """Simulate multi-channel RIRs for a 4-mic ULA (2 cm spacing) with
        sources on a ring around the room centre; save one .npz per source
        into the current working directory."""
        room_center = (np.array(self.params['L']) / 2).tolist()
        mic_pos = self.get_ULA_array(center=room_center, interspace=0.02, nmic=4)
        print(mic_pos)
        # Circular-array alternative, kept for reference:
        # mic_pos = self.get_UCA_array(center=center, radius=self.radius_mic, nmic=self.nmic, visualization=self.visualization)
        self.params['r'] = mic_pos

        # Source ring: radius self.radius_src, 30-degree azimuth steps,
        # horizontal plane only.
        src_pos_list = self.get_uniform_dist_circ_pos(center=room_center,
                                                      radius=self.radius_src,
                                                      resolution=30,
                                                      dim='2D')
        for src_pos in src_pos_list:
            self.params['s'] = src_pos
            azi, ele, r = self.cart2sph(src_pos - room_center)
            np.savez('az{}_el{}_r{}.npz'.format(int(azi), int(ele), r),
                     rir=rir.generate(**self.params),
                     params=self.params)
コード例 #10
0
    def run_210301(self):
        """Simulate anechoic 2-mic ULA RIRs (5 cm spacing) for sources on a
        ring around the room centre and save one .npz per position."""
        save_path = '/media/jeonghwan/HDD2/IS2021/dataset/simulated_RIR/tr/anechoic/'
        room_center = (np.array(self.params['L']) / 2).tolist()
        mic_pos = self.get_ULA_array(center=room_center, interspace=0.05, nmic=2)
        print(mic_pos)
        # Circular-array alternative, kept for reference:
        # mic_pos = self.get_UCA_array(center=center, radius=self.radius_mic, nmic=self.nmic, visualization=self.visualization)
        self.params['r'] = mic_pos

        # Source ring: radius self.radius_src, 5-degree azimuth steps,
        # horizontal plane only.
        src_pos_list = self.get_uniform_dist_circ_pos(center=room_center,
                                                      radius=self.radius_src,
                                                      resolution=5,
                                                      dim='2D')
        for src_pos in src_pos_list:
            self.params['s'] = src_pos
            azi, ele, r = self.cart2sph(src_pos - room_center)
            np.savez(save_path +
                     'az{}_el{}_r{}.npz'.format(int(azi), int(ele), r),
                     rir=rir.generate(**self.params),
                     params=self.params)
コード例 #11
0
ファイル: createMixture.py プロジェクト: SouppuoS/THM
def generateWav(args):
    """Render the mixture wav files described by the per-split JSON recipes.

    For every split (tr/cv/tt) this reads ``<split>.json`` from the metafile
    directory, optionally shuffles it, truncates to the requested number of
    mixtures, then writes per-speaker sources plus the noisy mix (and a clean
    mix when there is more than one speaker). Recipe entries carrying an
    ``'ssl'`` field are spatialized by convolving each source with a simulated
    room impulse response.

    Side effects: sets the module-level generation counters from ``args`` and
    creates the output directory tree under ``./mix``.

    Raises:
        FileNotFoundError: if a split's metafile is missing.
    """
    global N_SRC, N_GEN_TRN, N_GEN_DEV, N_GEN_TST
    N_SRC       = args.src
    N_GEN_TRN   = args.gen_trn
    N_GEN_DEV   = args.gen_dev
    N_GEN_TST   = args.gen_tst

    # Path Information
    P_SRC       = "./THCHS30/data_thchs30"
    P_SRC_TRN   = P_SRC + "/train"
    P_SRC_DEV   = P_SRC + "/dev"
    P_SRC_TST   = P_SRC + "/test"
    P_NOISY     = "./high_res_wham/audio"
    P_LOCAL     = "./local"
    P_META      = P_LOCAL + "/metafile"
    P_JSON      = P_META  + f'/{N_SRC}speakers'
    P_TMP       = P_LOCAL + '/tmp'

    # TODO: only support 8k min mode for now
    P_MIX       = './mix'
    P_MIX_SPK   = P_MIX     + f'/{N_SRC}speakers'
    P_MIX_HZ    = P_MIX_SPK + '/wav8k'
    P_MIX_MODE  = P_MIX_HZ  + '/min'

    # A separate clean mix only makes sense with more than one speaker.
    # Fixed: was the redundant `True if N_SRC > 1 else False`.
    cleanMixVer = N_SRC > 1

    if args.arrayGeo is not None:
        arrayGeometry = decodeGeo(args.arrayGeo)
        # only support one room setting
        roomInfo      = decodeGeo(args.room)[0]
    else:
        arrayGeometry = None
        roomInfo      = None

    dataset = [
        {'name':'tr', 'n_gen':N_GEN_TRN},
        {'name':'cv', 'n_gen':N_GEN_DEV},
        {'name':'tt', 'n_gen':N_GEN_TST},
    ]
    out_path = [f'/s{v + 1}' for v in range(N_SRC)] + (['/mix_clean', '/mix_both'] if cleanMixVer else ['/mix_both'])
    order    = list(permutations(range(N_SRC)))       # permutation order
    cnt      = 0
    print('Generate wav files', end='')
    for d in dataset:
        p_recipe = os.path.join(P_JSON, d['name'] + '.json')
        if not os.path.exists(p_recipe):
            # Fixed: raise the specific FileNotFoundError instead of bare
            # Exception (still caught by any `except Exception` caller).
            raise FileNotFoundError(r'No metafile {}!'.format(d['name']))
        with open(p_recipe) as f:
            f_recipe = json.load(f)

        P_MIX_WAV = P_MIX_MODE + '/' + d['name']
        for path in out_path:
            os.makedirs(P_MIX_WAV + path, exist_ok=True)

        if FLAG_SHUFFLE:
            random.shuffle(f_recipe)

        f_recipe   = f_recipe[:d['n_gen']]        # gen `n_gen` mixtures
        # Sort by noise file so each noisy wav is loaded only once below.
        f_recipe   = sorted(f_recipe, key=lambda x: x['noisy_path'])
        noisy_path = None
        for r in f_recipe:
            # Progress dot every 100 mixtures.  Fixed: the counter used to be
            # incremented once per dataset split (max 3), so no dot could ever
            # be printed.
            cnt += 1
            if cnt % 100 == 0:
                print('.', end='')
            if noisy_path != r['noisy_path']:
                noisy_path = r['noisy_path']
                rdB   = random.randint(6, 18)
                noisy = read_scaled_wav(noisy_path, dB(rdB), True)

            pidx  = order[r['permutation']]
            wav   = []
            scale = [dB(v) for v in r['db']]
            for spk in range(N_SRC):
                wav.append(read_scaled_wav(r[f's{spk + 1}_path'], 1, True))
            # Apply the recipe's speaker permutation and per-speaker gains,
            # truncating all signals to the mixture length.
            sample    = [quantize(wav[v][:r['len']] * scale[k]) for v, k in zip(pidx, range(N_SRC))]
            sample_n  = noisy[r['noisy_start'] : r['noisy_start'] + r['len']]
            if 'ssl' in r:
                # Spatialize each source (and the noise tail) with a simulated
                # RIR at its 'ssl' position; mixes are sums of the convolved
                # multi-channel signals.
                wav_multi = []
                for _wav, s in zip(sample + [sample_n], r['ssl']):
                    h = rir.generate(
                        c=340, fs=8000,                         # only support 8k
                        r=arrayGeometry, s=s, L=roomInfo,
                        reverberation_time=0.2, nsample=4096,
                    )
                    wav_multi.append(ss.convolve(h[:, None, :], _wav[...,None,None]).transpose(1,0,2).squeeze())
                out_data = sample + ([sum(wav_multi[:N_SRC]), sum(wav_multi)] if cleanMixVer else [sum(wav_multi)])
            else:
                out_data = sample + ([sum(sample), sum(sample) + sample_n] if cleanMixVer else [sum(sample) + sample_n])

            for data, path in zip(out_data, out_path):
                sf.write(os.path.join(P_MIX_WAV + path, r['name']), data, 8000, subtype='FLOAT')
    print('Complete!')