Example #1
def gen_audio_stim(MatrixDir, OutDir, indexes):
    if os.path.exists(OutDir):
        shutil.rmtree(OutDir)
    os.makedirs(OutDir)
    wavFiles = globDir(MatrixDir, '*.wav')
    wavFileMatrix = organiseWavs(wavFiles)
    wavDir = os.path.join(OutDir, "wav")
    dir_must_exist(wavDir)
    wavDir = os.path.join(wavDir, "noise-sentences")
    dir_must_exist(wavDir)
    files = []
    n = 0
    o = 0
    for sentenceList in indexes:
        n += 1
        o = 0
        files.append([])
        for ind in sentenceList:
            o += 1
            y, wavInfo, partnames = synthesize_trial(wavFileMatrix, ind)
            fileName = os.path.join(wavDir,
                                    'Trial_{0:02d}_{1:02d}.wav'.format(n, o))
            print("Generating: " + fileName)
            sndio.write(fileName, y, **wavInfo)
            files[-1].append(fileName)

    return files
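Every example on this page relies on a pair of small project helpers, dir_must_exist and globDir, that are not shown here. As a point of reference, a minimal sketch of what they plausibly do (assumed implementations, not the project's actual code):

import glob
import os


def dir_must_exist(directory):
    # Assumed behaviour: create the directory (and any missing parents)
    # if it does not exist yet; do nothing if it already does.
    if not os.path.isdir(directory):
        os.makedirs(directory)


def globDir(directory, pattern):
    # Assumed behaviour: return a sorted list of paths inside `directory`
    # that match the given glob pattern, e.g. globDir(MatrixDir, '*.wav').
    return sorted(glob.glob(os.path.join(directory, pattern)))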
Example #2
def gen_rms_peak(files, OutRMSDir, OutPeakDir):
    rmsFiles = []
    peakFiles = []
    for file in files:
        head, tail = os.path.split(file)
        tail = os.path.splitext(tail)[0]
        tail = tail + "_rms.npy"
        dir_must_exist(OutRMSDir)
        rmsFilepath = os.path.join(OutRMSDir, tail)
        print("Generating: " + rmsFilepath)
        y, fs, _ = sndio.read(file)
        y_rms = window_rms(y, round(0.02 * fs))
        np.save(rmsFilepath, y_rms)
        rmsFiles.append(rmsFilepath)

        head, tail = os.path.split(file)
        tail = os.path.splitext(tail)[0]
        tail = tail + "_peak.npy"
        dir_must_exist(OutPeakDir)
        peakFilepath = os.path.join(OutPeakDir, tail)
        print("Generating: " + peakFilepath)
        peak = np.abs(y).max()
        np.save(peakFilepath, peak)
        peakFiles.append(peakFilepath)
    return rmsFiles, peakFiles
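window_rms is another helper that is not defined on this page. A common way to compute the sliding-window RMS envelope it appears to produce is to convolve the squared signal with a uniform window; a minimal sketch under that assumption:

import numpy as np


def window_rms(x, window_size):
    # Assumed behaviour: moving RMS of a mono signal, i.e. the square root
    # of the running mean of x**2 over a rectangular window.
    window_size = int(window_size)
    window = np.ones(window_size) / float(window_size)
    return np.sqrt(np.convolve(x ** 2, window, mode='valid'))

With the round(0.02 * fs) window used above, this yields an envelope with roughly 20 ms time resolution.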
Example #3
def gen_noise(OutDir, b, fs):
    print("Generating noise...")
    # Generate 5 minutes of white noise
    x = np.random.randn(int(fs * 60. * 5.))
    x /= x.max()
    noiseDir = os.path.join(OutDir, 'wav')
    noiseRMSDir = os.path.join(OutDir, 'rms')
    dir_must_exist(noiseDir)
    noiseDir = os.path.join(noiseDir, 'noise')
    dir_must_exist(noiseDir)
    y, y_max = block_lfilter_wav(b, [1.0], x,
                                 os.path.join(noiseDir, 'noise.wav'), 65538,
                                 44100)
    block_process_wav(os.path.join(noiseDir, 'noise.wav'),
                      os.path.join(noiseDir, 'noise_norm.wav'), lambda x: x /
                      (y_max * 1.05))
    noise_norm_wav = PySndfile(os.path.join(noiseDir, 'noise_norm.wav'), 'r')
    noise_rms_path = os.path.join(noiseRMSDir, 'noise_rms.npy')
    y = noise_norm_wav.read_frames(fs * 60)
    y = y / (np.abs(y).max() * 0.95)
    # rms = np.sqrt(np.mean(y**2))
    # rms, _, _ = asl_P56(y, fs, 16)
    rms = rms_no_silences(y, fs, -30.)
    print(f"Noise level: {rms}")

    peak = np.abs(y).max()
    np.save(noise_rms_path, rms)
    np.save('./stimulus/peak/noise_peak.npy', peak)
    return y
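block_lfilter_wav and block_process_wav are project helpers that filter and rescale the audio chunk by chunk, so the long noise file never has to be held in memory at once. A minimal sketch of the block-processing idea using pysndfile (assumed behaviour; the real helpers' signatures and block handling may differ):

from pysndfile import PySndfile


def block_process_wav(in_path, out_path, func, block_size=65536):
    # Read the input in blocks, apply `func` to each block and append the
    # result to an output file sharing the input's format, channel count
    # and sample rate.
    snd_in = PySndfile(in_path, 'r')
    snd_out = PySndfile(out_path, 'w', snd_in.format(),
                        snd_in.channels(), snd_in.samplerate())
    remaining = snd_in.frames()
    while remaining > 0:
        block = snd_in.read_frames(min(block_size, remaining))
        snd_out.write_frames(func(block))
        remaining -= block.shape[0]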
Example #4
    def generate_folder_hierachy(self):
        '''
        '''
        sub_dirs = [
            "mat_test", "tone_test", "pta", "click_test", "info",
            "eeg_story_train", "eeg_mat_train", "eeg_test",
            "eeg_test/stimulus", "parameters"
        ]
        for dir_name in sub_dirs:
            dn = os.path.join(*dir_name.split('/'))
            path = os.path.join(self.participant_dir, dn)
            dir_must_exist(path)
            self.data_paths[dir_name] = path
Example #5
def gen_rms(file, OutDir):
    head, tail = os.path.split(file)
    tail = os.path.splitext(tail)[0]
    tail = tail + "_env.npy"
    dir_must_exist(OutDir)
    rmsFilepath = os.path.join(OutDir, tail)
    print("Generating: " + rmsFilepath)
    y, fs, _ = sndio.read(file)

    y = y[:, 0]
    y_rms = window_rms(y, round(0.02 * fs))
    np.save(rmsFilepath, y_rms)
    return rmsFilepath
Example #6
    def __init__(self,
                 participant_dir=None,
                 number=None,
                 age=None,
                 gender=None,
                 handedness=None,
                 general_notes=None,
                 parameters={},
                 gen_time=datetime.now()):
        '''
        '''
        dir_must_exist(participant_dir)
        self.participant_dir = participant_dir
        self.data_paths = {}
        self.generate_folder_hierachy()
        self.parameters = parameters
        self.gen_time = gen_time

        self.data = {
            "info": {
                "number": number,
                "age": age,
                "gender": gender,
                "handedness": handedness,
                "general_notes": general_notes
            },
            "mat_test": {
                "notes": ''
            },
            "eeg_story_train": {
                "notes": ''
            },
            "eeg_mat_train": {
                "notes": ''
            },
            "eeg_test": {
                "notes": ''
            },
            "tone_test": {
                "notes": ''
            },
            "click_test": {
                "notes": ''
            },
            "pta": {
                "notes": ''
            }
        }
        self.data['parameters'] = parameters
Example #7
def gen_noise(OutDir, b, fs, s_rms):
    print("Generating noise...")
    # Generate 20 minutes of white noise
    x = np.random.randn(int(fs * 60. * 20.))
    x /= x.max()
    noiseDir = os.path.join(OutDir, 'wav')
    noiseRMSDir = os.path.join(OutDir, 'rms')
    dir_must_exist(noiseDir)
    noiseDir = os.path.join(noiseDir, 'noise')
    dir_must_exist(noiseDir)
    y, y_max = block_lfilter_wav(b, [1.0], x,
                                 os.path.join(noiseDir, 'noise.wav'), 65538,
                                 44100)
    block_process_wav(os.path.join(noiseDir, 'noise.wav'),
                      os.path.join(noiseDir, 'noise_norm.wav'), lambda x: x /
                      (y_max * 0.95))
    y = y / (np.abs(y).max() * 0.95)
    noise_rms_path = os.path.join(noiseRMSDir, 'noise_rms.npy')
    rms = np.sqrt(np.mean(y**2))
    np.save(noise_rms_path, rms)
    return y
Example #8
def generate_audio_stimulus(MatrixDir, OutDir, indexes, socketio=None):
    # Get matrix wav file paths
    wavFiles = globDir(MatrixDir, '*.wav')
    wavFileMatrix = organiseWavs(wavFiles)

    wav_dir = os.path.join(args['OutDir'], "wav")
    dir_must_exist(wav_dir)
    sentence_dir = os.path.join(wav_dir, "sentence-lists")
    dir_must_exist(sentence_dir)
    # Synthesize audio for each trial using generated word choices
    sentence_lists = {}
    for key in indexes.keys():
        files = []
        list_dir = os.path.join(sentence_dir, key)
        dir_must_exist(list_dir)
        with open(os.path.join(list_dir, 'stim_parts.csv'), 'w') as csvfile:
            partwriter = csv.writer(csvfile)
            inds, strings = indexes[key]
            for sentence_ind, (component_inds, component_strings) in enumerate(
                    zip(inds, strings)):
                if socketio:
                    percent = (sentence_ind / len(inds)) * 100.
                    socketio.emit('update-progress',
                                  {'data': '{}%'.format(percent)},
                                  namespace='/main')
                y, wavInfo, partnames = synthesize_trial(
                    wavFileMatrix, component_inds)
                partwriter.writerow(component_strings)
                file_name = os.path.join(
                    list_dir, 'Trial_{0:05d}.wav'.format(sentence_ind + 1))
                sndio.write(file_name, y, **wavInfo)
                files.append(file_name)

            sentence_lists[key] = np.array(files)
    return sentence_lists
Example #9
def main():
    '''
    '''
    fs = 44100
    f = 1000.0
    n = np.arange(fs * 60 * 5)
    y = np.sin(2 * np.pi * f * n / fs)
    coef = np.load('./out/calibration_coefficients/click_cal_coef.npy')
    y *= coef
    dir_must_exist('./out/calibrated_stim/')
    sndio.write("./out/calibrated_stim/1k_tone.wav",
                y,
                fs,
                format='wav',
                enc='pcm16')
    coef = np.load('./out/calibration_coefficients/da_cal_coef.npy')
    y, fs, enc = sndio.read('./out/stimulus/da_cal_stim.wav')
    sndio.write('./out/calibrated_stim/da_cal_stim.wav',
                y * coef,
                fs,
                format='wav',
                enc='pcm16')
    coef = np.load('./out/calibration_coefficients/mat_cal_coef.npy')
    y, fs, enc = sndio.read('./out/stimulus/mat_cal_stim.wav')
    sndio.write('./out/calibrated_stim/mat_cal_stim.wav',
                y * coef,
                fs,
                format='wav',
                enc='pcm16')
    coef = np.load('./out/calibration_coefficients/story_cal_coef.npy')
    y, fs, enc = sndio.read('./out/stimulus/story_cal_stim.wav')
    sndio.write('./out/calibrated_stim/story_cal_stim.wav',
                y * coef,
                fs,
                format='wav',
                enc='pcm16')
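The *_cal_coef.npy files loaded above are produced elsewhere in the project, so the procedure behind them is not visible here. One plausible way such a scalar calibration coefficient could be derived is as the ratio between a target RMS level and the measured RMS of the stimulus; the helper below (calc_cal_coef is a name invented for illustration) sketches that idea only and is not the project's calibration routine:

import numpy as np
from pysndfile import sndio


def calc_cal_coef(wav_path, target_rms):
    # Scale factor that would bring the stimulus RMS to the target level.
    y, fs, enc = sndio.read(wav_path)
    return target_rms / np.sqrt(np.mean(y ** 2))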
Example #10
def gen_rms(files, OutDir):
    rmsFiles = []
    OutPeakDir = './stimulus/peak'
    for sentenceList in files:
        for file in sentenceList:
            head, tail = os.path.split(file)
            tail = os.path.splitext(tail)[0]
            tail_rms = tail + "_rms.npy"
            dir_must_exist(OutDir)
            rmsFilepath = os.path.join(OutDir, tail_rms)
            print("Generating: " + rmsFilepath)
            y, fs, _ = sndio.read(file)
            y_rms = calc_rms(y, round(0.02 * fs))
            np.save(rmsFilepath, y_rms)
            rmsFiles.append(rmsFilepath)

            tail_peak = tail + "_peak.npy"
            dir_must_exist(OutPeakDir)
            peakFilepath = os.path.join(OutPeakDir, tail_peak)
            print("Generating: " + peakFilepath)
            peak = np.abs(y).max()
            np.save(peakFilepath, peak)
    return rmsFiles
Example #11
def main():
    '''
    '''
    da_files = [
        "../tone_stim/stimulus/tone_2000/tone_3000_2000Hz.wav",
        "../tone_stim/stimulus/tone_500/tone_3000_500Hz.wav"
    ]
    story_dir = "../eeg_story_stim/stimulus"
    mat_dir = "../matrix_test/speech_components"
    noise_file = "../matrix_test/behavioural_stim/stimulus/wav/noise/noise_norm.wav"
    da_noise_file = "../da_stim/noise/wav/noise/noise_norm.wav"

    story_wavs = globDir(story_dir, '*.wav')
    mat_wavs = globDir(mat_dir, '*.wav')

    out_dir = "./out"
    out_red_dir = os.path.join(out_dir, 'reduction_coefficients')
    out_stim_dir = os.path.join(out_dir, 'stimulus')
    dir_must_exist(out_dir)
    dir_must_exist(out_red_dir)
    dir_must_exist(out_stim_dir)

    story_coef = calc_potential_max(story_wavs, noise_file, out_red_dir,
                                    "story_red_coef")
    mat_coef = calc_potential_max(mat_wavs, noise_file, out_red_dir,
                                  "mat_red_coef")
    da_coef = calc_potential_max(da_files, da_noise_file, out_red_dir,
                                 "da_red_coef")

    mat_cal_stim = "../matrix_test/long_concat_stim/out/stim/stim_0.wav"
    da_cal_stim = "./out/stimulus/1k_tone.wav"
    # click_cal_stim = "../tone_stim/stimulus/tone_2000/tone_3000_2000Hz.wav"
    story_cal_stim = "../eeg_story_stim/stimulus/odin_1_1.wav"

    mat_out_stim = os.path.join(out_stim_dir, "mat_cal_stim.wav")
    # click_out_stim = os.path.join(out_stim_dir, "click_cal_stim.wav")
    da_out_stim = os.path.join(out_stim_dir, "1k_cal_stim.wav")
    story_out_stim = os.path.join(out_stim_dir, "story_cal_stim.wav")

    block_process_wav(mat_cal_stim, mat_out_stim, lambda x: x * mat_coef)
    block_process_wav(story_cal_stim, story_out_stim, lambda x: x * story_coef)
    block_process_wav(da_cal_stim, da_out_stim, lambda x: x * da_coef)
Example #12
def main():
    '''
    '''
    dir_must_exist('./stimulus')
    gen_da_stim(1500, './stimulus/3000_da.wav')
Example #13
    def loadStimulus(self):
        '''
        '''
        self.participant.load('mat_test')
        try:
            srt_50 = self.participant.data['mat_test']['srt_50']
            s_50 = self.participant.data['mat_test']['s_50']
        except KeyError:
            raise KeyError("Behavioural matrix test results not available, make "
                           "sure the behavioural test has been run before "
                           "running this test.")

        #reduction_coef = float(np.load(os.path.join(self.listDir, 'reduction_coef.npy')))

        # Calculate SNRs based on behavioural measures
        s_50 *= 0.01
        shuffle(self.si)
        x = logit(self.si * 0.01)
        snrs = (x/(4*s_50))+srt_50
        self.snr_fs = 10**(-snrs/20)
        self.snr_fs = np.append(self.snr_fs, np.inf)
        self.si = np.append(self.si, np.inf)
        snrs = np.append(snrs, np.inf)
        self.snr_fs[self.snr_fs == np.inf] = 0.
        if (self.snr_fs == -np.inf).any():
            raise ValueError("Noise infinitely louder than signal for an SNR (SNRs: {})".format(self.snr_fs))

        wavs = globDir(self.stim_folder, "*.wav")
        questions = globDir(self.stim_folder, "stim_questions_*.csv")
        if not len(questions):
            raise FileNotFoundError("No question files found in {}".format(self.stim_dir))
        rms_files = globDir(self.stim_folder, "stim_*_rms.npy")
        if not len(rms_files):
            raise FileNotFoundError("No rms files found in {}".format(self.stim_dir))

        self.socketio.emit('test_stim_load', namespace='/main')
        # Add noise to audio files at set SNRs and write to participant
        # directory
        self.data_path = self.participant.data_paths[self.test_name]
        out_dir = os.path.join(self.data_path, "stimulus")
        out_info = os.path.join(out_dir, "stim_info.csv")
        dir_must_exist(out_dir)

        with open(out_info, 'w') as csvfile:
            writer = csv.writer(csvfile)
            writer.writerow(['wav', 'snr_fs', 'rms', 'si', 'snr'])
            for wav, snr_fs, rms, si, snr in zip(wavs, self.snr_fs, rms_files, self.si, snrs):
                out_wavpath = os.path.join(out_dir, os.path.basename(wav))
                stim_rms = np.load(rms)
                match_ratio = stim_rms/self.noise_rms
                block_mix_wavs(wav, self.noise_path, out_wavpath, 1.*self.reduction_coef, snr_fs*match_ratio*self.reduction_coef)
                self.stim_paths.append(out_wavpath)
                writer.writerow([wav, snr_fs, rms, si, snr])
                # TODO: Output SI/snrs of each file to a CSV file


        for q_file_path in questions:
            q = []
            with open(q_file_path, 'r') as q_file:
                q_reader = csv.reader(q_file)
                for line in q_reader:
                    q.append(line)
            self.question.append(q)
        self.answers = np.empty(np.shape(self.question)[:2])
        self.answers[:] = np.nan
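The SNR handling above compresses two steps: the psychometric function from the behavioural matrix test is inverted with a logit to turn a target intelligibility into an SNR in dB, and that SNR is then converted to the linear full-scale factor applied to the noise during mixing. A small standalone illustration of the same arithmetic, with made-up example values for srt_50 and s_50:

import numpy as np
from scipy.special import logit

srt_50 = -7.0                      # example 50% speech reception threshold, dB SNR
s_50 = 0.15                        # example psychometric slope at 50%, per dB
si = np.array([20., 50., 80.])     # target speech intelligibility, percent

snr_db = logit(si * 0.01) / (4 * s_50) + srt_50
snr_fs = 10 ** (-snr_db / 20)      # linear gain applied to the noise channel
print(snr_db)
print(snr_fs)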
Example #14
                        type=PathType(exists=None, type='dir'),
                        default='./stimulus',
                        help='Output directory')
    parser.add_argument('--CalcRMS', action='store_true')
    args = {
        k: v
        for k, v in vars(parser.parse_args()).items() if v is not None
    }

    rmsDir = os.path.join(args['OutDir'], "rms")
    if args['CalcRMS']:
        indexes = gen_indexes()
        wavFiles = gen_audio_stim(args['MatrixDir'], args['OutDir'], indexes)
        rmsFiles = gen_rms(wavFiles, rmsDir)
    else:
        wavDir = os.path.join(args['OutDir'], "wav")
        dir_must_exist(wavDir)
        wavFiles = globDir(wavDir, '*.wav')
        wf = []
        for listInd in range(50):
            wf.append([])
            for sentenceInd in range(10):
                wf[listInd].append(wavFiles[listInd * 10 + sentenceInd])
        wavFiles = wf

        rmsFiles = globDir(rmsDir, 'Trial*.npy')
    silences = detect_silences(rmsFiles, 44100)
    s_rms = calc_speech_rms(wavFiles, silences, rmsDir)
    b = calc_spectrum(wavFiles, silences)
    y = gen_noise(args['OutDir'], b, 44100)
Example #15
def main():
    stim_dir = "../behavioural_stim/stimulus"
    wav_dir = "../behavioural_stim/stimulus/wav"
    base_dir = "../behavioural_stim/stimulus/wav/sentence-lists/"
    noise_dir = "../behavioural_stim/stimulus/wav/noise/"
    out_dir = "./out"
    dir_must_exist(base_dir)
    dir_must_exist(out_dir)
    dir_must_exist(wav_dir)
    dir_must_exist(noise_dir)

    noise_filepath = "../behavioural_stim/stimulus/wav/noise/noise_norm.wav"

    folders = os.listdir(base_dir)
    folders = natsorted(folders)[1:15]
    folders = list(zip(folders[::2], folders[1::2]))
    calc_potential_max(base_dir, noise_filepath, out_dir)
    n_questions = 4
    fs = 44100

    for ind, (list_folder_1, list_folder_2) in enumerate(folders):
        out_folder_name = 'Stim_{}'.format(ind)
        out_folder = os.path.join(out_dir, out_folder_name)
        delete_if_exists(out_folder)
        dir_must_exist(out_folder)
        out_wav_path = os.path.join(out_folder, "stim.wav")
        out_csv_path = os.path.join(out_folder, "markers.csv")
        out_rms_path = os.path.join(out_folder, "rms.npy")
        out_q_path = [
            os.path.join(out_folder, "questions_{}.csv".format(x))
            for x in range(n_questions)
        ]
        out_wav = PySndfile(out_wav_path, 'w',
                            construct_format('wav', 'pcm16'), 3, 44100)
        list_1_wav = globDir(os.path.join(base_dir, list_folder_1), '*.wav')
        list_2_wav = globDir(os.path.join(base_dir, list_folder_2), '*.wav')
        list_1_csv = globDir(os.path.join(base_dir, list_folder_1), '*.csv')
        list_2_csv = globDir(os.path.join(base_dir, list_folder_2), '*.csv')
        merged_wavs = list_1_wav + list_2_wav
        merged_csvs = list_1_csv + list_2_csv
        words = []
        for c in merged_csvs:
            with open(c, 'r') as csvfile:
                for line in csv.reader(csvfile):
                    words.append(line)
        c = list(zip(merged_wavs, words))
        shuffle(c)
        merged_wavs, words = zip(*c)
        sum_sqrd = 0.
        n = 0
        with open(out_csv_path, 'w') as csvfile, ExitStack() as stack:
            # Open all question files
            qfiles = [
                stack.enter_context(open(qfile, 'w')) for qfile in out_q_path
            ]
            writer = csv.writer(csvfile)
            qwriters = [csv.writer(qfile) for qfile in qfiles]

            counter = 0
            stim_count = len(merged_wavs)
            stim_count_half = stim_count // 2
            q_inds = np.array([
                sample(range(0, stim_count_half), n_questions),
                sample(range(stim_count_half, stim_count - 1), n_questions)
            ]).T
            a = 0
            silence = np.zeros((88200, 3))
            idx = np.arange(0, silence.shape[0])
            trigger = gen_trigger(idx, 2., 0.01, fs)
            silence[:, 2] = trigger
            out_wav.write_frames(silence)
            for ind, (wav, txt) in enumerate(zip(merged_wavs, words)):
                csv_line = [counter]
                silence = np.zeros((int(
                    np.random.uniform(int(0.3 * 44100), int(0.4 * 44100),
                                      1)), 3))
                idx = np.arange(counter, counter + silence.shape[0])
                trigger = gen_trigger(idx, 2., 0.01, fs)
                silence[:, 2] = trigger
                out_wav.write_frames(silence)
                counter += silence.shape[0]
                csv_line.append(counter)
                csv_line.append("#")
                writer.writerow(csv_line)
                csv_line = [counter]
                x, fs, enc = sndio.read(wav)
                sum_sqrd += np.sum(x**2)
                n += x.size

                y = np.vstack([x, x, np.zeros(x.size)]).T
                idx = np.arange(counter, counter + y.shape[0])
                trigger = gen_trigger(idx, 2., 0.01, fs)
                y[:, 2] = trigger
                out_wav.write_frames(y)
                counter += y.shape[0]
                csv_line.append(counter)
                csv_line.append(" ".join(txt))
                writer.writerow(csv_line)
                if ind in q_inds:
                    writer_ind = int(np.where(ind == q_inds)[0])
                    blank_ind = randint(0, len(txt) - 1)
                    q_list = copy(txt)
                    q_list[blank_ind] = '_'
                    qwriters[writer_ind].writerow(
                        [" ".join(q_list), txt[blank_ind]])
                    a += 1
            if a != 8:
                pdb.set_trace()

            csv_line = [counter]
            silence = np.zeros(
                (int(np.random.uniform(int(0.3 * 44100), int(0.4 * 44100),
                                       1)), 3))
            idx = np.arange(counter, counter + silence.shape[0])
            trigger = gen_trigger(idx, 2., 0.01, fs)
            silence[:, 2] = trigger
            out_wav.write_frames(silence)
            counter += silence.shape[0]
            csv_line.append(counter)
            csv_line.append("#")
            writer.writerow(csv_line)
            rms = np.sqrt(sum_sqrd / n)
            np.save(out_rms_path, rms)

            x, fs, enc = sndio.read(out_wav_path)
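gen_trigger fills the third output channel with a synchronisation trigger and is called with absolute sample indices (np.arange(counter, ...)) so the trigger stays phase-continuous across the blocks written to the file. Its implementation is not shown on this page; a plausible sketch, assuming the arguments are pulse rate in Hz, pulse width in seconds and the sample rate:

import numpy as np


def gen_trigger(idx, freq, width, fs):
    # Assumed behaviour: a rectangular pulse train that is high for `width`
    # seconds at the start of every 1/freq-second period, evaluated at the
    # absolute sample indexes in `idx`.
    period = fs / freq
    return ((np.asarray(idx) % period) < width * fs).astype(float)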
Example #16
                        type=PathType(exists=True, type='dir'),
                        default='../speech_components',
                        help='Matrix test speech data location')
    parser.add_argument('--OutDir',
                        type=PathType(exists=None, type='dir'),
                        default='./out',
                        help='Output directory')
    parser.add_argument('--Length',
                        type=int,
                        default=3600,
                        help='Concatenated length of trials in seconds')
    args = {
        k: v
        for k, v in vars(parser.parse_args()).items() if v is not None
    }
    dir_must_exist(args['OutDir'])

    # Check directory for storing generated noise exists
    #noiseDir = os.path.join(args['OutDir'], 'noise')
    #dir_must_exist(noiseDir)
    #decoderDir = os.path.join(args['OutDir'], 'decoder')
    #dir_must_exist(noiseDir)
    ##generateDecoderAudio(args['OutDir'], noiseDir, decoderDir)
    #pdb.set_trace()
    #if os.path.exists(args['OutDir']):
    #    shutil.rmtree(args['OutDir'])
    #    os.makedirs(args['OutDir'])

    # Generate output directory if it doesn't exist
    outDir = args.pop("OutDir")
    prepareOutDir(outDir)
Example #17
    def loadStimulus(self):
        '''
        '''
        self.participant.load('mat_test')
        try:
            srt_50 = self.participant.data['mat_test']['srt_50']
            s_50 = self.participant.data['mat_test']['s_50']
        except KeyError:
            raise KeyError(
                "Behavioural matrix test results not available, make "
                "sure the behavioural test has been run before "
                "running this test.")

        #reduction_coef = float(np.load(os.path.join(self.listDir, 'reduction_coef.npy')))

        # Calculate SNRs based on behavioural measures
        '''
        s_50 *= 0.01
        shuffle(self.si)
        x = logit(self.si * 0.01)
        snrs = (x/(4*s_50))+srt_50
        snrs = np.append(snrs, np.inf)
        snrs = np.append(snrs, 10.0)
        self.si = np.append(self.si, np.inf)
        self.si = np.append(self.si, 10.0)

        self.snr_fs = 10**(-snrs/20)
        self.snr_fs[self.snr_fs == np.inf] = 0.
        if (self.snr_fs == -np.inf).any():
            raise ValueError("Noise infinitely louder than signal for an SNR (SNRs: {})".format(self.snr_fs))
        '''

        snrs = np.squeeze(self.participant.data['parameters']['tone_SNRs'])
        snrs[~np.isinf(snrs)] += srt_50
        self.snr_fs = 10**(-snrs / 20)
        self.snr_fs[self.snr_fs == np.inf] = 0.
        if (self.snr_fs == -np.inf).any():
            raise ValueError(
                "Noise infinitely louder than signal for an SNR (SNRs: {})".
                format(self.snr_fs))

        self.data_path = self.participant.data_paths[self.test_name]
        out_dir = os.path.join(self.data_path, "stimulus")
        delete_if_exists(out_dir)
        out_info = os.path.join(out_dir, "stim_info.csv")
        dir_must_exist(out_dir)

        stim_dirs = [
            x for x in os.listdir(self.stim_folder)
            if os.path.isdir(os.path.join(self.stim_folder, x))
        ]

        ordered_stim_dirs = []
        for freq in self.participant_parameters['tone_freqs']:
            for folder in stim_dirs:
                if re.match(f'tone_({int(freq)})', folder):
                    ordered_stim_dirs.append(folder)
        ordered_stim_dirs *= int(len(snrs))

        for ind, dir_name in enumerate(ordered_stim_dirs):
            stim_dir = os.path.join(self.stim_folder, dir_name)
            wavs = globDir(stim_dir, "*.wav")
            intensity_files = globDir(stim_dir, "*intensity.npy")

            self.socketio.emit('test_stim_load', namespace='/main')
            # Add noise to audio files at set SNRs and write to participant
            # directory

            with open(out_info, 'w') as csvfile:
                writer = csv.writer(csvfile)
                writer.writerow(['wav', 'snr_fs', 'intensity', 'snr'])
                for wav, snr_fs, intensity, snr in zip(wavs, self.snr_fs,
                                                       intensity_files, snrs):
                    fp = os.path.splitext(
                        os.path.basename(wav))[0] + "_{}.wav".format(snr)
                    out_wavpath = os.path.join(out_dir, fp)
                    stim_intensity = np.load(intensity)
                    match_ratio = stim_intensity / self.noise_intensity
                    block_mix_wavs(wav,
                                   self.noise_path,
                                   out_wavpath,
                                   self.reduction_coef,
                                   snr_fs * match_ratio * self.reduction_coef,
                                   mute_left=False)
                    self.stim_paths.extend([out_wavpath])
                    writer.writerow([wav, snr_fs, intensity, snr])
Example #18
    # Create commandline interface
    parser = argparse.ArgumentParser(description='Generate stimulus for '
                                     'training TRF decoder by concatenating '
                                     'matrix test materials')
    parser.add_argument('--OutDir',
                        type=PathType(exists=None, type='dir'),
                        default='./noise',
                        help='Output directory')
    parser.add_argument('--CalcRMS', action='store_true')
    args = {
        k: v
        for k, v in vars(parser.parse_args()).items() if v is not None
    }

    rmsDir = os.path.join(args['OutDir'], "rms")
    dir_must_exist(rmsDir)
    peakDir = os.path.join(args['OutDir'], "peak")
    dir_must_exist(peakDir)
    wavDir = os.path.join(args['OutDir'], "wav")
    dir_must_exist(wavDir)
    if args['CalcRMS']:
        daFile = globDir('./noise_source', 'male_speech_resamp.wav')[0]
        rmsFiles, peakFiles = gen_rms_peak([daFile], rmsDir, peakDir)
        rmsFile = rmsFiles[0]
        peakFile = peakFiles[0]
    else:
        daFile = globDir('./noise_source', 'male_speech_resamp.wav')[0]
        rmsFile = globDir(rmsDir, '*.npy')[0]
        peakFile = globDir(peakDir, '*.npy')[0]
    #silences = detect_silences([rmsFile], 44100, None)
    s_rms = calc_speech_rms(['./stimulus/3000_da.wav'], [[]], rmsDir)
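calc_speech_rms is not defined on this page either. Given how it is called (a list of wav files, per-file silence intervals and an output directory), a minimal sketch is a single RMS computed over the non-silent samples of all files; the (start, end) sample-index format of the silence intervals and the output filename are assumptions made for illustration:

import os
import numpy as np
from pysndfile import sndio


def calc_speech_rms(wav_files, silences, out_dir):
    # Accumulate the sum of squares over every sample that is not inside a
    # marked silence interval, then take one global RMS across all files.
    sum_sqrd, n = 0.0, 0
    for wav, file_silences in zip(wav_files, silences):
        y, fs, enc = sndio.read(wav)
        mask = np.ones(y.shape[0], dtype=bool)
        for start, end in file_silences:
            mask[int(start):int(end)] = False
        sum_sqrd += np.sum(y[mask] ** 2)
        n += int(mask.sum())
    rms = np.sqrt(sum_sqrd / n)
    np.save(os.path.join(out_dir, 'speech_rms.npy'), rms)
    return rms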