Example #1
File: gen_noise.py Project: Pezz89/BPLabs
def gen_audio_stim(MatrixDir, OutDir, indexes):
    if os.path.exists(OutDir):
        shutil.rmtree(OutDir)
    os.makedirs(OutDir)
    wavFiles = globDir(MatrixDir, '*.wav')
    wavFileMatrix = organiseWavs(wavFiles)
    wavDir = os.path.join(OutDir, "wav")
    dir_must_exist(wavDir)
    wavDir = os.path.join(wavDir, "noise-sentences")
    dir_must_exist(wavDir)
    files = []
    n = 0
    o = 0
    for sentenceList in indexes:
        n += 1
        o = 0
        files.append([])
        for ind in sentenceList:
            o += 1
            y, wavInfo, partnames = synthesize_trial(wavFileMatrix, ind)
            fileName = os.path.join(wavDir,
                                    'Trial_{0:02d}_{1:02d}.wav'.format(n, o))
            print("Generating: " + fileName)
            sndio.write(fileName, y, **wavInfo)
            files[-1].append(fileName)

    return files
Example #2
def generate_audio_stimulus(MatrixDir, OutDir, indexes, socketio=None):
    # Get matrix wav file paths
    wavFiles = globDir(MatrixDir, '*.wav')
    wavFileMatrix = organiseWavs(wavFiles)

    wav_dir = os.path.join(OutDir, "wav")
    dir_must_exist(wav_dir)
    sentence_dir = os.path.join(wav_dir, "sentence-lists")
    dir_must_exist(sentence_dir)
    # Synthesize audio for each trial using generated word choices
    sentence_lists = {}
    for key in indexes.keys():
        files = []
        list_dir = os.path.join(sentence_dir, key)
        dir_must_exist(list_dir)
        with open(os.path.join(list_dir, 'stim_parts.csv'), 'w') as csvfile:
            partwriter = csv.writer(csvfile)
            inds, strings = indexes[key]
            for sentence_ind, (component_inds, component_strings) in enumerate(
                    zip(inds, strings)):
                if socketio:
                    # Progress as the percentage of sentences synthesized so far
                    percent = (sentence_ind / len(inds)) * 100.
                    socketio.emit('update-progress',
                                  {'data': '{}%'.format(percent)},
                                  namespace='/main')
                y, wavInfo, partnames = synthesize_trial(
                    wavFileMatrix, component_inds)
                partwriter.writerow(component_strings)
                file_name = os.path.join(
                    list_dir, 'Trial_{0:05d}.wav'.format(sentence_ind + 1))
                sndio.write(file_name, y, **wavInfo)
                files.append(file_name)

            sentence_lists[key] = np.array(files)
    return sentence_lists
Example #3
def main():
    '''
    Generate a 5 minute, 1 kHz stereo sine tone and write it as 16-bit PCM wav
    '''
    fs = 44100
    f = 1000.0
    n = np.arange(fs * 60 * 5)
    y = np.sin(2*np.pi*f*n/fs)
    y = np.array([y, y]).T
    sndio.write("./out/stimulus/1k_tone.wav", y, fs, format='wav', enc='pcm16')
Example #4
def main():
    wavs = globDir('./', 'stim.wav')
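    # Back up each stim.wav as *_old and rewrite it keeping only the first two channels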
    for wav in wavs:
        x, fs, enc, fmt = sndio.read(wav, return_format=True)
        y = x[:, :2]
        head, tail = os.path.splitext(wav)
        out_filepath = "{0}_old{1}".format(head, tail)
        os.rename(wav, out_filepath)
        sndio.write(wav, y, rate=fs, format=fmt, enc=enc)
Example #5
def write_wav_skip_existing(path, y, sr):
    if not os.path.exists(path):
        if not os.path.exists(os.path.dirname(path)):
            os.makedirs(os.path.dirname(path), exist_ok=True)
        sndio.write(path, data=y, rate=sr, format="wav", enc='pcm16')
        #soundfile.write(path, y, sr, "PCM_16")
    else:
        print("WARNING: Tried writing audio to " + path +
              ", but audio file exists already. Skipping file!")
    return Sample.from_array(path, y, sr)
Example #6
def main():
    '''
    Append a trigger channel to each wav file in ./stimulus, writing the
    result as *_trig.wav
    '''
    wavs = globDir("./stimulus", "*.wav")
    for wav in wavs:
        x, fs, enc, fmt = sndio.read(wav, return_format=True)
        idx = np.arange(x.shape[0])
        # Duplicate the signal on two channels and add an empty third channel
        # to hold the trigger
        y = np.vstack([x, x, np.zeros(x.shape[0])]).T
        trigger = gen_trigger(idx, 2., 0.01, fs)
        y[:, 2] = trigger
        wav_out = os.path.splitext(wav)[0] + "_trig.wav"
        sndio.write(wav_out, y, rate=fs, format=fmt, enc=enc)
Example #7
File: test_1.py Project: Pezz89/BPLabs
def main():
    '''
    Generate train of equally spaced clicks
    '''
    fs = 44100
    n = np.arange(fs * 10)
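    # Place a 10 ms click (a block of ones) every half second (fs / 2 samples)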
    trig_s = np.where(np.mod(n, fs / 2.) == 0)
    click = np.ones(int(0.01 * fs))
    y = np.zeros(n.size)
    for i in trig_s[0]:
        y[i:i + click.size] = click

    sndio.write("./trig_test.wav", y, fs, format='wav', enc='pcm16')

Example #8
File: gen_click.py Project: Pezz89/BPLabs
def main():
    '''
    Generate a train of 3000 alternating-polarity clicks at 20 Hz, padded with
    one second of silence at each end, plus a trigger channel
    '''
    freq = 20.0
    fs = 44100
    period = fs / freq
    length = period * 3000.
    y = (np.arange(length) % period == 0).astype(float)
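    # Invert every second click so successive clicks alternate in polarity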
    y[np.where(y == 1.0)[0][1::2]] = -1.0
    y = np.concatenate([np.zeros(fs), y, np.zeros(fs)])

    idx = np.arange(y.size)
    trigger = gen_trigger(idx, 2., 0.01, fs)
    y = np.vstack((y, y, trigger)).T

    print("Number of clicks generated: {}".format(
        np.sum(np.abs(y[:, 0]) == 1.0)))
    sndio.write('./click_3000_20Hz.wav', y, rate=fs, format='wav', enc='pcm16')
Example #9
    def _save_estimates(self,
                        user_estimates,
                        track,
                        estimates_dir,
                        write_stems=False):
        track_estimate_dir = op.join(estimates_dir, track.subset, track.name)
        if not os.path.exists(track_estimate_dir):
            os.makedirs(track_estimate_dir)

        # write out tracks to disk
        for target, estimate in list(user_estimates.items()):
            target_path = op.join(track_estimate_dir, target + '.wav')
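            # Write with sndio; fall back to soundfile (sf) if sndio is not
            # defined in this environment (NameError)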
            try:
                sndio.write(target_path,
                            estimate,
                            track.rate,
                            format="wav",
                            enc="pcm16")
            except NameError:
                sf.write(target_path, estimate, track.rate)
Example #10
def flattenRMS(AudioFile, AnnotationFile):
    with open(AnnotationFile, 'r') as f:
        csvData = pd.read_csv(f)
    data, fs, encStr, fmtStr = sndio.read(AudioFile, return_format=True)
    csvData['start'] *= fs
    csvData['start'] = csvData['start'].astype(int)
    csvData['stop'] *= fs
    csvData['stop'] = csvData['stop'].astype(int)

    zerox = np.where(np.diff(np.sign(data)))[0]
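    # Segment boundaries are later snapped to the nearest zero crossing to
    # avoid discontinuities at the edit points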
    # get silent sections
    silences = csvData.loc[csvData['name'] == '#']
    audio = csvData.loc[csvData['name'] != '#']

    # Find nearest zero-crossing to start and stop times of silences
    nearestZerox = zerox[np.abs(zerox - csvData['start'][:, np.newaxis]).argmin(axis=1)]
    csvData['start'] = nearestZerox
    nearestZerox = zerox[np.abs(zerox - csvData['stop'][:, np.newaxis]).argmin(axis=1)]
    csvData['stop'] = nearestZerox

    csvData['rms'] = np.nan
    for ind, chunk in csvData.iterrows():
        if not chunk['name'] == '#':
            rms = np.sqrt(np.mean(data[chunk['start']:chunk['stop']]**2))
            csvData.iloc[ind, csvData.columns.get_loc('rms')] = rms
    avgRMS = csvData['rms'][csvData['rms'].notnull()].mean()

    silentData = np.zeros(int(0.3*fs))
    out = np.array([])
    for ind, chunk in csvData.iterrows():
        if chunk['name'] == '#':
            out = np.append(out, silentData)
        else:
            rmsCorFactor = avgRMS / chunk['rms']

            out = np.append(out, data[chunk['start']:chunk['stop']])#*rmsCorFactor)
            print(np.sqrt(np.mean((data[chunk['start']:chunk['stop']]*rmsCorFactor)**2)))

    sndio.write('./out.wav', out, rate=fs, format=fmtStr, enc=encStr)
Example #11
File: gen_da.py Project: Pezz89/BPLabs
def gen_da_stim(n, outpath):
    da_file = './BioMAP_da-40ms.wav'
    da_stim, fs, enc, fmt = sndio.read(da_file, return_format=True)
    prestim_size = 0.0158
    # Repetition rate in Hz
    repetition_rate = 10.9
    full_stim_size = 1. / repetition_rate
    da_size = da_stim.size / fs
    prestim = np.zeros(int(fs * prestim_size))
    poststim = np.zeros(int(fs * ((full_stim_size - prestim_size) - da_size)))
    y_part = np.concatenate([prestim, da_stim, poststim])
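    # Each /da/ presentation is paired with a polarity-inverted copy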
    y_part_inv = -y_part
    loc_part = np.zeros(y_part.size)
    loc_part[prestim.size + 1] = 1

    y_2part = np.concatenate([y_part, y_part_inv])
    loc = np.concatenate([loc_part, loc_part])
    y_r = np.tile(y_2part, n)
    loc = np.tile(loc, n)
    loc = np.insert(loc, 0, np.zeros(fs))
    loc = np.where(loc)[0]

    y_r = np.insert(y_r, 0, np.zeros(fs))
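    # Resample to 44.1 kHz and scale the stored onset sample locations by the same ratio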
    y_r = resampy.resample(y_r, fs, 44100)
    rat = 44100 / fs
    fs = 44100
    y_l = np.zeros(y_r.size)
    loc = loc * rat
    loc = loc.round().astype(int)
    np.save('./stimulus/3000_da_locs.npy', loc)

    idx = np.arange(y_l.size)
    trigger = gen_trigger(idx, 2., 0.01, fs)

    y = np.vstack((y_l, y_r, trigger)).T
    sndio.write(outpath, y, rate=44100, format=fmt, enc=enc)
    return outpath
Example #12
def main():
    '''
    Apply the saved calibration coefficients to each stimulus and write the
    calibrated versions to ./out/calibrated_stim/
    '''
    fs = 44100
    f = 1000.0
    n = np.arange(fs * 60 * 5)
    y = np.sin(2 * np.pi * f * n / fs)
    coef = np.load('./out/calibration_coefficients/click_cal_coef.npy')
    y *= coef
    dir_must_exist('./out/calibrated_stim/')
    sndio.write("./out/calibrated_stim/1k_tone.wav",
                y,
                fs,
                format='wav',
                enc='pcm16')
    coef = np.load('./out/calibration_coefficients/da_cal_coef.npy')
    y, fs, enc = sndio.read('./out/stimulus/da_cal_stim.wav')
    sndio.write('./out/calibrated_stim/da_cal_stim.wav',
                y * coef,
                fs,
                format='wav',
                enc='pcm16')
    coef = np.load('./out/calibration_coefficients/mat_cal_coef.npy')
    y, fs, enc = sndio.read('./out/stimulus/mat_cal_stim.wav')
    sndio.write('./out/calibrated_stim/mat_cal_stim.wav',
                y * coef,
                fs,
                format='wav',
                enc='pcm16')
    coef = np.load('./out/calibration_coefficients/story_cal_coef.npy')
    y, fs, enc = sndio.read('./out/stimulus/story_cal_stim.wav')
    sndio.write('./out/calibrated_stim/story_cal_stim.wav',
                y * coef,
                fs,
                format='wav',
                enc='pcm16')
Example #13
# Create directory for saving the pre-processed data if it does not exist
if not os.path.exists(args.WAV_path):
    os.makedirs(args.WAV_path)
    print("Created " + args.WAV_path + " directory.")

# Create directory for saving the pre-processed data if it does not exist
if not os.path.exists(args.frames_path):
    os.makedirs(args.frames_path)
    print("Created " + args.frames_path + " directory.")

for filename in tqdm(os.listdir(args.WAV_path)):
    samples, sample_freq = load_audio(args.WAV_path + filename)
    resample_freq = sample_freq
    samples = samples.flatten()
    if args.resample_freq is not None:
        secs = len(samples) / sample_freq  # Calculate seconds of wav audio
        sample_count = secs * float(
            args.resample_freq
        )  # Calculate samples needed in the resampled signal with the desired frequency
        samples = resample(samples, int(sample_count))  # Resample
        sample_freq = args.resample_freq
        resample_freq = int(args.resample_freq)
    seq_length = args.win_length * sample_freq
    frames = enframe2(samples, args.win_length, args.win_overlap)
    # Save frames to wav files
    for idx, frame in enumerate(frames):
        write(args.frames_path + "_" + str(idx) + "_" + filename,
              frame,
              rate=resample_freq)
Example #14
    def loadStimulus(self):
        '''
        '''
        self.participant.load('mat_test')
        try:
            srt_50 = self.participant.data['mat_test']['srt_50']
            s_50 = self.participant.data['mat_test']['s_50']
        except KeyError:
            raise KeyError(
                "Behavioural matrix test results not available, make "
                "sure the behavioural test has been run before "
                "running this test.")
        save_dir = self.participant.data_paths['eeg_test/stimulus']
        '''
        # Estimate speech intelligibility thresholds using predicted
        # psychometric function
        s_50 *= 0.01
        x = logit(self.si * 0.01)
        snrs = (x/(4*s_50))+srt_50
        snrs = np.append(snrs, np.inf)
        snr_map = pd.DataFrame({"speech_intel" : np.append(self.si, 0.0), "snr": snrs})
        snr_map_path = os.path.join(save_dir, "snr_map.csv")
        snr_map.to_csv(snr_map_path)
        snrs = np.repeat(snrs[np.newaxis], 4, axis=0)
        snrs = roll_independant(snrs, np.array([0,-1,-2,-3]))
        stim_dirs = [x for x in os.listdir(self.listDir) if os.path.isdir(os.path.join(self.listDir, x))]
        shuffle(stim_dirs)
        '''
        snrs = self.participant.data['parameters']['decoder_test_SNRs'] + srt_50
        stim_dirs = [
            x for x in os.listdir(self.listDir)
            if os.path.isdir(os.path.join(self.listDir, x))
        ]

        ordered_stim_dirs = []
        for ind in self.participant_parameters['decoder_test_lists']:
            for folder in stim_dirs:
                if re.match(f'Stim_({int(ind)})', folder):
                    ordered_stim_dirs.append(folder)

        # ordered_stim_dirs *= int(len(snrs))
        noise_file = PySndfile(self.noise_path, 'r')
        wav_files = []
        wav_metas = []
        question = []
        marker_files = []
        self.socketio.emit('test_stim_load', namespace='/main')
        for ind, dir_name in enumerate(ordered_stim_dirs[:snrs.shape[1]]):
            logger.debug(
                f"Processing list directory {ind+1} of {snrs.shape[1]}")
            stim_dir = os.path.join(self.listDir, dir_name)
            wav = globDir(stim_dir, "*.wav")[0]
            csv_files = natsorted(globDir(stim_dir, "*.csv"))
            marker_file = csv_files[0]
            question_files = csv_files[1:]
            # rms_file = globDir(stim_dir, "*.npy")[0]
            # speech_rms = float(np.load(rms_file))
            snr = snrs[:, ind]
            audio, fs, enc, fmt = sndio.read(wav, return_format=True)

            speech = audio[:, :2]
            triggers = audio[:, 2]
            #speech_rms, _, _ = asl_P56(speech, fs, 16.)
            speech_rms = rms_no_silences(speech, fs, -30.)

            wf = []
            wm = []
            for ind2, s in enumerate(snr):
                start = randint(0, noise_file.frames() - speech.shape[0])
                noise_file.seek(start)
                noise = noise_file.read_frames(speech.shape[0])
                noise_rms = np.sqrt(np.mean(noise**2))
                # noise_rms = asl_P56(noise, fs, 16)
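                # Convert the SNR in dB to a linear gain applied to the noise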
                snr_fs = 10**(-s / 20)
                if snr_fs == np.inf:
                    snr_fs = 0.
                elif snr_fs == -np.inf:
                    raise ValueError(
                        "Noise infinitely louder than signal at snr: {}".
                        format(snr))
                noise = noise * (speech_rms / noise_rms)
                out_wav_path = os.path.join(
                    save_dir, "Stim_{0}_{1}.wav".format(ind, ind2))
                out_meta_path = os.path.join(
                    save_dir, "Stim_{0}_{1}.npy".format(ind, ind2))
                with np.errstate(divide='raise'):
                    try:
                        out_wav = (speech + (np.stack([noise, noise], axis=1) *
                                             snr_fs)) * self.reduction_coef
                    except:
                        set_trace()
                out_wav = np.concatenate([out_wav, triggers[:, np.newaxis]],
                                         axis=1)
                sndio.write(out_wav_path, out_wav, fs, fmt, enc)
                np.save(out_meta_path, s)
                wf.append(out_wav_path)
                wm.append(out_meta_path)
            wav_metas.append(wm)
            wav_files.append(wf)
            out_marker_path = os.path.join(save_dir,
                                           "Marker_{0}.csv".format(ind))
            marker_files.append(out_marker_path)
            copyfile(marker_file, out_marker_path)
            for q_file in question_files:
                out_q_path = os.path.join(
                    save_dir, "Questions_{0}_{1}.csv".format(ind, ind2))
                self.question_files.append(out_q_path)
                copyfile(q_file, out_q_path)

            for q_file_path in question_files:
                q = []
                with open(q_file_path, 'r') as q_file:
                    q_reader = csv.reader(q_file)
                    for line in q_reader:
                        q.append(line)
                question.append(q)

        self.wav_files = [item for sublist in wav_files for item in sublist]
        self.wav_metas = [item for sublist in wav_metas for item in sublist]

        self.question.extend(question)

        for item in marker_files:
            self.marker_files.extend([item] * 4)

        self.answers = np.empty(np.shape(self.question)[:2])
        self.answers[:] = np.nan