Example #1
 def loadStimulus(self):
     '''
     Load the stimulus wav files and select one question at random from each
     stimulus's question CSV.
     '''
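     # Gather stimulus wav files and their matching question CSVs in natural sort order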
     self.wav_files = natsorted(globDir(self.stimDir, '*.wav'))
     q_files = natsorted(globDir(self.stimDir, '*.csv'))
     for wav_file, q_file in zip(self.wav_files, q_files):
         q_lines = []
         with open(q_file, 'r') as csvfile:
             reader = csv.reader(csvfile)
             for line in reader:
                 q_lines.append((int(line[0]), line[1:]))
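         # Pick one question at random from this stimulus's question file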
         q_ind = randint(0, len(q_lines) - 1)
         self.question.append(q_lines[q_ind][1])
Example #2
    def loadStimulus(self):
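        '''
        Load audio samples, RMS levels and sentence words for the selected
        stimulus lists, and build a shuffled trial/track order.
        '''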

        # Get folder path of all lists in the list directory
        lists = next(os.walk(self.listDir))[1]
        lists.pop(lists.index("demo"))
        # Don't reload any lists that have already been loaded
        pop = [lists.index(x) for x in self.loadedLists]
        for i in sorted(pop, reverse=True):
            del lists[i]
        # Randomly select n lists
        inds = self.inds
        # random.shuffle(inds)
        # Pick first n shuffled lists
        for ind in inds:
            # Get filepaths to the audiofiles and word csv file for the current
            # list
            listAudiofiles = globDir(os.path.join(self.listDir, lists[ind]), "*.wav")
            listCSV = globDir(os.path.join(self.listDir, lists[ind]), "*.csv")
            levels = globDir(os.path.join(self.listDir, lists[ind]), "*.mat")

            with open(listCSV[0]) as csv_file:
                csv_reader = csv.reader(csv_file)
                # Allocate empty lists to store audio samples, RMS and words of
                # each list sentence
                self.lists.append([])
                self.listsRMS.append([])
                self.listsString.append([])
                # Get data for each sentence
                for fp, words, level_file in zip(listAudiofiles, csv_reader, levels):
                    # Read in audio file and calculate its RMS
                    x, self.fs, _ = sndio.read(fp)
                    logger.info(f"Calculating level for {Path(fp).name}")
                    # x_rms, _, _ = asl_P56(x, self.fs, 16.)
                    x_rms = rms_no_silences(x, self.fs, -30.)
                    self.lists[-1].append(x)
                    self.listsRMS[-1].append(x_rms)
                    self.listsString[-1].append(words)

        # Number of trials to split between adaptive tracks
        n = len(self.lists[0])*len(inds)
        # Number of adaptive tracks active
        tn = len(self.adaptiveTracks)
        self.trackOrder = list(np.repeat(np.arange(tn), n // tn))
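        # Randomise which adaptive track each trial is assigned to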
        random.shuffle(self.trackOrder)

        # Shuffle order of sentence presentation
        self.availableSentenceInds = list(range(len(self.lists[0])))
        random.shuffle(self.availableSentenceInds)
Example #3
    def loadStimulus(self):
        '''
        Generate tone-in-noise stimuli at SNRs offset by the participant's
        behavioural matrix test SRT and write them to the participant directory.
        '''
        self.participant.load('mat_test')
        try:
            srt_50 = self.participant.data['mat_test']['srt_50']
            s_50 = self.participant.data['mat_test']['s_50']
        except KeyError:
            raise KeyError(
                "Behavioural matrix test results not available, make "
                "sure the behavioural test has been run before "
                "running this test.")

        #reduction_coef = float(np.load(os.path.join(self.listDir, 'reduction_coef.npy')))

        # Calculate SNRs based on behavioural measures
        '''
        s_50 *= 0.01
        shuffle(self.si)
        x = logit(self.si * 0.01)
        snrs = (x/(4*s_50))+srt_50
        snrs = np.append(snrs, np.inf)
        snrs = np.append(snrs, 10.0)
        self.si = np.append(self.si, np.inf)
        self.si = np.append(self.si, 10.0)

        self.snr_fs = 10**(-snrs/20)
        self.snr_fs[self.snr_fs == np.inf] = 0.
        if (self.snr_fs == -np.inf).any():
            raise ValueError("Noise infinitely louder than signal for an SNR (SNRs: {})".format(self.snr_fs))
        '''

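        # Offset the preset tone SNRs by the behavioural SRT and convert to linear scale factors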
        snrs = np.squeeze(self.participant.data['parameters']['tone_SNRs'])
        snrs[~np.isinf(snrs)] += srt_50
        self.snr_fs = 10**(-snrs / 20)
        self.snr_fs[self.snr_fs == np.inf] = 0.
        if (self.snr_fs == -np.inf).any():
            raise ValueError(
                "Noise infinitely louder than signal for an SNR (SNRs: {})".
                format(self.snr_fs))

        self.data_path = self.participant.data_paths[self.test_name]
        out_dir = os.path.join(self.data_path, "stimulus")
        delete_if_exists(out_dir)
        out_info = os.path.join(out_dir, "stim_info.csv")
        dir_must_exist(out_dir)

        stim_dirs = [
            x for x in os.listdir(self.stim_folder)
            if os.path.isdir(os.path.join(self.stim_folder, x))
        ]

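        # Order the stimulus folders to match the participant's tone frequencies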
        ordered_stim_dirs = []
        for freq in self.participant_parameters['tone_freqs']:
            for folder in stim_dirs:
                if re.match(f'tone_({int(freq)})', folder):
                    ordered_stim_dirs.append(folder)
        ordered_stim_dirs *= int(len(snrs))

        for ind, dir_name in enumerate(ordered_stim_dirs):
            stim_dir = os.path.join(self.stim_folder, dir_name)
            wavs = globDir(stim_dir, "*.wav")
            intensity_files = globDir(stim_dir, "*intensity.npy")

            self.socketio.emit('test_stim_load', namespace='/main')
            # Add noise to audio files at set SNRs and write to participant
            # directory

            # Append so stimulus info from every tone directory ends up in one CSV
            with open(out_info, 'a') as csvfile:
                writer = csv.writer(csvfile)
                if ind == 0:
                    writer.writerow(['wav', 'snr_fs', 'intensity', 'snr'])
                for wav, snr_fs, intensity, snr in zip(wavs, self.snr_fs,
                                                       intensity_files, snrs):
                    fp = os.path.splitext(
                        os.path.basename(wav))[0] + "_{}.wav".format(snr)
                    out_wavpath = os.path.join(out_dir, fp)
                    stim_intensity = np.load(intensity)
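                    # Scale the noise to match the stimulus intensity so the requested SNR is applied correctly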
                    match_ratio = stim_intensity / self.noise_intensity
                    block_mix_wavs(wav,
                                   self.noise_path,
                                   out_wavpath,
                                   self.reduction_coef,
                                   snr_fs * match_ratio * self.reduction_coef,
                                   mute_left=False)
                    self.stim_paths.extend([out_wavpath])
                    writer.writerow([wav, snr_fs, intensity, snr])
Example #4
    def loadStimulus(self):
        '''
        Generate the EEG decoder test stimuli by mixing list audio with noise
        at SNRs derived from the behavioural matrix test results.
        '''
        self.participant.load('mat_test')
        try:
            srt_50 = self.participant.data['mat_test']['srt_50']
            s_50 = self.participant.data['mat_test']['s_50']
        except KeyError:
            raise KeyError(
                "Behavioural matrix test results not available, make "
                "sure the behavioural test has been run before "
                "running this test.")
        save_dir = self.participant.data_paths['eeg_test/stimulus']
        '''
        # Estimate speech intelligibility thresholds using predicted
        # psychometric function
        s_50 *= 0.01
        x = logit(self.si * 0.01)
        snrs = (x/(4*s_50))+srt_50
        snrs = np.append(snrs, np.inf)
        snr_map = pd.DataFrame({"speech_intel" : np.append(self.si, 0.0), "snr": snrs})
        snr_map_path = os.path.join(save_dir, "snr_map.csv")
        snr_map.to_csv(snr_map_path)
        snrs = np.repeat(snrs[np.newaxis], 4, axis=0)
        snrs = roll_independant(snrs, np.array([0,-1,-2,-3]))
        stim_dirs = [x for x in os.listdir(self.listDir) if os.path.isdir(os.path.join(self.listDir, x))]
        shuffle(stim_dirs)
        '''
        snrs = self.participant.data['parameters']['decoder_test_SNRs'] + srt_50
        stim_dirs = [
            x for x in os.listdir(self.listDir)
            if os.path.isdir(os.path.join(self.listDir, x))
        ]

        ordered_stim_dirs = []
        for ind in self.participant_parameters['decoder_test_lists']:
            for folder in stim_dirs:
                if re.match(f'Stim_({int(ind)})', folder):
                    ordered_stim_dirs.append(folder)

        # ordered_stim_dirs *= int(len(snrs))
        noise_file = PySndfile(self.noise_path, 'r')
        wav_files = []
        wav_metas = []
        question = []
        marker_files = []
        self.socketio.emit('test_stim_load', namespace='/main')
        for ind, dir_name in enumerate(ordered_stim_dirs[:snrs.shape[1]]):
            logger.debug(
                f"Processing list directory {ind+1} of {snrs.shape[1]}")
            stim_dir = os.path.join(self.listDir, dir_name)
            wav = globDir(stim_dir, "*.wav")[0]
            csv_files = natsorted(globDir(stim_dir, "*.csv"))
            marker_file = csv_files[0]
            question_files = csv_files[1:]
            # rms_file = globDir(stim_dir, "*.npy")[0]
            # speech_rms = float(np.load(rms_file))
            snr = snrs[:, ind]
            audio, fs, enc, fmt = sndio.read(wav, return_format=True)

            speech = audio[:, :2]
            triggers = audio[:, 2]
            #speech_rms, _, _ = asl_P56(speech, fs, 16.)
            speech_rms = rms_no_silences(speech, fs, -30.)

            wf = []
            wm = []
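            # Mix the speech with a fresh random noise segment at each target SNR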
            for ind2, s in enumerate(snr):
                start = randint(0, noise_file.frames() - speech.shape[0])
                noise_file.seek(start)
                noise = noise_file.read_frames(speech.shape[0])
                noise_rms = np.sqrt(np.mean(noise**2))
                # noise_rms = asl_P56(noise, fs, 16)
                snr_fs = 10**(-s / 20)
                if snr_fs == np.inf:
                    snr_fs = 0.
                elif snr_fs == -np.inf:
                    raise ValueError(
                        "Noise infinitely louder than signal at snr: {}".
                        format(snr))
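                # Match the noise level to the speech level before applying the SNR scaling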
                noise = noise * (speech_rms / noise_rms)
                out_wav_path = os.path.join(
                    save_dir, "Stim_{0}_{1}.wav".format(ind, ind2))
                out_meta_path = os.path.join(
                    save_dir, "Stim_{0}_{1}.npy".format(ind, ind2))
                with np.errstate(divide='raise'):
                    try:
                        out_wav = (speech + (np.stack([noise, noise], axis=1) *
                                             snr_fs)) * self.reduction_coef
                    except Exception:
                        set_trace()
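                # Re-attach the trigger channel and write the mixed stimulus and its SNR to disk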
                out_wav = np.concatenate([out_wav, triggers[:, np.newaxis]],
                                         axis=1)
                sndio.write(out_wav_path, out_wav, fs, fmt, enc)
                np.save(out_meta_path, s)
                wf.append(out_wav_path)
                wm.append(out_meta_path)
            wav_metas.append(wm)
            wav_files.append(wf)
            out_marker_path = os.path.join(save_dir,
                                           "Marker_{0}.csv".format(ind))
            marker_files.append(out_marker_path)
            copyfile(marker_file, out_marker_path)
            for q_ind, q_file in enumerate(question_files):
                out_q_path = os.path.join(
                    save_dir, "Questions_{0}_{1}.csv".format(ind, q_ind))
                self.question_files.append(out_q_path)
                copyfile(q_file, out_q_path)

            for q_file_path in question_files:
                q = []
                with open(q_file_path, 'r') as q_file:
                    q_reader = csv.reader(q_file)
                    for line in q_reader:
                        q.append(line)
                question.append(q)

        self.wav_files = [item for sublist in wav_files for item in sublist]
        self.wav_metas = [item for sublist in wav_metas for item in sublist]

        self.question.extend(question)

        for item in marker_files:
            self.marker_files.extend([item] * 4)

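        # Pre-allocate the answer array with NaN placeholders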
        self.answers = np.empty(np.shape(self.question)[:2])
        self.answers[:] = np.nan
Example #5
    def loadStimulus(self):
        '''
        Mix pre-generated stimulus wavs with noise at SNRs derived from the
        behavioural matrix test results and record the stimulus info to a CSV.
        '''
        self.participant.load('mat_test')
        try:
            srt_50 = self.participant.data['mat_test']['srt_50']
            s_50 = self.participant.data['mat_test']['s_50']
        except KeyError:
            raise KeyError("Behavioural matrix test results not available, make "
                           "sure the behavioural test has been run before "
                           "running this test.")

        #reduction_coef = float(np.load(os.path.join(self.listDir, 'reduction_coef.npy')))

        # Calculate SNRs based on behavioural measures
        s_50 *= 0.01
        shuffle(self.si)
        x = logit(self.si * 0.01)
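        # Invert the logistic psychometric function: snr = logit(si) / (4 * s_50) + srt_50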
        snrs = (x/(4*s_50))+srt_50
        self.snr_fs = 10**(-snrs/20)
        self.snr_fs = np.append(self.snr_fs, np.inf)
        self.si = np.append(self.si, np.inf)
        snrs = np.append(snrs, np.inf)
        self.snr_fs[self.snr_fs == np.inf] = 0.
        if (self.snr_fs == -np.inf).any():
            raise ValueError("Noise infinitely louder than signal for an SNR (SNRs: {})".format(self.snr_fs))

        wavs = globDir(self.stim_folder, "*.wav")
        questions = globDir(self.stim_folder, "stim_questions_*.csv")
        if not len(questions):
            raise FileNotFoundError("No question files found in {}".format(self.stim_folder))
        rms_files = globDir(self.stim_folder, "stim_*_rms.npy")
        if not len(rms_files):
            raise FileNotFoundError("No rms files found in {}".format(self.stim_folder))

        self.socketio.emit('test_stim_load', namespace='/main')
        # Add noise to audio files at set SNRs and write to participant
        # directory
        self.data_path = self.participant.data_paths[self.test_name]
        out_dir = os.path.join(self.data_path, "stimulus")
        out_info = os.path.join(out_dir, "stim_info.csv")
        dir_must_exist(out_dir)

        with open(out_info, 'w') as csvfile:
            writer = csv.writer(csvfile)
            writer.writerow(['wav', 'snr_fs', 'rms', 'si', 'snr'])
            for wav, snr_fs, rms, si, snr in zip(wavs, self.snr_fs, rms_files, self.si, snrs):
                out_wavpath = os.path.join(out_dir, os.path.basename(wav))
                stim_rms = np.load(rms)
                match_ratio = stim_rms / self.noise_rms
                block_mix_wavs(wav, self.noise_path, out_wavpath,
                               1. * self.reduction_coef,
                               snr_fs * match_ratio * self.reduction_coef)
                self.stim_paths.append(out_wavpath)
                writer.writerow([wav, snr_fs, rms, si, snr])
                # TODO: Output SI/snrs of each file to a CSV file


        for q_file_path in questions:
            q = []
            with open(q_file_path, 'r') as q_file:
                q_reader = csv.reader(q_file)
                for line in q_reader:
                    q.append(line)
            self.question.append(q)
        self.answers = np.empty(np.shape(self.question)[:2])
        self.answers[:] = np.nan