Example #1
    def test_trial(self, trial):
        # present the picture for this trial
        self.image.image = self.stimuli_folder + trial['Picture']
        self.image.draw()
        self.win.callOnFlip(self.clock.reset)
        self.isi.complete()
        self.win.flip()
        # play the trial's audio instruction, if any
        if trial['trialAudio'] != '':
            audio.play(self.instructions[trial['trialAudio']], wait=True)
        if trial['answer_type'] == 'spoken':
            # record a spoken response (25 s) and save it as a wav
            audio.write(self.log_prefix + '_' + trial['Picture'][:-4] + '.wav',
                        audio.record(25, wait=True))
        else:
            # collect a keypress and its reaction time
            keys = event.waitKeys(keyList=['escape'] +
                                  trial['keyboard'].split(' '),
                                  timeStamped=self.clock)
            trial['keypress'], trial['RT'] = keys[0]
            if trial['keypress'] == 'escape':
                core.quit()
            # score accuracy against the expected key
            if trial['keypress'] == trial['key']:
                trial['ACC'] = 1
            else:
                trial['ACC'] = 0
        self.win.callOnFlip(self.isi.start,
                            float(trial['ITI']) / 1000 - self.frame_dur)
        # flip buffer again and start the ISI timer
        self.win.flip()
        return trial
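A hedged sketch of the trial dict this method expects; the keys are inferred from the code above and the values are invented for illustration:

    # hypothetical trial row; keys inferred from test_trial, values made up
    trial = {
        'Picture': 'dog.png',       # image file inside self.stimuli_folder
        'trialAudio': '',           # key into self.instructions, '' = none
        'answer_type': 'keypress',  # anything but 'spoken' collects a keypress
        'keyboard': 'f j',          # space-separated allowed response keys
        'key': 'f',                 # the correct key, used to score ACC
        'ITI': '1500',              # inter-trial interval in milliseconds
    }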
Example #2
    def run(self):
        # set up presentation window color and size
        bgcolor = 'black'
        txtcolor = 'white'
        self.win = visual.Window(fullscr=True, color=bgcolor)
        #self.win = visual.Window((1200, 900), color=bgcolor)  # temporary windowed setup for testing; swap with the line above for the actual experiment
        self.text = visual.TextStim(self.win, color=txtcolor)

        # countdown: one row of '+' per second, shrinking from 9 to 1 signs
        for i in range(5, 0, -1):
            self.text.text = '+' * (2 * i - 1)
            self.text.draw()
            self.win.flip()
            core.wait(1)

        self.text.text = '-'
        self.text.draw()
        self.win.flip()
        # record 60 seconds of audio while '-' is on screen
        samples = audio.record(60, wait=True)

        self.text.text = '+'
        self.text.draw()
        self.win.flip()
        audio.write(self.log_fname, samples)
        core.wait(3)

        self.win.close()
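A quick check of the countdown arithmetic: '+' * (2 * i - 1) with i counting down from 5 yields rows of 9, 7, 5, 3 and 1 plus signs, one per second:

    >>> ['+' * (2 * i - 1) for i in range(5, 0, -1)]
    ['+++++++++', '+++++++', '+++++', '+++', '+']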
Example #3
    def run(self):
        # set up presentation window color and size
        bgcolor = 'black'
        txtcolor = 'white'
        self.win = visual.Window(fullscr=True, color=bgcolor)
        #self.win = visual.Window((1200, 900), color=bgcolor)  # temporary windowed setup for testing; swap with the line above for the actual experiment
        self.text = visual.TextStim(self.win, color=txtcolor)

        # preload the 24 word recordings
        words = [
            audio.read(self.stimuli_prefix + str(i + 1) + '.wav')
            for i in range(24)
        ]
        recordings = []

        self.text.text = '||'
        self.text.draw()
        self.win.flip()
        audio.play(audio.read(self.instructions_folder + self.mode + '.wav'),
                   wait=True)
        key = event.waitKeys(keyList=['return'])

        for word in words:
            self.text.text = '+'
            self.text.draw()
            self.win.flip()
            audio.play(word, wait=True)
            self.text.text = '-'
            self.text.draw()
            self.win.flip()
            # record the word's duration (44.1 kHz samples assumed) plus 1 s;
            # 44100.0 forces float division
            recordings += [audio.record(len(word) / 44100.0 + 1, wait=True)]

        for i in range(len(words)):
            audio.write(self.log_prefix + str(i + 1) + '.wav', recordings[i])

        self.win.close()
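Note that the stimulus filenames are 1-based while range() is 0-based, hence the i + 1 in both loops:

    >>> [str(i + 1) + '.wav' for i in range(3)]
    ['1.wav', '2.wav', '3.wav']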
Example #4
def playback(a):
    # scale the panel's output-gain slider value to the device range 0-255
    p = a.back
    gain = int(255.0 * p.outputgain.val)
    # depress the button while the sample plays
    a.val = 1.0
    a.fixact()
    panel.drawpanel()
    audio.setoutgain(gain)
    audio.write(p.sample)
    audio.setoutgain(0)
    # release the button again
    a.val = 0.0
    a.fixact()
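The gain line maps the slider value onto the device's integer gain range before the sample is written; assuming the slider runs from 0.0 to 1.0, a quick check of that mapping:

    >>> [int(255.0 * v) for v in (0.0, 0.5, 1.0)]
    [0, 127, 255]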
Example #5
import os  # midiProxy and audio are the source project's own modules

def writeSamples(midiPath, audioPath, outputPath):
    midi = midiProxy.loadMidiDrums(midiPath)
    audioLoad = audio.load(audioPath)
    wave = audioLoad[1]
    rate = audioLoad[0]

    # sanity check: visualize the spectrogram against the MIDI events
    spectrogram, samplingRate = audio.performFFTs(audioLoad)
    audio.visualizeSpectrogram(wave=None,
                               spectrogram=spectrogram,
                               midi=midi,
                               name=midiPath)

    # lowest frequency of interest = 10 Hz = 0.1 s per cycle
    # time between 16th notes at 200 bpm: 0.3 s per beat -> 0.075 s per 16th note
    step = 0.5  # window of the sound saved, in seconds
    samples = int(step * rate)  # number of samples to save
    preDelay = 0.05
    for midiEvent in midi:
        #get the name and the time of the midi event
        eventName = midiEvent['notes']  # midiProxy.getVectorToNote would map [0,0,1,0...] to [40, 36]
        time = midiEvent["startTime"]

        # skip events whose window would fall outside the wave
        min = int(rate * (time - preDelay))
        max = int(rate * (time - preDelay)) + samples
        if min < 0 or max > len(wave):
            continue

        #create folder for the samples
        directory = outputPath + "/" + str(eventName)
        if not os.path.exists(directory):
            os.makedirs(directory)

        # fade in and out to prevent aliasing in the fft?
        # fadedWave = np.array([[int(wave[min+i][0] * fadeMask[i]), int(wave[min+i][1] * fadeMask[i])] for i in xrange(samples)], dtype = wave.dtype)

        #write the isolated wave from the sample
        audio.write(
            directory + "/" + audioPath.split("/")[-1] + str(time) + ".wav",
            rate, wave[min:max])
        #         audio.write(directory + "/" + str(time) + "f.wav", rate, fadedWave)

    print(directory + "/" + audioPath.split("/")[-1] + str(time) + ".wav")
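A hedged usage sketch, assuming the project's midiProxy and audio modules are importable; the paths are made up:

    # would cut one 0.5 s window per MIDI drum event out of take1.wav
    # and write it under samples/<notes>/
    writeSamples('take1.mid', 'take1.wav', 'samples')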
Example #6
from math import cos, pi, sqrt  # the audio module used below is project-specific

def dtmfout(dtmf, fname, ftype):
    # aliases for the non-numeric keys
    STAR = 10
    HASH = 11

    # DTMF frequencies, indexed by the digit
    hi_freq = [1336, 1209, 1336, 1477, 1209, 1336, 1477, 1209, 1336, 1477, 1209, 1477]
    lo_freq = [941, 697, 697, 697, 770, 770, 770, 852, 852, 852, 941, 941]

    # power levels: lo_freq group has -3dB relative to hi_freq group
    # going to use sqrt() from math which is probably overkill
    # could have approximated with 0.707
    hi_pwr = 1
    lo_pwr = 1 / sqrt(2)

    # sampling rate and period
    Fs = 8000
    dt = 1 / float(Fs)

    # number of samples for silence (100 ms) and for a dtmf digit (200 ms);
    # silence is added before and after each digit, i.e. 200 ms in total
    n_silence = int(0.1 * Fs)
    n_dtmf = int(0.2 * Fs)
    silence = [0] * n_silence

    # signal will be the final output
    signal = []

    for n in range(len(dtmf)):
        ch = ord(dtmf[n])

        if ch >= ord('0') and ch <= ord('9'):
            # numeric keys
            fhi = hi_freq[ch - ord('0')]
            flo = lo_freq[ch - ord('0')]
        elif dtmf[n] == '*':
            fhi = hi_freq[STAR]
            flo = lo_freq[STAR]
        elif dtmf[n] == '#':
            fhi = hi_freq[HASH]
            flo = lo_freq[HASH]
        else:
            # silently drop invalid characters
            continue

        dig = [0] * n_dtmf
        for m in range(n_dtmf):
            dig[m] = int((hi_pwr * cos(2 * pi * fhi * m * dt) +
                          lo_pwr * cos(2 * pi * flo * m * dt)) * 64)

            # wav samples need to be unsigned values so add a DC component
            if ftype == audio.TYPE_WAV:
                dig[m] = dig[m] + 127

        signal = signal + silence + dig + silence

    return audio.write(fname, ftype, signal, Fs)
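A hedged usage sketch, reusing the audio.write(fname, ftype, signal, Fs) signature and TYPE_WAV constant that appear above; the file name is made up:

    # dial the full keypad: each digit becomes 200 ms of dual-tone audio
    # padded by 100 ms of silence on either side
    dtmfout('0123456789*#', 'dialtones.wav', audio.TYPE_WAV)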
Example #7
    def run(self):
        # set up presentation window color and size
        bgcolor = 'black'
        txtcolor = 'white'
        self.win = visual.Window(fullscr=True, color=bgcolor)
        #self.win = visual.Window((1200, 900), color=bgcolor)  # temporary windowed setup for testing; swap with the line above for the actual experiment
        self.text = visual.TextStim(self.win, color=txtcolor)

        with open(self.trials_fname, 'rU') as trial_file:
            # read trial structure
            trials = list(csv.DictReader(trial_file, delimiter='\t'))

            # preload stimuli
            stimuli = [audio.read(self.stimuli_folder + trial['stimulus']) for trial in trials]

            recordings = []

            self.text.text = '||'
            self.text.draw()
            self.win.flip()
            audio.play(audio.read(self.instructions_folder + self.mode + '.wav'), wait=True)
            key = event.waitKeys(keyList=['return'])
            self.win.flip()

            for stimulus in stimuli:
                self.text.text = '+'
                self.text.draw()
                self.win.flip()
                audio.play(stimulus, wait=True)
                self.text.text = '-'
                self.text.draw()
                self.win.flip()
                # record the stimulus duration (44.1 kHz samples assumed) plus 1 s
                recordings += [audio.record((len(stimulus) / 44100.0) + 1, wait=True)]
                keys = event.getKeys(['escape'])
                if 'escape' in keys:
                    break

            for i in range(len(recordings)):
                audio.write(self.log_prefix + trials[i]['stimulus'], recordings[i])

            self.win.close()
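The trial file is read as tab-separated with a header row, so each trial becomes a dict keyed by column name. A hedged sketch of the smallest file this code would accept (only the stimulus column is actually used):

    stimulus
    word01.wav
    word02.wav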
Example #8
import os  # midiProxy and audio are the source project's own modules

def writeSamples(midiPath, audioPath, outputPath):
    midi = midiProxy.loadMidiDrums(midiPath)
    audioLoad = audio.load(audioPath)
    wave = audioLoad[1]
    rate = audioLoad[0]

    # sanity check (disabled): visualize the spectrogram against the MIDI events
    # spectrogram, samplingRate = audio.performFFTs(audioLoad)
    # audio.visualizeSpectrogram(wave=None, spectrogram=spectrogram, midi=midi, name=midiPath)

    # lowest frequency of interest = 10 Hz = 0.1 s per cycle
    # time between 16th notes at 200 bpm: 0.3 s per beat -> 0.075 s per 16th note
    step = 0.525  # window of the sound saved, in seconds
    samples = int(step * rate)  # number of samples to save
    preDelay = 0.05
    for midiEvent in midi:
        # get the name and the time of the midi event
        eventName = midiEvent['notes']  # midiProxy.getVectorToNote would map [0,0,1,0...] to [40, 36]
        time = midiEvent["startTime"]

        # skip events whose window would fall outside the wave
        min = int(rate * (time - preDelay))
        max = int(rate * (time - preDelay)) + samples
        if min < 0 or max > len(wave):
            continue

        # create a folder for the samples
        directory = outputPath + "/" + str(eventName)
        if not os.path.exists(directory):
            os.makedirs(directory)

        # fade in and out to prevent aliasing in the fft?
        # fadedWave = np.array([[int(wave[min+i][0] * fadeMask[i]), int(wave[min+i][1] * fadeMask[i])] for i in xrange(samples)], dtype=wave.dtype)

        # write the isolated wave for this event
        audio.write(directory + "/" + audioPath.split("/")[-1] + str(time) + ".wav", rate, wave[min:max])
        # audio.write(directory + "/" + str(time) + "f.wav", rate, fadedWave)

        print(directory + "/" + audioPath.split("/")[-1] + str(time) + ".wav")
Example #9
		del fp
	else:
		del fp
		data = readfile(tempname)
	if G.debug: print len(data), 'bytes read from', tempname
	if G.busy:
		G.busy = 0
		dummy = audio.stop_playing()
	#
	# Completely reset the audio device
	audio.setrate(G.rate)
	audio.setduration(0)
	audio.setoutgain(G.gain)
	#
	if G.synchronous:
		audio.write(data)
		audio.setoutgain(0)
	else:
		try:
			audio.start_playing(data)
			G.busy = 1
		except:
			stdwin.fleep()
	del data

def readfile(filename):
	return readfp(open(filename, 'r'))

def readfp(fp):
	data = ''
	while 1: