Example #1
    def __init__(self, mode="max", prescale=1):
        devices = audiere.get_devices()
        if "oss" in devices:
            self.device = audiere.open_device("oss")
        elif "alsa" in devices:
            self.device = audiere.open_device("alsa")
        else:
            raise RuntimeError("no suitable audio device found!")

        self.mode = mode
        self.channels = numpy.zeros((CRATES, SLOTS, CHANNELS), dtype=numpy.float32)
        self.prescale = numpy.ones_like(self.channels) * prescale

        self.speakers = numpy.empty_like(self.channels, dtype=object)
        self.speakers[:, :, :] = VRateSpeaker(self.channels, prescale)
Example #2
    def __init__(self, mode='max', prescale=1):
        devices = audiere.get_devices()
        if 'oss' in devices:
            self.device = audiere.open_device('oss')
        elif 'alsa' in devices:
            self.device = audiere.open_device('alsa')
        else:
            raise RuntimeError('no suitable audio device found!')

        self.mode = mode
        self.channels = numpy.zeros((CRATES, SLOTS, CHANNELS),
                                    dtype=numpy.float32)
        self.prescale = numpy.ones_like(self.channels) * prescale

        self.speakers = numpy.empty_like(self.channels, dtype=object)
        self.speakers[:, :, :] = VRateSpeaker(self.channels, prescale)
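
The two constructors above probe audiere.get_devices() and fall back from "oss" to "alsa". A minimal standalone sketch of that probe-then-fallback pattern, assuming only the get_devices() and open_device() calls shown in the snippets:

import audiere

def open_preferred_device(preferred=("oss", "alsa")):
    # try the preferred backends in order, as the constructors above do
    names = audiere.get_devices()
    for name in preferred:
        if name in names:
            return audiere.open_device(name)
    raise RuntimeError("no suitable audio device found!")
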
Example #3
 def init(self):        
     super(AuditoryOddball,self).init()
     self.DIR_DEV = 'C:/stim_test/dev'    
     self.DIR_STD = 'C:/stim_test/std'
     self.stimuli = 'predefined'
     self.nStim = 15
     self.au = audiere.open_device()  
     self.dev_perc = 0.3  
Example #4
 def init(self):
     super(AuditoryOddball, self).init()
     self.DIR_DEV = 'C:/stim_test/dev'
     self.DIR_STD = 'C:/stim_test/std'
     self.stimuli = 'predefined'
     self.nStim = 15
     self.au = audiere.open_device()
     self.dev_perc = 0.3
Example #5
    def __init__(self, playlistfile="playlist.txt"):
        # Open default audio device
        self.dev = audiere.open_device()

        # Load playlist
        #playlistf = open(playlistfile, 'r')
        #self.playlist = [song for song in playlistf.read().split('\n')]
        #playlistf.close()
        self.songStartingTime = 0
        self.songDuration = 0
Example #6
	def playOne(self,buff,fs,pan=0):
		'''Plays a sound buffer with blocking, matlab-style
		'''
		import audiere
		from time import sleep
		d = audiere.open_device()
		s = d.open_array(buff,fs)
		s.pan = pan
		s.play()
		while s.playing:
			sleep(.01)
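
A hedged usage sketch for playOne's blocking pattern; the 440 Hz sine buffer and the 44100 Hz rate are illustrative assumptions, while open_array, play, and the playing flag come from the snippet above:

import numpy
import audiere
from time import sleep

fs = 44100                                       # assumed sampling rate
t = numpy.arange(fs) / float(fs)                 # one second of samples
buff = numpy.sin(2 * numpy.pi * 440 * t).astype(numpy.float32)

d = audiere.open_device()
s = d.open_array(buff, fs)                       # same call as in playOne
s.play()
while s.playing:                                 # block until playback ends
    sleep(.01)
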
Example #7
	def play(self,buffs,fs,pan=0):
		'''Plays a list of sound buffers sequentially with blocking, matlab-style
		'''
		import audiere
		from time import sleep
		d = audiere.open_device()
		sons = [d.open_array(buff,fs) for buff in buffs]
		for son in sons:
			son.pan = pan
			son.play()
			while son.playing:
				sleep(0.01)
Example #8
	def playOne(self,buff,fs,pan=0):
		"""
		Plays the sound corresponding to the signal "buff"
		at the sampling rate "fs".
		"""
		import audiere
		from time import sleep
		print audiere.get_devices()
		d = audiere.open_device()
		s = d.open_array(buff,fs)
		s.pan = pan
		s.play()
		while s.playing:
			sleep(.01)
Example #9
    def __init__(self, parent, id, title):
        wx.Frame.__init__(self, parent, id, title, wx.DefaultPosition, size=wx.DisplaySize())

        self.device = audiere.open_device()
        self._soundgen = self.device.create_tone
        self.tone = self.device.create_tone(250)
        self._pan = 0
        self.tone.pan = self._pan
        self._gen_toggle = False
        panel = wx.Panel(self, -1)
        panel.Bind(wx.EVT_KEY_DOWN, self.OnKeyDown)
        panel.Bind(wx.EVT_KEY_UP, self.OnKeyUp)
        panel.SetFocus()
        self.Centre()
        self.Show(True)
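
The frame binds OnKeyDown and OnKeyUp but the snippet does not include them; a hedged sketch of what such handlers might look like, built only from the tone.play()/tone.stop()/tone.pan attributes already used above. The key codes and the 0.1 pan step are assumptions:

    def OnKeyDown(self, event):
        # hypothetical handler: space starts the tone, arrow keys pan it
        key = event.GetKeyCode()
        if key == wx.WXK_SPACE and not self._gen_toggle:
            self._gen_toggle = True
            self.tone.play()
        elif key == wx.WXK_LEFT:
            self._pan = max(-1.0, self._pan - 0.1)
            self.tone.pan = self._pan
        elif key == wx.WXK_RIGHT:
            self._pan = min(1.0, self._pan + 0.1)
            self.tone.pan = self._pan

    def OnKeyUp(self, event):
        # hypothetical handler: releasing space stops the tone
        if event.GetKeyCode() == wx.WXK_SPACE:
            self._gen_toggle = False
            self.tone.stop()
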
Example #10
	def play(self,buffs,fs,pan=0):
		"""
		Plays, one after another, the sounds corresponding to
		the signals stored in the matrix "buffs",
		at the sampling rate "fs".
		"""
		import audiere
		from time import sleep
		print audiere.get_devices()
		d = audiere.open_device()
		sons = [d.open_array(buff,fs) for buff in buffs]
		for son in sons:
			son.play()
			while son.playing:
				sleep(0.01)
Example #11
	def __init__(self):
		print("INIT")
		self.playlist = []
		
		self.device = audiere.open_device()
		print()
Example #12
 def __init__(self):
     self.intervals = []
     self.intervalTones = []
     self.tones = {}
     self.notes = {
         "F8": 5587.65,
         "E8": 5274.04,
         "Ds8": 4978.03,
         "Eb8": 4978.03,
         "D8": 4698.64,
         "Cs8": 4434.92,
         "Db8": 4434.92,
         "C8": 4186.01,
         "B7": 3951.07,
         "As7": 3729.31,
         "Bb7": 3729.31,
         "A7": 3520,
         "Gs7": 3322.44,
         "Ab7": 3322.44,
         "G7": 3135.96,
         "Fs7": 2959.96,
         "Gb7": 2959.96,
         "F7": 2793.83,
         "E7": 2637.02,
         "Ds7": 2489.02,
         "Eb7": 2489.02,
         "D7": 2349.32,
         "Cs7": 2217.46,
         "Db7": 2217.46,
         "C7": 2093,
         "B6": 1975.53,
         "As6": 1864.66,
         "Bb6": 1864.66,
         "A6": 1760,
         "Gs6": 1661.22,
         "Ab6": 1661.22,
         "G6": 1567.98,
         "Fs6": 1479.98,
         "Gb6": 1479.98,
         "F6": 1396.91,
         "E6": 1318.51,
         "Ds6": 1244.51,
         "Eb6": 1244.51,
         "D6": 1174.66,
         "Cs6": 1108.73,
         "Db6": 1108.73,
         "C6": 1046.5,
         "B5": 987.767,
         "As5": 932.328,
         "Bb5": 932.328,
         "A5": 880,
         "Gs5": 830.609,
         "Ab5": 830.609,
         "G5": 783.991,
         "Fs5": 739.989,
         "Gb5": 739.989,
         "F5": 698.456,
         "E5": 659.255,
         "Ds5": 622.254,
         "Eb5": 622.254,
         "D5": 587.33,
         "Cs5": 554.365,
         "Db5": 554.365,
         "C5": 523.251,
         "B4": 493.883,
         "As4": 466.164,
         "Bb4": 466.164,
         "A4": 440,
         "Gs4": 415.305,
         "Ab4": 415.305,
         "G4": 391.995,
         "Fs4": 369.994,
         "Gb4": 369.994,
         "F4": 349.228,
         "E4": 329.628,
         "Ds4": 311.127,
         "Eb4": 311.127,
         "D4": 293.665,
         "Cs4": 277.183,
         "Db4": 277.183,
         "C4": 261.626,
         "B3": 246.942,
         "As3": 233.082,
         "Bb3": 233.082,
         "A3": 220,
         "Gs3": 207.652,
         "Ab3": 207.652,
         "G3": 195.998,
         "Fs3": 184.997,
         "Gb3": 184.997,
         "F3": 174.614,
         "E3": 164.814,
         "Ds3": 155.563,
         "Eb3": 155.563,
         "D3": 146.832,
         "Cs3": 138.591,
         "Db3": 138.591,
         "C3": 130.813,
         "B2": 123.471,
         "As2": 116.541,
         "Bb2": 116.541,
         "A2": 110,
         "Gs2": 103.826,
         "Ab2": 103.826,
         "G2": 97.9989,
         "Fs2": 92.4986,
         "Gb2": 92.4986,
         "F2": 87.3071,
         "E2": 82.4069,
         "Ds2": 77.7817,
         "Eb2": 77.7817,
         "D2": 73.4162,
         "Cs2": 69.2957,
         "Db2": 69.2957,
         "C2": 65.4064,
         "B1": 61.7354,
         "As1": 58.2705,
         "Bb1": 58.2705,
         "A1": 55,
         "Gs1": 51.9131,
         "Ab1": 51.9131,
         "G1": 48.9994,
         "Fs1": 46.2493,
         "Gb1": 46.2493,
         "F1": 43.6535,
         "E1": 41.2034,
         "Ds1": 38.8909,
         "Eb1": 38.8909,
         "D1": 36.7081,
         "Cs1": 34.6478,
         "Db1": 34.6478,
         "C1": 32.7032,
         "B0": 30.8677,
         "As0": 29.1352,
         "Bb0": 29.1352,
         "A0": 27.5,
         "Gs0": 25.9565,
         "Ab0": 25.9565,
         "G0": 24.4997,
         "Fs0": 23.1247,
         "Gb0": 23.1247,
         "F0": 21.8268,
         "E0": 20.6017,
         "Ds0": 19.4454,
         "Eb0": 19.4454,
         "D0": 18.354,
         "Cs0": 17.3239,
         "Db0": 17.3239,
         "C0": 16.3516
     }
     d = audiere.open_device()
     global tempo
     tempo = 2.40
     print("Initializing Tones...")
     for key in self.notes:
         self.tones[key] = d.create_tone(self.notes[key])
     usedList = []
     for key in self.notes:
         if self.notes[key] not in usedList:
             self.intervals.append(self.notes[key])
             self.intervalTones.append(d.create_tone(self.notes[key]))
         usedList.append(self.notes[key])
     self.intervals = sorted(self.intervals)
     print("Done")
Example #13
    def run(self):
        "This sequence is performed according to the available\
        time to do it, because the main thread is used by gtk.main()"

        # ======================
        # |1| HCI Initialization
        # ======================
        ## -- default variables
        codes, distractors, source_id, distractor, repeats = ["None"], [], 0, None, 0
        ## -- default attributes
        self.submenu, self.HOME, self.targets, self.current_subutton, self.current_len, self.video_len, self.ready, self.repeat, self.PLAY = (
            False,
            True,
            [None],
            "unknown_task",
            "unknown_len",
            0,
            False,
            0,
            False,
        )
        ## -- starting a device to play sounds by means of the 'audiere' library
        device = audiere.open_device()
        ## -- loading the cue-sounds
        beep = device.open_file(beep_loc)
        correct = device.open_file(correct_loc)
        wrong = device.open_file(wrong_loc)
        click = device.open_file(click_loc)
        ## -- loading the distractor-sounds
        # for track in distractor_locs: distractors.append(device.open_file(track))
        ## -- intro report
        HCI_Presentation()
        ## -- waiting for a client request (i.e., BCI System Connection)
        print "Waiting for connection . . ."
        BCICliSock, addr = HCISerSock.accept()
        print ". . . connected from:", addr
        # ==================================================================
        # |2| Infinite loop to not lose the control thread (OnLine Paradigm)
        # ==================================================================
        while not self.quit:
            # (a) +++++ Receiving an instruction from BCI client +++++
            instruction = BCICliSock.recv(BUFSIZ)
            if not instruction:
                break
            print "The received instruction is:", instruction
            # (b) +++++ Decoding the received instruction +++++
            TrigNum = instruction.split("*")[0]
            instruction = instruction.split("*")[-1]
            command = instruction.split("_")[0]
            target = instruction.split("_")[-1]
            # (c) +++++ Reset of Variables +++++
            code, self.GUI_VE = codes[0], ""
            # (d) +++++ Calling the appropriate method according to the TCP-message +++++
            # --> Start saving continuous eeg data (PauseOff) or
            #     Pause saving continuous eeg data (PauseOn)
            if command.isdigit():
                time.sleep(1)
                ParallelPort(TrigNum, 0)
                time.sleep(1)
            # --> Type Of System: Tutorial_MI or Tutorial_CMD
            elif command == "Tutorial":
                ## - variable declaration
                UT_status, sys_type, trigger = True, target, TrigNum
                ## - configuration of the Tutorial_Command System
                if target == "CMD":
                    self.submenu, self.HOME = False, True
                    gobject.idle_add(self.tutorial.UT_CMDSysConfig)
                ## - trigger delivery
                ParallelPort(trigger, 0)
            # --> Type Of System: Training_GUI & Testing_GUI
            elif any([command == "Training", command == "Testing"]):
                ## - switch to first tab & interface~reset
                self.ready = False
                gobject.idle_add(self.GUI_ResetTabs, self.submenu)
                while not self.ready:
                    None
                ## - variable reset
                UT_status, sys_type, trigger, self.submenu, self.HOME = False, command, TrigNum, False, True
                ## - trigger delivery
                ParallelPort(trigger, 0)
            # --> Type Of System: Cue-Driven & Target-Driven
            elif any([command == "Cue-Driven", command == "Target-Driven"]):
                ## - initialize the video~play
                Config_Video(command)
                ## - switch to first tab & interface~reset
                self.ready = False
                gobject.idle_add(self.GUI_ResetTabs, self.submenu)
                while not self.ready:
                    None
                ## - variable reset
                UT_status, sys_type, trigger, self.submenu, self.HOME = False, command, TrigNum, False, True
                ## - load of the system-targets
                codes = HCI_codes[sys_type]
                ## - trigger delivery
                ParallelPort(trigger, 0)
            # --> Warning Sign: Preparation for the MI Movement
            elif command == "warning":
                ## - GUI modification
                self.ready = False
                gobject.idle_add(self.Warning_Sign, UT_status, sys_type)
                while not self.ready:
                    None
                ## - trigger delivery
                ParallelPort(trigger, TrigNum)
            # --> Cue Onset: MI Movement Performance
            elif command == "cue":
                ## - beep performance
                beep.play()
                ## - GUI modification
                self.ready = False
                gobject.idle_add(self.Cue_Onset, UT_status, target, sys_type)
                while not self.ready:
                    None
                ## - trigger delivery
                ParallelPort(trigger, TrigNum)
            # --> Blank: random inter-trial interval (only Tutorial_MI, Tutorial_CMD & Training_GUI)
            #    "Execution of the sent cue in order to do a demostration of the command"
            elif command == "blank":
                ## - CASE 1: Tutorial_MI
                if sys_type == "MI":
                    gobject.idle_add(self.tutorial.UT_BlankMI, target)
                ## - CASE 2: Tutorial_CMD and Training_GUI
                else:
                    self.ready = False
                    if target == "left":
                        click.play()
                        gobject.idle_add(self.Select_Command, UT_status, target, sys_type, code)
                    elif target == "right":
                        click.play()
                        gobject.idle_add(self.Navigate_Command, UT_status, target, sys_type, code)
                    elif target == "idle":
                        gobject.idle_add(self.No_Command, UT_status, sys_type, target, command)
                    while not self.ready:
                        None
                ## - trigger delivery
                ParallelPort(trigger, TrigNum)
            # --> Left: user's command interpretation
            elif command == "left":
                self.ready = False
                ## - CASE 1 & 2: Testing_GUI & Cue-Driven System
                if any([sys_type == "Testing", sys_type == "Cue-Driven"]):
                    if command == target:
                        correct.play()
                        gobject.idle_add(self.Select_Command, UT_status, target, sys_type, code)
                    else:
                        wrong.play()
                        gobject.idle_add(self.No_Command, UT_status, sys_type, target, command)
                ## - CASE 3: Target-Driven System
                elif sys_type == "Target-Driven":
                    click.play()
                    gobject.idle_add(self.Select_Command, UT_status, target, sys_type, code)
                ## - trigger delivery
                while not self.ready:
                    None
                ParallelPort(trigger, TrigNum)
            # --> Right: user's command interpretation
            elif command == "right":
                self.ready = False
                ## - CASES 1 & 2: Testing GUI & Cue-Driven System
                if any([sys_type == "Testing", sys_type == "Cue-Driven"]):
                    if command == target:
                        correct.play()
                        gobject.idle_add(self.Navigate_Command, UT_status, target, sys_type, code)
                    else:
                        wrong.play()
                        gobject.idle_add(self.No_Command, UT_status, sys_type, target, command)
                ## - CASE 3: Target-Driven System
                elif sys_type == "Target-Driven":
                    click.play()
                    gobject.idle_add(self.Navigate_Command, UT_status, target, sys_type, code)
                ## - trigger delivery
                while not self.ready:
                    None
                ParallelPort(trigger, TrigNum)
            # --> Idle: user's command interpretation
            elif command == "idle":
                self.ready = False
                ## - CASES 1 & 2: Testing GUI & Cue-Driven System
                if any([sys_type == "Testing", sys_type == "Cue-Driven"]):
                    if command == target:
                        correct.play()
                    else:
                        wrong.play()
                ## - GUI modification
                gobject.idle_add(self.No_Command, UT_status, sys_type, target, command)
                ## - trigger delivery
                while not self.ready:
                    None
                ParallelPort(trigger, TrigNum)
            # --> Stopping HCI manipulation
            elif command == "quit":
                ## - System Exit
                print "The TCP connection has been closed."
                break
            # --> Audio-Target for Target-Driven Systems
            else:
                ## -- Current Table
                self.container = self.TabTables[self.general_menu.get_current_page()]
                ## -- Sequence of Instructions
                for idx in range(len(Instructions)):
                    # variable assignment
                    m, i, wait = Instructions[idx][0], Instructions[idx][1], Instructions[idx][2]
                    # GUI modification according to the current tab
                    self.ready = False
                    gobject.idle_add(self.HCI_InstON, m, i, idx, codes)
                    while not self.ready:
                        None
                    # wait for 4 or 1.5 seconds
                    time.sleep(wait)
                    # play record
                    if idx == 1:
                        track = "\\".join(["Sounds\\Target-DrivenSys", target])
                        track = device.open_file(track)
                        # play the target twice
                        track.play()
                        while track.playing == 1:
                            None
                        time.sleep(1)
                        track.play()
                        while track.playing == 1:
                            None
                    # GUI modification according to the current tab
                    self.ready = False
                    gobject.idle_add(self.HCI_InstOFF)
                    while not self.ready:
                        None
            # (e) +++++ self.GUI_VE (HCIresponse) Assessment +++++
            # e.1 CASE 1: HCI-codes (next target if one option from the correct submenu is selected)
            if self.GUI_VE == code:
                # --> play feedback
                TCPmsg, codes = self.HCI_Feedback(sys_type, codes, device, source_id)
                # --> play distractor randomly
                # if sys_type == 'Target-Driven':
                #    distractor, repeats, rand_time = distractors.pop(0), distractor_rpts.pop(0), random.randint(40000, 80000)
                #    source_id = gobject.timeout_add(rand_time, self.HCI_Distractors, repeats)
            # e.2 Any other case (there are no targets to pursue)
            else:
                # --> TCP message (ready + current_subutton): Cue-Driven System Proposes
                TCPmsg = "_".join(["Task/MenuLength", str(self.current_subutton), str(self.current_len)])
            # (f) +++++ Play of the Distractor if it is available +++++
            # if all([self.PLAY, self.repeat > 0]):
            #    distractor.play()
            #    self.PLAY = False
            # (g) +++++ Reply to BCI-System +++++
            BCICliSock.send(TCPmsg)
            print "Satisfactory reply to the TCP client: ", TCPmsg
        # ================================
        # |3| Closure of TCP-communication
        # ================================
        BCICliSock.close()
        HCISerSock.close()
Example #14
 def __init__(self, callsign=None):
     threading.Thread.__init__(self)
     self.queue = Queue.Queue()
     self.device = audiere.open_device()
     self.callsign = callsign
Example #15
 def AlphaPeak(self, widget, event):
     'Method to calculate the individual alpha frequency'
     
     # ==================================================
     # Data Acquisition
     # ==================================================
     print '\n\n***** INDIVIDUAL ALPHA FREQUENCY (IAF) *****'
     # ***** Data Storage *****
     EEG_EPOCHS = []
     device     = audiere.open_device()
     start      = device.open_file('Sounds\\iaf_start.wav')
     stop       = device.open_file('Sounds\\iaf_stop.wav')
     # ***** Recording EC&EO Condition *****
      for msg in ['Start recording the eyes closed condition?', 'Start recording the eyes open condition?']:
         DialogBox(msg, 'Individual Alpha Frequency')   
         start.play()
         # (a) Local Variables Declaration
         TCP_ch, TCP_array, time_record, Fs = 72, 432, 180, 128
         current_samples, data, TCP_samples, daq_samples = 0, '', TCP_array//TCP_ch//3, time_record*Fs 
         eeg_epoch = np.zeros((TCP_ch, 1))             
         # (b) BioSemi TCP Communication Start
         Client_T11 = socket(AF_INET, SOCK_STREAM)
         Client_T11.connect(('localhost', 778))
         # ...........Data Acquisition per epoch.............
         while current_samples < daq_samples:        
         # (c) Default Variables into the Loop
             tempo = np.zeros((TCP_ch, TCP_samples))            
         # (d) Sample Collection per TCP_array
             # --- loop to ensure a complete TCP_array collection
             while len(data) < TCP_array: data += Client_T11.recv(TCP_array)
             # --- save data until the required length (i.e., daq_samples) is reached
             BYTES = data[:TCP_array]
             data =  data[TCP_array:]
         # (e) Conversion from 24bits to Voltage 
             BYTES = bits_float(BYTES)                
             # --- Converting in microvolts
             BYTES = BYTES * 31.25e-3
         # (f) Data Re-Organization into Channels               
             new_ch_idx = 0
             for ch in range(TCP_ch): tempo[new_ch_idx, :], new_ch_idx = BYTES[ch::TCP_ch], new_ch_idx+1
             eeg_epoch = np.append(eeg_epoch, tempo, axis = 1)
             current_samples += TCP_samples
         # (g) delete the first column of eeg_epoch created by default         
         eeg_epoch = np.delete(eeg_epoch, 0, axis = 1)
         print '==> Raw_Data: ', np.shape(eeg_epoch)
         EEG_EPOCHS.append(eeg_epoch)
         # (h) BioSemi TCP client closure
         Client_T11.close()
         stop.play()
     # (i) Data storage
     cPickle.dump(EEG_EPOCHS, open(root + 'IAF.p', 'wb'))       
     # ==================================================
     # Signal Conditioning
     # ==================================================    
     EEG_DSP = []
     for eeg_epoch in EEG_EPOCHS:   
         # (a) Variable Declaration
         sigcon_samples = 128 * 180
         eeg_tempo = np.zeros((64, daq_samples))
         eeg_dsp = np.zeros((64, sigcon_samples))     
         ref, BW, bandrej, DC = ['LargeLaplacian',[]], ['on',[0,0], [0,0]], ['off',(0,0)], ['off',(0,0)]   
         ## -- filter design
         BW[1][0], BW[1][1] = spectral_filter(128, 7,  0, 4, 'highpass')   
         BW[2][0], BW[2][1] = spectral_filter(128, 0, 14, 7, 'lowpass')
         # (b) Spectral Filtering
         for ch in range(64):eeg_tempo[ch,:] = SiGCoN(ch,eeg_epoch,layout,ref,BW,bandrej,DC,1,'spectral',[])
         # (c) Spatial Filtering + Downsampling
         for ch in range(64):  eeg_dsp[ch,:] = SiGCoN(ch,eeg_tempo,layout,ref,BW,bandrej,DC,1,'spatial', [])
         # (d) Data storage
         EEG_DSP.append(eeg_dsp)    
         print '==> Conditioned_Data: ', np.shape(eeg_dsp)
     # ==================================================
     # Power Spectral Density
     # ==================================================          
     FIG = plt.figure(num = 1, figsize = (15,10), facecolor = '#A0AEC1', edgecolor = 'white')
     FIG.subplots_adjust(left = 0.075, right = 0.95, bottom = 0.05, top = 0.925, hspace = 0.3)
     # (a) Eyes Closed (EC) Condition
     ## -- axis 1 configuration
     ax1 = FIG.add_subplot(3, 1, 1)
     ax1.tick_params(labelsize = 9) 
     ax1.grid(True)
     ax1.set_xlabel('F r e q u e n c y [Hz]', fontsize = 10, fontname='Byington')
      ax1.set_ylabel('Power Spectral Density [dB/Hz]', fontsize = 10, fontname='Byington')
      ax1.set_title('EYES CLOSED CONDITION', color = 'black', fontsize = 11, fontname='Byington')
     ## -- plot of EC condition
     signal = np.mean(EEG_DSP[0], axis = 0)
     print '==> ECspectrum_Data: ', np.shape(signal)
     Pxx1, freqs1 = ax1.psd(signal, NFFT=512, Fs=128, noverlap=50, color=blue, linewidth=1.75)
     ## -- maximum values
     max_values = np.max(Pxx1)
     max_idxs   = np.where(Pxx1 == max_values)[0]
     for idxs in max_idxs: ax1.text(freqs1[idxs],10*np.log10(Pxx1[idxs]),str(freqs1[idxs]),color=blue,fontsize=11,fontname='Byington')
     # (b) Eyes Open (EO) Condition
     ## -- axis 2 configuration
     ax2 = FIG.add_subplot(3, 1, 2)
     ax2.tick_params(labelsize = 9) 
     ax2.grid(True)
     ax2.set_xlabel('F r e q u e n c y [Hz]', fontsize = 10, fontname='Byington')
      ax2.set_ylabel('Power Spectral Density [dB/Hz]', fontsize = 10, fontname='Byington')
     ax2.set_title('EYES OPEN CONDITION', color = 'black', fontsize = 11, fontname='Byington')
     ## -- plot of EO condition
     signal = np.mean(EEG_DSP[-1], axis = 0)
     print '==> EOspectrum_Data: ', np.shape(signal)
     Pxx2, freqs2 = ax2.psd(signal, NFFT=512, Fs=128, noverlap=50, color=orange, linewidth=1.75)
     ## -- maximum values
     max_values = np.max(Pxx2)
     max_idxs   = np.where(Pxx2 == max_values)[0]
     for idxs in max_idxs: ax2.text(freqs2[idxs],10*np.log10(Pxx2[idxs]),str(freqs2[idxs]),color=orange,fontsize=11,fontname='Byington')
     # (c) Comparison between EC and EO Conditions
     ## -- axis 3 configuration
     ax3 = FIG.add_subplot(3, 1, 3)
     ax3.tick_params(labelsize = 9) 
     ax3.grid(True)
     ax3.set_xlabel('F r e q u e n c y [Hz]', fontsize = 10, fontname='Byington')
      ax3.set_ylabel('Power Spectral Density [dB/Hz]', fontsize = 10, fontname='Byington')
     ax3.set_title('DIFFERENCE BETWEEN EC & EO CONDITIONS', color = 'black', fontsize = 11, fontname='Byington')
     ## -- plot of ECvsEO condition
     signal = 10*np.log10(Pxx1) - 10*np.log10(Pxx2)
     print '==> ECvsEOspectrum_Data: ', np.shape(signal)
     ax3.plot(freqs1, signal, color = '#AD0066', linewidth = 1.75)
     ## -- maximum values
     max_values = np.max(signal)
     max_idxs   = np.where(signal == max_values)[0]
     for idxs in max_idxs: ax3.text(freqs1[idxs],signal[idxs],str(freqs1[idxs]),color='#AD0066',fontsize=11,fontname='Byington')
     ## -- display & storage
     url = root + 'IAF.png'
     plt.savefig(url, facecolor = '#A0AEC1', edgecolor = 'white')
     plt.show()
     print '********************************************\n'
Example #16
 def init(self):
     super(TactileOddball, self).init()
     self.au = audiere.open_device()
Example #17
    def AlphaPeak(self, widget, event):
        'Method to calculate the individual alpha frequency'

        # ==================================================
        # Data Acquisition
        # ==================================================
        print '\n\n***** INDIVIDUAL ALPHA FREQUENCY (IAF) *****'
        # ***** Data Storage *****
        EEG_EPOCHS = []
        device = audiere.open_device()
        start = device.open_file('Sounds\\iaf_start.wav')
        stop = device.open_file('Sounds\\iaf_stop.wav')
        # ***** Recording EC&EO Condition *****
        for msg in [
                'Start recording the eyes closed condition?',
                'Start recording the eyes open condition?'
        ]:
            DialogBox(msg, 'Individual Alpha Frequency')
            start.play()
            # (a) Local Variables Declaration
            TCP_ch, TCP_array, time_record, Fs = 72, 432, 180, 128
            current_samples, data, TCP_samples, daq_samples = 0, '', TCP_array // TCP_ch // 3, time_record * Fs
            eeg_epoch = np.zeros((TCP_ch, 1))
            # (b) BioSemi TCP Communication Start
            Client_T11 = socket(AF_INET, SOCK_STREAM)
            Client_T11.connect(('localhost', 778))
            # ...........Data Acquisition per epoch.............
            while current_samples < daq_samples:
                # (c) Default Variables into the Loop
                tempo = np.zeros((TCP_ch, TCP_samples))
                # (d) Sample Collection per TCP_array
                # --- loop to ensure a complete TCP_array collection
                while len(data) < TCP_array:
                    data += Client_T11.recv(TCP_array)
                # --- save data until the required length (i.e., daq_samples) is reached
                BYTES = data[:TCP_array]
                data = data[TCP_array:]
                # (e) Conversion from 24bits to Voltage
                BYTES = bits_float(BYTES)
                # --- Converting in microvolts
                BYTES = BYTES * 31.25e-3
                # (f) Data Re-Organization into Channels
                new_ch_idx = 0
                for ch in range(TCP_ch):
                    tempo[new_ch_idx, :], new_ch_idx = BYTES[
                        ch::TCP_ch], new_ch_idx + 1
                eeg_epoch = np.append(eeg_epoch, tempo, axis=1)
                current_samples += TCP_samples
            # (g) delete the first column of eeg_epoch created by default
            eeg_epoch = np.delete(eeg_epoch, 0, axis=1)
            print '==> Raw_Data: ', np.shape(eeg_epoch)
            EEG_EPOCHS.append(eeg_epoch)
            # (h) BioSemi TCP client closure
            Client_T11.close()
            stop.play()
        # (i) Data storage
        cPickle.dump(EEG_EPOCHS, open(root + 'IAF.p', 'wb'))
        # ==================================================
        # Signal Conditioning
        # ==================================================
        EEG_DSP = []
        for eeg_epoch in EEG_EPOCHS:
            # (a) Variable Declaration
            sigcon_samples = 128 * 180
            eeg_tempo = np.zeros((64, daq_samples))
            eeg_dsp = np.zeros((64, sigcon_samples))
            ref, BW, bandrej, DC = ['LargeLaplacian',
                                    []], ['on', [0, 0],
                                          [0, 0]], ['off',
                                                    (0, 0)], ['off', (0, 0)]
            ## -- filter design
            BW[1][0], BW[1][1] = spectral_filter(128, 7, 0, 4, 'highpass')
            BW[2][0], BW[2][1] = spectral_filter(128, 0, 14, 7, 'lowpass')
            # (b) Spectral Filtering
            for ch in range(64):
                eeg_tempo[ch, :] = SiGCoN(ch, eeg_epoch, layout, ref, BW,
                                          bandrej, DC, 1, 'spectral', [])
            # (c) Spatial Filtering + Downsampling
            for ch in range(64):
                eeg_dsp[ch, :] = SiGCoN(ch, eeg_tempo, layout, ref, BW,
                                        bandrej, DC, 1, 'spatial', [])
            # (d) Data storage
            EEG_DSP.append(eeg_dsp)
            print '==> Conditioned_Data: ', np.shape(eeg_dsp)
        # ==================================================
        # Power Spectral Density
        # ==================================================
        FIG = plt.figure(num=1,
                         figsize=(15, 10),
                         facecolor='#A0AEC1',
                         edgecolor='white')
        FIG.subplots_adjust(left=0.075,
                            right=0.95,
                            bottom=0.05,
                            top=0.925,
                            hspace=0.3)
        # (a) Eyes Closed (EC) Condition
        ## -- axis 1 configuration
        ax1 = FIG.add_subplot(3, 1, 1)
        ax1.tick_params(labelsize=9)
        ax1.grid(True)
        ax1.set_xlabel('F r e q u e n c y [Hz]',
                       fontsize=10,
                       fontname='Byington')
        ax1.set_ylabel('Power Spectral Density [dB/Hz]',
                       fontsize=10,
                       fontname='Byington')
        ax1.set_title('EYES CLOSED CONDITION',
                      color='black',
                      fontsize=11,
                      fontname='Byington')
        ## -- plot of EC condition
        signal = np.mean(EEG_DSP[0], axis=0)
        print '==> ECspectrum_Data: ', np.shape(signal)
        Pxx1, freqs1 = ax1.psd(signal,
                               NFFT=512,
                               Fs=128,
                               noverlap=50,
                               color=blue,
                               linewidth=1.75)
        ## -- maximum values
        max_values = np.max(Pxx1)
        max_idxs = np.where(Pxx1 == max_values)[0]
        for idxs in max_idxs:
            ax1.text(freqs1[idxs],
                     10 * np.log10(Pxx1[idxs]),
                     str(freqs1[idxs]),
                     color=blue,
                     fontsize=11,
                     fontname='Byington')
        # (b) Eyes Open (EO) Condition
        ## -- axis 2 configuration
        ax2 = FIG.add_subplot(3, 1, 2)
        ax2.tick_params(labelsize=9)
        ax2.grid(True)
        ax2.set_xlabel('F r e q u e n c y [Hz]',
                       fontsize=10,
                       fontname='Byington')
        ax2.set_ylabel('Power Spectral Density [dB/Hz]',
                       fontsize=10,
                       fontname='Byington')
        ax2.set_title('EYES OPEN CONDITION',
                      color='black',
                      fontsize=11,
                      fontname='Byington')
        ## -- plot of EO condition
        signal = np.mean(EEG_DSP[-1], axis=0)
        print '==> EOspectrum_Data: ', np.shape(signal)
        Pxx2, freqs2 = ax2.psd(signal,
                               NFFT=512,
                               Fs=128,
                               noverlap=50,
                               color=orange,
                               linewidth=1.75)
        ## -- maximum values
        max_values = np.max(Pxx2)
        max_idxs = np.where(Pxx2 == max_values)[0]
        for idxs in max_idxs:
            ax2.text(freqs2[idxs],
                     10 * np.log10(Pxx2[idxs]),
                     str(freqs2[idxs]),
                     color=orange,
                     fontsize=11,
                     fontname='Byington')
        # (c) Comparison between EC and EO Conditions
        ## -- axis 3 configuration
        ax3 = FIG.add_subplot(3, 1, 3)
        ax3.tick_params(labelsize=9)
        ax3.grid(True)
        ax3.set_xlabel('F r e q u e n c y [Hz]',
                       fontsize=10,
                       fontname='Byington')
        ax3.set_ylabel('Power Spectral Density [dB/Hz]',
                       fontsize=10,
                       fontname='Byington')
        ax3.set_title('DIFFERENCE BETWEEN EC & EO CONDITIONS',
                      color='black',
                      fontsize=11,
                      fontname='Byington')
        ## -- plot of ECvsEO condition
        signal = 10 * np.log10(Pxx1) - 10 * np.log10(Pxx2)
        print '==> ECvsEOspectrum_Data: ', np.shape(signal)
        ax3.plot(freqs1, signal, color='#AD0066', linewidth=1.75)
        ## -- maximum values
        max_values = np.max(signal)
        max_idxs = np.where(signal == max_values)[0]
        for idxs in max_idxs:
            ax3.text(freqs1[idxs],
                     signal[idxs],
                     str(freqs1[idxs]),
                     color='#AD0066',
                     fontsize=11,
                     fontname='Byington')
        ## -- display & storage
        url = root + 'IAF.png'
        plt.savefig(url, facecolor='#A0AEC1', edgecolor='white')
        plt.show()
        print '********************************************\n'
Example #18
#        # -label update-
#        message.set_text(m)
#        # -image widget-
#        cartoon = Image(i)
#        table.attach(cartoon, 0, 1, 0, 1)        
#        # -wait for 4 or 1.5 seconds-
#        time.sleep(wait)  
#        # -play record-
#        if idx == 2:
#            track = '\\'.join(['Sounds\\Target-DrivenSys', target])
#            track = device.open_file(track)
#            # playing 3 times the target-
#            track.play()
#            while track.playing == 1: None
#            track.play()
#            while track.playing == 1: None
#            track.play()
#            while track.playing == 1: None   
#        # -image widget deleting-
#        table.remove(cartoon)    
    ## -- window exit

def destroy(widget):
    gtk.main_quit()
    
device = audiere.open_device()
DialogBox(device, None)
gtk.main()
time.sleep(3)


Example #19
			self.__incSeq(1)
			if seq_idx < len(self.seqs):
				self.sound_stack.append(self.cur_seq)
				self.cur_seq = self.seqs[seq_idx]
				self.__loop()
			else:
				self.__loop()
		elif c == 7:
			if len(self.sound_stack) == 0:
				self.toneOff()
				self.is_active = 0
			else:
				self.cur_seq = self.sound_stack.pop()
				self.__loop()
		elif c == 8:
			self.action = 8
			self.word_28f = self.cur_seq[:2]	
			self.__incSeq(2)
			self.byte_298 = self.cur_seq[0]
			self.__incSeq(1)
			self.byte_299 = self.cur_seq[0]
			self.__incSeq(1)
		else:
			print 'invalid sequence case (0x%x)!!' % c

sm = SoundMachine(sequences, audiere.open_device())
sm.launch(1, SEQUENCE_NO)

while True:
	if not sm.launch(2): break
Example #20
	def __init__(self):
		self.sound_class = Sound_Audiere
		self.audiere = audiere.open_device()
Example #21
 def run(self):
     'This sequence runs whenever the GTK main loop allows,\
     because the main thread is occupied by gtk.main()'
     
     # ======================
     # |1| HCI Initialization
     # ======================
     ## -- default variables
     codes, distractors, source_id, distractor, repeats = ['None'], [], 0, None, 0
     ## -- default attributes
     self.submenu,self.HOME,self.targets,self.current_subutton,self.current_len,self.video_len,self.ready,self.repeat,self.PLAY = \
     False       ,True     ,[None]      ,'unknown_task'       ,'unknown_len'   ,0             ,False     ,0          ,False
     ## -- starting a device to play sounds by means of the 'audiere' library
     device = audiere.open_device()
     ## -- loading the cue-sounds
     beep    = device.open_file(beep_loc)
     correct = device.open_file(correct_loc)
     wrong   = device.open_file(wrong_loc)
     click   = device.open_file(click_loc)
     ## -- loading the distractor-sounds
     #for track in distractor_locs: distractors.append(device.open_file(track))
     ## -- intro report
     HCI_Presentation()
     ## -- waiting for a client request (i.e., BCI System Connection)
     print 'Waiting for connection . . .'
     BCICliSock, addr = HCISerSock.accept()
     print '. . . connected from:', addr    
     # ==================================================================         
     # |2| Infinite loop to not lose the control thread (OnLine Paradigm)
     # ==================================================================
     while not self.quit:       
         # (a) +++++ Receiving an instruction from BCI client +++++
         instruction = BCICliSock.recv(BUFSIZ)
         if not instruction: break
         print 'The received instruction is:', instruction           
         # (b) +++++ Decoding the received instruction +++++
         TrigNum = instruction.split('*')[0]
         instruction = instruction.split('*')[-1]
         command = instruction.split('_')[0] 
         target  = instruction.split('_')[-1]  
         # (c) +++++ Reset of Variables +++++
         code, self.GUI_VE = codes[0], '' 
          # (d) +++++ Calling the appropriate method according to the TCP-message +++++
          # --> Start saving continuous eeg data (PauseOff) or
          #     Pause saving continuous eeg data (PauseOn)
         if command.isdigit():
             time.sleep(1)
             ParallelPort(TrigNum, 0)
             time.sleep(1)
         # --> Type Of System: Tutorial_MI or Tutorial_CMD
         elif command == 'Tutorial':
             ## - variable declaration
             UT_status, sys_type, trigger = True, target, TrigNum 
             ## - configuration of the Tutorial_Command System                
             if target == 'CMD': 
                 self.submenu, self.HOME = False, True
                 gobject.idle_add(self.tutorial.UT_CMDSysConfig)   
             ## - trigger delivery
             ParallelPort(trigger, 0)
         # --> Type Of System: Training_GUI & Testing_GUI
         elif any([command=='Training',command=='Testing']):   
             ## - switch to first tab & interface~reset
             self.ready = False
             gobject.idle_add(self.GUI_ResetTabs, self.submenu)  
             while not self.ready: None
             ## - variable reset
             UT_status, sys_type, trigger, self.submenu, self.HOME = False, command, TrigNum, False, True 
             ## - trigger delivery
             ParallelPort(trigger, 0)            
         # --> Type Of System: Cue-Driven & Target-Driven
         elif any([command=='Cue-Driven',command=='Target-Driven']):
             ## - initialize the video~play
             Config_Video(command)   
             ## - switch to first tab & interface~reset
             self.ready = False
             gobject.idle_add(self.GUI_ResetTabs, self.submenu)
             while not self.ready: None
             ## - variable reset
             UT_status, sys_type, trigger, self.submenu, self.HOME = False, command, TrigNum, False, True 
             ## - load of the system-targets
             codes = HCI_codes[sys_type]                
             ## - trigger delivery
             ParallelPort(trigger, 0)
         # --> Warning Sign: Preparation for the MI Movement
         elif command == 'warning':
             ## - GUI modification            
             self.ready = False
             gobject.idle_add(self.Warning_Sign, UT_status, sys_type) 
             while not self.ready: None
             ## - trigger delivery
             ParallelPort(trigger, TrigNum)                               
         # --> Cue Onset: MI Movement Performance
         elif command == 'cue':
             ## - beep performance
             beep.play()
             ## - GUI modification
             self.ready = False
             gobject.idle_add(self.Cue_Onset, UT_status, target, sys_type)
             while not self.ready: None 
             ## - trigger delivery
             ParallelPort(trigger, TrigNum)                   
         # --> Blank: random inter-trial interval (only Tutorial_MI, Tutorial_CMD & Training_GUI)
         #    "Execution of the sent cue in order to do a demostration of the command"
         elif command == 'blank':
             ## - CASE 1: Tutorial_MI
             if sys_type == 'MI':
                 gobject.idle_add(self.tutorial.UT_BlankMI, target)
             ## - CASE 2: Tutorial_CMD and Training_GUI 
             else:
                 self.ready = False
                 if target == 'left':
                     click.play()
                     gobject.idle_add(self.Select_Command, UT_status, target, sys_type, code) 
                 elif target == 'right':
                     click.play()
                     gobject.idle_add(self.Navigate_Command, UT_status, target, sys_type, code)       
                 elif target == 'idle':
                     gobject.idle_add(self.No_Command, UT_status, sys_type, target, command)  
                 while not self.ready: None
             ## - trigger delivery
             ParallelPort(trigger, TrigNum) 
         # --> Left: user's command interpretation
         elif command == 'left': 
             self.ready = False            
             ## - CASE 1 & 2: Testing_GUI & Cue-Driven System
             if any([sys_type == 'Testing', sys_type == 'Cue-Driven']):
                 if command == target:
                     correct.play()
                     gobject.idle_add(self.Select_Command, UT_status, target, sys_type, code)
                 else:
                     wrong.play()
                     gobject.idle_add(self.No_Command, UT_status, sys_type, target, command) 
             ## - CASE 3: Target-Driven System
             elif sys_type == 'Target-Driven':
                 click.play()    
                 gobject.idle_add(self.Select_Command, UT_status, target, sys_type, code)  
             ## - trigger delivery
             while not self.ready: None
             ParallelPort(trigger, TrigNum)                                    
         # --> Right: user's command interpretation
         elif command == 'right': 
             self.ready = False
             ## - CASES 1 & 2: Testing GUI & Cue-Driven System
             if any([sys_type == 'Testing', sys_type == 'Cue-Driven']):
                 if command == target:
                     correct.play()
                     gobject.idle_add(self.Navigate_Command, UT_status, target, sys_type, code)
                 else:
                     wrong.play()
                     gobject.idle_add(self.No_Command, UT_status, sys_type, target, command)
             ## - CASE 3: Target-Driven System
             elif sys_type == 'Target-Driven':
                 click.play()  
                 gobject.idle_add(self.Navigate_Command, UT_status, target, sys_type, code)   
             ## - trigger delivery
             while not self.ready: None
             ParallelPort(trigger, TrigNum)         
         # --> Idle: user's command interpretation
         elif command == 'idle':
             self.ready = False
             ## - CASES 1 & 2: Testing GUI & Cue-Driven System
             if any([sys_type == 'Testing', sys_type == 'Cue-Driven']):
                 if command == target:
                     correct.play()
                 else:
                     wrong.play()                                             
             ## - GUI modification
             gobject.idle_add(self.No_Command, UT_status, sys_type, target, command)        
             ## - trigger delivery
             while not self.ready: None
             ParallelPort(trigger, TrigNum)             
         # --> Stopping HCI manipulation
         elif command == 'quit':
             ## - System Exit
             print 'The TCP connection has been closed.'                
             break                                                       
         # --> Audio-Target for Target-Driven Systems
         else:
             ## -- Current Table
             self.container = self.TabTables[self.general_menu.get_current_page()]
             ## -- Sequence of Instructions
             for idx in range(len(Instructions)):
                 # variable assignment
                 m, i, wait = Instructions[idx][0], Instructions[idx][1], Instructions[idx][2]
                 # GUI modification according to the current tab
                 self.ready = False
                 gobject.idle_add(self.HCI_InstON, m, i, idx, codes)
                 while not self.ready: None                      
                 # wait for 4 or 1.5 seconds
                 time.sleep(wait)  
                 # play record
                 if idx == 1:
                     track = '\\'.join(['Sounds\\Target-DrivenSys', target])
                     track = device.open_file(track)
                      # play the target twice
                     track.play()
                     while track.playing == 1: None
                     time.sleep(1)
                     track.play()
                     while track.playing == 1: None   
                 # GUI modification according to the current tab
                 self.ready = False
                 gobject.idle_add(self.HCI_InstOFF)
                 while not self.ready: None                                                      
         # (e) +++++ self.GUI_VE (HCIresponse) Assessment +++++
         # e.1 CASE 1: HCI-codes (next target if one option from the correct submenu is selected)
         if self.GUI_VE == code:
             # --> play feedback
             TCPmsg, codes = self.HCI_Feedback(sys_type, codes, device, source_id)
             # --> play distractor randomly
             #if sys_type == 'Target-Driven': 
             #    distractor, repeats, rand_time = distractors.pop(0), distractor_rpts.pop(0), random.randint(40000, 80000)
             #    source_id = gobject.timeout_add(rand_time, self.HCI_Distractors, repeats)
         # e.2 Any other case (there are no targets to pursue)                               
         else:
             # --> TCP message (ready + current_subutton): Cue-Driven System Proposes
             TCPmsg = '_'.join(['Task/MenuLength', str(self.current_subutton), str(self.current_len)])
         # (f) +++++ Play of the Distractor if it is available +++++
         #if all([self.PLAY, self.repeat > 0]): 
         #    distractor.play()
         #    self.PLAY = False            
         # (g) +++++ Reply to BCI-System +++++
         BCICliSock.send(TCPmsg)   
         print 'Satisfactory reply to the TCP client: ', TCPmsg
     # ================================
     # |3| Closure of TCP-communication
     # ================================ 
     BCICliSock.close()
     HCISerSock.close()
Example #22
 def init(self):        
     super(TactileOddball,self).init()
     self.au = audiere.open_device()        
Example #23
 def __init__(self):
     threading.Thread.__init__(self)
     self.device = audiere.open_device()
     self.stream = self.device.open_file(CALL_FILE)
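
Only the constructor of this thread is shown; a hedged sketch of a matching run method, using the stream.play() call and playing flag seen elsewhere in this listing. The module-level time import and the 0.01 s polling interval are assumptions:

 def run(self):
     # hypothetical run method: play the loaded call and block until it ends
     # (assumes "import time" at module level)
     self.stream.play()
     while self.stream.playing:
         time.sleep(.01)
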
Example #24
def init_tanooki():
    global device
    global stream
    device = audiere.open_device()
    stream = None
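
A hedged companion sketch for init_tanooki, showing how the module-level globals might be used; the play_tanooki name and its path argument are hypothetical, while open_file, play, and stop appear in the other examples:

def play_tanooki(path):
    # hypothetical helper: replace whatever is playing with a new file
    # (assumes init_tanooki() has been called first)
    global stream
    if stream is not None:
        stream.stop()
    stream = device.open_file(path)
    stream.play()
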
Example #25
# Author:
# Steven Richards <sbrichards@{mit.edu, gnu.org}>
# 'FASCII' Based AES256 Point-to-Point Light Encoding
 
import audiere
import base64
import binascii
import os
#from Crypto.Cipher import AES
from time import sleep
from math import sqrt
 
device = audiere.open_device()  # open and assign the audio device



def playing(fasciidata):
        loop = 2
        for char in fasciidata:
                if loop % 2 == 0:
                        tone = device.create_tone((2000 + (ord(char) * 100) + 100))
                        tone.play()
                        sleep(0.03)
                        tone.stop()
                        loop += 1
                        print char
                        print ((2000 + (ord(char) * 100) + 100))
                        print ((2000 + (ord(char) * 100)))
                else:
                        tone = device.create_tone((2000 + (ord(char) * 100) - 100))
                        tone.play()
Example #26
import sys
sys.path.append("c:\\users\\bill\\my documents\\syscompy")
import smr_io
import audiere
import numpy as np
filepath = "c:\\users\\bill\\my documents\\syscompy\\2004ParkfieldEquake\\BMR06001.ASC"
x, y, z = smr_io.get_smr_data(filepath)

xy = np.column_stack((x,y))
xz = np.column_stack((x,z))
zy = np.column_stack((z,y))

d = audiere.open_device()
wx = d.open_array(x, 200)
wy = d.open_array(y, 200)
wz = d.open_array(z, 200)

wxy = d.open_array(xy, 200)
wxz = d.open_array(xz, 200)
wzy = d.open_array(zy, 200)

wx.pitchshift = 3
wy.pitchshift = 3
wz.pitchshift = 3
wx.play()
Example #27
def playaudio(audio_array):
    ds = audiere.open_device()
    stream = ds.open_array(audio_array, 44100)  # renamed from "os" to avoid shadowing the os module
    stream.play()
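
playaudio returns as soon as playback starts, so in a short-lived script the device can be garbage-collected before anything is heard. A hedged blocking variant, assuming only the stream.playing flag used throughout these examples:

import time
import audiere

def playaudio_blocking(audio_array, fs=44100):
    # like playaudio above, but waits for playback to finish
    device = audiere.open_device()
    stream = device.open_array(audio_array, fs)
    stream.play()
    while stream.playing:
        time.sleep(.01)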