Example #1
class Audio(Stimulus):
    """A simple audio stimulus."""

    def __init__(self, window, sound, text=None, *args, **kwargs):
        """Constructor for the Audio stimulus.

        Arguments:
        sound - A number (pitch in Hz), string for a note,
                or string for a filename.
                For more info, see:
                http://www.psychopy.org/api/sound.html
        text - Text to display on screen (Optional).

        Additional args and kwargs are passed to the 
        sound.Sound constructor.
        """
        super(Audio, self).__init__(window)
        self.sound = Sound(sound, *args, **kwargs)
        self.text = text

    def show(self):
        self.display_text(self.text)
        self.play_sound()
        return super(Audio, self).show()

    def play_sound(self):
        self.sound.play()
        core.wait(self.sound.getDuration())
        return None
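A minimal usage sketch for this stimulus; it assumes a PsychoPy window and the project's own Stimulus base class, and the 440 Hz pitch is an arbitrary illustration:

# Hypothetical usage; Audio and its Stimulus base class come from the
# surrounding project, psychopy provides the window.
from psychopy import visual

win = visual.Window()
tone = Audio(win, 440, text='Listen...')  # 440 Hz pitch, optional caption
tone.show()  # draws the text, plays the tone, blocks for its duration
win.close()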
Example #2
    def __init__(self, TR=1.0, volumes=10, sync='5', skip=0, sound=False):
        """Class for a character-emitting metronome thread (emulate MR sync pulse).

        Aim: Allow testing of temporal robustness of fMRI scripts by emulating
        a hardware sync pulse. Adds an arbitrary 'sync' character to the key
        buffer, with sub-millisecond precision (less precise if CPU is maxed).
        Recommend: TR=1.000 or higher and less than 100% CPU. Shorter TR
        --> higher CPU load.

        Parameters:
            TR:      seconds per whole-brain volume
            volumes: number of 3D volumes to obtain in a given scanning run
            sync:    character used as flag for sync timing, default='5'
            skip:    how many frames to silently omit initially during T1
                     stabilization, no sync pulse. Not needed to test script
                     timing, but will give more accurate feel to start of run
            sound:   play a tone, slightly shorter duration than TR
        """
        if TR < 0.1:
            raise ValueError('SyncGenerator: whole-brain TR < 0.1 not supported')
        self.TR = TR
        self.hogCPU = 0.035
        self.timesleep = self.TR
        self.volumes = int(volumes)
        self.sync = sync
        self.skip = skip
        self.playSound = sound
        if self.playSound:
            self.sound = Sound(secs=self.TR - 0.08, octave=6, autoLog=False)
            self.sound.setVolume(0.15)

        self.clock = core.Clock()
        self.stopflag = False
        threading.Thread.__init__(self, None, 'SyncGenerator', None)
        self.running = False
Example #4
def play_file(path, msg, trigger_port, trigger_twice=False):
    all_events.append({
        'what': "audio played",
        'when': core.getTime() - experiment_start,
        'content': path,
        'message': msg,
        'response': None
    })
    msg = visual.TextStim(win, text=msg)
    msg.draw()
    win.flip()
    mySound = Sound(path)
    if trigger_port:
        sendTrigger(trigger_port, duration=0.01)
    mySound.play()
    core.wait(mySound.getDuration())
    if trigger_port and trigger_twice:
        sendTrigger(trigger_port, duration=0.01)
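A hypothetical call site for play_file; note that win, experiment_start, all_events and sendTrigger are module-level names the function assumes:

# Sketch only: win, experiment_start, all_events and sendTrigger must
# already exist at module level for play_file() to work.
play_file('stimuli/target.wav', 'Listen carefully', trigger_port=None)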
Example #5
    def __init__(self, TR=1.0, TA=1.0, volumes=10, sync='5', skip=0,
                 sound=False, **kwargs):
        """Class for a character-emitting metronome thread
        (emulate MR sync pulse).

        Aim: Allow testing of temporal robustness of fMRI scripts by emulating
        a hardware sync pulse. Adds an arbitrary 'sync' character to the key
        buffer, with sub-millisecond precision (less precise if CPU is maxed).
        Recommend: TR=1.000 or higher and less than 100% CPU. Shorter TR
        --> higher CPU load.

        Parameters:
            TR:      seconds between volume acquisitions
            TA:      seconds to acquire one volume
            volumes: number of 3D volumes to obtain in a given scanning run
            sync:    character used as flag for sync timing, default='5'
            skip:    how many frames to silently omit initially during T1
                     stabilization, no sync pulse. Not needed to test script
                     timing, but will give more accurate feel to start of run.
                     aka "discdacqs".
            sound:   simulate scanner noise
        """
        if TR < 0.1:
            msg = 'SyncGenerator:  whole-brain TR < 0.1 not supported'
            raise ValueError(msg)
        self.TR = TR
        self.TA = TA
        self.hogCPU = 0.035
        self.timesleep = self.TR
        self.volumes = int(volumes)
        self.sync = sync
        self.skip = skip
        self.playSound = sound
        if self.playSound:  # pragma: no cover
            self.sound1 = Sound(800, secs=self.TA, volume=0.15, autoLog=False)
            self.sound2 = Sound(813, secs=self.TA, volume=0.15, autoLog=False)

        self.clock = core.Clock()
        self.stopflag = False
        threading.Thread.__init__(self, None, 'SyncGenerator', None)
        self.running = False
Example #6
class SyncGenerator(threading.Thread):
    def __init__(self, TR=1.0, volumes=10, sync="5", skip=0, sound=False):
        """Class for a character-emitting metronome thread (emulate MR sync pulse).
            
            Aim: Allow testing of temporal robustness of fMRI scripts by emulating 
            a hardware sync pulse. Adds an arbitrary 'sync' character to the key 
            buffer, with sub-millisecond precision (less precise if CPU is maxed). 
            Recommend: TR=1.000 or higher and less than 100% CPU. Shorter TR
            --> higher CPU load. 
            
            Parameters:
                TR:      seconds per whole-brain volume
                volumes: number of 3D volumes to obtain in a given scanning run
                sync:    character used as flag for sync timing, default='5'
                skip:    how many frames to silently omit initially during T1 
                         stabilization, no sync pulse. Not needed to test script
                         timing, but will give more accurate feel to start of run.
                         aka "discdacqs".
                sound:   play a tone, slightly shorter duration than TR
        """
        if TR < 0.1:
            raise ValueError("SyncGenerator: whole-brain TR < 0.1 not supported")
        self.TR = TR
        self.hogCPU = 0.035
        self.timesleep = self.TR
        self.volumes = int(volumes)
        self.sync = sync
        self.skip = skip
        self.playSound = sound
        if self.playSound:
            self.sound = Sound(secs=self.TR - 0.08, octave=6, autoLog=False)
            self.sound.setVolume(0.15)

        self.clock = core.Clock()
        self.stopflag = False
        threading.Thread.__init__(self, None, "SyncGenerator", None)
        self.running = False

    def run(self):
        self.running = True
        if self.skip:
            if self.playSound:
                self.sound.play()
            core.wait(self.TR * self.skip)  # emulate T1 stabilization without data collection
        self.clock.reset()
        for vol in range(1, self.volumes + 1):
            if self.playSound:
                self.sound.play()
            if self.stopflag:
                break
            # "emit" a sync pulse by placing a key in the buffer:
            event._onPygletKey(symbol=self.sync, modifiers=None, emulated=True)
            # wait for start of next volume, doing our own hogCPU for tighter sync:
            core.wait(self.timesleep - self.hogCPU, hogCPUperiod=0)
            while self.clock.getTime() < vol * self.TR:
                pass  # hogs the CPU for tighter sync
        self.running = False

    def stop(self):
        self.stopflag = True
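A minimal sketch of driving this emulator from an experiment script (assumes the same psychopy event import the class itself uses):

# Start the thread, then treat each emitted '5' keypress as a scanner pulse.
sync = SyncGenerator(TR=2.0, volumes=5, sync='5')
sync.start()
for vol in range(5):
    event.waitKeys(keyList=['5'])  # block until the next emulated pulse
    print('volume %d acquired' % (vol + 1))
sync.stop()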
Example #7
class AudioScene(object):
    def __init__(self, win, manager, soundFile, fixationCross):
        self.win = win
        self.manager = manager
        self.sound = Sound(soundFile)
        self.fixationCross = fixationCross
        self.max_frame = math.ceil(
            self.sound.getDuration() * constants.FRAME_RATE +
            constants.AUDIO_DELAY * constants.FRAME_RATE)
        self.delay_frames = math.ceil(constants.AUDIO_DELAY *
                                      constants.FRAME_RATE)
        self.current_frame = 0

    def update(self):
        self.current_frame += 1
        if self.current_frame == self.delay_frames:
            self.sound.play()
            self.manager.eyeTracker.sound_start()
        if self.current_frame >= self.max_frame:
            self.manager.set_response_scene()
        self.draw()

    def draw(self):
        self.fixationCross.draw()
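The class is meant to be ticked once per frame; a sketch of that loop, with win, manager and fixationCross as in the constructor:

# Sketch: drive the scene one frame at a time until it hands control
# to the response scene via the manager.
scene = AudioScene(win, manager, 'stimulus.wav', fixationCross)
while scene.current_frame < scene.max_frame:
    scene.update()  # plays the sound after delay_frames, then keeps drawing
    win.flip()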
Example #8
    def __init__(self, window, sound, text=None, *args, **kwargs):
        """Constructor for the Audio stimulus.

        Arguments:
        sound - A number (pitch in Hz), string for a note,
                or string for a filename.
                For more info, see:
                http://www.psychopy.org/api/sound.html
        text - Text to display on screen (Optional).

        Additional args and kwargs are passed to the 
        sound.Sound constructor.
        """
        super(Audio, self).__init__(window)
        self.sound = Sound(sound, *args, **kwargs)
        self.text = text
Example #9
def test_update(sound_mock):
    manager = Mock()
    sound_mock.return_value = Sound()
    sound_mock.return_value.duration = 1
    scene = AudioScene(Mock(), manager, Mock(), Mock())
    scene.draw = Mock()

    # Act
    scene.update()
    # Assert
    manager.set_response_scene.assert_not_called()
    # Act
    for _ in range(
            math.ceil(constants.FRAME_RATE +
                      constants.FRAME_RATE * constants.AUDIO_DELAY - 1)):
        scene.update()
    # Assert
    manager.set_response_scene.assert_called_once()
    assert scene.draw.call_count == math.ceil(constants.FRAME_RATE +
                                              constants.FRAME_RATE *
                                              constants.AUDIO_DELAY)
Example #10
def scene(sound_mock, fileMock):
    sound_mock.return_value = Sound()
    sound_mock.return_value.duration = 1
    return AudioScene(Mock(), Mock(), sound_mock, Mock())
Example #11
                           height=0.15,
                           antialias=True,
                           bold=False,
                           italic=False,
                           alignHoriz='center',
                           alignVert='center',
                           fontFiles=(),
                           wrapWidth=None,
                           flipHoriz=False,
                           flipVert=False,
                           languageStyle='LTR',
                           name=None,
                           autoLog=None)

#instantiating the sound stimulus to save time
stimulus = Sound(os.path.join(path_stimuli, stimulus_name + '.wav'))
stop_stim = Sound(stop_wav)
#initializing the index to get the first image in the array
stim_idx = 0

#creation of the duration variables
task_duration, baseline_duration = 20, 20

end_stimuli = 420  # 20 s * 11 baselines + 20 s * 10 tasks
baselines_onset = np.arange(1, end_stimuli, 40)
baselines_offset = baselines_onset + 19

task_onset = np.arange(21, end_stimuli - 20, 40)
task_offset = task_onset + 19
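The arrays above encode an alternating 20 s baseline / 20 s task schedule; a hypothetical helper (condition_at is an illustrative name, not part of the original script) to check which condition a given second falls in:

# Sketch: classify second t of the run using the onset/offset arrays above.
def condition_at(t):
    if any(on <= t <= off for on, off in zip(baselines_onset, baselines_offset)):
        return 'baseline'
    if any(on <= t <= off for on, off in zip(task_onset, task_offset)):
        return 'task'
    return 'undefined'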

###############################################################################
class EyeLinkCoreGraphicsPsychoPy(pylink.EyeLinkCustomDisplay):
    def __init__(self, tracker, win):
        '''Initialize

        tracker: an EyeLink instance (connection)
        win: the PsychoPy window we use for calibration'''

        pylink.EyeLinkCustomDisplay.__init__(self)

        # background and target color
        self._backgroundColor = win.color
        self._foregroundColor = 'black'

        # window to use for calibration
        self._display = win
        # make the mouse cursor invisible
        self._display.mouseVisible = False

        # display width & height
        self._w, self._h = win.size

        # resolution fix for Mac retina displays
        if 'Darwin' in platform.system():
            sys_cmd = 'system_profiler SPDisplaysDataType | grep Retina'
            is_ret = os.system(sys_cmd)
            if is_ret == 0:
                self._w = int(self._w / 2.0)
                self._h = int(self._h / 2.0)

        # store camera image pixels in an array
        self._imagebuffer = array.array('I')

        # store the color palette for camera image drawing
        self._pal = None

        # initial size of the camera image
        self._size = (384, 320)

        # initial mouse configuration
        self._mouse = event.Mouse(False)
        self.last_mouse_state = -1

        # camera image title
        self._msgHeight = self._size[1] / 16.0
        self._title = visual.TextStim(self._display,
                                      '',
                                      wrapWidth=self._w,
                                      color=self._foregroundColor)

        # calibration target
        self._targetSize = self._w / 64.
        self._tar = visual.Circle(self._display,
                                  size=self._targetSize,
                                  lineColor=self._foregroundColor,
                                  lineWidth=self._targetSize / 2)

        # calibration sounds (beeps)
        self._target_beep = Sound('type.wav', stereo=True)
        self._error_beep = Sound('error.wav', stereo=True)
        self._done_beep = Sound('qbeep.wav', stereo=True)

        # a reference to the tracker connection
        self._tracker = tracker

        # for a clearer view we always enlarge the camera image
        self.imgResize = None

    def setup_cal_display(self):
        '''Set up the calibration display '''

        self._display.clearBuffer()

    def clear_cal_display(self):
        '''Clear the calibration display'''

        self._display.color = self._backgroundColor
        self._display.flip()

    def exit_cal_display(self):
        '''Exit the calibration/validation routine'''

        self.clear_cal_display()

    def record_abort_hide(self):
        '''This function is called if aborted'''

        pass

    def erase_cal_target(self):
        '''Erase the target'''

        self.clear_cal_display()

    def draw_cal_target(self, x, y):
        '''Draw the target'''

        self.clear_cal_display()

        # target position
        xVis = (x - self._w / 2.0)
        yVis = (self._h / 2.0 - y)

        # draw the calibration target
        self._tar.pos = (xVis, yVis)
        self._tar.draw()
        self._display.flip()

    def play_beep(self, beepid):
        ''' Play a sound during calibration/drift-correction.'''

        if beepid in [pylink.CAL_TARG_BEEP, pylink.DC_TARG_BEEP]:
            self._target_beep.play()
        elif beepid in [pylink.CAL_ERR_BEEP, pylink.DC_ERR_BEEP]:
            self._error_beep.play()
        elif beepid in [pylink.CAL_GOOD_BEEP, pylink.DC_GOOD_BEEP]:
            self._done_beep.play()
        core.wait(0.4)

    def getColorFromIndex(self, colorindex):
        '''Retrieve the colors for camera image elements, e.g., crosshair'''

        if colorindex == pylink.CR_HAIR_COLOR:
            return (255, 255, 255)
        elif colorindex == pylink.PUPIL_HAIR_COLOR:
            return (255, 255, 255)
        elif colorindex == pylink.PUPIL_BOX_COLOR:
            return (0, 255, 0)
        elif colorindex == pylink.SEARCH_LIMIT_BOX_COLOR:
            return (255, 0, 0)
        elif colorindex == pylink.MOUSE_CURSOR_COLOR:
            return (255, 0, 0)
        else:
            return (128, 128, 128)

    def draw_line(self, x1, y1, x2, y2, colorindex):
        '''Draw a line '''

        color = self.getColorFromIndex(colorindex)

        # scale the coordinates
        w, h = self._img.im.size
        x1 = int(x1 / 192 * w)
        x2 = int(x2 / 192 * w)
        y1 = int(y1 / 160 * h)
        y2 = int(y2 / 160 * h)

        # draw the line
        if not any([x < 0 for x in [x1, x2, y1, y2]]):
            self._img.line([(x1, y1), (x2, y2)], color)

    def draw_lozenge(self, x, y, width, height, colorindex):
        ''' draw a lozenge to show the defined search limits '''

        color = self.getColorFromIndex(colorindex)

        # scale the coordinates
        w, h = self._img.im.size
        x = int(x / 192 * w)
        y = int(y / 160 * h)
        width = int(width / 192 * w)
        height = int(height / 160 * h)

        # draw the lozenge
        if width > height:
            rad = int(height / 2.)
            if rad == 0:
                return
            else:
                self._img.line([(x + rad, y), (x + width - rad, y)], color)
                self._img.line([(x + rad, y + height),
                                (x + width - rad, y + height)], color)
                self._img.arc([x, y, x + rad * 2, y + rad * 2], 90, 270, color)
                self._img.arc([x + width - rad * 2, y, x + width, y + height],
                              270, 90, color)
        else:
            rad = int(width / 2.)
            if rad == 0:
                return
            else:
                self._img.line([(x, y + rad), (x, y + height - rad)], color)
                self._img.line([(x + width, y + rad),
                                (x + width, y + height - rad)], color)
                self._img.arc([x, y, x + rad * 2, y + rad * 2], 180, 360,
                              color)
                self._img.arc(
                    [x, y + height - rad * 2, x + rad * 2, y + height], 0, 180,
                    color)

    def get_mouse_state(self):
        '''Get the current mouse position and status'''

        w, h = self._display.size
        X, Y = self._mouse.getPos()

        # scale the mouse position so the cursor stays on the camera image
        mX = (X + w / 2.0) / w * self._size[0] / 2.0
        mY = (h / 2.0 - Y) / h * self._size[1] / 2.0

        state = self._mouse.getPressed()[0]

        return ((mX, mY), state)

    def get_input_key(self):
        '''This function is repeatedly polled to check
        keyboard events'''

        ky = []
        for keycode, modifier in event.getKeys(modifiers=True):
            k = pylink.JUNK_KEY
            if keycode == 'f1': k = pylink.F1_KEY
            elif keycode == 'f2': k = pylink.F2_KEY
            elif keycode == 'f3': k = pylink.F3_KEY
            elif keycode == 'f4': k = pylink.F4_KEY
            elif keycode == 'f5': k = pylink.F5_KEY
            elif keycode == 'f6': k = pylink.F6_KEY
            elif keycode == 'f7': k = pylink.F7_KEY
            elif keycode == 'f8': k = pylink.F8_KEY
            elif keycode == 'f9': k = pylink.F9_KEY
            elif keycode == 'f10': k = pylink.F10_KEY
            elif keycode == 'pageup': k = pylink.PAGE_UP
            elif keycode == 'pagedown': k = pylink.PAGE_DOWN
            elif keycode == 'up': k = pylink.CURS_UP
            elif keycode == 'down': k = pylink.CURS_DOWN
            elif keycode == 'left': k = pylink.CURS_LEFT
            elif keycode == 'right': k = pylink.CURS_RIGHT
            elif keycode == 'backspace': k = ord('\b')
            elif keycode == 'return': k = pylink.ENTER_KEY
            elif keycode == 'space': k = ord(' ')
            elif keycode == 'escape': k = 27
            elif keycode == 'tab': k = ord('\t')
            elif keycode in string.ascii_letters:
                k = ord(keycode)
            elif k == pylink.JUNK_KEY:
                k = 0

            # plus & minus signs for CR adjustment
            if keycode in ['num_add', 'equal']:
                k = ord('+')
            if keycode in ['num_subtract', 'minus']:
                k = ord('-')

            # handles key modifier
            if modifier['alt'] is True: mod = 256
            elif modifier['ctrl'] is True: mod = 64
            elif modifier['shift'] is True: mod = 1
            else:
                mod = 0

            ky.append(pylink.KeyInput(k, mod))

        return ky

    def exit_image_display(self):
        '''Clear the camera image'''

        self.clear_cal_display()
        self._display.flip()

    def alert_printf(self, msg):
        '''Print error messages.'''

        print("Error: " + msg)

    def setup_image_display(self, width, height):
        ''' set up the camera image

        return 1 to show high-resolution camera images'''

        self.last_mouse_state = -1
        self._size = (width, height)

        return 1

    def image_title(self, text):
        '''Draw title text below the camera image'''

        self._title.text = text

    def draw_image_line(self, width, line, totlines, buff):
        '''Display image pixel by pixel, line by line'''

        for i in range(width):
            try:
                self._imagebuffer.append(self._pal[buff[i]])
            except (IndexError, TypeError):
                pass  # no palette entry yet; skip this pixel

        if line == totlines:
            bufferv = self._imagebuffer.tobytes()
            img = Image.frombytes("RGBX", (width, totlines), bufferv)
            self._img = ImageDraw.Draw(img)
            # draw the cross hairs
            self.draw_cross_hair()
            # scale the camera image
            self.imgResize = img.resize((width * 2, totlines * 2))
            cam_img = visual.ImageStim(self._display,
                                       image=self.imgResize,
                                       units='pix')
            cam_img.draw()
            # draw the camera image title
            self._title.pos = (0, -totlines - self._msgHeight)
            self._title.draw()
            self._display.flip()

            # clear the camera image buffer
            self._imagebuffer = array.array('I')

    def set_image_palette(self, r, g, b):
        '''Given a set of RGB colors, create a list of 24bit numbers
        representing the color palette.
        For instance, RGB of (1,64,127) would be saved as 82047,
        or 00000001 01000000 01111111'''

        self._imagebuffer = array.array('I')

        sz = len(r)
        i = 0
        self._pal = []
        while i < sz:
            rf = int(b[i])
            gf = int(g[i])
            bf = int(r[i])
            self._pal.append((rf << 16) | (gf << 8) | (bf))
            i = i + 1
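Wiring this class into a calibration session typically looks like the sketch below; the tracker address and window settings are placeholders, not values from the original snippet:

# Hypothetical setup: connect to the tracker, register this class as the
# calibration graphics environment, then run camera setup / calibration.
tracker = pylink.EyeLink('100.1.1.1')  # placeholder host address
win = visual.Window(fullscr=True, units='pix')
genv = EyeLinkCoreGraphicsPsychoPy(tracker, win)
pylink.openGraphicsEx(genv)
tracker.doTrackerSetup()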
Example #14
class LocalGame:

    action = None
    sound = None
    slidenum = None
    slideons = None

    def __init__(self):
        self.immerseT = []
        self.countT = []
        self.imgBank_instruct = []
        self.imgBank_proc = []
        self.slidenum = 0
        self.slideons = None
        self.localGraphics = GameGraphics()
        self.state = Interact()
        self.action = None
        self.sound = Sound(resourceRoot + soundfile)
        self.saveLoc = DATA_FOLDER + "PREEMPT2_%s/" + SAVE_NAME
        self.start()

    def start(self):
        self.generateTimeBank()
        self.generateImgBank()

    def generateTimeBank(self):
        times = timeorder[cycle - 1]
        for x in times:
            self.immerseT.append(TB[x][0])
            self.countT.append(TB[x][1])
        self.countT = self.countT[:-1]

    def generateImgBank(self):
        self.imgBank_proc = ["Slide11.jpg", "Slide12.jpg"]
        for x in range(1, 11):
            self.imgBank_instruct.append("Slide" + str(x) + ".jpg")
        countorder[cycle - 1].extend((23, 24))
        for x in countorder[cycle - 1]:
            self.imgBank_proc.append("Slide" + str(x) + ".jpg")

    def runInstructions(self):
        for x in self.imgBank_instruct:
            self.slidenum += 1
            self.localGraphics.baseScreen(x)
            self.slideons = str(datetime.now())
            self.action = self.state.actionCont()
            self.state.saveDataInst()
        self.slidenum = 0
        return

    def runFInstructions(self):
        self.localGraphics.baseScreen("Slide25.jpg")
        self.action = self.state.actionCont()

    def runMast(self):
        retcount = 0
        self.localGraphics.baseScreen(self.imgBank_proc[0])
        self.action = self.state.sleeper(3)
        for x in range(0, 5):
            self.slidenum += 1
            self.localGraphics.baseScreen(self.imgBank_proc[1])
            self.sound.play()
            self.state.saveDataMast(self.immerseT[x], 'Immersion')
            self.action = self.state.sleeper(self.immerseT[x])
            if x > 0:
                retcount = 1
            if x < 4:
                self.slidenum += 1
                self.localGraphics.baseScreen(self.imgBank_proc[2 + retcount])
                self.sound.play()
                self.state.saveDataMast(self.countT[x], 'Subtraction')
                self.action = self.state.sleeper(self.countT[x])
        self.sound.play()
        self.localGraphics.baseScreen(self.imgBank_proc[4])
        self.action = self.state.sleeper(90)
        self.localGraphics.baseScreen(self.imgBank_proc[5])
        self.action = self.state.actionCont()
        return

    def fixation(self, time):
        self.localGraphics.fixationScreen()
        self.action = self.state.sleeper(time)
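A hypothetical driver for LocalGame; the class leans on module-level globals (resourceRoot, soundfile, cycle, timeorder, TB, countorder), so this sketch only works inside that module:

# Sketch only: __init__ already calls start(), so the banks are ready.
game = LocalGame()
game.runInstructions()
game.runMast()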
Example #15
    def setup_sounds(self):
        self.cashsnd = Sound('resources/cash.wav')
        self.firesnd = Sound('resources/bbhit.wav')
        self.buzzsnd = Sound('resources/buzz.wav')
Example #16
from tools import sound_freq_sweep, compound_sound
import numpy as np
from psychopy.sound import Sound
from psychopy import visual

p = dict(
    # Display:
    monitor='CRT_NEC_FE992',
    full_screen=True,
    screen_number=1,
    # Sounds:
    correct_sound=Sound(sound_freq_sweep(2000, 2000, .1)),
    incorrect_sound=Sound(sound_freq_sweep(8000, 200, .1)),
    # General:
    n_trials=150,
    fixation_size=0.1,
    rgb=np.array([1., 1., 1.]),
    # Element array:
    sf=4,
    elems_per_row=30,
    elem_size=2.5,
    elem_spacing=1,
    jitter=0.08,
    res=128,
    # Cue:
    cue_size=[2, 2],
    line_width=5,
    # Timing:
    cue_dur=0.2,
    cue_to_ea=0.6,
    #texture_dur =  0.05,
###############################################################################
# .. code-block:: python
#
#   import serial
#   ser = serial.Serial('COM4')  # Change this value according to your setup

#%%
# Create an Oximeter instance, initialize recording and record for 10 seconds

oxi = Oximeter(serial=ser, sfreq=75, add_channels=4).setup()

#%%
# Create the sounds that will be played at systole and diastole detection

beat = Sound('C', secs=0.1)
diastole1 = Sound('E', secs=0.1)
diastole2 = Sound('G', secs=0.1)
diastole3 = Sound('Bfl', secs=0.1)

systoleTime1, systoleTime2, systoleTime3 = None, None, None
tstart = time.time()
while time.time() - tstart < 30:

    # Check if there are new data to read
    while oxi.serial.inWaiting() >= 5:

        # Convert bytes into list of int
        paquet = list(oxi.serial.read(5))

        if oxi.check(paquet):  # Data consistency
Example #18
    def __init__(self, subject_initials, index_number, tr, start_block,
                 config):
        super(StopSignalSession, self).__init__(
            subject_initials,
            index_number,
            tr=tr,
            simulate_mri_trigger=False,
            # NB: DO NOT use this MRI simulation option, but rather another!
            mri_trigger_key=config.get('mri', 'mri_trigger_key'))

        self.config = config
        self.start_block = start_block  # allows for starting at a later block than 1
        self.warmup_trs = config.get('mri', 'warmup_trs')

        if tr == 2:
            self.trial_duration = 8 - .5
        elif tr == 3:
            self.trial_duration = 9 - 0.5
        if self.subject_initials == 'pilot':
            self.trial_duration = [8.5, 7.5, 8.5, 7.5]

        if config.get('audio', 'engine') == 'psychopy':
            # BEFORE moving on, ensure that the correct audio driver is selected
            from psychopy import prefs
            prefs.general['audioLib'] = config.get('audio', 'backend')
            from psychopy.sound import Sound

            self.bleeper = Sound(secs=0.1,
                                 octave=5,
                                 loops=0,
                                 sampleRate=44100,
                                 name='')
            # self.bleeper.play()

        elif config.get('audio', 'engine') == 'TK':
            self.setup_sound_system()
            self.read_sound_file('sounds/0.wav', '0')

            # to test sound:
            # self.play_sound(sound_index=0)

        self.response_button_signs = [
            config.get('input', 'response_button_left'),
            config.get('input', 'response_button_right')
        ]

        screen = self.create_screen(
            engine='psychopy',
            size=config.get('screen', 'size'),
            full_screen=config.get('screen', 'full_screen'),
            background_color=config.get('screen', 'background_color'),
            gamma_scale=config.get('screen', 'gamma_scale'),
            physical_screen_distance=config.get('screen',
                                                'physical_screen_distance'),
            physical_screen_size=config.get('screen', 'physical_screen_size'),
            max_lums=config.get('screen', 'max_lums'),
            wait_blanking=config.get('screen', 'wait_blanking'),
            screen_nr=config.get('screen', 'screen_nr'),
            mouse_visible=config.get('screen', 'mouse_visible'))

        # Try this
        # TODO: think about really including this?
        self.screen.recordFrameIntervals = True

        self.phase_durations = np.array([
            -0.0001,  # wait for scan pulse
            -5,
            1,
            -5
        ])  # the strings will be filled before every trial

        self.load_design()
        self.prepare_objects()
        self.prepare_staircase()
Example #19
class StopSignalSession(MRISession):
    def __init__(self, subject_initials, index_number, tr, start_block,
                 config):
        super(StopSignalSession, self).__init__(
            subject_initials,
            index_number,
            tr=tr,
            simulate_mri_trigger=False,
            # NB: DO NOT use this MRI simulation option, but rather another!
            mri_trigger_key=config.get('mri', 'mri_trigger_key'))

        self.config = config
        self.start_block = start_block  # allows for starting at a later block than 1
        self.warmup_trs = config.get('mri', 'warmup_trs')

        if tr == 2:
            self.trial_duration = 8 - .5
        elif tr == 3:
            self.trial_duration = 9 - 0.5
        if self.subject_initials == 'pilot':
            self.trial_duration = [8.5, 7.5, 8.5, 7.5]

        if config.get('audio', 'engine') == 'psychopy':
            # BEFORE moving on, ensure that the correct audio driver is selected
            from psychopy import prefs
            prefs.general['audioLib'] = config.get('audio', 'backend')
            from psychopy.sound import Sound

            self.bleeper = Sound(secs=0.1,
                                 octave=5,
                                 loops=0,
                                 sampleRate=44100,
                                 name='')
            # self.bleeper.play()

        elif config.get('audio', 'engine') == 'TK':
            self.setup_sound_system()
            self.read_sound_file('sounds/0.wav', '0')

            # to test sound:
            # self.play_sound(sound_index=0)

        self.response_button_signs = [
            config.get('input', 'response_button_left'),
            config.get('input', 'response_button_right')
        ]

        screen = self.create_screen(
            engine='psychopy',
            size=config.get('screen', 'size'),
            full_screen=config.get('screen', 'full_screen'),
            background_color=config.get('screen', 'background_color'),
            gamma_scale=config.get('screen', 'gamma_scale'),
            physical_screen_distance=config.get('screen',
                                                'physical_screen_distance'),
            physical_screen_size=config.get('screen', 'physical_screen_size'),
            max_lums=config.get('screen', 'max_lums'),
            wait_blanking=config.get('screen', 'wait_blanking'),
            screen_nr=config.get('screen', 'screen_nr'),
            mouse_visible=config.get('screen', 'mouse_visible'))

        # Try this
        # TODO: think about really including this?
        self.screen.recordFrameIntervals = True

        self.phase_durations = np.array([
            -0.0001,  # wait for scan pulse
            -5,
            1,
            -5
        ])  # the strings will be filled before every trial

        self.load_design()
        self.prepare_objects()
        self.prepare_staircase()

    # creating a mixture class would be a lot nicer but I can't be bothered so I'll cheat and include everything
    # here
    def setup_sound_system(self):
        """initialize pyaudio backend, and create dictionary of sounds."""
        self.pyaudio = pyaudio.PyAudio()
        self.sound_files = \
        subprocess.Popen('ls ' + os.path.join('.', 'sounds', '*.wav'), shell=True,
                         stdout=subprocess.PIPE).communicate()[0].split('\n')[0:-1]
        self.sounds = {}
        for sf in self.sound_files:
            self.read_sound_file(file_name=sf)
            # print self.sounds

    def read_sound_file(self, file_name, sound_name=None):
        """Read sound file from file_name, and append to self.sounds with name as key"""
        if sound_name is None:
            sound_name = os.path.splitext(os.path.split(file_name)[-1])[0]

        rate, data = wavfile.read(file_name)
        # create stream data assuming 2 channels, i.e. stereo data, in np.int16 format
        stream_data = data.astype(np.int16)

        # check data formats - is this stereo sound? If so, we need to fix it.
        wf = wave.open(file_name, 'rb')
        # print sound_name
        # print wf.getframerate(), wf.getnframes(), wf.getsampwidth(), wf.getnchannels()
        if wf.getnchannels() == 2:
            stream_data = stream_data[::2]

        self.sounds.update({sound_name: stream_data})

    def play_bleep(self):

        if self.config.get('audio', 'engine') == 'TK':
            self.play_sound('0')
        else:
            self.bleeper.play()

    def load_design(self):

        fn = 'sub-' + str(self.subject_initials).zfill(3) + '_tr-' + str(
            self.index_number) + '_design'
        design = pd.read_csv(os.path.join('designs', fn + '.csv'),
                             sep='\t',
                             index_col=False)

        self.design = design
        self.design = self.design.apply(pd.to_numeric)  # cast all to numeric


#        self.design.stop_trial = pd.to_
#        print(self.design)

    def prepare_staircase(self):
        # TODO: load from previous run?

        # check for old file
        now = datetime.datetime.now()
        opfn = now.strftime("%Y-%m-%d")
        expected_filename = str(self.subject_initials) + '_' + str(
            self.index_number) + '_' + opfn
        fns = glob.glob('./data/' + expected_filename + '_*_staircases.pkl')

        if self.start_block > 1 and len(fns) == 1:
            # if previous run was created
            with open(fns[0], 'rb') as f:
                self.stairs = pkl.load(f)
        else:
            # Make dict
            info = {
                'startPoints': [.100, .200]
            }  # start points for the four staircases

            # create staircases
            self.stairs = []
            for thisStart in info['startPoints']:
                # we need a COPY of the info for each staircase
                # (or the changes here will be made to all the other staircases)
                thisInfo = copy.copy(info)

                # now add any specific info for this staircase
                thisInfo[
                    'thisStart'] = thisStart  # we might want to keep track of this
                thisStair = data.StairHandler(startVal=thisStart,
                                              extraInfo=thisInfo,
                                              stepType='lin',
                                              minVal=0,
                                              nTrials=1000,
                                              maxVal=0.900,
                                              stepSizes=[0.050])
                self.stairs.append(thisStair)

            # Save staircases
            with open(self.output_file + '_staircases.pkl', 'wb') as f:
                pkl.dump(self.stairs, f)

        self.design.staircase_ID = -1
        for block in np.unique(self.design.block):
            if block < self.start_block:
                continue

            # how many stop trials this block?
            n_stop_trials = self.design.loc[self.design.block ==
                                            block].stop_trial.sum()
            staircase_idx = np.tile(np.arange(len(self.stairs)),
                                    reps=1000)[:n_stop_trials]
            np.random.shuffle(staircase_idx)

            # append to design
            self.design.loc[(self.design.stop_trial == 1) & (self.design.block == block), 'staircase_id'] = \
                staircase_idx

    def prepare_objects(self):
        config = self.config

        self.left_stim = StopStimulus(screen=self.screen,
                                      direction=0,
                                      arrow_size_horizontal_degrees=config.get(
                                          'stimulus', 'arrow_size'))
        self.right_stim = StopStimulus(
            screen=self.screen,
            direction=1,
            arrow_size_horizontal_degrees=config.get('stimulus', 'arrow_size'))
        self.fixation_circle = FixationCircle(
            screen=self.screen,
            circle_radius_degrees=config.get('stimulus',
                                             'circle_radius_degrees'),
            line_width=config.get('stimulus', 'line_width'),
            line_color=config.get('stimulus', 'line_color'))

        self.scanner_wait_screen = visual.TextStim(
            win=self.screen,
            text='Waiting for scanner...',
            name='scanner_wait_screen',
            units='pix',
            font='Helvetica Neue',
            pos=(0, 0),
            italic=True,
            height=30,
            alignHoriz='center')
        if self.subject_initials == 'DEBUG':
            self.stop_timing_circle = visual.Circle(win=self.screen,
                                                    radius=3,
                                                    edges=50,
                                                    lineWidth=1.5,
                                                    fillColor='red',
                                                    lineColor='red',
                                                    units='deg',
                                                    lineColorSpace='rgb',
                                                    fillColorSpace='rgb')

    def save_data(self, trial_handler=None, block_n='all'):

        output_fn_dat = self.output_file + '_block-' + str(block_n)
        output_fn_frames = self.output_file + '_block-' + str(block_n)

        if trial_handler is not None:
            trial_handler.saveAsPickle(output_fn_dat)
            trial_handler.saveAsWideText(output_fn_dat + '.csv', )

        if self.screen.recordFrameIntervals:
            # Save frame intervals to file
            self.screen.saveFrameIntervals(fileName=output_fn_frames +
                                           '_frameintervals.log',
                                           clear=False)

            # import matplotlib.pyplot as plt
            # # Make a nice figure
            # intervals_ms = np.array(self.screen.frameIntervals) * 1000
            # m = np.mean(intervals_ms)
            # sd = np.std(intervals_ms)
            #
            # msg = "Mean=%.1fms, s.d.=%.2f, 99%%CI(frame)=%.2f-%.2f"
            # dist_string = msg % (m, sd, m - 2.58 * sd, m + 2.58 * sd)
            # n_total = len(intervals_ms)
            # n_dropped = sum(intervals_ms > (1.5 * m))
            # msg = "Dropped/Frames = %i/%i = %.3f%%"
            # dropped_string = msg % (n_dropped, n_total, 100 * n_dropped / float(n_total))
            #
            # # plot the frame intervals
            # plt.figure(figsize=[12, 8])
            # plt.subplot(1, 2, 1)
            # plt.plot(intervals_ms, '-')
            # plt.ylabel('t (ms)')
            # plt.xlabel('frame N')
            # plt.title(dropped_string)
            #
            # plt.subplot(1, 2, 2)
            # plt.hist(intervals_ms, 50, normed=0, histtype='stepfilled')
            # plt.xlabel('t (ms)')
            # plt.ylabel('n frames')
            # plt.title(dist_string)
            # plt.savefig(output_fn_frames + '_frameintervals.png')

    def close(self):
        """ Saves stuff and closes """

        self.save_data()
        super(StopSignalSession, self).close()

    def run(self):
        """ Runs this Stop Signal task"""

        test_sound = TestSoundTrial(ID=-1,
                                    parameters={},
                                    phase_durations=[1000],
                                    session=self,
                                    screen=self.screen,
                                    tracker=None)
        test_sound.run()
        self.block_start_time = 0

        # start emulator TODO REMOVE THIS STUFF!!
        # n_vols = [343+2, 513+2, 343+2, 513+2]
        # trs = [3, 2, 3, 2]
        # n_vols = [31+2, 21+2]
        # trs = [3, 2]
        # from psychopy.hardware.emulator import launchScan

        for block_n in np.unique(self.design.block):
            if block_n < self.start_block:
                continue
            this_block_design = self.design.loc[self.design.block == block_n]

            # scanner_emulator = launchScan(win=self.screen, settings={'TR': trs[block_n-1], 'volumes': n_vols[block_n-1],
            #                                                          'sync': 't'},
            #                               mode='Test')

            if isinstance(self.trial_duration, list):
                trial_duration = self.trial_duration[block_n - 1]
            else:
                trial_duration = self.trial_duration

            trial_handler = data.TrialHandler(
                this_block_design.to_dict('records'),
                nReps=1,
                method='sequential')

            for block_trial_ID, this_trial_info in enumerate(trial_handler):

                is_stop_trial = this_trial_info['stop_trial']
                if is_stop_trial:
                    this_trial_staircase_id = int(
                        this_trial_info['staircase_id'])
                    this_trial_ssd = next(self.stairs[this_trial_staircase_id])
                    this_staircase_start_val = self.stairs[
                        this_trial_staircase_id].extraInfo['thisStart']
                else:
                    this_trial_staircase_id = -1
                    this_trial_ssd = -1
                    this_staircase_start_val = -1

                this_trial_parameters = {
                    'direction': int(this_trial_info['direction']),
                    'stop_trial': int(this_trial_info['stop_trial']),
                    'current_ssd': this_trial_ssd,
                    'current_staircase': this_trial_staircase_id,
                    'staircase_start_val': this_staircase_start_val,
                    'block': block_n,
                    'block_trial_ID': block_trial_ID
                }

                these_phase_durations = self.phase_durations.copy()
                these_phase_durations[1] = this_trial_info.jitter
                # NB we stop the trial 0.5s before the start of the new trial, to allow sufficient computation time
                # for preparing the next trial. Therefore 8.5s instead of 9s.
                these_phase_durations[
                    3] = trial_duration - these_phase_durations[
                        1] - these_phase_durations[2]

                this_trial = StopSignalTrial(
                    ID=int(this_trial_info.trial_ID),
                    parameters=this_trial_parameters,
                    phase_durations=these_phase_durations,
                    session=self,
                    screen=self.screen)

                # run the prepared trial
                this_trial.run()

                # Record some stuff
                trial_handler.addData('rt', this_trial.rt)
                trial_handler.addData('response', this_trial.response)

                # absolute times since session start
                trial_handler.addData('start_time', this_trial.start_time)
                trial_handler.addData('t_time', this_trial.t_time)
                trial_handler.addData('jitter_time', this_trial.jitter_time)
                trial_handler.addData('stimulus_time',
                                      this_trial.stimulus_time)
                trial_handler.addData('iti_time', this_trial.iti_time)

                # durations / time since actual trial start (note that the *actual* trial start is t_time!)
                if is_stop_trial:
                    trial_handler.addData('ssd', this_trial_ssd)
                    trial_handler.addData(
                        'stop_signal_time_recorded',
                        this_trial.bleep_time - this_trial.jitter_time)
                    trial_handler.addData('staircase_start_val',
                                          this_staircase_start_val)

                trial_handler.addData(
                    'phase_0_measured',
                    this_trial.t_time - this_trial.start_time)
                trial_handler.addData(
                    'phase_1_measured',
                    this_trial.jitter_time - this_trial.t_time)
                trial_handler.addData(
                    'phase_2_measured',
                    this_trial.stimulus_time - this_trial.jitter_time)
                trial_handler.addData(
                    'phase_3_measured',
                    this_trial.iti_time - this_trial.stimulus_time)

                # durations / time since actual start of the block. These are useful to create events-files later for
                #  convolving. Can also grab these from the eventArray though.
                trial_handler.addData(
                    'trial_t_time_block_measured',
                    this_trial.t_time - self.block_start_time)
                trial_handler.addData(
                    'stimulus_onset_time_block_measured',
                    this_trial.jitter_time - self.block_start_time)
                # Counter-intuitive, but jitter_time is END of the jitter period = onset of stim

                # Update staircase if this was a stop trial
                if is_stop_trial:
                    if this_trial.response_measured:
                        # Failed stop: Decrease SSD
                        self.stairs[this_trial_staircase_id].addData(1)
                    else:
                        # Successful stop: Increase SSD
                        self.stairs[this_trial_staircase_id].addData(0)

                if self.stopped:
                    # out of trial
                    break

            # Save
            self.save_data(trial_handler, block_n)

            if self.stopped:
                # out of block
                break

            # end of block
            this_trial = EndOfBlockTrial(ID=int('999' + str(block_n)),
                                         parameters={},
                                         phase_durations=[0.5, 1000],
                                         session=self,
                                         screen=self.screen)
            this_trial.run()

        self.close()
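A hypothetical entry point for this session; config stands for whatever configuration object the host project passes around and reads via config.get(section, option):

# Sketch: run() loops over blocks and ends by calling close(), which saves data.
session = StopSignalSession('001', index_number=1, tr=2, start_block=1,
                            config=config)
session.run()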
Example #20
ns = egi.Netstation()
ns.connect('10.10.10.42', 55513)
ns.BeginSession()
#
ns.sync()
#
ns.StartRecording()
#
# sampleDir = "./samples"
#
# sampleFilenames = [path.join(sampleDir, f) for f in listdir(sampleDir)]

# psychopy.voicekey.samples_from_file(sampleFilenames[0])

# trials = TrialHandler(sampleFilenames, 1, method="random")
s = Sound()

win = visual.Window()
msg = visual.TextStim(win, text="Press a key to hear a sound, Q to quit")

s.setSound("./samples/13.wav")
while True:
    msg.draw()
    win.flip()
    k = event.getKeys()
    if k:
        if 'q' in k:
            break
        ns.sync()
        s.play()
        ns.send_event('evt1')
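The snippet stops mid-session; a sketch of the usual teardown, assuming the egi pynetstation API used above:

# Sketch of the omitted teardown calls.
ns.StopRecording()
ns.EndSession()
ns.disconnect()
win.close()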
Example #21
kb_events = None

rise_amnt = 10
t_start = time.time()

flip_bit = 1 #this is just to flip the sign of width change

#-------------------------------
# SETUP SOUND STIMS
freq_factor = 2*np.pi*np.linspace(0,0.5,22050)
freqL = 440
freqR = 440
soundL = np.sin(freq_factor*freqL)
soundR = np.sin(freq_factor*freqR)
s_out = Sound(np.array([soundL,soundR]).T,secs=10)
#
#-------------------------------
t=1
while not kb_events:
    s_out.stop()
    if t==1:
        s_out.play()
        t=0
    
#    soundL = np.sin(freq_factor*(freqL+10))
#    soundR = np.sin(freq_factor*freqR)
#    s_out.setSound = np.array([soundL,soundR]).T
    # Get the current mouse position
    # posDelta is the change in position * since the last call *
    position, posDelta = mouse.getPositionAndDelta()
                              color=[-1, -1, -1])

Hz = 0
while (Hz < 50 or Hz > 150):
    Hz = win.getActualFrameRate(nIdentical=20, nMaxFrames=80, nWarmUpFrames=10)
    Hz = round(Hz)
    print(Hz)
ifi = 1 / Hz

gap_frames = round(gap_dur / ifi)
grating_frames = round(grating_dur / ifi)

# create sound
medf_s = Sound(800,
               sampleRate=44100,
               secs=grating_frames * ifi,
               stereo=True,
               loops=0)

trial_times = np.array([])

trial_times = np.append(trial_times, win.flip())  # record the first flip time
for itrial in range(ntrials):
    for gframe in range(int(gap_frames)):
        #print(gframe)
        fixation.draw()
        win.flip()

    for gtframe in range(int(grating_frames)):
        if gtframe == 0:
            if ppt: win.callOnFlip(p_port.setData, int(itrial + 1))
Example #23
                           autoLog=None)

#clock initialization
globalClock = core.Clock()

#%%############################################################################
#                      THESE VARIABLES DEPEND ON                             #
#                      THE EXPERIMENTAL DESIGN                               #
###############################################################################
audio_stim_name = input('Insert the filename of the audio stimulus: ')
nr_NF_session = input('Number of the NF session (1,2,3...): ')

#path of the stimulus
stimulus_path = os.path.join(wdir,
                             'sounds/stimuli/' + audio_stim_name + '.wav')
stimulus = Sound(stimulus_path)
#path of the stop audio file
stop_wav = os.path.join(wdir, 'sounds/stop.wav')
stop_stim = Sound(stop_wav)

#create a new output directory for the FB images
outdir = os.path.join(os.path.abspath(os.path.join(sub_json_file, os.pardir)),
                      audio_stim_name + '_output_' + str(nr_NF_session))
if not os.path.exists(outdir):
    os.makedirs(outdir)

#condition timings
baselines = pickle.load(
    open(os.path.join(wdir, 'prt/intermittent/baselines.pkl'), 'rb'))
tasks = pickle.load(
    open(os.path.join(wdir, 'prt/intermittent/tasks.pkl'), 'rb'))
Example #24
###############################################################################
# .. code-block:: python
#
#   import serial
#   ser = serial.Serial('COM4')  # Change this value according to your setup

#%%
# Create an Oximeter instance, initialize recording and record for 10 seconds

oxi = Oximeter(serial=ser, sfreq=75, add_channels=4).setup()

#%%
# Create the sounds that will be played at systole and diastole detection

beat = Sound("C", secs=0.1)
diastole1 = Sound("E", secs=0.1)
diastole2 = Sound("G", secs=0.1)
diastole3 = Sound("Bfl", secs=0.1)

systoleTime1, systoleTime2, systoleTime3 = None, None, None
tstart = time.time()
while time.time() - tstart < 30:

    # Check if there are new data to read
    while oxi.serial.inWaiting() >= 5:

        # Convert bytes into list of int
        paquet = list(oxi.serial.read(5))

        if oxi.check(paquet):  # Data consistency
Example #25
from tools import sound_freq_sweep, compound_sound

from psychopy.sound import Sound

p = dict(
    color_target=True,
    monitor='SportsMedicineLab',
    screen_number=0,
    scanner=False,
    eye_motion=True,
    full_screen=False,
    save_fig=True,
    #Feedback sounds
    incorrect_sound=Sound(sound_freq_sweep(8000, 200, .1)),
    correct_sound=Sound(sound_freq_sweep(2000, 2000, .1)),
    no_response_sound=Sound(sound_freq_sweep(500, 700, .1)),
    eye_motion_sound=Sound(compound_sound([200, 400, 800], .2)),
    fixation_size=3.0,
    target_dur=0.1,
    response_dur=1.5,
    feedback_dur=0.3,
    iti=0.24,
    isi=0.3,
    interblock_interval=0.54,
    # block_duration = 12, Enforce this!
    n_trials=5,
    n_blocks=12,
    target_size=4,
    target_x=4,
    target_y=4,
    sf_base=1,
Example #26
class SyncGenerator(threading.Thread):
    def __init__(self, TR=1.0, volumes=10, sync='5', skip=0, sound=False):
        """Class for a character-emitting metronome thread (emulate MR sync pulse).

            Aim: Allow testing of temporal robustness of fMRI scripts by emulating
            a hardware sync pulse. Adds an arbitrary 'sync' character to the key
            buffer, with sub-millisecond precision (less precise if CPU is maxed).
            Recommend: TR=1.000 or higher and less than 100% CPU. Shorter TR
            --> higher CPU load.

            Parameters:
                TR:      seconds per whole-brain volume
                volumes: number of 3D volumes to obtain in a given scanning run
                sync:    character used as flag for sync timing, default='5'
                skip:    how many frames to silently omit initially during T1
                         stabilization, no sync pulse. Not needed to test script
                         timing, but will give more accurate feel to start of run.
                         aka "discdacqs".
                sound:   simulate scanner noise
        """
        if TR < 0.1:
            raise ValueError('SyncGenerator: whole-brain TR < 0.1 not supported')
        self.TR = TR
        self.hogCPU = 0.035
        self.timesleep = self.TR
        self.volumes = int(volumes)
        self.sync = sync
        self.skip = skip
        self.playSound = sound
        if self.playSound:  # pragma: no cover
            self.sound1 = Sound(800, secs=self.TR-.08, volume=0.15, autoLog=False)
            self.sound2 = Sound(813, secs=self.TR-.08, volume=0.15, autoLog=False)

        self.clock = core.Clock()
        self.stopflag = False
        threading.Thread.__init__(self, None, 'SyncGenerator', None)
        self.running = False

    def run(self):
        self.running = True
        if self.skip:
            for i in range(int(self.skip)):
                if self.playSound:  # pragma: no cover
                    self.sound1.play()
                    self.sound2.play()
                core.wait(self.TR, hogCPUperiod=0) # emulate T1 stabilization without data collection
        self.clock.reset()
        for vol in range(1, self.volumes+1):
            if self.playSound:  # pragma: no cover
                self.sound1.play()
                self.sound2.play()
            if self.stopflag:
                break
            # "emit" a sync pulse by placing a key in the buffer:
            event._onPygletKey(symbol=self.sync, modifiers=None, emulated=True)
            # wait for start of next volume, doing our own hogCPU for tighter sync:
            core.wait(self.timesleep - self.hogCPU, hogCPUperiod=0)
            while self.clock.getTime() < vol * self.TR:
                pass # hogs the CPU for tighter sync
        self.running = False
        return self

    def stop(self):
        self.stopflag = True
Example #27
        return str(self.id)


### Global variables for rendering stimuli
sone = subject(1, "act", 0.3, None, window.size[0] / -4)
stwo = subject(2, "obs", 0.7, None, window.size[0] / 4)
subjects = [sone, stwo]

expinfo = {'participant1': sone.id, 'participant2': stwo.id, 'pair': 1}
#expinfo = {'participant1': sone.id}

blocks = range(4)
ntrials = 2

# create beep for decision interval
beep = Sound('A', secs=0.5)


def genstartscreen():
    visual.TextStim(window,
                    text="Press spacebar to start.",
                    pos=[0 + sone.xoffset, 0],
                    color='black',
                    height=20).draw()

    visual.TextStim(window,
                    text="Press spacebar to start.",
                    pos=[0 + stwo.xoffset, 0],
                    color='black',
                    height=20).draw()
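genstartscreen only draws; a flip and a keypress wait complete the start screen (a sketch, assuming the window object and psychopy's event module from the surrounding script):

# Sketch: show both participants' start screens and wait for a spacebar.
genstartscreen()
window.flip()
event.waitKeys(keyList=['space'])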