Example #1
0
    def __init__(self, settings_yaml, texts_yaml):
        """Build the experiment: load YAML config, then create the window,
        text/image/sound stimuli, feedback sounds, and a trial timer.

        Args:
            settings_yaml: path to a YAML file with 'waits',
                'response_keys', and 'survey_url' entries.
            texts_yaml: path to a YAML file of instruction texts.
        """
        with open(settings_yaml, 'r') as f:
            # safe_load: yaml.load without an explicit Loader is deprecated
            # and raises TypeError in PyYAML >= 6; config is plain data.
            exp_info = yaml.safe_load(f)

        self.waits = exp_info.pop('waits')
        self.response_keys = exp_info.pop('response_keys')
        self.survey_url = exp_info.pop('survey_url')

        with open(texts_yaml, 'r') as f:
            self.texts = yaml.safe_load(f)

        self.win = visual.Window(fullscr=True, units='pix')

        # Shared styling for all text stimuli.
        text_kwargs = dict(height=60, font='Consolas', color='black')
        self.fix = visual.TextStim(self.win, text='+', **text_kwargs)
        self.prompt = visual.TextStim(self.win,
                                      text='Yes or No?',
                                      **text_kwargs)

        self.questions = load_sounds(Path(self.STIM_DIR, 'questions'))
        self.cues = load_sounds(Path(self.STIM_DIR, 'cues'))

        size = [400, 400]
        image_kwargs = dict(win=self.win, size=size)
        self.mask = DynamicMask(Path(self.STIM_DIR, 'dynamic_mask'),
                                **image_kwargs)
        self.pics = load_images(Path(self.STIM_DIR, 'pics'), **image_kwargs)
        # Extra pixels of frame beyond the picture edge; previously this
        # constant was defined but a duplicated literal 20 was used instead.
        frame_buffer = 20
        self.frame = visual.Rect(self.win,
                                 width=size[0] + frame_buffer,
                                 height=size[1] + frame_buffer,
                                 lineColor='black')

        # Feedback sounds keyed by correctness: 0 -> buzz, 1 -> bleep.
        feedback_dir = Path(self.STIM_DIR, 'feedback')
        self.feedback = {}
        self.feedback[0] = sound.Sound(Path(feedback_dir, 'buzz.wav'))
        self.feedback[1] = sound.Sound(Path(feedback_dir, 'bleep.wav'))

        self.timer = core.Clock()
Example #2
0
    def __init__(self, settings_yaml, texts_yaml):
        """Build the experiment: load YAML config, then create the window,
        text/image/sound stimuli, feedback sounds, and a trial timer.

        Args:
            settings_yaml: path to a YAML file with 'waits',
                'response_keys', and 'survey_url' entries.
            texts_yaml: path to a YAML file of instruction texts.
        """
        with open(settings_yaml, 'r') as f:
            # safe_load: yaml.load without an explicit Loader is deprecated
            # and raises TypeError in PyYAML >= 6; config is plain data.
            exp_info = yaml.safe_load(f)

        self.waits = exp_info.pop('waits')
        self.response_keys = exp_info.pop('response_keys')
        self.survey_url = exp_info.pop('survey_url')

        with open(texts_yaml, 'r') as f:
            self.texts = yaml.safe_load(f)

        self.win = visual.Window(fullscr=True, units='pix')

        # Shared styling for all text stimuli.
        text_kwargs = dict(height=60, font='Consolas', color='black')
        self.fix = visual.TextStim(self.win, text='+', **text_kwargs)
        self.prompt = visual.TextStim(self.win, text='Yes or No?',
                                      **text_kwargs)

        self.questions = load_sounds(Path(self.STIM_DIR, 'questions'))
        self.cues = load_sounds(Path(self.STIM_DIR, 'cues'))

        size = [400, 400]
        image_kwargs = dict(win=self.win, size=size)
        self.mask = DynamicMask(Path(self.STIM_DIR, 'dynamic_mask'),
                                **image_kwargs)
        self.pics = load_images(Path(self.STIM_DIR, 'pics'), **image_kwargs)
        # Extra pixels of frame beyond the picture edge; previously this
        # constant was defined but a duplicated literal 20 was used instead.
        frame_buffer = 20
        self.frame = visual.Rect(self.win,
                                 width=size[0] + frame_buffer,
                                 height=size[1] + frame_buffer,
                                 lineColor='black')

        # Feedback sounds keyed by correctness: 0 -> buzz, 1 -> bleep.
        feedback_dir = Path(self.STIM_DIR, 'feedback')
        self.feedback = {}
        self.feedback[0] = sound.Sound(Path(feedback_dir, 'buzz.wav'))
        self.feedback[1] = sound.Sound(Path(feedback_dir, 'bleep.wav'))

        self.timer = core.Clock()
    def __init__(self, settings_yaml, texts_yaml):
        """Create the window and stimuli using the settings provided.

        Args:
            settings_yaml: path to a YAML file with 'waits',
                'response_keys', and 'survey_url' entries.
            texts_yaml: path to a YAML file of instruction texts.
        """
        with open(settings_yaml, 'r') as f:
            # safe_load: yaml.load without an explicit Loader is deprecated
            # and raises TypeError in PyYAML >= 6; config is plain data.
            exp_info = yaml.safe_load(f)

        self.waits = exp_info.pop('waits')
        self.response_keys = exp_info.pop('response_keys')
        self.survey_url = exp_info.pop('survey_url')

        with open(texts_yaml, 'r') as f:
            self.texts = yaml.safe_load(f)

        self.win = visual.Window(
            fullscr=True,
            units='pix',
            allowGUI=False,
            winType='pyglet',
            color=[.6, .6, .6],
        )

        # All text stimuli share position and styling; they are drawn a
        # little above center so the mask can sit at the same spot.
        fixation_point = [0, 100]
        text_kwargs = dict(
            win=self.win,
            pos=fixation_point,
            height=40,
            font='Consolas',
            color='black',
            wrapWidth=int(self.win.size[0] * 0.8),
        )
        self.ready = visual.TextStim(text='READY', **text_kwargs)
        self.question = visual.TextStim(**text_kwargs)
        self.prompt = visual.TextStim(text='?', **text_kwargs)
        self.prompt.setHeight(100)  # increase font size from default

        cues_dir = Path(self.STIM_DIR, 'cues')
        self.cues = load_sounds(cues_dir, include_ext=True)

        mask_kwargs = dict(win=self.win, pos=fixation_point, size=[500, 500])
        self.mask = DynamicMask(**mask_kwargs)

        # Feedback sounds keyed by correctness: 0 -> buzz, 1 -> bleep.
        feedback_dir = Path(self.STIM_DIR, 'feedback')
        self.feedback = {}
        self.feedback[0] = sound.Sound(Path(feedback_dir, 'buzz.wav'))
        self.feedback[1] = sound.Sound(Path(feedback_dir, 'bleep.wav'))

        self.timer = core.Clock()
Example #4
0
    def __init__(self, settings_yaml, texts_yaml):
        """Create the window and stimuli using the settings provided.

        Args:
            settings_yaml: path to a YAML file with 'waits',
                'response_keys', and 'survey_url' entries.
            texts_yaml: path to a YAML file of instruction texts.
        """
        with open(settings_yaml, 'r') as f:
            # safe_load: yaml.load without an explicit Loader is deprecated
            # and raises TypeError in PyYAML >= 6; config is plain data.
            exp_info = yaml.safe_load(f)

        self.waits = exp_info.pop('waits')
        self.response_keys = exp_info.pop('response_keys')
        self.survey_url = exp_info.pop('survey_url')

        with open(texts_yaml, 'r') as f:
            self.texts = yaml.safe_load(f)

        self.win = visual.Window(fullscr=True, units='pix', allowGUI=False)

        # Shared styling for all text stimuli.
        text_kwargs = dict(win=self.win,
                           height=30,
                           font='Consolas',
                           color='black',
                           wrapWidth=400)
        self.fix = visual.TextStim(text='+', **text_kwargs)
        self.question = visual.TextStim(**text_kwargs)
        self.prompt = visual.TextStim(text='?', **text_kwargs)
        self.word = visual.TextStim(**text_kwargs)

        self.cues = load_sounds(unipath.Path(self.STIM_DIR, 'cues'),
                                include_ext=True)  # key names strawberry_1.wav

        size = [400, 400]
        image_kwargs = dict(win=self.win, size=size)
        self.mask = DynamicMask(**image_kwargs)

        # Feedback sounds keyed by correctness: 0 -> buzz, 1 -> bleep.
        feedback_dir = unipath.Path(self.STIM_DIR, 'feedback')
        self.feedback = {}
        self.feedback[0] = sound.Sound(unipath.Path(feedback_dir, 'buzz.wav'))
        self.feedback[1] = sound.Sound(unipath.Path(feedback_dir, 'bleep.wav'))

        self.timer = core.Clock()
Example #5
0
    def __init__(self, experiment_yaml):
        """Create the window, cues, masks, target, and feedback stimuli
        from a single experiment YAML config.

        Args:
            experiment_yaml: path to a YAML file with 'texts',
                'times_in_seconds', and 'response_map' entries.
        """
        self.window = visual.Window(fullscr=True, units='pix', allowGUI=False)

        # Save any info in the yaml file to the experiment object.
        # safe_load: yaml.load without an explicit Loader is deprecated
        # and raises TypeError in PyYAML >= 6; config is plain data.
        with open(experiment_yaml, 'r') as f:
            self.config = yaml.safe_load(f)
        self.texts = self.config.pop('texts')
        self.times_in_seconds = self.config.pop('times_in_seconds')
        self.response_map = self.config.pop('response_map')

        # Create the fixation and prompt
        text_kwargs = {'height': 40, 'font': 'Consolas', 'color': 'black'}
        self.fix = visual.TextStim(self.window, text='+', **text_kwargs)
        self.prompt = visual.TextStim(self.window, text='?', **text_kwargs)

        # Create the masks
        mask_size = 200
        mask_kwargs = {'win': self.window, 'size': [mask_size, mask_size]}
        gutter = 440  # distance between L/R and U/D centroids
        self.location_map = {
            'left': (-gutter/2, 0),
            'right': (gutter/2, 0),
            'up': (0, gutter/2),
            'down': (0, -gutter/2)
        }
        self.masks = [DynamicMask(pos=p, **mask_kwargs)
                      for p in self.location_map.values()]

        # Stimuli directory
        STIM_DIR = unipath.Path('stimuli')
        assert STIM_DIR.isdir(), "stimuli directory not found"

        # Create the arrow cues
        self.arrows = {}
        for direction in ['left', 'right', 'neutral']:
            arrow_png = unipath.Path(STIM_DIR, 'arrow-%s.png' % direction)
            assert arrow_png.exists(), "%s not found" % arrow_png
            arrow_png = str(arrow_png)  # psychopy doesn't like unipath.Path's
            self.arrows[direction] = visual.ImageStim(self.window, arrow_png)

        # Create the visual word cue using same kwargs as fixation and prompt
        self.word = visual.TextStim(self.window, text='', **text_kwargs)

        # Load the sound cues
        # There are multiple versions of each sound, so pick one like this:
        # >>> random.choice(self.sounds['left']).play()
        self.sounds = {}
        for direction in ['left', 'right', 'neutral']:
            sounds_re = '%s-*.wav' % direction
            self.sounds[direction] = load_sounds(STIM_DIR, sounds_re)

        # Create the target
        target_size = 80
        self.target = visual.Rect(self.window, size=[target_size, target_size],
                                  opacity=0.8, fillColor='white')

        # Create the stimuli for feedback: 0 -> incorrect, 1 -> correct.
        incorrect_wav = unipath.Path(STIM_DIR, 'feedback-incorrect.wav')
        correct_wav = unipath.Path(STIM_DIR, 'feedback-correct.wav')
        self.feedback = {}
        self.feedback[0] = sound.Sound(incorrect_wav)
        self.feedback[1] = sound.Sound(correct_wav)

        # Create a closure function to jitter target positions within the
        # bounds of the mask
        no_edge_to_edge_buffer = target_size/6
        amount = mask_size - target_size - no_edge_to_edge_buffer

        def jitter(pos):
            """Jitter the target position within the mask bounds.

            NOTE(review): returns a one-shot generator, not a tuple --
            callers can only consume the coordinates once. Confirm this is
            intended before changing it.
            """
            return (p + random.uniform(-amount/2, amount/2) for p in pos)
        self.jitter = jitter

        # Attach timer to experiment
        self.timer = core.Clock()
Example #6
0
    def __init__(self, settings_yaml='settings.yaml', texts_yaml='texts.yaml'):
        """Create the window, frames, cues, masks, target pictures, and
        feedback sounds from the YAML settings files.

        Args:
            settings_yaml: path to a YAML file with 'layout', 'waits',
                'response_keys', and 'survey_url' entries.
            texts_yaml: path to a YAML file of instruction texts.
        """
        with open(settings_yaml) as f:
            # safe_load: yaml.load without an explicit Loader is deprecated
            # and raises TypeError in PyYAML >= 6; config is plain data.
            settings = yaml.safe_load(f)
        self.layout = settings.pop('layout')
        self.positions = self.layout.pop('positions')
        self.waits = settings.pop('waits')
        self.response_keys = settings.pop('response_keys')
        # list(...) + list(...): on Python 3, dict.keys() returns a view
        # and concatenating two views with '+' raises TypeError.
        self.all_response_keys = list(self.response_keys['pic']) + \
                                 list(self.response_keys['word'])
        self.survey_url = settings.pop('survey_url')

        with open(texts_yaml) as f:
            self.texts = yaml.safe_load(f)

        self.win = visual.Window(fullscr=True, allowGUI=False, units='pix')

        # Shared styling for all text stimuli.
        text_kwargs = dict(
            win=self.win,
            font='Consolas',
            height=60,
            color='black'
        )
        self.fix = visual.TextStim(text='+', **text_kwargs)
        self.prompt = visual.TextStim(text='?', **text_kwargs)

        pic_size = self.layout['pic_size']
        frame_kwargs = dict(
            win=self.win,
            lineColor='black',
            lineWidth=2.0,
            fillColor=None,
            # NOTE(review): both dimensions use pic_size[0]; presumably the
            # pictures are square -- confirm before using pic_size[1] here.
            width=pic_size[0] + 10,
            height=pic_size[0] + 10,
        )
        self.frames = [visual.Rect(pos=pos, **frame_kwargs)
                       for pos in self.positions.values()]

        self.cues = load_sounds(unipath.Path(self.STIM_DIR, 'cues'))

        mask_kwargs = dict(
            win=self.win,
            size=pic_size,
        )
        self.masks = [DynamicMask(pos=pos, **mask_kwargs)
                      for pos in self.positions.values()]

        # Targets
        # NOTE: Probably inefficient to load images twice, but
        # I was having problems trying to copy the image
        # to each location.
        image_kwargs = dict(win=self.win, size=pic_size)
        self.left_pics = load_images(unipath.Path(self.STIM_DIR, 'pics'),
                                     pos=self.positions['left'],
                                     **image_kwargs)
        self.right_pics = load_images(unipath.Path(self.STIM_DIR, 'pics'),
                                      pos=self.positions['right'],
                                      **image_kwargs)

        self.word = visual.TextStim(**text_kwargs)

        self.timer = core.Clock()

        # Feedback sounds keyed by correctness: 0 -> buzz, 1 -> bleep.
        feedback_dir = unipath.Path(self.STIM_DIR, 'feedback')
        self.feedback = {}
        self.feedback[0] = sound.Sound(unipath.Path(feedback_dir, 'buzz.wav'))
        self.feedback[1] = sound.Sound(unipath.Path(feedback_dir, 'bleep.wav'))