Example 1
    def _run_trial_main_loop(self, clock, time_bar_x):
        """ To run the main drawing loop """
        started_drawing = False
        cursor_t = np.zeros([10000])  # for recording times with cursor pos
        mouse = event.Mouse(win=self.win)
        # MD: probably a better pattern -> two separate while loops:
        #   one that activates the cyan square and moves to the beginning,
        #   then another one that draws and increments the bar.
        #
        #   As it is currently done, all of the ifs have to be evaluated on
        #   every pass of the while loop ---> and that is indeed what happens.

        while True:
            self.draw_and_flip(exclude=['trace'])
            if mouse.isPressedIn(self.frame['elements']['start_point']):
                self.log.debug('Mouse pressed in startpoint')
                # change to cyan
                # older psychopy versions use str instead of arr to set color
                # eg 'Cyan'
                self.frame['elements']['start_point'].fillColor = [-1, 1, 1]
                tic = clock.getTime()
                self.draw_and_flip(exclude=['trace', 'instructions'])
                break

        while True:
            # drawing has begun
            if mouse.isPressedIn(self.frame['elements']['cursor']):
                self.log.debug('Mouse started drawing with cursor')
                started_drawing = True
                self.frame['lifted'] = False

                trial_timer = clock.CountdownTimer(self.trial_settings['trial_duration'])

                # save start time
                start_t_stamp = clock.getTime()

                # calc pre trial time
                ptt = start_t_stamp - tic

            # shrink time bar, draw trace, once drawing has started
            if started_drawing:

                if self.verbose:
                    print('STARTED DRAWING')
                self._exec_drawing(trial_timer, mouse, time_bar_x, cursor_t)

                # time_bar elapsed
                if self.verbose:
                    print('breaking out of main')

                break

        return ptt, start_t_stamp, cursor_t
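
The comment above suggests splitting the trial into two phases: first wait for the start-point press, then handle the drawing. A minimal sketch of that two-phase pattern (not the project's actual method), assuming a PsychoPy mouse, a clock, the start_point and cursor stimuli, and a draw_and_flip helper like the ones used above:

# Phase 1: wait for the participant to press the start point.
while not mouse.isPressedIn(start_point):
    draw_and_flip()
tic = clock.getTime()

# Phase 2: wait for drawing to begin with the cursor.
while not mouse.isPressedIn(cursor):
    draw_and_flip()
ptt = clock.getTime() - tic  # pre-trial time between the two presses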
Example 2
def calculate_contrast():
    if params.contrast_mod_type == 'fixed_trapezoidal':
        ramp_up_secs = params.frameDur 
        ramp_down_secs = params.frameDur 
        secs_passed = clock.getTime()-start_time
        if this_stim_secs >= 3 * params.frameDur:
            if 0 <= secs_passed < ramp_up_secs:
                this_contr = 0.5 * this_max_contrast
            elif this_stim_secs >= secs_passed > this_stim_secs - ramp_down_secs:
                this_contr = 0.5 * this_max_contrast
            else:
                this_contr = this_max_contrast
        else:
            this_contr = this_max_contrast
    elif params.contrast_mod_type == 'hybrid_gaussian':
        if this_stim_secs < frameDur * 6:  # e.g. sigma = 0.015 with the stimulus duration taken as 8 sigma gives 120 ms (half-width at half maximum ~18 ms)
            secs_passed = clock.getTime() - start_time
            sigma = this_stim_secs / 6  # stimulus duration spans 6 sigma
            mu = this_stim_secs / 2
            # actual_stim_secs = 0.7759 * sigma * 2
            this_contr = norm.pdf(secs_passed, mu, sigma) * this_condition['max_contr'] * numpy.sqrt(2 * numpy.pi) * sigma
        else:
            secs_passed = clock.getTime() - start_time
            sigma = frameDur
            mu = 3 * sigma
            if secs_passed < mu:
                this_contr = norm.pdf(secs_passed, mu, sigma) * this_condition['max_contr'] * numpy.sqrt(2 * numpy.pi) * sigma
            elif secs_passed > (this_stim_secs - mu):
                this_contr = norm.pdf(this_stim_secs - secs_passed, mu, sigma) * this_condition['max_contr'] * numpy.sqrt(2 * numpy.pi) * sigma
            else:
                this_contr = this_condition['max_contr']
            
    elif params.contrast_mod_type == 'variable_triangular':  # linear ramp up for half of this_stim_secs, then ramp down
        secs_passed = clock.getTime() - start_time
        if secs_passed <= this_stim_secs * 0.5:  # first half
            this_contr = (secs_passed / (this_stim_secs * 0.5)) * this_max_contrast
        else:
            this_contr = (this_stim_secs - secs_passed) / (this_stim_secs * 0.5) * this_max_contrast
    else:
        this_contr = this_condition['max_contr']
        
    # Sanity check: keep this_contr in [0, 1]
    if this_contr > 1:
        this_contr = 1
    elif this_contr < 0:
        this_contr = 0
        
    return this_contr
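
In the 'hybrid_gaussian' branch, multiplying norm.pdf(...) by sqrt(2*pi)*sigma rescales the Gaussian so that its peak equals 1, which makes the peak contrast exactly max_contr. A quick standalone check of that identity (illustrative values only, independent of the experiment code):

import numpy
from scipy.stats import norm

sigma, mu, max_contr = 0.015, 0.060, 0.8   # illustrative values
peak = norm.pdf(mu, mu, sigma) * max_contr * numpy.sqrt(2 * numpy.pi) * sigma
print(peak)  # ~0.8 -> the scaled Gaussian peaks at max_contr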
Example 3
 def display_stimulus(stimuli,
                      recordtime=None,
                      waittime=None,
                      sound=None,
                      feedback=False):
     '''Present the stimuli and record relevant experiment times.
        stimuli = List of stimulus objects created in the setup section
        recordtime (optional) = Label for the time being recorded
        waittime (optional) = How long to hold the stimuli on the screen (s)
        sound (optional) = Sound to play when the stimuli become visible
        feedback = True if feedback is to be provided to the user
     '''
     if feedback == True:
         if trial['CorrectResponse'][presentation] == '1':
             stimuli[0] = option1_waves
             #stimuli[0].color = 'green'
         else:
             stimuli[1] = option2_waves
             #stimuli[1].color = 'green'
     for stim in stimuli:
         stim.draw()
     win.flip()
     if recordtime:
         times[recordtime] = clock.getTime()
     if sound:
         sound.play()
     if waittime:
         core.wait(waittime)
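
A minimal usage sketch for display_stimulus, assuming the globals it relies on (win, clock, times, trial, presentation, and the stimulus/sound objects) were created in the setup section as in the surrounding experiment:

# Present both speakers, log the onset time, play this trial's sound,
# and hold the display for 500 ms.
display_stimulus([option1, option2],
                 recordtime='stimTime',
                 waittime=0.500,
                 sound=trial['soundstim'][presentation])

# Redraw the options, highlighting the correct one as feedback.
display_stimulus([option1, option2], feedback=True)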
Example 4
def displayStimulus(stimulusname,
                    recordtime=None,
                    waittime=None,
                    secondstim=None,
                    sound=None):
    '''Basic function for presenting stimuli.
       stimulusname = The stimulus object created in the setup section
       recordtime (optional) = The name of the time being recorded (e.g., time the image is presented is imgTime)
       waittime (optional) = Time to hold the stimulus (i.e., stimulusname) on screen (s)
       secondstim (optional) = A second stimulus to draw on the same flip
       sound (optional) = Sound to play after the stimulus becomes visible'''
    stimulusname.draw()
    if secondstim:
        secondstim.draw()
    win.flip()
    if recordtime:
        times[recordtime] = clock.getTime()
    if sound:
        sound.play()
    if waittime:
        core.wait(waittime)
Example 5
 def complete_results(convergence):
     endTime = clock.getTime()
     experiment_info = [{'Trial Number': '', 'Level': ''}]
     for key, value in settings.items():
         experiment_info.append({'Trial Number': key, 'Level': value})
     if convergence == 'converged':
         converge_str = 'Convergence at level {} after {} trials!'.format(
             comparison, trialnum)
     elif convergence == 'escaped':
         converge_str = 'User ended experiment after {} trials.'.format(
             trialnum)
     else:
         converge_str = 'No convergence after {} trials.'.format(trialnum)
     experiment_info.extend([{
         'Trial Number': 'Convergence',
         'Level': converge_str
     }, {
         'Trial Number': 'Experiment Run Time (seconds)',
         'Level': (endTime - startTime)
     }])
     writer.writerows(experiment_info)
     print(converge_str)
Example 6
ppn = my.getString(win, "Please enter a participant number:")
datafile = my.openDataFile(ppn + "fam")
# connect it with a csv writer
writer = csv.writer(datafile, delimiter=",")
# create output file header
writer.writerow([
    "Trial Number", 
    "Sound Value",
    "Selection",
    "Filename"
    ])
    
## Experiment Section
# show welcome screen
my.getCharacter(win, welcomestr.format(trials[0]['Option1'], trials[0]['Option2'], numblocks))
startTime = clock.getTime() # clock is in seconds

blocknum = 0
trialnum = 0
key = []
randomseed = 0
while blocknum < numblocks:
    for i in range(len(trials)):
        trial = trials[i]
        times = {}
        
        # Cue: present fixation for 100 ms
        displayStimulus(fixation, 'fixationTime', 0.100)
        
        # Delay: blank screen for 100 ms
        win.flip()
Example 7
def bandit_task_control(selected_value, arms, stimuli, feedback, window):
    # define instructions
    # print(messages:["welcome"],["break"],["thanks"])
    print('selected_value is %d' % selected_value)
    instruction_result_text = ""
    if selected_value == 1:
        instruction_result_text, is_reward = get_instructions(
            [INSTRUCTIONS_REWARD_CONTR, True])

    elif selected_value == 2:
        instruction_result_text, is_reward = get_instructions(
            [INSTRUCTIONS_PUNISHMENT_CONTR, True])

    else:
        sys.exit("Unknown condition number")

    print('instruction_result is %s and is_reward=%s' %
          (instruction_result_text, is_reward))

    text_stim_screen = psychopy.visual.TextStim(win=window,
                                                text=instruction_result_text,
                                                color=(-1, -1, -1),
                                                height=30.0)
    text_stim_screen.draw(window)
    win.flip()
    while True:
        print('in while...')
        response = psychopy.event.waitKeys(keyList=['space'])
        print('after response')
        print(response)
        if 'space' in str(response):
            print("selected space!")
            break  # break out of the while-loop


# screen experiments

    print('selected_value is %d' % selected_value)
    starting_screen_control = ""
    if selected_value > 0:
        starting_screen_control = get_starting_screen_control(
            ([fixation_cross, True], [arms, True]))
        print('display fixation cross and arms')
        psychopy.core.wait(0.5)
    print('starting_screen_control is %s' % (starting_screen_control))
    print(get_starting_screen_control)
    win.flip()

    baseline_screen = ""
    if selected_value == 1 and clock.getTime(
    ) > 0.5:  # baseline screen control reward condition
        baseline_screen, is_reward = get_baseline_screen_cont_reward(
            ([text_rule_minusone, True], [arms, True]))

    if selected_value == 2 and clock.getTime(
    ) > 0.5:  # baseline screen control punishment condition
        baseline_screen, is_reward = get_baseline_screen_cont_punishment(
            ([text_rule_zero, True], [arms, True]))

    print('baseline_screen is %s and is_reward=%s' %
          (baseline_screen, is_reward))
    # baseline_screen.draw(window)
    win.flip()

    while True:
        print('in while...')
        response = psychopy.event.waitKeys(keyList=['left', 'right'])
        print('after response')
        print(response)
        if 'left' in str(response):
            print("left!")
        elif 'right' in str(response):
            print("right!")
            break  # break out of the while-loop
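
event.waitKeys returns a list of key names (or None if a maxWait elapses), so the responses above can be checked by list membership rather than by converting the list to a string. A small sketch, assuming psychopy.event is imported as in the snippet:

response = psychopy.event.waitKeys(keyList=['left', 'right'])
if 'left' in response:
    print("left!")
elif 'right' in response:
    print("right!")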
Example 8
def wait(clock):
    while clock.getTime() < 0:
        pass
    return
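
The helper above spin-waits until a clock that was reset to a negative value climbs back up to zero. If nothing needs to be polled during the wait, core.wait can sleep for the remaining time instead of burning CPU; a hedged alternative, assuming the same negative-offset clock convention:

from psychopy import core

def wait(clock):
    # Sleep until the clock reaches zero instead of busy-looping on getTime().
    remaining = -clock.getTime()
    if remaining > 0:
        core.wait(remaining)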
Example 9
    def start(self, win, win2, baseline_inner, baseline_outer):
        t = 0
        clock = core.Clock()
        clock.reset()
        frameN = -1
        continueRoutine = True
        endExpNow = False
        while continueRoutine:
            # Current time and frame
            t = clock.getTime()
            frameN = frameN + 1

            # Get current pupil mean
            current_pupil_mean = self.get_pupil_mean()

            scaling = 35

            circle_outer_feedback = visual.Circle(win,
                                                  edges=96,
                                                  radius=baseline_outer *
                                                  scaling,
                                                  lineWidth=1,
                                                  lineColor=(0, 0, 0),
                                                  fillColor=(0, 0, 0),
                                                  interpolate=True)
            circle_inner_feedback = visual.Circle(win,
                                                  edges=96,
                                                  radius=baseline_inner *
                                                  scaling,
                                                  lineWidth=1,
                                                  lineColor=(-0.3, -0.3, -0.3),
                                                  fillColor=(-0.3, -0.3, -0.3),
                                                  interpolate=True)
            circle_pupil_size_live = visual.Circle(win,
                                                   edges=96,
                                                   radius=current_pupil_mean *
                                                   scaling,
                                                   lineWidth=4,
                                                   lineColor=(0, -1, -1),
                                                   interpolate=True)

            circle_outer_feedback1 = visual.Circle(win2,
                                                   edges=96,
                                                   radius=baseline_outer *
                                                   scaling,
                                                   lineWidth=1,
                                                   lineColor=(0, 0, 0),
                                                   fillColor=(0, 0, 0),
                                                   interpolate=True)
            circle_inner_feedback1 = visual.Circle(
                win2,
                edges=96,
                radius=baseline_inner * scaling,
                lineWidth=1,
                lineColor=(-0.3, -0.3, -0.3),
                fillColor=(-0.3, -0.3, -0.3),
                interpolate=True)
            circle_pupil_size_live1 = visual.Circle(win2,
                                                    edges=96,
                                                    radius=current_pupil_mean *
                                                    scaling,
                                                    lineWidth=4,
                                                    lineColor=(0, -1, -1),
                                                    interpolate=True)

            circle_outer_feedback1.setAutoDraw(True)
            circle_inner_feedback1.setAutoDraw(True)
            circle_pupil_size_live1.setAutoDraw(True)

            circle_outer_feedback.setAutoDraw(True)
            circle_inner_feedback.setAutoDraw(True)
            circle_pupil_size_live.setAutoDraw(True)

            continueRoutine = True

            # check if all components have finished
            # if not continueRoutine:  break

            # check for quit (the Esc key)
            if endExpNow or event.getKeys(keyList=["escape"]):
                continueRoutine = False

            # refresh the screen
            if continueRoutine: win.flip()
            if continueRoutine: win2.flip()
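
This routine builds six new visual.Circle objects on every frame and leaves the previous ones auto-drawing, so the draw list keeps growing. A common alternative is to create the circles once before the loop and update only the radius of the live-pupil circle each frame; a minimal sketch for a single window, assuming the same win, baseline values, scaling, and get_pupil_mean as above:

scaling = 35
circle_outer = visual.Circle(win, edges=96, radius=baseline_outer * scaling,
                             lineColor=(0, 0, 0), fillColor=(0, 0, 0))
circle_inner = visual.Circle(win, edges=96, radius=baseline_inner * scaling,
                             lineColor=(-0.3, -0.3, -0.3), fillColor=(-0.3, -0.3, -0.3))
circle_live = visual.Circle(win, edges=96, lineWidth=4, lineColor=(0, -1, -1))

while not event.getKeys(keyList=['escape']):
    circle_live.radius = self.get_pupil_mean() * scaling  # only this changes per frame
    for stim in (circle_outer, circle_inner, circle_live):
        stim.draw()
    win.flip()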
Example 10
def write_trial_data_to_file():
    dataFile.write('%s,%s' % (expInfo['Participant'], expInfo['Gender']))
    dataFile.write(',%i,%i,%i,%s,%s,%.2f' % (current_run,n_trials,this_dir,this_dir_str, thisKey, this_grating_degree))
    dataFile.write(',%.3f,%.3f,%.3f,%.9f,%i,%.9f' % (this_max_contrast, this_spf, this_tf, this_stim_secs,frame_n,actual_stim_secs))
    dataFile.write(',%.9f,%.9f,%.2f,%.3f' % (frameRate, frameDur, thisResp, rt))
    # two format specifiers to match the two values being written
    dataFile.write(',%i,%.3f\n' % (start_resp_time, clock.getTime()))
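
Printf-style writes can silently drift out of sync between specifiers and values. An alternative is to collect the values in a list and let the csv module do the formatting; a minimal sketch, assuming the same variables and an already-open dataFile:

import csv

row = [expInfo['Participant'], expInfo['Gender'], current_run, n_trials,
       this_dir, this_dir_str, thisKey, round(this_grating_degree, 2),
       round(rt, 3), round(clock.getTime(), 3)]
csv.writer(dataFile).writerow(row)  # one value per column, no format strings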
Example 11
         ori=params.grating_ori, pos=(0, 0), size=this_grating_degree, sf=this_spf, phase=0,
         color=0, colorSpace='rgb', opacity=1, blendmode='avg',
         texRes=128, interpolate=True, depth=0.0)
     
     # Show fixation until key press
     fixation.draw()
     win.flip()
     event.waitKeys()
     win.flip()
     
     # ISI (uniform within [isi_min, isi_max])
     core.wait(params.fixation_grating_isi)
     
     # draw grating
     keep_going = True
     start_time = clock.getTime()
     while keep_going:
         # elapsed time since the grating started (positive, counts up)
         secs_from_start = clock.getTime() - start_time
         pr_grating.phase = this_dir * (secs_from_start / params.cyc_secs)

         # Modulate contrast
         this_contr = calculate_contrast()
         if this_contr >= 0.5 * this_condition['max_contr']:
             frame_n += 1
         pr_grating.color = this_contr
 
         # Draw next grating component
         pr_grating.draw()
         win.flip()
         grating_start = clock.getTime()
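
Setting pr_grating.phase = this_dir * (secs_from_start / params.cyc_secs) advances the phase by one full cycle every cyc_secs seconds, i.e. the grating drifts at 1 / cyc_secs Hz in the direction given by this_dir. A standalone illustration of that relationship:

cyc_secs = 0.5  # one cycle every 0.5 s -> 2 Hz drift
for elapsed in (0.0, 0.25, 0.5, 1.0):
    phase = elapsed / cyc_secs
    print(elapsed, phase)  # 0.0, 0.5, 1.0, 2.0 cycles completed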
 
Example 12
                                size=(3.5, 3.5),
                                sf=0.6,
                                phase=0,
                                color=[max_contr, max_contr, max_contr],
                                colorSpace='rgb',
                                opacity=1,
                                blendmode='avg',
                                texRes=128,
                                interpolate=True,
                                depth=0.0)

clock = core.Clock()

# Run-time loop for the phase-reversing (counterphase) grating; tf and max_contr are set just below
keep_going = True
start_time = clock.getTime()

tf = 20  # Hz; one cycle lasts 1/tf seconds
cyc_secs = 1 / tf  # in seconds
max_contr = .025

while keep_going:
    pr_grating.phase = round(
        np.mod(clock.getTime(), cyc_secs) /
        cyc_secs) / 2  # need value of 0 or 0.5 to switch phase

    # Contrast ramp in, hold, down
    secs_passed = clock.getTime() - start_time
    if secs_passed <= ramp_up_secs:
        contr = (secs_passed / ramp_up_secs) * max_contr
    elif (secs_passed > ramp_up_secs) & (secs_passed <=
Example 13
def main():
    def display_stimulus(stimuli,
                         recordtime=None,
                         waittime=None,
                         sound=None,
                         feedback=False):
        '''Present the stimuli and record relevant experiment times.
           stimuli = List of stimulus objects created in the setup section
           recordtime (optional) = Label for the time being recorded
           waittime (optional) = How long to hold the stimuli on the screen (s)
           sound (optional) = Sound to play when the stimuli become visible
           feedback = True if feedback is to be provided to the user
        '''
        if feedback == True:
            if trial['CorrectResponse'][presentation] == '1':
                stimuli[0] = option1_waves
                #stimuli[0].color = 'green'
            else:
                stimuli[1] = option2_waves
                #stimuli[1].color = 'green'
        for stim in stimuli:
            stim.draw()
        win.flip()
        if recordtime:
            times[recordtime] = clock.getTime()
        if sound:
            sound.play()
        if waittime:
            core.wait(waittime)

    def display_welcome(screen_number):
        screen_text = visual.TextStim(win,
                                      text=welcome_str[screen_number],
                                      wrapWidth=25,
                                      pos=(0, 0))
        welcome_img[screen_number].append(screen_text)
        display_stimulus(welcome_img[screen_number])
        c = event.waitKeys()
        if c:
            return c[0]
        else:
            return ''

    def complete_results(convergence):
        endTime = clock.getTime()
        experiment_info = [{'Trial Number': '', 'Level': ''}]
        for key, value in settings.items():
            experiment_info.append({'Trial Number': key, 'Level': value})
        if convergence == 'converged':
            converge_str = 'Convergence at level {} after {} trials!'.format(
                comparison, trialnum)
        elif convergence == 'escaped':
            converge_str = 'User ended experiment after {} trials.'.format(
                trialnum)
        else:
            converge_str = 'No convergence after {} trials.'.format(trialnum)
        experiment_info.extend([{
            'Trial Number': 'Convergence',
            'Level': converge_str
        }, {
            'Trial Number': 'Experiment Run Time (seconds)',
            'Level': (endTime - startTime)
        }])
        writer.writerows(experiment_info)
        print(converge_str)

    '''SETTINGS
    This is where the GUI that takes the experiment settings is
    initiated. It opens the GUI window for user input, saves the 
    entered information to a dictionary, and then assigns it to 
    variables used in the experiment.
    '''

    print('Waiting for user to input settings...')
    settings = gui.main()
    print('Loading settings...')
    settings['Experiment Date/Time'] = str(datetime.datetime.now())
    for key, value in settings.items():
        print('{}: {}'.format(key, value))
    experiment = settings['Experiment Name']
    ppn = settings['Participant ID']
    outdir = settings['Output Directory']
    stimfile = settings['Input File']
    stimdir = settings['Stimulus Directory']
    feedback = settings['Feedback']

    for k in settings.keys():
        if 'Interval' in k:
            settings[k] = float(settings[k]) / 1000

    win = visual.Window(size=(settings['Screen Width'],
                              settings['Screen Height']),
                        fullscr=False,
                        monitor='testMonitor',
                        units='cm')

    print('Building experiment...')
    '''STIMULUS CREATION
    This is where the stimuli are created. First, the input file is 
    read into a list of dictionaries. Then, visual and sound
    stimuli are created. Information about correctness is saved to a
    list, a fixation cross is created to indicate the start of each 
    trial, and strings of text are created to be read by the user at 
    the start of the experiment.
    '''

    rows = []
    with open(stimfile, 'r') as input:
        reader = csv.DictReader(input, delimiter=',')
        for row in reader:
            rows.append(row)
    trialsA = [rows[i] for i in range(0, len(rows), 2)]
    trialsB = [rows[i] for i in range(1, len(rows), 2)]
    trials = [{
        key: (value1, value2)
        for key, value1, value2 in zip(trialsA[i].keys(), trialsA[i].values(),
                                       trialsB[i].values())
    } for i in range(len(trialsA))]

    option1 = visual.ImageStim(win,
                               image='img/speaker-silent.jpg',
                               pos=(-8.0, 0))
    option2 = visual.ImageStim(win,
                               image='img/speaker-silent.jpg',
                               pos=(8.0, 0))
    option1_border = visual.ImageStim(win,
                                      image='img/speaker-silent-border.jpg',
                                      pos=(-8.0, 0))
    option2_border = visual.ImageStim(win,
                                      image='img/speaker-silent-border.jpg',
                                      pos=(8.0, 0))
    option1_waves = visual.ImageStim(win,
                                     image='img/speaker.jpg',
                                     pos=(-8.0, 0))
    option2_waves = visual.ImageStim(win,
                                     image='img/speaker.jpg',
                                     pos=(8.0, 0))

    #option1 = visual.TextStim(win, text='Pair 1\n\n   <==', pos=(-6.0,0))
    #option2 = visual.TextStim(win, text='Pair 2\n\n==>', pos=(6.0,0))

    for trial in trials:
        trial['soundstim'] = (sound.Sound(
            os.path.join(stimdir, 'tmp', trial['Filename'][0])),
                              sound.Sound(
                                  os.path.join(stimdir, 'tmp',
                                               trial['Filename'][1])))
        trial['basefile'] = trial['Original Base Filename'][0]
        trial['compfile'] = trial['Original Comparison Filename'][1]

    # PEST defaults
    if int(settings['Maximum Stimulus']) > len(trials):
        settings['Maximum Stimulus'] = len(trials)
    if int(settings['Maximum Step Size']) > len(trials):
        settings['Maximum Step Size'] = len(trials)
    if int(settings['First Comparison']) > len(trials):
        settings['First Comparison'] = len(trials)

    pest_dict = {
        'Expected Correct Response': 0,
        'User Correct': True,
        'Number Correct': 0,
        'Expected Number Correct': 0,
        'Reversals': 0,
        'Wald': 0.5,
        'Change Level': 0,
        'Next Direction': -1,
        'Step Size': int(settings['Maximum Step Size']) / 2,
        'Next Level': int(settings['First Comparison']),
        'Number of Trials at Stimulus Level': 0,
        'Double': False,
        'Same Direction': 0,
        'Doubled at Last Reversal': False
    }

    # Various pieces for presentation
    if settings['Response Box']:
        response_input = [
            'LEFT button', 'RIGHT button',
            'red stop button in the PsychoPy Coder window'
        ]
    else:
        response_input = ['LEFT arrow key', 'RIGHT arrow key', 'Esc key']

    no_response = visual.TextStim(
        win, 'You did not respond quickly enough.'
        '\n\nPress the ' + response_input[0] + ' or ' + response_input[1] +
        ' to choose which syllable pair had two unique syllables.')

    welcome_str = [
        'Welcome! In this experiment, you will hear two '
        'pairs of syllables presented consecutively. You will choose '
        'which pair sounds like two *different* syllables.\n\n\n\n\n\n\n'
        'Press any key to continue.\n',
        'You will use the {} for {} and the {} for {}. You will see a black border '
        'around your choice (see Pair 1 below).\n\n\n\n\n\n\n'
        'Press any key to continue.\n'.format(response_input[0], 'Pair 1',
                                              response_input[1], 'Pair 2'),
        'The speaker for the correct option will display sound waves as shown below for Pair 2.'
        '\n\n\n\n\n\n\n\nPress any key to continue.\n',
        'You may exit the experiment during any trial by pressing the {}.\n\n\n\n\n\n\n'
        'Press any key to begin the experiment.\n'.format(response_input[2])
    ]
    welcome_img = [[option1, option2], [option1_border, option2],
                   [option1, option2_waves], [option1, option2]]

    goodbyestr = visual.TextStim(win,
                                 text='You made it to the end!\nGreat work!',
                                 pos=(0, 5.0))
    goodbyeimg = visual.ImageStim(win, image='img/greatjob.png', pos=(0, -3.0))
    goodbyeimg.size /= 2

    fixation = visual.ImageStim(win, image='img/get-ready.jpg')

    cheertext = [
        'Awesome job! Keep up the good work!', 'You are doing great!',
        'Keep going! You can do it!', 'You are a great listener!'
    ]
    cheerimg = [
        'img/girl-waving.png', 'img/girl-riding-a-scooter.png',
        'img/girl-jumping-rope.png', 'img/girl-holding-magnifying-glass.png'
    ]
    cheerstim = []
    for txt, img in list(zip(cheertext, cheerimg)):
        cheerstim.append([
            visual.TextStim(win, text=txt, pos=(0, 5.0)),
            visual.ImageStim(win, image=img, pos=(0, -3.0))
        ])
    '''OUTPUT FILE
    This is where the output file is initially created using the
    experiment name and participation number. A header row is created 
    to start the file.
    '''
    datafile = os.path.join(outdir,
                            "{}_{}_results.csv".format(experiment, ppn))

    print('Initiating experiment...')
    '''EXPERIMENT
    This is where the actual experiment begins. First, instructions are 
    provided to the user. Then the program loops through the list of 
    trials, following timed sections as detailed in the comments below.
    '''
    with open(datafile, 'w', newline='') as datafile:
        fieldnames = [
            'Trial Number', 'Level', 'Base Filename', 'Comparison Filename',
            'Correct Response', 'User Response', 'Reaction Time', 'Filename',
            'Expected Correct Response', 'User Correct', 'Number Correct',
            'Expected Number Correct', 'Reversals', 'Wald', 'Change Level',
            'Next Direction', 'Step Size', 'Next Level',
            'Number of Trials at Stimulus Level', 'Double', 'Same Direction',
            'Doubled at Last Reversal'
        ]
        writer = csv.DictWriter(datafile, fieldnames=fieldnames)
        writer.writeheader()

        display_welcome(0)
        display_welcome(1)
        if feedback == True:
            display_welcome(2)
        display_welcome(3)

        startTime = clock.getTime()  # clock is in seconds
        trialnum = 1
        key = []
        comparison = int(pest_dict['Next Level'])
        cheernum = 0

        mouse = event.Mouse(win=win)

        while trialnum <= int(settings['Maximum Trials']):
            i = comparison - 1
            presentation = random.choice(
                [0, 1])  #Choose whether to use AAAB or ABAA
            trial = trials[i]
            times = {}
            correctresult = False

            # Cue: Present fixation image
            display_stimulus([fixation, option1_waves, option2_waves],
                             'fixationTime', settings['Cue Interval'])

            # Delay: Wait before presenting stimulus
            display_stimulus([option1, option2], 'blankTime',
                             settings['Delay Interval'])

            # Stimulus/Response: Present stimulus, wait
            # for a response of left, right, or Esc key / mouse button click
            if settings['Response Box']:
                display_stimulus([option1, option2],
                                 'stimTime',
                                 sound=trial['soundstim'][presentation])
                display_stimulus([option1_waves, option2], waittime=0.790)
                display_stimulus([option1, option2], waittime=0.500)
                display_stimulus([option1, option2_waves], waittime=0.790)
                wait = settings['Response Interval']
                timer = core.Clock()
                clicked = False
                mouse.clickReset()
                key = []
                while not clicked and timer.getTime() < wait:
                    display_stimulus([option1, option2])
                    button = mouse.getPressed()
                    if sum(button):
                        if button[0] == 1:
                            key.append('left')
                        elif button[2] == 1:
                            key.append('right')
                        else:
                            key.append('escape')
                        clicked = True

            else:
                display_stimulus([option1, option2],
                                 'stimTime',
                                 sound=trial['soundstim'][presentation])
                display_stimulus([option1_waves, option2], waittime=0.790)
                display_stimulus([option1, option2], waittime=0.500)
                display_stimulus([option1, option2_waves], waittime=0.790)
                display_stimulus([option1, option2])
                key = event.waitKeys(settings['Response Interval'],
                                     ['left', 'right', 'escape'])
            times['responseTime'] = clock.getTime()

            # Anti-Startle: Wait for 0 ms
            if key:
                if key[0] == 'left':
                    option1_response = option1_border
                    option2_response = option2
                elif key[0] == 'right':
                    option2_response = option2_border
                    option1_response = option1
                else:
                    option1_response = option1
                    option2_response = option2
            else:
                option1_response = option1
                option2_response = option2
            display_stimulus([option1_response, option2_response],
                             'antistartleTime',
                             settings['Anti-Startle Interval'])
            core.wait(0.250)

            # Feedback: Display feedback for 250 ms
            display_stimulus([option1, option2],
                             'feedbackTime',
                             settings['Feedback Interval'],
                             feedback=feedback)

            # Collect data
            selection = ''
            if key:
                # Record selection
                if key[0] == 'left':
                    selection = '1'
                elif key[0] == 'right':
                    selection = '2'
                else:
                    selection = 'escape'
            else:
                selection = 'no response'
                display_stimulus([no_response], waittime=5.000)

            if trial['CorrectResponse'][presentation] == selection:
                correctresult = True

            ## PEST STUFF
            pest_result = pest(
                trial['CorrectResponse'][presentation], correctresult,
                pest_dict['Number Correct'],
                pest_dict['Expected Number Correct'], pest_dict['Double'],
                pest_dict['Wald'], pest_dict['Next Direction'],
                pest_dict['Change Level'], pest_dict['Same Direction'],
                int(pest_dict['Next Level']), pest_dict['Reversals'],
                pest_dict['Number of Trials at Stimulus Level'],
                int(settings['Maximum Step Size']),
                float(settings['Minimum Step Size']), pest_dict['Step Size'],
                int(settings['Maximum Stimulus']),
                int(settings['Minimum Stimulus']),
                pest_dict['Doubled at Last Reversal'], trialnum)

            resultdata = {
                'Trial Number': trialnum,
                'Level': trial['Level'][presentation],
                'Base Filename': trial['basefile'],
                'Comparison Filename': trial['compfile'],
                'Correct Response': trial['CorrectResponse'][presentation],
                'User Response': selection,
                'Reaction Time': times['responseTime'] - times['stimTime'],
                'Filename': trial['Filename'][presentation]
            }
            resultdata.update(pest_result)

            # Write result to data file
            writer.writerow(resultdata)

            comparison = int(pest_result['Next Level'])
            if pest_result['Change Level'] == 2:
                complete_results('converged')
                break

            if selection == 'escape':
                complete_results('escaped')
                break

            # Inter-trial Interval: Wait for 1000 ms
            option1.color = 'white'
            option2.color = 'white'
            display_stimulus([option1, option2], 'intertrialTime',
                             settings['Inter-trial Interval'])

            # Move on to the next trial
            pest_dict = pest_result
            if trialnum < int(settings['Maximum Trials']):
                if (trialnum % 20) == 0:
                    # cycle through the four cheer screens
                    if cheernum >= len(cheerstim):
                        cheernum = 0
                    display_stimulus(cheerstim[cheernum], waittime=3.000)
                    cheernum += 1
                trialnum += 1
            else:
                complete_results('no convergence')
                break

    # Show goodbye screen
    print('Results saved to {}'.format(
        os.path.join(outdir, ''.join([experiment, '_', ppn, '_results.csv']))))
    my.showText(win, "You finished the experiment! Great work!")
    display_stimulus([goodbyestr, goodbyeimg])
    shutil.rmtree(os.path.join(settings['Stimulus Directory'], 'tmp'))
    core.wait(2.000)

    ## Closing Section
    win.close()
    core.quit()
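
The stimulus-file section above pairs consecutive CSV rows (trialsA = even rows, trialsB = odd rows) into per-trial dictionaries whose values are (A, B) tuples. A tiny standalone illustration of that comprehension with made-up rows (hypothetical field values):

rowA = {'Filename': 'base_01.wav', 'Level': '1'}
rowB = {'Filename': 'comp_01.wav', 'Level': '2'}
trial = {key: (a, b)
         for key, a, b in zip(rowA.keys(), rowA.values(), rowB.values())}
# trial == {'Filename': ('base_01.wav', 'comp_01.wav'), 'Level': ('1', '2')}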
Example 14
    grating.setSF(SF)
    cyc_secs = 1 / TF
    resp.keys = []
    resp.rt = []
    # keep track of which components have finished
    trialComponents = [fixation, grating, resp]
    for thisComponent in trialComponents:
        thisComponent.tStart = None
        thisComponent.tStop = None
        thisComponent.tStartRefresh = None
        thisComponent.tStopRefresh = None
        if hasattr(thisComponent, 'status'):
            thisComponent.status = NOT_STARTED
    # reset timers
    t = 0
    start_time = clock.getTime()
    _timeToFirstFrame = win.getFutureFlipTime(clock="now")
    trialClock.reset(-_timeToFirstFrame)  # t0 is time of first possible flip
    frameN = -1
    continueRoutine = True

    # -------Run Routine "trial"-------
    while continueRoutine:
        # get current time
        t = trialClock.getTime()
        tThisFlip = win.getFutureFlipTime(clock=trialClock)
        tThisFlipGlobal = win.getFutureFlipTime(clock=None)
        frameN = frameN + 1  # number of completed frames (so 0 is the first frame)
        # update/draw components on each frame

        # *fixation* updates
Example 15
import pandas as pd
logging.console.setLevel(logging.DEBUG)
import time
import numpy as np

NUMBER_OF_ITERATION = 100

reaction_times = []
#leftoutcomes= []
#rightoutcomes= []
feedbacks = []
pressed_lefts = []
pos_right = [200, -200]
pos_left = [-200, -200]

rt = clock.getTime()
# take reaction times

# upload conditions
left_outcome_reward = []
right_outcome_reward = []
left_outcome_punish = []
right_outcome_punish = []
print('upload conditions....')
dataFile_reward = pd.read_excel(
    r'C:\Users\ic18563\OneDrive - University of Bristol\python different\python start\Experimental - Copy\sensitivity_reward_condition_exc.xlsx'
)  # skiprows=1, nrows=100, usecols="A:B" would take just the 100 rows with data
dataFile_punishment = pd.read_excel(
    r'C:\Users\ic18563\OneDrive - University of Bristol\python different\python start\Experimental - Copy\sensitivity_punishment_condition_exc.xlsx'
)  # skiprows=1, nrows=100, usecols="A:B" would take just the 100 rows with data
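
The commented-out keyword arguments after each read_excel call suggest the intent was to load only the first 100 data rows of columns A and B; pandas spells those options nrows and usecols. A minimal sketch with a hypothetical local filename standing in for the OneDrive paths above:

import pandas as pd

conditions = pd.read_excel('sensitivity_reward_condition_exc.xlsx',  # hypothetical local copy
                           usecols='A:B',  # first two columns
                           nrows=100)      # first 100 data rows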
Example 16
    def start(self):
        for thisComponent in self.components:
            if hasattr(thisComponent, 'status'): thisComponent.status = NOT_STARTED

        t = 0
        clock = core.Clock()
        clock.reset()
        frameN = -1
        continueRoutine = True
        endExpNow = False
        while continueRoutine:
            
            # If this routine has a timer and the timer expired, end the routine
            if self.has_timer and self.routine_timer.getTime() <= 0:
                break
            
            # Current time and frame
            t = clock.getTime()
            frameN = frameN + 1
            
            current_pupil_mean = self.class_live_pupil.get_pupil_mean()
            scaling_val = 35
            self.stim_live_pupil.radius = current_pupil_mean * scaling_val
            
            # Draw stimuli
            for stimuli in self.stimulis:
                if t >= 0.0 and stimuli.status == NOT_STARTED:
                    stimuli.tStart = t
                    stimuli.frameNStart = frameN
                    stimuli.setAutoDraw(True)
                    
            # Key Responses
            if len(self.key_list) > 0:
                if t >= 0.0 and self.key_event.status == NOT_STARTED:
                    self.key_event.tStart = t
                    self.key_event.frameNStart = frameN
                    self.key_event.status = STARTED
                    
                    # keyboard checking is just starting
                    event.clearEvents(eventType='keyboard')
                    
                if self.key_event.status == STARTED:
                    theseKeys = event.getKeys(keyList=self.key_list)
                    
                    # check for quit:
                    if "escape" in theseKeys: endExpNow = True
                    if len(theseKeys) > 0:  # at least one key was pressed
                        continueRoutine = False
            
            # check if all components have finished
            if not continueRoutine: break
            
            continueRoutine = False
            
            for component in self.components:
                if hasattr(component, "status") and component.status != FINISHED:
                    continueRoutine = True
                    break
            
            # check for quit (the Esc key)
            if endExpNow or event.getKeys(keyList=["escape"]): core.quit()
            
            # refresh the screen
            if continueRoutine:
                self.win.flip()
                self.win2.flip()
                
        # end while

        # Stop drawing the stimulus
        for component in self.components:
            if hasattr(component, "setAutoDraw"): component.setAutoDraw(False)
            
Example 17
        sR = sR.split(',')
        if len(sR) == 5 and sR[0] == 'v':
            if int(sR[2]) == 999:
                runSession = 0
                sR[2] = 0
            g1['contrast'] = int(sR[2]) / 100
            g1['orientation'] = int(sR[1])
            g1['spFreq'] = int(sR[3]) / 100
            serTrack = 1

            grating1.contrast = g1['contrast']
            grating1.ori = g1['orientation']
            grating1.size = g1['size']

    if startFlag == 0:
        timeOffset = clock.getTime()
        startFlag = 1
    grating1.phase += g1['phaseDelta']
    curTime = clock.getTime() - timeOffset
    # grating1.draw()

    mywin.flip()
    lc = lc + 1

    # save
    exp.addData('clockTime', curTime)
    exp.addData('g1_phase', g1['Xpos'])
    exp.addData('g1_spFreq', g1['spFreq'])
    exp.addData('g1_size', g1['size'])
    exp.addData('g1_contrast', g1['contrast'])
    exp.addData('serTrack', serTrack)
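
The block above parses a comma-separated serial command of the form 'v,<orientation>,<contrast x100>,<spatial frequency x100>,<extra>', where a contrast field of 999 appears to end the session. A small worked parse, assuming that format (the scaling of fields beyond what the snippet shows is a guess):

sR = 'v,90,50,120,0'.split(',')  # hypothetical incoming command
if len(sR) == 5 and sR[0] == 'v':
    orientation = int(sR[1])     # 90 deg
    contrast = int(sR[2]) / 100  # 0.5
    sp_freq = int(sR[3]) / 100   # 1.2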
            exec('{} = thisBlock_Loop[paramName]'.format(paramName))

    # ------Prepare to start Routine "Block_Initializer"-------
    t = 0
    Block_InitializerClock.reset()  # clock
    frameN = -1
    continueRoutine = True
    # update component parameters for each repeat
    goarray = ['GO'] * NGo_Trials
    stoparray = ['STOP'] * NSSD_Trials
    trialarray = goarray + stoparray
    shuffle(trialarray)
    totaltrialnumber = len(trialarray)
    trialnum = 0

    thisExp.addData('clocktime', clock.getTime())
    BlockInstructions.setText(Block_Instructions)
    # keep track of which components have finished
    Block_InitializerComponents = [BlockInstructions, Get_Ready]
    for thisComponent in Block_InitializerComponents:
        if hasattr(thisComponent, 'status'):
            thisComponent.status = NOT_STARTED

    # -------Start Routine "Block_Initializer"-------
    while continueRoutine:
        # get current time
        t = Block_InitializerClock.getTime()
        frameN = frameN + 1  # number of completed frames (so 0 is the first frame)
        # update/draw components on each frame

        # *BlockInstructions* updates
Example 19
def movSinGrat(MovSinGrat_SpatFreqVal, MovSinGrat_tempFreqVal,
               MovSinGrat_t_before, MovSinGrat_t_During, MovSinGrat_t_after,
               Synch, MovSinGrat_Motionmode, MovSinGrat_features,
               MovSinGrat_ledstate):
    '''
    INPUT: parameters that can be changed by the user in front.py by clicking buttons on the UI.

    OUTPUT: vs stimuli for tuning, depending on the feature selected; the default is 'ori', which can be changed in init_para.py.

    Note >> TUNING FEATURE VALUES are defined as follows: ori = 0, spat_freq = 1, temp_freq = 2, contrast = 3, location = 4
    '''

    from psychopy import visual, event, clock, gui
    from win32api import GetSystemMetrics
    from datetime import datetime

    from init_para import (
        MovSinGrat_addblank, MovSinGrat_Amp_sinu, MovSinGrat_controlmod,
        MovSinGrat_dirindex, MovSinGrat_ori, MovSinGrat_t_triginit,
        MovSinGrat_GammaFactor, MovSinGrat_AmpFactor, MovSinGrat_contrast,
        MovSinGrat_MeanLum, win, winWidth, winHeight, ScrnNum, PixelSize,
        winWidthofEachDisp, DisplayFrameWidth, FR, square1, square2, mask_L,
        mask_R, fontSize, fontClr, win, Local_IP, Local_Port, Remote_IP,
        Remote_Port, ani_distance, MovSinGrat_Rep, MovSinGrat_randomseq,
        MovSinGrat_features_dict, MovSinGrat_angles_list,
        MovSinGrat_temp_lin_list, MovSinGrat_temp_osc_list,
        MovSinGrat_location_list, MovSinGrat_contrast_list)

    import socket
    import numpy as np
    import conv

    #To display vs on a single screen set one_screen = True:
    one_screen = True

    #creating mouse functionality
    mouse = event.Mouse(visible=True, win=win)
    if Synch:

        #creating the socket in which communications will take place
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

        #binding the local IP address and local port
        sock.bind((Local_IP, Local_Port))

        #creating textbox showing that this VS computer is waiting for UDP signal
        standBy = visual.TextBox(
            window=win,
            text=("Waiting for starting the control computer."),
            font_size=fontSize,
            font_color=fontClr,
            pos=(-2690, 475),
            size=(300, 37),
            units='pix',
            grid_horz_justification='center',
            grid_vert_justification='center')

        standBy.draw()
        square1.draw(
        )  #have to draw trigger squ; otherwise transient white will happen
        square2.draw()
        win.flip()

        try:
            #wait for the command 'gammacorrection'
            info = sock.recv(1024)
        except Exception:
            sock.close()
            print("Did not recieve info, connection timeout.")
            return

        #sending 'gammafloatampfloat' to the second PC
        sock.sendto(("gamma" + str(drumgrating_GammaFactor) + "amp" +
                     str(drumgrating_AmpFactor)), (Remote_IP, Remote_Port))

        #creating textbox showing that this VS computer is waiting for UDP signal
        standBy = visual.TextBox(window=win,
                                 text=("Control Computer is Ready."),
                                 font_size=fontSize,
                                 font_color=fontClr,
                                 pos=(-2690, 475),
                                 size=(300, 37),
                                 units='pix',
                                 grid_horz_justification='center',
                                 grid_vert_justification='center')
        standBy.draw()

        try:
            #waiting for the signal autoVs
            drumgrating_controlmod = sock.recv(1024)
        except Exception:
            sock.close()
            print(
                "Did not recieve drumgrating_controlmod, connection timeout.")
            return

        #sending 'Wait for parameters' to the second PC
        sock.sendto("Wait for parameters", (Remote_IP, Remote_Port))

        if MovSinGrat_controlmod == 'autoVS':

            try:
                drumgrating_parasize = sock.recv(1024)
            except Exception:
                sock.close()
                print("Did not recieve parasize, connection timeout.")
                return

            #sending a completion transcript
            sock.sendto("read parasize", (Remote_IP, Remote_Port))

            #converting the string recieved into int
            drumgrating_parasize = conv.deleteParasize(drumgrating_parasize)

            #making the array in which the parameters will be added to
            paras = np.empty(shape=[drumgrating_parasize, 9])

            #adding the parameters to the array

            #this for loop receives the 9 parameters for all the stimulations and adds them to an array
            for i in range(
                    drumgrating_parasize):  #start from 0 to parasize[0] - 1
                temp = sock.recv(1024)
                temp = conv.convStr2Dig(temp)
                #adding the parameters to the array (temp) at position index
                #paras[i, :] = temp

            sock.sendto("Para DONE", (Remote_IP, Remote_Port))

            try:
                # receiving the orientation for all stimuli: 1 for vertical, 0 for horizontal
                paratemp = sock.recv(1024)
            except Exception:
                sock.close()
                print("Did not recieve message, connection timeout.")
                return

            paratemp = conv.convStr2Dig(paratemp)

            # setting up the parameters based on what was sent in the paras variable
            drumgrating_Ori = int(paratemp[0])
            Motionmode = int(paratemp[1])
            drumgrating_Amp_sinu = paratemp[2]
            drumgrating_addblank = paratemp[3]

            sock.sendto("Para DONE", (Remote_IP, Remote_Port))

            # creating a generalized sequence of randomly shuffled stimuli for tuning, given a particular feature

            # This first if block creates two variables:
            # tuning_stim_val and tuning_stim_ind, which contain all unique stimulus values and the corresponding indices, respectively, for the selected tuning feature.
            if MovSinGrat_features == 0:  #ori

                tuning_stim_val = map(
                    float, MovSinGrat_angles_list
                )  #map applies the float() function to all elements of the list, therefore converting strings to float
                tuning_stim_ind = range(len(tuning_stim_val))

            elif MovSinGrat_features == 1:  #spat_freq

                tuning_stim_val = map(float, MovSinGrat_spat_list)
                tuning_stim_ind = range(len(tuning_stim_val))

            elif MovSinGrat_features == 2:  #temp_freq

                #assign temp freq depending on Motionmode (lin vs osc motion)
                if MovSinGrat_Motionmode == 0:
                    tuning_stim_val = map(
                        float, MovSinGrat_temp_lin_list
                    )  #SHOULD THIS BE DEPENDENT ON MOTIONMODE???
                    tuning_stim_ind = range(len(tuning_stim_val))
                else:
                    tuning_stim_val = map(
                        float, MovSinGrat_temp_osc_list
                    )  #SHOULD THIS BE DEPENDENT ON MOTIONMODE???
                    tuning_stim_ind = range(len(tuning_stim_val))

            elif MovSinGrat_features == 3:  #contrast
                tuning_stim_val = MovSinGrat_contrast_list  #HAS NOT BEEN CREATED IN INIT_PARA YET;
                tuning_stim_ind = range(len(tuning_stim_val))

            elif MovSinGrat_features == 4:  #location

                tuning_stim_val = map(float, MovSinGrat_location_list)
                tuning_stim_ind = range(len(tuning_stim_val))

            else:
                print('ERROR: MovSinGrat_features outside range. Tuning Feature Value must be an integer between 0 and 4.')

            #calculating total number of stimuli that will be presented (based on number of repetitions for each stimulus*ledstate combo)
            tot_num_stim = MovSinGrat_Rep * MovSinGrat_ledstate * len(
                tuning_stim_ind)

            # defining the array shape to which the parameters will be added; each column represents one parameter (spat_freq, temp_freq, stimId etc.) for each presented stimulus (row)
            paras = np.empty(shape=[tot_num_stim, 12])

            #adding the parameters to the array
            # Generating sequence of order of presenting stimID that will ONLY change the TUNING FEATURE PARAMETER of the stimulus:
            for repind in xrange(MovSinGrat_Rep):

                stimId = np.empty(len(tuning_stim_ind) * MovSinGrat_ledstate)
                stimId = map(int, stimId)

                if MovSinGrat_randomseq:

                    for iled in xrange(
                            0, MovSinGrat_ledstate
                    ):  # for each ledstate, one of each angle will be assigned in random order
                        np.random.shuffle(tuning_stim_ind)

                        for n in xrange(
                                iled, len(stimId), MovSinGrat_ledstate
                        ):  #assign tuning_stim_ind elements to stimId by hops of size ledstate (if ledstate = 1), stimId = tuning_stim_ind;
                            stimId[n] = tuning_stim_ind[n /
                                                        MovSinGrat_ledstate]  #

                else:
                    for iled in xrange(0, MovSinGrat_ledstate):

                        for n in xrange(iled, len(stimId),
                                        MovSinGrat_ledstate):
                            stimId[n] = tuning_stim_ind[n /
                                                        MovSinGrat_ledstate]

                #adding the parameters as an array at index i
                for localstimid in xrange(0, (len(stimId))):

                    if MovSinGrat_features == 0:  #0 = ori
                        paras[repind * (len(stimId)) + localstimid, :] = [
                            MovSinGrat_SpatFreqVal, MovSinGrat_tempFreqVal,
                            MovSinGrat_contrast, MovSinGrat_MeanLum,
                            MovSinGrat_dirindex, MovSinGrat_t_before,
                            MovSinGrat_t_During, MovSinGrat_t_after,
                            MovSinGrat_t_triginit,
                            tuning_stim_val[stimId[localstimid]],
                            MovSinGrat_ledstate, 0
                        ]  # ADD LOCATION

                    elif MovSinGrat_features == 1:  #1 = spat
                        paras[repind * (len(stimId)) + localstimid, :] = [
                            tuning_stim_val[stimId[localstimid]],
                            MovSinGrat_tempFreqVal, MovSinGrat_contrast,
                            MovSinGrat_MeanLum, MovSinGrat_dirindex,
                            MovSinGrat_t_before, MovSinGrat_t_During,
                            MovSinGrat_t_after, MovSinGrat_t_triginit,
                            MovSinGrat_ori, MovSinGrat_ledstate, 0
                        ]  #ADD LOCATION

                    elif MovSinGrat_features == 2:  # and movSinGrat_motionMode == 0: #2 = TempFreq
                        paras[repind * (len(stimId)) + localstimid, :] = [
                            MovSinGrat_SpatFreqVal,
                            tuning_stim_val[stimId[localstimid]],
                            MovSinGrat_contrast, MovSinGrat_MeanLum,
                            MovSinGrat_dirindex, MovSinGrat_t_before,
                            MovSinGrat_t_During, MovSinGrat_t_after,
                            MovSinGrat_t_triginit, MovSinGrat_ori,
                            MovSinGrat_ledstate, 0
                        ]  #ADD LOCATION

                    elif MovSinGrat_features == 3:  #3 = contrast
                        paras[repind * (len(stimId)) + localstimid, :] = [
                            MovSinGrat_SpatFreqVal, MovSinGrat_tempFreqVal,
                            tuning_stim_val[stimId[localstimid]],
                            MovSinGrat_MeanLum, MovSinGrat_dirindex,
                            MovSinGrat_t_before, MovSinGrat_t_During,
                            MovSinGrat_t_after, MovSinGrat_t_triginit,
                            MovSinGrat_ori, MovSinGrat_ledstate, 0
                        ]  #ADD LOCATION

                    #elif MovSinGrat_features == 4: #4 = location
                    #    paras[repind*(len(stimId))+localstimid, :] = [MovSinGrat_SpatFreqVal, MovSinGrat_tempFreqVal, MovSinGrat_contrast, MovSinGrat_MeanLum, MovSinGrat_dirindex,
                    #    MovSinGrat_t_before, MovSinGrat_t_During, MovSinGrat_t_after, MovSinGrat_t_triginit, MovSinGrat_ori, MovSinGrat_ledstate, 0] #ADD LOCATION

            paratemp = [
                drumgrating_Ori, Motionmode, drumgrating_Amp_sinu,
                drumgrating_addblank
            ]

            # setting up the parameters based on what was sent in the paras variable
            drumgrating_Ori = int(paratemp[0])
            Motionmode = int(paratemp[1])
            drumgrating_Amp_sinu = paratemp[2]
            drumgrating_addblank = paratemp[3]

        elif MovSinGrat_controlmod == 'manualVS':
            return

    #if Synch is False, this else condition will make the parameters in the same format as if Synch was True
    else:

        #Naming the experiment to create fileName (at the end of this function)
        instruction_text = visual.TextStim(
            win,
            text=u'Name experiment and press enter to start.',
            pos=(0, 0.5))
        answer_text = visual.TextStim(win)

        #show instructions
        instruction_text.draw()
        square1.draw(
        )  #have to draw trigger squ; otherwise transient white will happen$$$$$$$$$$$$$$$
        square2.draw()
        win.flip()

        #get users input for experiment name
        now = True
        answer_text.text = ''
        while now:
            key = event.waitKeys()[0]
            # Append the typed character
            if key in '1234567890abcdfeghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_-':
                answer_text.text += key

            # Delete last character, if there are any chars at all
            elif key == 'backspace' and len(answer_text.text) > 0:
                answer_text.text = answer_text.text[:-1]

            # Stop collecting response and return it
            elif key == 'return':
                expName = answer_text.text
                print('expName IN here: ', expName)
                now = False

            # Show current answer state
            instruction_text.draw()
            answer_text.draw()
            square1.draw()  #have to draw the trigger square; otherwise a transient white flash will happen
            square2.draw()
            win.flip()

        #setting name of file which will be used to save order of vs stim displayed; NAME = MVS (movSinGrat) + type of tuning feature manipulated in experiment + datetime
        feature = MovSinGrat_features_dict.keys()
        date = datetime.today().strftime(
            '%Y%m%d_%H%M%S')  #extract today's date
        fileName = expName + '_vs_' + feature[
            MovSinGrat_features] + '_' + date  #exp name defined above either by the user (if not Synch) or by the eye tracking software (if Synch)

        #creating a generalized sequence of randomly shuffled stimuli for tuning, given a particular feature

        #This first if/elif block creates two variables:
        #tuning_stim_val and tuning_stim_ind, which contain the unique stimulus values and their corresponding indices, respectively, for the selected tuning feature.
        if MovSinGrat_features == 0:  #ori

            tuning_stim_val = map(
                float, MovSinGrat_angles_list
            )  #map applies the float() function to all elements of the list, therefore converting strings to float
            tuning_stim_ind = range(len(tuning_stim_val))

        elif MovSinGrat_features == 1:  #spat_freq

            tuning_stim_val = map(float, MovSinGrat_spat_list)
            tuning_stim_ind = range(len(tuning_stim_val))

        elif MovSinGrat_features == 2:  #temp_freq

            #assign temp freq depending on Motionmode (lin vs osc motion)
            if MovSinGrat_Motionmode == 0:
                tuning_stim_val = map(float, MovSinGrat_temp_lin_list)
                tuning_stim_ind = range(len(tuning_stim_val))
            else:
                tuning_stim_val = map(float, MovSinGrat_temp_osc_list)
                tuning_stim_ind = range(len(tuning_stim_val))

        elif MovSinGrat_features == 3:  #contrast

            tuning_stim_val = MovSinGrat_contrast_list
            tuning_stim_ind = range(len(tuning_stim_val))

        elif MovSinGrat_features == 4:  #location

            tuning_stim_val = map(float, MovSinGrat_location_list)
            tuning_stim_ind = range(len(tuning_stim_val))

        else:
            print 'ERROR: MovSinGrat_features outside range. Tuning Feature Value must be an integer between 0 and 4.'

        #calculating total number of stimuli that will be presented (based on number of repetitions for each stimulus*ledstate combo)
        tot_num_stim = MovSinGrat_Rep * MovSinGrat_ledstate * len(
            tuning_stim_ind)

        #defining the array in which the parameters will be stored; each column represents one parameter (spat_freq, temp_freq, stimId etc.) for each presented stimulus (row)
        paras = np.empty(shape=[tot_num_stim, 12])

        #adding the parameters to the array
        # Generating the presentation order of stimIDs; only the TUNING FEATURE PARAMETER changes across stimuli:
        for repind in xrange(MovSinGrat_Rep):

            stimId = np.empty(len(tuning_stim_ind) * MovSinGrat_ledstate)
            stimId = map(int, stimId)

            if MovSinGrat_randomseq:

                for iled in xrange(
                        0, MovSinGrat_ledstate
                ):  #for each ledstate, one instance of each stimulus value is assigned in random order
                    np.random.shuffle(tuning_stim_ind)

                    for n in xrange(
                            iled, len(stimId), MovSinGrat_ledstate
                    ):  #assign tuning_stim_ind elements to stimId in hops of size ledstate (if ledstate == 1, stimId == tuning_stim_ind)
                        stimId[n] = tuning_stim_ind[n / MovSinGrat_ledstate]

            else:
                for iled in xrange(0, MovSinGrat_ledstate):

                    for n in xrange(iled, len(stimId), MovSinGrat_ledstate):
                        stimId[n] = tuning_stim_ind[n / MovSinGrat_ledstate]

            #adding the parameters as an array at index i
            for localstimid in xrange(0, (len(stimId))):

                if MovSinGrat_features == 0:  #0 = ori
                    paras[repind * (len(stimId)) + localstimid, :] = [
                        MovSinGrat_SpatFreqVal, MovSinGrat_tempFreqVal,
                        MovSinGrat_contrast, MovSinGrat_MeanLum,
                        MovSinGrat_dirindex, MovSinGrat_t_before,
                        MovSinGrat_t_During, MovSinGrat_t_after,
                        MovSinGrat_t_triginit,
                        tuning_stim_val[stimId[localstimid]],
                        MovSinGrat_ledstate, 0
                    ]  # ADD LOCATION

                elif MovSinGrat_features == 1:  #1 = spat
                    paras[repind * (len(stimId)) + localstimid, :] = [
                        tuning_stim_val[stimId[localstimid]],
                        MovSinGrat_tempFreqVal, MovSinGrat_contrast,
                        MovSinGrat_MeanLum, MovSinGrat_dirindex,
                        MovSinGrat_t_before, MovSinGrat_t_During,
                        MovSinGrat_t_after, MovSinGrat_t_triginit,
                        MovSinGrat_ori, MovSinGrat_ledstate, 0
                    ]  #ADD LOCATION

                elif MovSinGrat_features == 2:  # and movSinGrat_motionMode == 0: #2 = TempFreq
                    paras[repind * (len(stimId)) + localstimid, :] = [
                        MovSinGrat_SpatFreqVal,
                        tuning_stim_val[stimId[localstimid]],
                        MovSinGrat_contrast, MovSinGrat_MeanLum,
                        MovSinGrat_dirindex, MovSinGrat_t_before,
                        MovSinGrat_t_During, MovSinGrat_t_after,
                        MovSinGrat_t_triginit, MovSinGrat_ori,
                        MovSinGrat_ledstate, 0
                    ]  #ADD LOCATION

                elif MovSinGrat_features == 3:  #3 = contrast
                    paras[repind * (len(stimId)) + localstimid, :] = [
                        MovSinGrat_SpatFreqVal, MovSinGrat_tempFreqVal,
                        tuning_stim_val[stimId[localstimid]],
                        MovSinGrat_MeanLum, MovSinGrat_dirindex,
                        MovSinGrat_t_before, MovSinGrat_t_During,
                        MovSinGrat_t_after, MovSinGrat_t_triginit,
                        MovSinGrat_ori, MovSinGrat_ledstate, 0
                    ]  #ADD LOCATION

                #elif MovSinGrat_features == 4: #4 = location
                #    paras[repind*(len(stimId))+localstimid, :] = [MovSinGrat_SpatFreqVal, MovSinGrat_tempFreqVal, MovSinGrat_contrast, MovSinGrat_MeanLum, MovSinGrat_dirindex,
                #    MovSinGrat_t_before, MovSinGrat_t_During, MovSinGrat_t_after, MovSinGrat_t_triginit, MovSinGrat_ori, MovSinGrat_ledstate, 0] #ADD LOCATION

        #paratemp = [drumgrating_Ori, Motionmode, drumgrating_Amp_sinu, drumgrating_addblank]

        #setting up the parameters based on what was send in the paras variable
        #drumgrating_Ori = int(paratemp[0])
        #Motionmode = int(paratemp[1])
        #drumgrating_Amp_sinu = paratemp[2]
        #drumgrating_addblank = paratemp[3]

    if Synch:

        #waiting for "STR"
        while True:
            try:
                info = sock.recv(1024)
            except:
                pass
            if info == "STR":
                sock.sendto(("VS is running"), (Remote_IP, Remote_Port))
                break
            if mouse.getPressed()[1]:
                sock.close()
                return

    #generating the pixel angles relative to the mouse position based on the orientation of the stimulus
    #generating matrix that will be the place holder for every pixel
    pixelangle = np.empty(shape=[1, winWidth
                                 ])  #pixel has to be 2D since the image is 2D
    temp = np.array(range(winWidthofEachDisp))
    temp = temp.reshape(1, winWidthofEachDisp)  # assign the result; reshape does not modify in place (temp must be 2D)
    #tempPixelAngle = np.degrees(np.arctan((temp - (winWidthofEachDisp/2.0))*PixelSize*(2.0/DisplayFrameWidth))) + 45 #calculating the pixel angle for first monitor
    spatangperpix = np.degrees(np.arctan(PixelSize / ani_distance))
    tempPixelAngle = spatangperpix * temp

    for i in range(ScrnNum):
        pixelangle[:, i * winWidthofEachDisp:(
            i + 1
        ) * winWidthofEachDisp] = tempPixelAngle + 90 * i  #taking specific ranges within the full winWidth and replacing the values with the corresponding angles

    #Generating the VS based on the parameters in paras
    for m in xrange(tot_num_stim):

        paras[m, 11] = 1  #marks which stim have been presented to the animal

        tic = clock.getTime()

        if m == 0:
            SpatFreqDeg = paras[m, 0]
            TempFreq = paras[m, 1]
            contrast = paras[m, 2]
            MeanLum = paras[m, 3]
            dirindex = paras[m, 4]
            t_before = paras[m, 5]
            t_During = paras[m, 6]
            t_after = paras[m, 7]
            t_triginit = paras[m, 8]
            orientation = paras[m, 9]
            ledstate = paras[m, 10]

            pixelformeanlum = 2 * (np.exp(
                np.log(MovSinGrat_MeanLum / MovSinGrat_AmpFactor) /
                MovSinGrat_GammaFactor) / 255.0) - 1
            MovSinGrat_gray = MovSinGrat_MeanLum
            inc = MovSinGrat_gray * MovSinGrat_contrast

            #frames to be calculated per period
            frames = round(FR / TempFreq)

            phase = np.array(range(int(frames)))

            if MovSinGrat_Motionmode == 1:
                phase = (phase / float(round(frames))) * (2.0 * np.pi)

            elif MovSinGrat_Motionmode == 0:
                phase = MovSinGrat_Amp_sinu * np.sin(
                    (phase / frames) * 2 * np.pi) * SpatFreqDeg * 2 * np.pi

            #generating the pixel values for the stimulus

            #creating the list that will hold all frames
            texdata1D = []

            #generating the pixel values for vertical stimulus
            for i in range(int(frames)):
                texdata1DTmp = np.exp(
                    np.log((MovSinGrat_gray +
                            inc * np.sin(pixelangle * SpatFreqDeg * 2 * np.pi +
                                         phase[i])) / MovSinGrat_AmpFactor) /
                    MovSinGrat_GammaFactor)
                pixVal = 2 * (
                    texdata1DTmp /
                    255) - 1  #converting the pixel values from 0:255 to -1:1
                texdata1D.append(pixVal)

        else:
            if sum(abs(paras[m, :] - paras[m - 1, :])) > 1e-7:
                #if (not all([v == 0  for v in abs(paras[m, :] - paras[m-1, :])])):

                SpatFreqDeg = paras[m, 0]
                TempFreq = paras[m, 1]
                MovSinGrat_contrast = paras[m, 2]
                MovSinGrat_MeanLum = paras[m, 3]
                MovSinGrat_dirindex = paras[m, 4]
                t_before = paras[m, 5]
                t_During = paras[m, 6]
                t_afterVal = paras[m, 7]
                MovSinGrat_t_triginit = paras[m, 8]
                orientation = paras[m, 9]
                ledstate = paras[m, 10]

                pixelformeanlum = 2 * (np.exp(
                    np.log(MovSinGrat_MeanLum / MovSinGrat_AmpFactor) /
                    MovSinGrat_GammaFactor) / 255.0) - 1
                MovSinGrat_gray = MovSinGrat_MeanLum
                inc = MovSinGrat_gray * MovSinGrat_contrast

                #frames to be calculated per period
                frames = round(FR / TempFreq)

            phase = np.array(range(int(frames)))

            if MovSinGrat_Motionmode == 1:
                phase = (phase / float(round(frames))) * (2.0 * np.pi)

            elif MovSinGrat_Motionmode == 0:
                phase = MovSinGrat_Amp_sinu * np.sin(
                    (phase / frames) * 2 * np.pi) * SpatFreqDeg * 2 * np.pi

            #generating the pixel values for the stimulus

            #creating the list that will hold all frames
            texdata1D = []

            #generating the pixel values for vertical stimulus
            for i in range(int(frames)):
                texdata1DTmp = np.exp(
                    np.log((MovSinGrat_gray +
                            inc * np.sin(pixelangle * SpatFreqDeg * 2 * np.pi +
                                         phase[i])) / MovSinGrat_AmpFactor) /
                    MovSinGrat_GammaFactor)
                pixVal = 2 * (
                    texdata1DTmp /
                    255) - 1  #converting the pixel values from 0:255 to -1:1
                texdata1D.append(pixVal)

        #creating the looping variable for the stimulation depending on the value of MovSinGrat_addblank
        if MovSinGrat_addblank == 0 or MovSinGrat_addblank == 1:
            #this variable controls the looping and frame that is to be displayed
            frmNum = 0  #frame number within one cycle

        elif MovSinGrat_addblank == 2 and m == 0:
            #this variable controls the looping and frame that is to be displayed
            frmNum = 0  #frame number within one cycle

        #setting up the grating
        DrawTexture = visual.GratingStim(win=win,
                                         size=[2 * winWidth, 2 * winWidth],
                                         units='pix',
                                         tex=texdata1D[0],
                                         ori=orientation)

        if Synch:

            #waiting for "TRLstart", if TRLstart is sent this loop will send "TRLstart m" then break
            sock.settimeout(0.5)
            comm = [""]
            while True:
                try:
                    comm = sock.recvfrom(1024)
                except Exception:
                    pass

                if comm[0] == "TRLstart":
                    sock.sendto(("TRLstart " + str(m + 1)),
                                (Remote_IP, Remote_Port))
                    break

                elif comm[
                        0] == "ESC1":  #if 'ESC1' is in the buffer, return to front
                    sock.close()
                    return

                if mouse.getPressed()[1]:
                    sock.close()
                    print("Exit at ESC1")
                    return

        if MovSinGrat_addblank == 1.0:
            win.color = pixelformeanlum

        elif MovSinGrat_addblank == 0.0:
            DrawTexture.draw()

        elif MovSinGrat_addblank == 2.0:
            DrawTexture.tex = texdata1D[frmNum]
            DrawTexture.draw()
            frmNum = frmNum + 1

            if frmNum >= len(texdata1D):
                frmNum = 0

        #mask R and L screen to display stim on front screen only
        if one_screen:
            mask_L.draw()
            mask_R.draw()

        square1.draw()
        square2.draw()
        win.flip()

        #time before the stimulation
        toc = clock.getTime() - tic

        while toc < (t_before / 1000.0):

            toc = clock.getTime() - tic

            if MovSinGrat_addblank == 2:

                #assigning the texture using the corresponding frame
                DrawTexture.tex = texdata1D[frmNum]
                #this if statement is for exiting the stimulation
                if mouse.getPressed()[1]:

                    if Synch:
                        sock.close()
                    return

                frmNum = frmNum + 1

                if frmNum >= len(texdata1D):
                    frmNum = 0

                DrawTexture.draw()

                #mask R and L screen to display stim on front screen only
                if one_screen:
                    mask_L.draw()
                    mask_R.draw()

                square1.draw()
                square2.draw()
                win.flip()

        #t_triginit: initial timing for triggering the camera
        for i in range(int(FR * MovSinGrat_t_triginit / 1000.0)):
            if i < 3:
                square1.fillColor = [1, 1, 1]
                square2.fillColor = [-1, -1, -1]

            else:
                square1.fillColor = [-1, -1, -1]
                square2.fillColor = [-1, -1, -1]

            if MovSinGrat_addblank == 1.0:
                win.color = pixelformeanlum

            elif MovSinGrat_addblank == 0.0:
                DrawTexture.draw()

            elif MovSinGrat_addblank == 2:

                #assigning the texture using the corresponding frame
                DrawTexture.tex = texdata1D[frmNum]

                frmNum = frmNum + 1

                if frmNum >= len(texdata1D):
                    frmNum = 0

                DrawTexture.draw()

            if mouse.getPressed()[1]:
                if Synch:
                    sock.close()
                return

            #mask R and L screen to display stim on front screen only
            if one_screen:
                mask_L.draw()
                mask_R.draw()

            square1.draw()
            square2.draw()
            win.flip()

        #making the top square white
        square1.fillColor = [-1, -1, -1]
        square2.fillColor = [1, 1, 1]

        #drawing the frames on the window
        for frm in range(int(FR * t_During / 1000.0)):

            #assigning the texture using the corresponding frame
            DrawTexture.tex = texdata1D[frmNum]
            #this if statement is for exiting the stimulation
            if mouse.getPressed()[1]:

                if Synch:
                    sock.close()
                return

            frmNum = frmNum + 1

            if frmNum >= len(texdata1D):
                frmNum = 0

            DrawTexture.draw()

            #mask R and L screen to display stim on front screen only
            if one_screen:
                mask_L.draw()
                mask_R.draw()

            square1.draw()
            square2.draw()
            win.flip()

        #save vs data in .csv format

        #create a temp list variable that stores array values that will be appended
        save_row = paras[m].tolist()

        #open and append values to new file
        with open(fileName + '.csv', 'a') as f:

            for i in range(len(save_row)):

                f.write(str(save_row[i]) + ',')

            f.write('\n')

        if Synch:
            sock.sendto(("TRLdone " + str(m + 1)), (Remote_IP, Remote_Port))

        #changing the characteristics of the two squares at the bottom left corner
        square1.fillColor = [-1, -1, -1]
        square2.fillColor = [-1, -1, -1]

        #time after the stimulation
        for toc in range(int(t_after * FR / 1000.0)):

            if MovSinGrat_addblank == 1.0:
                win.color = pixelformeanlum

            elif MovSinGrat_addblank == 0.0:
                DrawTexture.draw()

            elif MovSinGrat_addblank == 2:

                #assigning the texture using the corresponding frame
                DrawTexture.tex = texdata1D[frmNum]
                frmNum = frmNum + 1

                if frmNum >= len(texdata1D):
                    frmNum = 0

                DrawTexture.draw()

            #mask R and L screen to display stim on front screen only
            if one_screen:
                mask_L.draw()
                mask_R.draw()

            square1.draw()
            square2.draw()
            win.flip()

        if Synch:

            #checking for stop button
            while True:

                try:
                    comm = sock.recvfrom(1024)
                except:
                    pass

                if comm[0] == "ESC1":
                    sock.close()
                    return

                elif comm[0] == "ESC0":
                    break

                elif mouse.getPressed()[1]:
                    sock.close()
                    print("Exit at ESC2")
                    return
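
The loop above that builds stimId interleaves one shuffled copy of the tuning indices per LED state. A minimal sketch of that interleaving as a standalone helper (build_stim_sequence is a hypothetical name, not part of the example):

import numpy as np

def build_stim_sequence(tuning_stim_ind, n_led, randomize=True):
    # one shuffled (or ordered) copy of the tuning indices per LED state,
    # placed at positions iled, iled + n_led, iled + 2*n_led, ...
    stim_id = [0] * (len(tuning_stim_ind) * n_led)
    for iled in range(n_led):
        order = list(tuning_stim_ind)
        if randomize:
            np.random.shuffle(order)
        for n in range(iled, len(stim_id), n_led):
            stim_id[n] = order[n // n_led]
    return stim_id

# e.g. build_stim_sequence(range(4), 2) might give [2, 0, 0, 3, 1, 1, 3, 2]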
Esempio n. 20
0
                                phase=0,
                                color=[max_contr, max_contr, max_contr],
                                colorSpace='rgb',
                                opacity=1,
                                blendmode='avg',
                                texRes=128,
                                interpolate=True,
                                depth=0.0)

clock = core.Clock()

max_resp_secs = 5

# Run-time loop for the drifting grating
keep_going = True
start_time = clock.getTime()

tf = 4  # temporal frequency in Hz; cyc_secs below is 1/tf
cyc_secs = 1 / tf  # in seconds
#max_contr = .025
stim_dur_secs = 20 / expInfo['frameRate']
motion_dir = +1  # rightward

while keep_going:
    secs_from_start = (start_time - clock.getTime())
    pr_grating.phase = motion_dir * (
        secs_from_start / cyc_secs
    )  # Shift phase as a proportion of elapsed cycle duration
    pr_grating.draw()
    win.flip()
    # Contrast ramp in, hold, down
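
The drift above advances the phase in proportion to elapsed time over the cycle duration, and the trailing comment points at a contrast ramp that is not shown. A small sketch of both pieces as pure functions (the names and the linear ramp shape are assumptions, not taken from the example):

def drift_phase(secs_from_start, cyc_secs, motion_dir=+1):
    # one full phase cycle (1.0 in PsychoPy phase units) per cyc_secs
    return motion_dir * (secs_from_start / cyc_secs)

def trapezoid_contrast(t, stim_dur_secs, ramp_secs, max_contr):
    # linear ramp up, hold at max_contr, linear ramp down at the end
    if t < ramp_secs:
        return max_contr * t / ramp_secs
    if t > stim_dur_secs - ramp_secs:
        return max_contr * max(0.0, (stim_dur_secs - t) / ramp_secs)
    return max_contr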
Esempio n. 21
0
def rf6x8(win):

    from psychopy import visual, event, clock, gui
    from win32api import GetSystemMetrics
    from datetime import datetime

    from init_para import (MovSinGrat_addblank, MovSinGrat_Amp_sinu, MovSinGrat_controlmod, MovSinGrat_dirindex, MovSinGrat_ori, 
    MovSinGrat_t_triginit, MovSinGrat_GammaFactor, MovSinGrat_AmpFactor, MovSinGrat_contrast, MovSinGrat_MeanLum,
    winWidth , winHeight, ScrnNum, PixelSize, winWidthofEachDisp, DisplayFrameWidth, FR, square1, square2, fontSize, fontClr, win, ani_distance)

    import socket
    import numpy as np
    import conv


    #PARAMETERS

    #stim time parameters
    t_before = 1000
    t_during = 3000
    t_after = 1000

    #contrast (stim colour) parameters
    meanLum = 55
    minLum = 0
    maxLum = meanLum * 2
    gammaFactor = 2.251
    ampFactor = 0.0007181
    
    winClr = 2*(np.exp(np.log(meanLum/ampFactor)/gammaFactor)/255.0) -1  #colour of background (in pix val)
    white = 2*(np.exp(np.log(maxLum/ampFactor)/gammaFactor)/255.0) -1 #convert maxLum into pix value
    black =  -1

    #stim order parameters
    repetition = 2
    num_dif_stim = 48*2 #number of locations (6 x 8 = 48) times the two colour values at 100% contrast => 48*2 = 96
    tot_num_stim = repetition * num_dif_stim #total number of stimuli that will be displayed in one experiment

    index = range(num_dif_stim) #numbers 0-95, one per combination of 6 x 8 location and black/white colour
    x_pos = ([0]*6 + [1]*6 +[2]*6 +[3]*6 +[4]*6 +[5]*6 +[6]*6 +[7]*6) * 2  #list of all possible x-coordinate for stim
    y_pos = range(6)*8*2  #list of all possible y-coordinate for stim
    colour_list = [white]*48 + [black]*48 

    #reshape data into numpy array
    x_pos = np.asarray(x_pos)
    y_pos = np.asarray(y_pos)
    colour_list = np.asarray(colour_list)

    #creating a matrix to store all stim ID (index), position and colour information
    loc_order = np.ones((96,3), dtype=int)
    colour_order = np.ones((96,1), dtype=float)
    
    loc_order[:,0] = index
    loc_order[:,1] = x_pos
    loc_order[:,2] = y_pos
    colour_order[:,0] = colour_list
    
    stim_order = np.concatenate((loc_order, colour_order),axis=1)

    #create random order for stimulus presentation
    np.random.shuffle(index) #this function shuffles the input in place (no need to assign a new var)


    #stim shape parameters
    tot_stim = 96
    stim_vertices = np.array(([-1./3, 1],[-1./4, 1], [-1./4, 2./3], [-1.0/3, 2./3]))
    stim_vertices.reshape(4, 2) #reshape to fit psychopy texture requirements 


    #creating visual stimulation display functionality 

    #creating mouse functionality
    mouse = event.Mouse(
        visible = True, 
        win = win
        )
        
    #Naming the experiment to create fileName (at the end of this function)
    instruction_text = visual.TextStim(win, text = u'Name experiment and press enter to start.', pos=(0, 0.5))
    answer_text = visual.TextStim(win)

    #show instructions
    win.color = winClr
    instruction_text.draw()
    square1.draw()  #have to draw the trigger square; otherwise a transient white flash will happen
    square2.draw()
    win.flip()

    #get users input for experiment name
    now = True
    answer_text.text = ''
    while now:
        key = event.waitKeys()[0]
        # Append the typed character
        if key in '1234567890abcdfeghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_-':
            answer_text.text += key

        # Delete last character, if there are any chars at all
        elif key == 'backspace' and len(answer_text.text) > 0:
            answer_text.text = answer_text.text[:-1]

        # Stop collecting response and return it
        elif key == 'return':
            expName = answer_text.text
            print ('expName IN here: ', expName)
            now = False
        
        # Show current answer state
        instruction_text.draw()
        answer_text.draw()
        square1.draw()  #have to draw the trigger square; otherwise a transient white flash will happen
        square2.draw()
        win.flip()
            
    #setting name of file which will be used to save order of vs stim displayed; 
    date = datetime.today().strftime('%Y%m%d_%H%M%S') #extract today's date
    fileName = expName + '_vs_Rf6x8' + date  



    #loop over each stim and display it
    for rep in range(len(index)*repetition):  #will loop around for total Repetitions (96*repetition)
        
        tic = clock.getTime()
        
        stim_ind = rep % len(index)  #gives a number between 0 and 95 representing each stimulus, allowing the loop to cycle through all stimuli on every repetition
        
        #assign position of stimulus
        i, j = stim_order[index[stim_ind]][1], stim_order[index[stim_ind]][2]
        #assign colour of stimulus
        colour = stim_order[index[stim_ind]][3]
        
        #Create the stimulus: a single rect with x = screenwidth/8 and y = screenheight/6
        stim = visual.ShapeStim(
            win = win, 
            units = "norm",
            pos = (i * (1.0/12.0) , j * (-1.0/3.0)),  #offsets the stim by i/12 to the right and j/3 down (in normalized units); if (i,j) = (0,0), display at the top left corner of the middle screen
            fillColor = colour,
            vertices = stim_vertices, 
            lineWidth = 0
            )
        
        #save vs data in .csv format
        #create a temp list variable that stores array values that will be appended
        save_row = stim_order[index[stim_ind]].tolist()
        
        #open and append values to new file
        with open(fileName + '.csv', 'a') as f: 
            
            for i in range(len(save_row)):
                
                f.write(str(save_row[i]) + ',')
            
            f.write('\n')
        
        #Display stimulation using a series of while loops 
        win.color = winClr
        square1.draw()
        square2.draw()
        win.flip()

        #time before the stimulation
        toc = clock.getTime() - tic
        
        while toc < (t_before/1000.0):
            
            toc = clock.getTime() - tic 
            
            #this if statement is for exiting the stimulation
            if mouse.getPressed()[1]:
                return
            
            #display trigger squares 
            square1.draw()
            square2.draw()
            win.flip()
            
        #t_triginit: initial timing for triggering the camera
        for i in range(int(FR*MovSinGrat_t_triginit/1000.0)):
            
            if mouse.getPressed()[1]:
                return
                
            if i < 3:
                square1.fillColor = [1,1,1]
                square2.fillColor = [-1,-1,-1]
            
            else:
                square1.fillColor = [-1,-1,-1]
                square2.fillColor = [-1,-1,-1]
                
            win.color = winClr
            square1.draw()
            square2.draw()
            win.flip()
            
        #making the top square white
        square1.fillColor = [-1,-1,-1]
        square2.fillColor = [1,1,1]
        
        
        #drawing the stimulus on the window
        for frm in range(int(FR*t_during/1000.0)):
            
            if mouse.getPressed()[1]:
                return
            
            stim.draw()
            
            square1.draw()
            square2.draw()
            win.flip()
            
        #changing the characteristics of the two squares at the bottom left corner
        square1.fillColor = [-1,-1,-1]
        square2.fillColor = [-1,-1,-1]
            
        #time after the stimulation
        for toc in range(int(t_after*FR/1000.0)):
            
            #win.color = winClr
            
            square1.draw()
            square2.draw()
            win.flip()
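
rf6x8 above builds its stim_order table from hand-written position and colour lists. A condensed sketch of the same table construction with numpy (build_rf_stim_table is a hypothetical helper; the columns are index, x cell, y cell, colour, as in the example):

import numpy as np

def build_rf_stim_table(white, black, n_cols=8, n_rows=6):
    n = n_cols * n_rows * 2                                  # 48 locations x 2 colours = 96
    x = np.tile(np.repeat(np.arange(n_cols), n_rows), 2)    # 0,...,0,1,...,1 per column, twice
    y = np.tile(np.arange(n_rows), n_cols * 2)               # 0..5 repeated for every column
    colour = np.concatenate([np.full(n_cols * n_rows, white),
                             np.full(n_cols * n_rows, black)])
    return np.column_stack([np.arange(n), x, y, colour])     # shape (96, 4), like stim_order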
Esempio n. 22
0
    # monitoring signal
    on = visual.GratingStim(win=mywin, size=1, pos=[7, -7], sf=0, color=1)
    off = visual.GratingStim(win=mywin, size=1, pos=[7, -7], sf=0, color=-1)

    # on top of orientation protocol
    ORI = []
    for i, theta in enumerate(np.linspace(0, 5 * 180 / 6., 6)):
        ORI.append(visual.GratingStim(win=mywin, size=1000, sf=1, ori=theta))
    blank = visual.GratingStim(win=mywin, size=1000, pos=[0, 0], sf=0, color=0)

    Ton, Toff = 200, 800  # ms
    Tfull, Tfull_first = int(Ton + Toff), int((Ton + Toff) / 2.)
    for i in range(len(ORI)):

        #draw the stimuli and update the window
        start = clock.getTime()
        while (clock.getTime() - start) < 5:
            ORI[i].draw()
            if (int(1e3*clock.getTime()-1e3*start)<Tfull) and\
               (int(1e3*clock.getTime()-1e3*start)%Tfull_first<Ton):
                on.draw()
            elif int(1e3 * clock.getTime() - 1e3 * start) % Tfull < 200:
                on.draw()
            else:
                off.draw()
            mywin.flip()
        blank.draw()
        off.draw()
        mywin.flip()
        clock.wait(2)
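
The on/off photodiode square above is timed directly inside the drawing loop. A sketch of that schedule as a pure predicate, assuming the same Ton/Toff values and the halved first period used in the example (the function name is an assumption):

def monitor_on(ms_from_start, Ton=200, Toff=800):
    # the square is "on" for the first Ton ms of each Tfull period;
    # the very first period uses the shortened Tfull_first, as above
    Tfull = Ton + Toff
    Tfull_first = Tfull // 2
    if ms_from_start < Tfull:
        return (ms_from_start % Tfull_first) < Ton
    return (ms_from_start % Tfull) < Ton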
Esempio n. 23
0
def bandit_task(selected_value, arms, stimuli, feedback, window):
    # define instructions
    # print(messages:["welcome"],["break"],["thanks"])
    print('selected_value is %d' % selected_value)
    instruction_result_text = ""
    if selected_value == 1:
        instruction_result_text, is_exp = get_random_instructions(
            [[INSTRUCTIONS_REWARD_EXP, True], [INSTRUCTIONS_REWARD_CONTR, False]])

    if selected_value == 2:
        instruction_result_text, is_exp = get_random_instructions(
            [[INSTRUCTIONS_PUNISHMENT_EXP, True], [INSTRUCTIONS_PUNISHMENT_CONTR, False]])

    elif selected_value < 1 or selected_value > 2:
        sys.exit("Unknown condition number")

    print('instruction_result is %s and is_exp=%s' % (instruction_result_text, is_exp))

    text_stim_screen = psychopy.visual.TextStim(
        win=window,
        text=instruction_result_text,
        color=(-1, -1, -1), height=30.0)
    text_stim_screen.draw(window)
    window.flip()
    while True:
        print('in while...')
        response = psychopy.event.waitKeys(keyList=['space'])
        print('after response')
        print(response)
        if 'space' in str(response):
            print("selected space!")
            break  # break out of the while-loop

    # experiments
    if is_exp and cond_num == 1:
        experiment_trial(True)

    if is_exp and cond_num == 2:
        experiment_trial(False)

    # add testing
    # control
    if is_exp is False and cond_num == 1:
        control_trial(True)

    if is_exp is False and cond_num == 2:
        control_trial(False)

    print('selected_value is %d' % selected_value)

    # starting screen
    # screen experiments

    if is_exp and (cond_num == 1 or cond_num == 2):
        print('display fixation cross and arms')
        while clock.getTime() < 2.0:
            draw(fixation_cross)
            arms, is_exp = get_random_toys([[toy1, True], [toy2, True]])  # they are always random
            if clock.getTime() > 2.0 and cond_num == 1:
                draw(sadface)
                arms, is_exp = get_random_toys([[toy1, True], [toy2, True]])
            elif clock.getTime() > 2.0 and cond_num == 2:
                draw(neutralfaceface)
        arms, is_exp = get_random_toys([[toy1, True], [toy2, True]])
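
bandit_task relies on get_random_instructions and get_random_toys, which are not shown here. A plausible minimal sketch of the random-pick helper, assuming each option is a [value, is_exp] pair as in the calls above:

import random

def get_random_instructions(options):
    # pick one [text, is_exp] pair at random and return it unpacked
    text, is_exp = random.choice(options)
    return text, is_exp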
Esempio n. 24
0
def show_practice_trial():
    win.flip()
    # randomly set motion direction of grating on each trial
    if numpy.random.random() >= 0.5:
        this_dir = +1 # leftward
        this_dir_str='left'
    else:
        this_dir = -1 # rightward
        this_dir_str='right'
    
    this_stim_secs = .5
    this_grating_degree = 4
    this_spf = 1.2
    keep_going = 1
    
    pr_grating = visual.GratingStim(
        win=win, name='grating_murray',units='deg', 
        tex='sin', mask='gauss',
        ori=params.grating_ori, pos=(0, 0), size=this_grating_degree, sf=this_spf, phase=0,
        color=0, colorSpace='rgb', opacity=1, blendmode='avg',
        texRes=128, interpolate=True, depth=0.0)
    
    # fixation until keypress
    fixation.draw()
    win.flip()
    event.waitKeys()
    win.flip()
    
    # ISI
    core.wait(params.fixation_grating_isi)
    
    # show grating
    start_time = clock.getTime()
    while keep_going:
        secs_from_start = (start_time - clock.getTime())
        pr_grating.phase = this_dir*(secs_from_start/params.cyc_secs)
        
        # Modulate contrast
        this_contr = .98
        pr_grating.color = this_contr
    
        # Draw next grating component
        pr_grating.draw()
        win.flip()
        grating_start = clock.getTime()
    
        # Start collecting responses
        thisResp = None
    
        # Is stimulus presentation time over?
        if (clock.getTime()-start_time > this_stim_secs):
            win.flip()
            keep_going = False 
            
        # check for quit (typically the Esc key)
        if kb.getKeys(keyList=["escape"]):
            thisResp = 0
            rt = 0
                    
            print("Exiting program.")
            core.quit()
    
    # clear screen, get response
    if params.show_response_frame:
        respond.draw()
        win.flip()
    start_resp_time = clock.getTime()
    
    # Show response fixation
    while thisResp is None:
        allKeys = event.waitKeys()
        rt = clock.getTime() - start_resp_time
        for thisKey in allKeys:
            if ((thisKey == 'left' and this_dir == -1) or
                (thisKey == 'right' and this_dir == +1)):
                thisResp = 0 # incorrect
            elif ((thisKey == 'left' and this_dir == +1) or
                (thisKey == 'right' and this_dir == -1)):
                thisResp = 1  # correct
                
            elif thisKey in ['q', 'escape']:
                test = False
                core.quit()  # abort experiment
    #-----------------------------------------------------------------------------------------------------------
    
    win.flip()
    beep.setSound('A', secs=0.2, hamming=True)
    beep.setVolume(0.5)
    if (thisResp == 0):
        instructionsIncorrect.draw()
    else:
        instructionsCorrect.draw()
        # Feedback
        beep.play(when=win)    # Only first plays?
        # donut.draw()            # Try visual feedback for now
    win.flip()
    
    # wait
    core.wait(1)
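
The response loop in show_practice_trial scores the keypress against the motion direction (this_dir = +1 is labelled leftward in this example). A compact sketch of that scoring rule as a helper (score_response is a hypothetical name):

def score_response(key, this_dir):
    # returns 1 for a correct direction judgement, 0 for incorrect,
    # None for any other key; +1 = leftward, -1 = rightward as above
    if key not in ('left', 'right'):
        return None
    pressed_dir = +1 if key == 'left' else -1
    return 1 if pressed_dir == this_dir else 0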
Esempio n. 25
0
def drumgrating(SpatFreqDeg, TempFreq, t_before, t_During, t_after, Synch, Motionmode): #any paramter potentially changed by user in front.py 
    
    from psychopy import visual
    from psychopy import event
    from psychopy import clock
    from win32api import GetSystemMetrics

    from init_para import (drumgrating_addblank, drumgrating_Amp_sinu, drumgrating_controlmod, drumgrating_dirindex, drumgrating_Ori, 
    drumgrating_parasize, drumgrating_t_triginit, drumgrating_GammaFactor, drumgrating_AmpFactor, drumgrating_contrast, drumgrating_MeanLum, 
    winWidth , winHeight, ScrnNum, PixelSize, winWidthofEachDisp, DisplayFrameWidth, FR, square1, square2, fontSize, fontClr, win, Local_IP, Local_Port, Remote_IP, Remote_Port)
    
    import socket
    import numpy as np
    import conv
    
    #creating mouse functionality
    mouse = event.Mouse(
        visible = True, 
        win = win
        )
    if Synch:
        
        #creating the socket in which communications will take place
        sock = socket.socket(
            socket.AF_INET, 
            socket.SOCK_DGRAM
        )
        
        #binding the local IP address and local port 
        sock.bind((Local_IP, Local_Port))
        
        #creating textbox showing that this VS computer is waiting for UDP signal
        standBy= visual.TextBox(
            window=win,
            text=("Waiting for starting the control computer."),
            font_size = fontSize,
            font_color=fontClr,
            pos=(-2690 ,475),
            size=(300,37),
            units='pix',
            grid_horz_justification='center',
            grid_vert_justification='center')
        
        standBy.draw()
        square1.draw()
        square2.draw()
        win.flip()
        
        try:
            #wait for the command 'gammacorrection'
            info = sock.recv(1024)
        except Exception:
            sock.close()
            print("Did not recieve info, connection timeout.")
            return
            
        
        #sending 'gammafloatampfloat' to the second PC
        sock.sendto(("gamma" + str(drumgrating_GammaFactor) + "amp" + str(drumgrating_AmpFactor)), (Remote_IP, Remote_Port))
        
        #creating textbox showing that this VS computer is waiting for UDP signal
        standBy= visual.TextBox(
            window=win,
            text=("Control Computer is Ready."),
            font_size = fontSize,
            font_color=fontClr,
            pos=(-2690 ,475),
            size=(300,37),
            units='pix',
            grid_horz_justification='center',
            grid_vert_justification='center')
        standBy.draw()
        
        try:
            #waiting for the signal autoVs
            drumgrating_controlmod = sock.recv(1024)
        except Exception:
            sock.close()
            print("Did not recieve drumgrating_controlmod, connection timeout.")
            return
        
        #sending 'Wait for parameters' to the second PC
        sock.sendto("Wait for parameters", (Remote_IP, Remote_Port))
        
        if drumgrating_controlmod == 'autoVS':
            
            try:
                drumgrating_parasize = sock.recv(1024)
            except Exception:
                sock.close()
                print("Did not recieve parasize, connection timeout.")
                return
            
            #sending a completion transcript
            sock.sendto("read parasize", (Remote_IP, Remote_Port))
            
            #converting the string received into int
            drumgrating_parasize = conv.deleteParasize(drumgrating_parasize)
            
            #creating the array in which the parameters will be stored
            paras = np.empty(shape=[drumgrating_parasize, 9])
            
            #adding the parameters to the array
            
            #this for loop receives the 9 parameters for all the stimulations and adds them to an array
            for i in range(drumgrating_parasize): #start from 0 to parasize[0] - 1
                temp = sock.recv(1024)
                temp =conv.convStr2Dig(temp)
                #adding the parameters to the array (temp) at position index
                paras[i, :] = temp
            
            sock.sendto("Para DONE", (Remote_IP, Remote_Port))
            
            try:
                #receiving the orientation for all stimuli: 1 for vertical, 0 for horizontal
                paratemp = sock.recv(1024)
            except Exception:
                sock.close()
                print("Did not recieve message, connection timeout.")
                return
            
            paratemp = conv.convStr2Dig(paratemp)
            
            #setting up the parameters based on what was sent in the paras variable
            drumgrating_Ori = int(paratemp[0])
            Motionmode = int(paratemp[1])
            drumgrating_Amp_sinu = paratemp[2]
            drumgrating_addblank = paratemp[3]
            
            sock.sendto("Para DONE", (Remote_IP, Remote_Port))
        
        elif drumgrating_controlmod == 'manualVS':
            return
    
    #if Synch is False, this else condition will make the parameters in the same format as if Synch was True
    else:
        
        #creating the array in which the parameters will be stored
        paras = np.empty(shape=[drumgrating_parasize, 9])
        
        #adding the parameters to the array
        for i in range(drumgrating_parasize): #start from 0 to parasize[0] - 1
            
            #adding the parameters as an array at index i
            paras[i, :] = [SpatFreqDeg, TempFreq, drumgrating_contrast, drumgrating_MeanLum, drumgrating_dirindex, t_before, t_During, t_after, drumgrating_t_triginit]
        
        paratemp = [drumgrating_Ori, Motionmode, drumgrating_Amp_sinu, drumgrating_addblank]
        
        #setting up the parameters based on what was sent in the paras variable
        drumgrating_Ori = int(paratemp[0])
        Motionmode = int(paratemp[1])
        drumgrating_Amp_sinu = paratemp[2]
        drumgrating_addblank = paratemp[3]
        
    if Synch:
        #get file name $$$$$$$$$$$$$$$$$$$$
        while True:
            try:
                info = sock.recv(1024)
            except:
                pass
            if info.strip(): #strip spaces
                print (info)
                sock.sendto(("nex"), (Remote_IP, Remote_Port))
                break
            if mouse.getPressed()[1]:
                sock.close()
                return
        #$$$$$$$$$$$$$$$$$$$$$$$$$$
        #waiting for "STR"
        while True:
            try:
                info = sock.recv(1024)
            except:
                pass
            if info == "STR":
                sock.sendto(("VS is running"), (Remote_IP, Remote_Port))
                break
            if mouse.getPressed()[1]:
                sock.close()
                return
    
    #generating the pixel angles relative to the mouse position based on the orientation of the stimulus
    if drumgrating_Ori == 1:
        
        #generating matrix that will be the place holder for every pixel 
        pixelangle = np.empty(shape=[1, winWidth]) #pixel has to be 2D since the image is 2D
        temp = np.array(range(winWidthofEachDisp)) 
        temp = temp.reshape(1,winWidthofEachDisp)  # assign the result; reshape does not modify in place (temp must be 2D)
        tempPixelAngle = np.degrees(np.arctan((temp - (winWidthofEachDisp/2.0))*PixelSize*(2.0/DisplayFrameWidth))) + 45 #calculating the pixel angle for first monitor
        
        for i in range(ScrnNum):
            pixelangle[:,i*winWidthofEachDisp: (i + 1)*winWidthofEachDisp ] = tempPixelAngle + 90*i #taking specific ranges within the full winWidth and replacing the values with the corresponding angles
    
    else:
        return
    
        
    for m in range(drumgrating_parasize):
        
        tic = clock.getTime()
        
        if m == 0: 
            SpatFreqDeg = paras[m, 0]
            TempFreq = paras[m, 1]
            drumgrating_contrast = paras[m, 2]
            drumgrating_MeanLum = paras[m, 3]
            drumgrating_dirindex = paras[m, 4]
            t_before = paras[m, 5]
            t_During = paras[m, 6]
            t_after = paras[m, 7]
            drumgrating_t_triginit = paras[m, 8]
            pixelformeanlum = 2*(np.exp(np.log(drumgrating_MeanLum/drumgrating_AmpFactor)/drumgrating_GammaFactor)/255.0) -1
            drumgrating_gray = drumgrating_MeanLum
            inc = drumgrating_gray*drumgrating_contrast
            
            #frames to be calculated per period
            frames = round(FR/TempFreq)
            
            phase = np.array(range(int(frames)))
            
            if Motionmode == 1:
                phase = (phase/float(round(frames)))*(2.0*np.pi)
            
            elif Motionmode == 0:
                phase = drumgrating_Amp_sinu*np.sin((phase/frames)*2*np.pi)*SpatFreqDeg*2*np.pi
            
            #generating the pixel values for the stimulus depending on the orientation of the stimulus 
            if drumgrating_Ori == 1:
                
                #creating the list that will hold all frames
                texdata1D = []
                
                #generating the pixel values for vertical stimulus
                for i in range(int(frames)):
                    texdata1DTmp = np.exp(np.log((drumgrating_gray + inc*np.sin(pixelangle*SpatFreqDeg*2*np.pi + phase[i]))/drumgrating_AmpFactor)/drumgrating_GammaFactor)
                    pixVal = 2*(texdata1DTmp/255) - 1 #converting the pixel values from 0:255 to -1:1
                    texdata1D.append(pixVal)
                
            
            else:
                return
        
        else:
            if sum(abs(paras[m, :] - paras[m-1, :])) > 1e-7:
            #if (not all([v == 0  for v in abs(paras[m, :] - paras[m-1, :])])):
                
                SpatFreqDeg = paras[m, 0]
                TempFreq = paras[m, 1]
                drumgrating_contrast = paras[m, 2]
                drumgrating_MeanLum = paras[m, 3]
                drumgrating_dirindex = paras[m, 4]
                t_before = paras[m, 5]
                t_During = paras[m, 6]
                t_afterVal = paras[m, 7]
                drumgrating_t_triginit = paras[m, 8]
                pixelformeanlum = 2*(np.exp(np.log(drumgrating_MeanLum/drumgrating_AmpFactor)/drumgrating_GammaFactor)/255.0) -1
                drumgrating_gray = drumgrating_MeanLum
                inc = drumgrating_gray*drumgrating_contrast
                
                #frames to be calculated per period
                frames = round(FR/TempFreq)
                
            
            phase = np.array(range(int(frames)))
            
            if Motionmode == 1:
                phase = (phase/float(round(frames)))*(2.0*np.pi)
            
            elif Motionmode == 0:
                phase = drumgrating_Amp_sinu*np.sin((phase/frames)*2*np.pi)*SpatFreqDeg*2*np.pi
            
            #generating the pixel values for the stimulus depending on the orientation of the stimulus 
            if drumgrating_Ori == 1:
                
                #creating the list that will hold all frames
                texdata1D = []
                
                #generating the pixel values for vertical stimulus
                for i in range(int(frames)):
                    texdata1DTmp = np.exp(np.log((drumgrating_gray + inc*np.sin(pixelangle*SpatFreqDeg*2*np.pi + phase[i]))/drumgrating_AmpFactor)/drumgrating_GammaFactor)
                    pixVal = 2*(texdata1DTmp/255) - 1 #converting the pixel values from 0:255 to -1:1
                    texdata1D.append(pixVal)
                
            
            else:
                return
        
        #creating the looping variable for the stimulation depending on the value of drumgrating_addblank
        if drumgrating_addblank == 0 or drumgrating_addblank == 1:
            #this variable controls the looping and frame that is to be displayed
            frmNum = 0 #frame number within one cycle
            
        elif drumgrating_addblank == 2 and m == 0:
            #this variable controls the looping and frame that is to be displayed
            frmNum = 0 #frame number within one cycle
        
        #setting up the grating
        DrawTexture = visual.GratingStim(
            win=win,
            size = [winWidth, winHeight],
            units = 'pix',
            tex=texdata1D[0]
            )
        
        if Synch:
            
            #waiting for "TRLstart", if TRLstart is sent this loop will send "TRLstart m" then break
            sock.settimeout(0.5)
            comm = [""]
            while True:
                try:
                    comm = sock.recvfrom(1024)
                except Exception:
                    pass
                    
                if comm[0] == "TRLstart":
                    sock.sendto(("TRLstart " + str(m +1)), (Remote_IP, Remote_Port))
                    break
                    
                elif comm[0] == "ESC1": #if 'ESC1' is in the buffer, return to front
                    sock.close()
                    return
                    
                if mouse.getPressed()[1]:
                    sock.close()
                    print("Exit at ESC1")
                    return
        
        if drumgrating_addblank == 1.0:
            win.color = pixelformeanlum
        
        elif drumgrating_addblank == 0.0:
            DrawTexture.draw()
        
        elif drumgrating_addblank == 2.0:
            DrawTexture.tex = texdata1D[frmNum]
            DrawTexture.draw()
            frmNum = frmNum + 1
            
            if frmNum >= len(texdata1D):
                    frmNum = 0
                    
        square1.draw()
        square2.draw()
        win.flip()
        
        #time before the stimulation
        toc = clock.getTime() - tic
        
        while toc < (t_before/1000.0):
            
            toc = clock.getTime() - tic
            
            if drumgrating_addblank == 2:
                
                #assigning the texture using the corresponding frame
                DrawTexture.tex = texdata1D[frmNum]
                #this if statement is for exiting the stimulation
                if mouse.getPressed()[1]:
                    
                    if Synch:
                        sock.close()
                    return
                
                frmNum = frmNum + 1
                
                if frmNum >= len(texdata1D):
                    frmNum = 0
                
                DrawTexture.draw()
                square1.draw()
                square2.draw()
                win.flip()
                
        
        #t_triginit: initial timing for triggering the camera
        for i in range(int(FR*drumgrating_t_triginit/1000.0)):
            if i < 3:
                square1.fillColor = [1,1,1]
                square2.fillColor = [-1,-1,-1]
            
            else:
                square1.fillColor = [-1,-1,-1]
                square2.fillColor = [-1,-1,-1]
            
            if drumgrating_addblank == 1.0:
                win.color = pixelformeanlum
            
            elif drumgrating_addblank == 0.0:
                DrawTexture.draw()
            
            elif drumgrating_addblank == 2.0:
                
                #assigning the texture using the corresponding frame
                DrawTexture.tex = texdata1D[frmNum]
                
                frmNum = frmNum + 1
                
                if frmNum >= len(texdata1D):
                    frmNum = 0
                
                DrawTexture.draw()
            
            if mouse.getPressed()[1]:
                if Synch:
                    sock.close()
                return
            
            square1.draw()
            square2.draw()
            win.flip()
        
        #making the top square white
        square1.fillColor = [-1,-1,-1]
        square2.fillColor = [1,1,1]
        
        
        
        #drawing the frames on the window
        for frm in range(int(FR*t_During/1000.0)):
            
            #assigning the texture using the corresponding frame
            DrawTexture.tex = texdata1D[frmNum]
            #this if statement is for exiting the stimulation
            if mouse.getPressed()[1]:
                
                if Synch:
                    sock.close()
                return
            
            frmNum = frmNum + 1
            
            if frmNum >= len(texdata1D):
                frmNum = 0
            
            DrawTexture.draw()
            square1.draw()
            square2.draw()
            win.flip()
        
        if Synch:
            sock.sendto(("TRLdone " + str(m +1)), (Remote_IP, Remote_Port))
        
        #changing the characteristics of the two squares at the bottom left corner
        square1.fillColor = [-1,-1,-1]
        square2.fillColor = [-1,-1,-1]
        
        #time after the stimulation
        for toc in range(int(t_after*FR/1000.0)):
            
            if drumgrating_addblank == 1.0:
                win.color = pixelformeanlum
                
            elif drumgrating_addblank == 0.0:
                DrawTexture.draw()
            
            elif drumgrating_addblank == 2:
                
                #assigning the texture using the corresponding frame
                DrawTexture.tex = texdata1D[frmNum]
                frmNum = frmNum + 1
                
                if frmNum >= len(texdata1D):
                    frmNum = 0
                
                DrawTexture.draw()
                
            square1.draw()
            square2.draw()
            win.flip()
        
        if Synch:
            
            #checking for stop button
            while True:
                
                try:
                    comm = sock.recvfrom(1024)
                except:
                    pass
                
                if comm[0] == "ESC1":
                    sock.close()
                    return 
                
                elif comm[0] == "ESC0":
                    break
                
                elif mouse.getPressed()[1]:
                    sock.close()
                    print("Exit at ESC2")
                    return 
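
Both drumgrating above and the moving-grating example earlier convert a target luminance into a PsychoPy pixel value through the same inverse-gamma expression. A sketch of that conversion factored into one helper (lum_to_pixval is an assumed name; the model lum = amp_factor * pix255**gamma_factor is inferred from the expression used in the code):

import numpy as np

def lum_to_pixval(lum, gamma_factor, amp_factor):
    # invert lum = amp_factor * pix255**gamma_factor, then map 0..255 to -1..1
    pix255 = np.exp(np.log(lum / amp_factor) / gamma_factor)
    return 2.0 * (pix255 / 255.0) - 1.0

# e.g. pixelformeanlum above corresponds to
# lum_to_pixval(drumgrating_MeanLum, drumgrating_GammaFactor, drumgrating_AmpFactor)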
Esempio n. 26
0
                send_triggers(init=False, sending=True, value=60)
            else:
                send_triggers(init=False, sending=True, value=61)

        # clear the keyboard input
        event.clearEvents(eventType="keyboard")

        # Wait for the response
        if speedy:
            start_time = 0
            stop_time = np.random.randint(250, 1000) / np.random.randint(
                901, 1000)
            keys = np.random.choice(["f", "j", None])
        else:
            too_slow = True
            start_time = clock.getTime()
            keys = event.waitKeys(keyList=["f", "j", "escape"],
                                  maxWait=max_resp_time)
            stop_time = clock.getTime()

        if keys is None:

            # triggers: response locked - no answer
            if EEG_measure:
                send_triggers(init=False, sending=True, value=72)

            reaction_time = max_resp_time * 1000
            resp = np.array([-1.])  # no response
            accurate = False
        else:
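
The snippet above times the response with clock.getTime() around event.waitKeys() and falls back to the maximum wait when no key arrives. A hedged sketch of that bookkeeping as one function (classify_response and the correct_key comparison are assumptions, not taken from the example):

def classify_response(keys, start_time, stop_time, max_resp_time, correct_key):
    # keys is the event.waitKeys() result; None means the maxWait elapsed
    if keys is None:
        return max_resp_time * 1000.0, None, False      # RT in ms, no response, inaccurate
    reaction_time = (stop_time - start_time) * 1000.0    # ms
    resp = keys[0]
    return reaction_time, resp, resp == correct_key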
Esempio n. 27
0
    window.callOnFlip(biopac.setData, biopac, 0)
window.flip()

# wait until trigger is pressed- or fMRI scanner trigger
while trigger != validTrigger:
    if test == 1:
        trigger = event.getKeys(keyList=['space'])
        if trigger:
            trigger = trigger[0]
    else:
        trigger = event.getKeys(keyList=['5'])  #ser.read()
        if trigger:
            trigger = trigger[0]
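# A hedged sketch (not part of the original script) of the loop above factored
# into a helper: block until either the space bar (test mode) or the scanner's
# '5' pulse arrives, then return the key. wait_for_trigger is an assumed name,
# and `event` is the psychopy.event module this script already uses.
def wait_for_trigger(test_mode, valid_key='5'):
    key_list = ['space'] if test_mode else [valid_key]
    while True:
        keys = event.getKeys(keyList=key_list)
        if keys:
            return keys[0]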
window.mouseVisible = False
# start experiment timing
experimentStart = clock.getTime()

# counter for looping through ITI list
ITI_counter = 0

# create timer
timer = core.Clock()

# main experiment loop
for key, task in task_order.items():
    print(key)  # get rid of this
    if biopac_exists:
        window.callOnFlip(biopac.setData, biopac, 0)
        window.callOnFlip(biopac.setData, biopac, word2biopaccode[key])