Exemplo n.º 1
0
    def __init__(self):
        """Configure the creativity-task battery and its timing.

        Sets up word-trio stimuli (rat* attributes) and single-object
        stimuli (aut* attributes) plus per-item wait times and round
        counts.  NOTE(review): "RAT" presumably stands for Remote
        Associates Test and "AUT" for Alternate Uses Task, inferred from
        the attribute prefixes and stimulus style -- TODO confirm.
        """
        LatentModule.__init__(self)

        # MARKERS LEGEND (event-marker code ranges sent during the run)
        # 0: beginning
        # 10+: AUTs
        # 20+: RATs

        self.ratWait = 15                    # this will be the wait time (seconds) for each trio in the RAT
        self.ratRounds = 25                  # number of word trios to be presented (number of elements in ratGroup)
        # two counterbalanced stimulus sets of 25 word trios each
        self.ratGroup1 = ["COTTAGE SWISS CAKE", "SHOW LIFE ROW", "DUCK FOLD DOLLAR", "ROCKING WHEEL HIGH", "FOUNTAIN BAKING SHOP",
                          "AID RUBBER WAGON", "CRACKER FLY FIGHTER", "CANE DADDY PLUM", "DREAM BREAK LIGHT", "POLITICAL SURPRISE LINE",
                          "PIECE MIND DATING", "FLOWER FRIEND SCOUT", "PRINT BERRY BIRD", "DATE ALLEY FOLD", "CADET CAPSULE SHIP",
                          "STICK MAKER POINT", "FOX MAN PEEP", "DUST CEREAL FISH", "FOOD FORWARD BREAK", "PEACH ARM TAR",
                          "PALM SHOE HOUSE", "WHEEL HAND SHOPPING", "HOME SEA BED", "SANDWICH HOUSE GOLF", "SAGE PAINT HAIR"]

        self.ratGroup2 = ["LOSER THROAT SPOT",  "NIGHT WRIST STOP", "DEW COMB BEE", "PRESERVE RANGER TROPICAL", "FLAKE MOBILE CONE",
                          "SAFETY CUSHION POINT", "FISH MINE RUSH", "MEASURE WORM VIDEO", "RIVER NOTE ACCOUNT", "HIGH DISTRICT HOUSE",
                          "WORM SHELF END", "SENSE COURTESY PLACE", "PIE LUCK BELLY", "OPERA HAND DISH", "FUR RACK TAIL",
                          "HOUND PRESSURE SHOT", "SLEEPING BEAN TRASH", "LIGHT BIRTHDAY STICK", "SHINE BEAM STRUCK", "WATER MINE SHAKER",
                          "BASKET EIGHT SNOW", "RIGHT CAT CARBON", "NUCLEAR FEUD ALBUM", "CROSS RAIN TIE", "FRENCH CAR SHOE"]

        self.autWait = 60                   # this will be the wait time (seconds) for each object in the AUT
        self.autRounds = 1                  # number of objects to be presented (number of elements in autGroup)
        # two counterbalanced object pairs (a/b) for the AUT
        self.autGroup1a = ["BRICK"]
        self.autGroup1b = ["PAPERCLIP"]
        self.autGroup2a = ["BUTTON"]
        self.autGroup2b = ["NEWSPAPER"]
Exemplo n.º 2
0
Arquivo: Sample1.py Projeto: sccn/SNAP
 def __init__(self):
     """Expose the configurable trial-count and probability parameters."""
     LatentModule.__init__(self)

     # defaults; a study configuration may override these:
     self.a_probability = 0.5  # chance that an "A" appears instead of a "U"
     self.trials1 = 5          # trials in the first part
     self.trials2 = 5          # trials in the second part
Exemplo n.º 3
0
Arquivo: Sample1.py Projeto: s2t2/SNAP
    def __init__(self):
        """Set default parameters for the two-part trial sequence."""
        LatentModule.__init__(self)

        # number of trials in each of the two parts
        self.trials1 = 5
        self.trials2 = 5
        # probability that an "A" appears instead of a "U"
        self.a_probability = 0.5
Exemplo n.º 4
0
    def __init__(self):
        """Initialize the object/animal task.

        Loads the word stimuli, partitions the animal and object indices
        into ``self.n_blocks`` blocks, and precomputes a randomized
        presentation order (``self.stimuli_order``) plus the ground-truth
        labels (``self.target``).
        """
        LatentModule.__init__(self)
        self.conditions = ['object', 'animal']

        # Load stimuli (one word per line; the first n_animals lines are
        # animals, the rest objects -- TODO confirm against stimuli.txt)
        with open('studies/obj_animals/stimuli.txt', 'r') as f:
            self.stimuli = f.readlines()

        n_animals = 64
        n_objects = 464

        self.n_blocks = 4  # Divide the stimuli into this number of blocks

        animals_list = range(0, n_animals)
        objects_list = range(n_animals, n_animals + n_objects)

        animals_blocks = []
        objects_blocks = []
        self.stimuli_order = []  # This list contains the final indices
        # Stimulus type per word (either 0 or 1, corresponding to auditory
        # and visual, or vice versa); populated elsewhere.
        self.av_type = []

        # Split the animals into two halves that alternate between blocks.
        tmp_animals_1 = random.sample(
            animals_list, 32)  # Choose 32 random samples from animals
        tmp_animals_2 = list(set(animals_list) - set(tmp_animals_1))

        for k in range(self.n_blocks):
            # even blocks reuse the first half, odd blocks the second half
            if k % 2 == 0:
                tmp_animals = tmp_animals_1
            else:
                tmp_animals = tmp_animals_2
            random.shuffle(tmp_animals)
            animals_blocks.append(tmp_animals)

            # BUGFIX: use floor division -- under Python 3 a plain '/'
            # yields a float and random.sample() raises TypeError on a
            # non-integer sample size (value is identical under Python 2).
            tmp_objects = random.sample(objects_list,
                                        n_objects // self.n_blocks)
            objects_blocks.append(tmp_objects)
            objects_list = list(set(objects_list) - set(tmp_objects))

            tmp_animals_objects = animals_blocks[k] + objects_blocks[k]
            random.shuffle(tmp_animals_objects)
            # Re-shuffle until no element repeats back-to-back.
            # NOTE(review): all indices within a block are distinct, so
            # groupby() can never see a run longer than 1 here; if the
            # intent was "no two *animals* in a row", the elements need to
            # be mapped to categories first -- TODO confirm intent.
            max_repeats = max(
                len(list(v))
                for g, v in itertools.groupby(tmp_animals_objects))
            while max_repeats > 1:
                random.shuffle(tmp_animals_objects)
                max_repeats = max(
                    len(list(v))
                    for g, v in itertools.groupby(tmp_animals_objects))
            self.stimuli_order.append(tmp_animals_objects)

        # Flatten the per-block lists into one flat presentation order
        self.stimuli_order = [
            item for sublist in self.stimuli_order for item in sublist
        ]
        # Ground truth per stimulus index: 1 = animal, 0 = object
        self.target = [1 for k in range(1, n_animals + 1)
                       ] + [0 for k in range(1, n_objects + 1)]
        self.pause = 0.5
        self.score = 0  # Total score
Exemplo n.º 5
0
 def __init__(self):
     """Configure the speech task (overt/covert/control) and load its stimuli."""
     LatentModule.__init__(self)

     self.tasks = ['overt', 'covert', 'control']
     self.conditions = ['visual', 'auditory']
     self.n_blocks = 4
     self.pause = 0.5
     # word stimuli, one per line
     with open('studies/speech/stimuli.txt', 'r') as f:
         self.stimuli = f.readlines()
Exemplo n.º 6
0
 def __init__(self):
     """Set up the flanker task: conditions, block/run counts, timing, images."""
     LatentModule.__init__(self)
     self.conditions = ['congruent', 'incongruent']  # congruent/incongruent right/left
     self.n_blocks = 3   # each block holds 14 unique trials
     self.n_runs = 10    # number of runs (i.e. the number of random blocks)

     # timing (seconds)
     self.stim_duration = 0.15
     # candidate durations for the pre-stimulus (flanker-only) display
     self.pre_duration = [0, 0.033, 0.066, 0.1, 0.133, 0.166, 0.2]
     self.trial_duration = 1.65

     # accuracy thresholds for congruent and incongruent trials
     self.thresholds = [0.9, 0.6]

     # stimulus bitmaps: targets (R/L x congruent/incongruent) and flanker-only images
     self.stimulus_images = ['target_R_C.bmp', 'target_L_C.bmp', 'target_R_I.bmp', 'target_L_I.bmp']
     self.pre_images = ['flankers_R.bmp', 'flankers_L.bmp']
Exemplo n.º 7
0
    def __init__(self):
        """Configure the overt/covert speech task and load its word stimuli."""
        LatentModule.__init__(self)

        self.tasks = ['overt', 'covert', 'none']
        self.conditions = ['visual', 'auditory']
        # word stimuli, one per line
        with open('studies/speech/stimuli.txt', 'r') as f:
            self.stimuli = f.readlines()

        # one block per task/condition combination
        self.n_blocks = len(self.tasks) * len(self.conditions)
        self.n_runs = 4
        self.pause = 0.75
Exemplo n.º 8
0
    def __init__(self):
        """Configure the awake/zone-out movie paradigm."""
        LatentModule.__init__(self)

        # configurable timing parameters (seconds):
        self.awake_duration = 10             # length of the awake condition
        self.snooze_duration = 10            # length of the zone-out condition
        self.wakeup_sound = 'nice_bell.wav'  # cue marking the end of the zone-out condition
        self.transition_duration = 1.5       # grace period for the subject to come back

        # movie playback window
        self.moviefile = 'big\\alpha_movie2.avi'
        self.begintime = 0.0    # playback start offset into the movie (seconds)
        self.endtime = 3.5*60   # playback end offset into the movie (seconds)
Exemplo n.º 9
0
    def __init__(self):
        """Initialize the object/animal task: load the word stimuli and
        precompute a randomized per-block presentation order and the
        ground-truth labels.
        """
        LatentModule.__init__(self)
        self.conditions = ['object', 'animal']

        # Load stimuli (one word per line; the first n_animals lines are
        # animals, the rest objects -- TODO confirm against stimuli.txt)
        with open('studies/obj_animals/stimuli.txt', 'r') as f:
            self.stimuli = f.readlines()

        n_animals = 64
        n_objects = 464

        self.n_blocks = 4  # Divide the stimuli into this number of blocks

        animals_list = range(0, n_animals)
        objects_list = range(n_animals, n_animals + n_objects)

        animals_blocks = []
        objects_blocks = []
        self.stimuli_order = []  # This list contains the final indices
        self.av_type = []  # Stimulus type per word (0 or 1 = auditory/visual, or vice versa); filled elsewhere

        # Split the animals into two halves that alternate between blocks
        tmp_animals_1 = random.sample(animals_list, 32)  # Choose 32 random samples from animals
        tmp_animals_2 = list(set(animals_list) - set(tmp_animals_1))

        for k in range(self.n_blocks):
            # even blocks reuse the first half, odd blocks the second half
            if k % 2 == 0:
                tmp_animals = tmp_animals_1
            else:
                tmp_animals = tmp_animals_2
            random.shuffle(tmp_animals)
            animals_blocks.append(tmp_animals)

            # BUGFIX: floor division -- under Python 3 a plain '/' yields a
            # float and random.sample() raises TypeError on a non-integer
            # sample size (the value is identical under Python 2).
            tmp_objects = random.sample(objects_list, n_objects//self.n_blocks)
            objects_blocks.append(tmp_objects)
            objects_list = list(set(objects_list) - set(tmp_objects))

            tmp_animals_objects = animals_blocks[k] + objects_blocks[k]
            random.shuffle(tmp_animals_objects)
            # Re-shuffle until no element repeats back-to-back.
            # NOTE(review): all indices within a block are distinct, so this
            # groupby() check is always satisfied on the first pass; if the
            # intent was "no two *animals* in a row", elements must be mapped
            # to categories before grouping -- TODO confirm intent.
            max_repeats = max(len(list(v)) for g, v in itertools.groupby(tmp_animals_objects))
            while max_repeats > 1:
                random.shuffle(tmp_animals_objects)
                max_repeats = max(len(list(v)) for g, v in itertools.groupby(tmp_animals_objects))
            self.stimuli_order.append(tmp_animals_objects)

        # Flatten the per-block lists into one flat presentation order
        self.stimuli_order = [item for sublist in self.stimuli_order for item in sublist]
        # Ground truth per stimulus index: 1 = animal, 0 = object
        self.target = [1 for k in range(1, n_animals + 1)] + [0 for k in range(1, n_objects + 1)]
        self.pause = 0.5
        self.score = 0  # Total score
Exemplo n.º 10
0
    def __init__(self):
        """Configure the letter-presentation task (training plus main blocks)."""
        LatentModule.__init__(self)

        # block structure
        self.training_trials = 10   # number of trials in the training block
        self.trials_per_block = 30  # number of trials in each main block
        self.blocks = 3             # number of main blocks to present
        self.pause_duration = 45    # pause between blocks (seconds)

        # per-trial timing (seconds)
        self.fixation_duration = 1  # duration for which the fixation cross is displayed
        self.letter_duration = 3    # duration for which the letter is shown
        self.wait_duration = 1      # blank wait at the end of each trial (nothing displayed)

        # appearance / stimuli
        self.letter_scale = 0.3
        self.stimulus_set = ['L','R','O']   # the set of letters to present
Exemplo n.º 11
0
    def __init__(self):
        """Configure the online motor-imagery BCI block: signal-processing
        rates, trial timing, on-screen text, and EEG channel selection.
        """
        LatentModule.__init__(self)

        # --- online signal-processing parameters ---
        self.nsamplesOrig = 5000           # samples to collect for online methods (earlier value 100, worth 0.5 s of data)
        self.srateToBe = 100    # sampling rate at which the processing is done
        self.ratDec = 5   # first decimation factor (downsampling is done in two steps; was 10)
        self.ratDec1 = 10   # second decimation factor (was 10)

        self.bands = np.asarray([7, 30]) # frequency range (Hz) to filter in

        # --- trial structure ---
        self.trialsInBlock = 10
        self.imageryTime = 3    # seconds during which the subject performs imagery
        self.wait_duration = 5  # seconds between two consecutive trials

        self.discardTime = 0.12 # estimated delay of the EEG system (seconds)

        # --- feedback-bar scaling ---
        self.maxVar = 0.5     # used to adjust the bar heights
        self.distVar = 0.02

        # --- on-screen text ---
        self.fontFace = "arial.ttf"
        self.fontSize = 0.07
        self.textColor = (.8, .8, .5, 1)
        self.blockPauseText = "Please take a break, and tell the experimenter \n when you are ready to continue."
        self.blockEndText = "You have completed this block of the experiment. \n Please take a break, and tell the experimenter \n when you are ready to move on to the next part."

        self.finalText = "You have completed the experiment. \n Well done!."

        # --- cue/stimulus state ---
        self.stimulus_set = ['L','R']
        self.rectHeight = [0,0]
        self.cue = None
        self.cue1 = None
        self.cue2 = None

        ## Set up variables for EEG
        self.eegsystem = 'BrainProducts'
        # Channel index lists: chans[0] is the channel to use, chans[1] the
        # re-reference channel.  NOTE(review): marked "TO CHECK" by the
        # original author; previously considered alternatives were
        # [8,42,18,12,47,51] (left) and [9,14,19,43,48,53] (right).
        self.chansLeft = [12, 42, 51]
        self.chansRight = [14, 43, 53]
        self.chansAll = [12, 42, 51, 14, 43, 53]
        #self.numberOfChannels = 64

        self.size = 0.35  # size of the arrows representing left and right trials

        # presumably the filter numerator/denominator coefficients (b, a),
        # computed later from self.bands -- TODO confirm
        self.b = []
        self.a = []
Exemplo n.º 12
0
    def __init__(self):
        """Initialize networking defaults and input-device state for one client."""
        LatentModule.__init__(self)
        # networking: the master connects to this client on this port
        self.client_port = 3663
        self.client_id = 0  # 0 for the first client, 1 for the second

        # callbacks registered by the master once it connects
        self.keydown_mastercallback = None
        self.keyup_mastercallback = None
        self.joymove_mastercallback = None
        self.speech_mastercallback = None
        self.callbacks_connected = False
        # set when both clients run on one machine -- they then need to use
        # different input peripherals
        self.localtesting = False
        # debugging aid: under local testing only client 0 may use speech
        self.allow_speech = not self.localtesting or self.client_id == 0

        # joystick state, updated as input events arrive
        self.joystick = None
        self.last_x = 0
        self.last_y = 0
        self.last_u = 0
        self.last_v = 0
        self.last_buttons = ()
Exemplo n.º 13
0
    def __init__(self):
        """Set up per-client networking defaults and input-device bookkeeping."""
        LatentModule.__init__(self)
        self.client_port = 3663  # port where this client waits for connections from the master
        self.client_id = 0       # 0 for the first client, 1 for the second

        # master-registered input callbacks (connected later)
        self.keydown_mastercallback = None
        self.keyup_mastercallback = None
        self.joymove_mastercallback = None
        self.speech_mastercallback = None
        self.callbacks_connected = False
        # if both clients run on one machine they need different input peripherals
        self.localtesting = False
        # this is for debugging
        self.allow_speech = (self.client_id == 0) if self.localtesting else True

        # most recent joystick readings
        self.joystick = None
        self.last_x = 0
        self.last_y = 0
        self.last_u = 0
        self.last_v = 0
        self.last_buttons = ()
Exemplo n.º 14
0
Arquivo: DAS1a.py Projeto: sccn/SNAP
    def __init__(self):
        """Configure the DAS1a audio/visual attention-switching study.

        Sets up the visual and auditory stimulus presenters, the block
        design (counts of strong/weak/ruminate/rest blocks), the switching
        schedule, the stimulus material with its target/nontarget
        probabilities and reward schedule, the rest/math tasks, scoring,
        and miscellaneous response-control parameters.  All values here are
        defaults; they may be overridden by the study configuration.
        """
        LatentModule.__init__(self)
    
        # === settings for the visual stimulus presenters ===
        
        # a center presenter (always an image)
        self.img_center_params = {'pos':[0,0,0.3],'clearafter':1.5,'scale':0.1}
        # two different left presenters - either an image or a text box, depending on block
        self.img_left_params = {'pos':[-1.25,0,0.3],'clearafter':1,'color':[1, 1, 1, 0.1],'scale':0.1}
        self.txt_left_params = {'pos':[-1.25,0.3],'clearafter':2,'framecolor':[0, 0, 0, 0],'scale':0.1}
        # two different right presenters - either an image or a text box, depending on block
        self.img_right_params = {'pos':[1.25,0,0.3],'clearafter':1,'color':[1, 1, 1, 0.1],'scale':0.1}
        self.txt_right_params = {'pos':[1.25,0.3],'clearafter':2,'framecolor':[0, 0, 0, 0],'scale':0.1}        
        
        # === settings for the auditory stimulus presenters ===
        
        # there is a left, a right, and a center location
        self.aud_left_params = {'direction':-1}
        self.aud_right_params = {'direction':1}
        self.aud_center_params = {'direction':0}
        
        # === settings for the block design ===
        
        # parameters of the block configuration
        self.num_blocks = 42                # total number of blocks of the following types
        self.fraction_avstrong = 12         # audio/visual, strong separation of target probability/reward
        self.fraction_avweak = 12           # audio/visual, weak separation of target probability/reward
        self.fraction_avruminate = 12       # audio/visual with added rumination (here: math) tasks
        self.fraction_rest = 3              # rest block
        self.fraction_restmath = 3          # rest block with math tasks

        # === settings for the A/V switching design ===
        
        # switch layout for audio/visual blocks
        self.switches_per_block = lambda: int(random.uniform(3,3)) # number of switches per a/v block (random draw), was: 7,13
        self.switches_withinmodality = 1./3                     # probability of a within-modality switch stimulus
        self.switches_outofmodality = 1./3                      # probability of a (salient) out-of-modality switch stimulus
        self.switches_bimodally = 1./3                          # probability of a bimodally delivered switch stimulus
        self.av_switch_interval = lambda: random.uniform(25,35) # inter-switch interval for the audio/visual condition, was: 25,35
        self.switch_time = 1                                    # duration for which the switch instruction is being displayed        

        # === settings for the stimulus material ===

        # this is formatted as follows:
        # {'type of block1 ':{'type of presenter 1': [['targets if focused',...],['nontargets if focused',...],['optional targets if not focused'],['optional nontargets if not focused']]
        #                     'type of presenter 2': [['targets if focused',...],['nontargets if focused',...],['optional targets if not focused'],['optional nontargets if not focused']]},        
        #  'type of block 2':{'type of presenter 1': [['targets if focused',...],['nontargets if focused',...],['optional targets if not focused'],['optional nontargets if not focused']]
        #                     'type of presenter 2': [['targets if focused',...],['nontargets if focused',...],['optional targets if not focused'],['optional nontargets if not focused']]}}
        self.stim_material = {'avstrong': {'center_aud':[['Target.'],['nothing special','blah blah','monkey','nothing to report'],['TARGET!']],
                                           'center_vis':[['warning.png'],['onerust.png','tworust.png','threerust.png'],['salient_warning.png']],
                                           'side_img':[['rebel.png'],['onerust.png','tworust.png','threerust.png']],
                                           'side_txt':[['Target'],['Frankfurt','Berlin','Calgary','Barcelona']],
                                           'side_spc':[['Target'],['Frankfurt','Berlin','Calgary','Barcelona']],
                                           'side_snd':[['xHyprBlip.wav'],['xClick01.wav']]},
                              'avweak': {'center_aud':[['Target.'],['nothing special','blah blah','monkey','nothing to report']],
                                           'center_vis':[['warning.png'],['onerust.png','tworust.png','threerust.png']],
                                           'side_img':[['rebel.png'],['onerust.png','tworust.png','threerust.png']],
                                           'side_txt':[['Target'],['Frankfurt','Berlin','Calgary','Barcelona']],
                                           'side_spc':[['Target'],['Frankfurt','Berlin','Calgary','Barcelona']],
                                           'side_snd':[['xHyprBlip.wav'],['xClick01.wav']]}
                              }
                
        # probability distribution over locations, if a target should be presented
        self.target_probabilities = {'avstrong': {'center_aud':[0.4,0.1], # this is [probability-if-focused, probability-if-unfocused] 
                                                  'center_vis':[0.4,0.1], 
                                                  'side_img':[0.25,0.0],   # note that there are 2 locations with side_* (left/right) and that usually only one set of these is active at a given time
                                                  'side_txt':[0.25,0.0],   # also note that all the focused numbers one modality plus the unfocused numbers of the other modality should add up to 1.0
                                                  'side_spc':[0.25,0.0],   # (however, they will be automatically renormalized if necessary)
                                                  'side_snd':[0.25,0.0]},
                                     'avweak': {'center_aud':[0.4,0.2], 
                                                'center_vis':[0.4,0.2], 
                                                'side_img':[0.2,0.0],
                                                'side_txt':[0.2,0.0],
                                                'side_spc':[0.2,0.0],
                                                'side_snd':[0.2,0.0]}}
        
        # probability distribution over locations, if a non-target should be presented
        self.nontarget_probabilities = {'avstrong': {'center_aud':[0.3,0.3], 
                                                  'center_vis':[0.3,0.3], 
                                                  'side_img':[0.2,0.0],
                                                  'side_txt':[0.2,0.0],
                                                  'side_spc':[0.2,0.0],
                                                  'side_snd':[0.2,0.0]},
                                     'avweak': {'center_aud':[0.3,0.1], 
                                                'center_vis':[0.3,0.1], 
                                                'side_img':[0.2,0.1],
                                                'side_txt':[0.2,0.1],
                                                'side_spc':[0.2,0.1],
                                                'side_snd':[0.2,0.1]}}
        
        # rewards and penalities for target hits/misses
        self.rewards_penalties = {'avstrong': {'center_aud':['high-gain','high-loss','low-gain','low-loss'], # this is [score-if-focused-and-hit,score-if-focused-and-missed,score-if-nonfocused-and-hit,score-if-nonfocused-and-missed] 
                                               'center_vis':['high-gain','high-loss','low-gain','low-loss'], 
                                               'side_img':['low-gain','low-loss','low-gain','low-loss'],
                                               'side_txt':['low-gain','low-loss','low-gain','low-loss'],
                                               'side_spc':['low-gain','low-loss','low-gain','low-loss'],
                                               'side_snd':['low-gain','low-loss','low-gain','low-loss']},
                                     'avweak': {'center_aud':['high-gain','high-loss','high-gain','low-loss'], 
                                                'center_vis':['high-gain','high-loss','low-gain','low-loss'], 
                                                'side_img':['low-gain','low-loss','low-gain','low-loss'],
                                                'side_txt':['low-gain','low-loss','low-gain','low-loss'],
                                                'side_spc':['low-gain','low-loss','low-gain','low-loss'],
                                                'side_snd':['low-gain','low-loss','low-gain','low-loss']}}
        
        # auditory and visual switch stimuli, in and out of modality 
        self.vis_switch_inmodality = 'switch.png'
        self.vis_switch_outmodality = 'switch-target.png'
        self.aud_switch_inmodality = 'Switch'
        self.aud_switch_outmodality = 'Hey, Switch NOW!'

        # === settings for the stimulus appearance ===
        
        # target layout for audio/visual blocks
        self.target_probability = 0.2                           # overall probability of an event being a target in the a/v condition        
        self.target_focus_prob_strong = 0.9                     # probability of a given target appearing in the focused modality, if strong separation
                                                                # (1 - this number) for a target appearing in the non-focused modality 
        self.target_focus_prob_weak = 0.6                       # probability of a given target appearing in the focused modality, if weak separation
                                                                # (1 - this number) for a target appearing in the non-focused modality
        self.prob_salient = 0.2                                 # probability that a target appears at the salient location (center)
        self.prob_side1 = 0.5                                   # probability that a target appears at the first side location (side locations may be swapped from block to block)
        self.prob_side2 = 0.3                                   # probability that a target appears a the second side location
        
        # stimulus layout for audio/visual blocks
        self.av_stimulus_interval = lambda: random.uniform(0.5,4) # inter-stimulus interval for the audio/visual condition        

        # === settings for the rest & math tasks ===
        
        self.rest_duration = lambda: random.uniform(45,75)      # the duration of the rest condition
        self.math_params = {'difficulty': 1,                    # difficulty level of the problems (determines the size of involved numbers)
                            'problem_interval': lambda: random.uniform(3,12), # delay before a new problem appears after the previous one has been solved
                            'response_timeout': 10.0,           # time within which the subject may respond to a problem           
                            'numpad_topleft': [1.1,-0.3],        # top-left corner of the numpad
                            'numpad_gridspacing': [0.21,-0.21],   # spacing of the button grid
                            'numpad_buttonsize': [1,1]          # size of the buttons
                            }

        # === settings for scoring ===

        # scoring parameters
        self.scoring_params = {'initial_score': 250,                                                    # the initial score at the beginning of the experiment
                               'score_image_params': {'scale':0.12,'pos':[-1.25,0.5,0.5],'clearafter':2},   # properties of the score image
                               'score_sound_params': {'direction':-0.7,'volume':0.3},                     # properties of the score sound source
                               'score_responses': {'high-gain':[25,'happy_star.png','xDingLing.wav'],   # [points, image, soundfile] for each of the ...
                                                   'low-gain':[5,'star.png','ding.wav'],                # ... possible scoring conditions
                                                   'low-loss':[-5,'worried_smiley.png','xBuzz01.wav'],
                                                   'high-loss':[-25,'sad_smiley.png','slap.wav']}}

        # === settings for miscellaneous parameters ===
        
        # response control
        self.response_window = 3                                # response time window in seconds
        self.response_event = 'target-response'                 # response event/message type 
        self.button_params = {'frameSize':(-3,3,-0.5,1),'pos':(-1.25,0,-0.92),'text':"Target",'scale':.1,'text_font':loader.loadFont('arial.ttf')}     # parameters of the target button
        self.voiceindicator_params = {'pos':(0,0,-0.925),'scale':0.1,'color':[1, 1, 1, 1]}                               # parameters of the voice indicator image
        self.allow_speech = False

        # misc parameters
        self.randseed = 34214                                       # initial randseed for the experiment (NOTE: should be random!)
        self.scroller_params = {'pos':[-1.8,-0.5],'width':22,'clearafter':4}   # a text box for debugging, output, etc
        self.movers_params = {'frame':[0.35,0.65,0.1,0.5],           # parameters of the moving-items process
                              'trials':500,
                              'target_probability':0}
        
        self.developer = True                                   # if true, some time-consuming instructions are skipped
Exemplo n.º 15
0
    def __init__(self):
        """Initialize; self.bci is meant to be driven externally by a BCI."""
        LatentModule.__init__(self)

        # configurable parameter controlled by a BCI (nominal range 1..2)
        self.bci = 1.5
Exemplo n.º 16
0
 def __init__(self):
     # Delegate to the LatentModule base class; no extra configuration here.
     LatentModule.__init__(self)
Exemplo n.º 17
0
    def __init__(self):
        """Initialize the module with its single BCI-driven parameter."""
        LatentModule.__init__(self)

        # default for the configurable parameter; a BCI is expected to
        # control this value (between 1 and 2) at run time
        self.bci = 1.5
Exemplo n.º 18
0
Arquivo: Sample3.py Projeto: s2t2/SNAP
 def __init__(self):
     """Initialize the module with its default speed setting."""
     LatentModule.__init__(self)

     # configurable parameter:
     self.speed = 2   # default speed
Exemplo n.º 19
0
 def __init__(self):
     # Delegate to the LatentModule base class; this module adds no state.
     LatentModule.__init__(self)
Exemplo n.º 20
0
Arquivo: DAS1a.py Projeto: s2t2/SNAP
    def __init__(self):
        LatentModule.__init__(self)

        # === settings for the visual stimulus presenters ===

        # a center presenter (always an image)
        self.img_center_params = {
            'pos': [0, 0, 0.3],
            'clearafter': 1.5,
            'scale': 0.1
        }
        # two different left presenters - either an image or a text box, depending on block
        self.img_left_params = {
            'pos': [-1.25, 0, 0.3],
            'clearafter': 1,
            'color': [1, 1, 1, 0.1],
            'scale': 0.1
        }
        self.txt_left_params = {
            'pos': [-1.25, 0.3],
            'clearafter': 2,
            'framecolor': [0, 0, 0, 0],
            'scale': 0.1
        }
        # two different right presenters - either an image or a text box, depending on block
        self.img_right_params = {
            'pos': [1.25, 0, 0.3],
            'clearafter': 1,
            'color': [1, 1, 1, 0.1],
            'scale': 0.1
        }
        self.txt_right_params = {
            'pos': [1.25, 0.3],
            'clearafter': 2,
            'framecolor': [0, 0, 0, 0],
            'scale': 0.1
        }

        # === settings for the auditory stimulus presenters ===

        # --- All attributes below are experiment configuration constants set once
        # --- at construction time; nothing here has side effects beyond assignment.

        # there is a left, a right, and a center location
        # (direction is the audio panning: -1 = left, +1 = right, 0 = center)
        self.aud_left_params = {'direction': -1}
        self.aud_right_params = {'direction': 1}
        self.aud_center_params = {'direction': 0}

        # === settings for the block design ===

        # parameters of the block configuration
        # NOTE(review): the fraction_* values below sum to 42 = num_blocks, so they
        # appear to be absolute block counts rather than fractions — confirm with
        # the code that consumes them.
        self.num_blocks = 42  # total number of blocks of the following types
        self.fraction_avstrong = 12  # audio/visual, strong separation of target probability/reward
        self.fraction_avweak = 12  # audio/visual, weak separation of target probability/reward
        self.fraction_avruminate = 12  # audio/visual with added rumination (here: math) tasks
        self.fraction_rest = 3  # rest block
        self.fraction_restmath = 3  # rest block with math tasks

        # === settings for the A/V switching design ===

        # switch layout for audio/visual blocks
        # NOTE: uniform(3, 3) is a degenerate draw — this lambda always yields 3;
        # the range was deliberately pinned down from the original 7..13.
        self.switches_per_block = lambda: int(random.uniform(
            3, 3))  # number of switches per a/v block (random draw), was: 7,13
        self.switches_withinmodality = 1. / 3  # probability of a within-modality switch stimulus
        self.switches_outofmodality = 1. / 3  # probability of a (salient) out-of-modality switch stimulus
        self.switches_bimodally = 1. / 3  # probability of a bimodally delivered switch stimulus
        self.av_switch_interval = lambda: random.uniform(
            25, 35
        )  # inter-switch interval (seconds) for the audio/visual condition, was: 25,35
        self.switch_time = 1  # duration for which the switch instruction is being displayed

        # === settings for the stimulus material ===

        # this is formatted as follows:
        # {'type of block1 ':{'type of presenter 1': [['targets if focused',...],['nontargets if focused',...],['optional targets if not focused'],['optional nontargets if not focused']]
        #                     'type of presenter 2': [['targets if focused',...],['nontargets if focused',...],['optional targets if not focused'],['optional nontargets if not focused']]},
        #  'type of block 2':{'type of presenter 1': [['targets if focused',...],['nontargets if focused',...],['optional targets if not focused'],['optional nontargets if not focused']]
        #                     'type of presenter 2': [['targets if focused',...],['nontargets if focused',...],['optional targets if not focused'],['optional nontargets if not focused']]}}
        # Entries are spoken text (center_aud), image files (*_vis/img), display
        # text (side_txt/spc), or sound files (side_snd) — presumably resolved
        # from the experiment's media directory; verify against the presenters.
        self.stim_material = {
            'avstrong': {
                'center_aud': [['Target.'],
                               [
                                   'nothing special', 'blah blah', 'monkey',
                                   'nothing to report'
                               ], ['TARGET!']],
                'center_vis': [['warning.png'],
                               ['onerust.png', 'tworust.png', 'threerust.png'],
                               ['salient_warning.png']],
                'side_img': [['rebel.png'],
                             ['onerust.png', 'tworust.png', 'threerust.png']],
                'side_txt': [['Target'],
                             ['Frankfurt', 'Berlin', 'Calgary', 'Barcelona']],
                'side_spc': [['Target'],
                             ['Frankfurt', 'Berlin', 'Calgary', 'Barcelona']],
                'side_snd': [['xHyprBlip.wav'], ['xClick01.wav']]
            },
            'avweak': {
                'center_aud': [['Target.'],
                               [
                                   'nothing special', 'blah blah', 'monkey',
                                   'nothing to report'
                               ]],
                'center_vis': [['warning.png'],
                               ['onerust.png', 'tworust.png',
                                'threerust.png']],
                'side_img': [['rebel.png'],
                             ['onerust.png', 'tworust.png', 'threerust.png']],
                'side_txt': [['Target'],
                             ['Frankfurt', 'Berlin', 'Calgary', 'Barcelona']],
                'side_spc': [['Target'],
                             ['Frankfurt', 'Berlin', 'Calgary', 'Barcelona']],
                'side_snd': [['xHyprBlip.wav'], ['xClick01.wav']]
            }
        }

        # probability distribution over locations, if a target should be presented
        self.target_probabilities = {
            'avstrong': {
                'center_aud': [
                    0.4, 0.1
                ],  # this is [probability-if-focused, probability-if-unfocused]
                'center_vis': [0.4, 0.1],
                'side_img': [
                    0.25, 0.0
                ],  # note that there are 2 locations with side_* (left/right) and that usually only one set of these is active at a given time
                'side_txt': [
                    0.25, 0.0
                ],  # also note that all the focused numbers of one modality plus the unfocused numbers of the other modality should add up to 1.0
                'side_spc': [
                    0.25, 0.0
                ],  # (however, they will be automatically renormalized if necessary)
                'side_snd': [0.25, 0.0]
            },
            'avweak': {
                'center_aud': [0.4, 0.2],
                'center_vis': [0.4, 0.2],
                'side_img': [0.2, 0.0],
                'side_txt': [0.2, 0.0],
                'side_spc': [0.2, 0.0],
                'side_snd': [0.2, 0.0]
            }
        }

        # probability distribution over locations, if a non-target should be presented
        # (same [if-focused, if-unfocused] layout as target_probabilities above)
        self.nontarget_probabilities = {
            'avstrong': {
                'center_aud': [0.3, 0.3],
                'center_vis': [0.3, 0.3],
                'side_img': [0.2, 0.0],
                'side_txt': [0.2, 0.0],
                'side_spc': [0.2, 0.0],
                'side_snd': [0.2, 0.0]
            },
            'avweak': {
                'center_aud': [0.3, 0.1],
                'center_vis': [0.3, 0.1],
                'side_img': [0.2, 0.1],
                'side_txt': [0.2, 0.1],
                'side_spc': [0.2, 0.1],
                'side_snd': [0.2, 0.1]
            }
        }

        # rewards and penalties for target hits/misses; the four condition labels
        # map onto concrete point values via scoring_params['score_responses'] below
        self.rewards_penalties = {
            'avstrong': {
                'center_aud': [
                    'high-gain', 'high-loss', 'low-gain', 'low-loss'
                ],  # this is [score-if-focused-and-hit,score-if-focused-and-missed,score-if-nonfocused-and-hit,score-if-nonfocused-and-missed]
                'center_vis':
                ['high-gain', 'high-loss', 'low-gain', 'low-loss'],
                'side_img': ['low-gain', 'low-loss', 'low-gain', 'low-loss'],
                'side_txt': ['low-gain', 'low-loss', 'low-gain', 'low-loss'],
                'side_spc': ['low-gain', 'low-loss', 'low-gain', 'low-loss'],
                'side_snd': ['low-gain', 'low-loss', 'low-gain', 'low-loss']
            },
            'avweak': {
                'center_aud':
                ['high-gain', 'high-loss', 'high-gain', 'low-loss'],
                'center_vis':
                ['high-gain', 'high-loss', 'low-gain', 'low-loss'],
                'side_img': ['low-gain', 'low-loss', 'low-gain', 'low-loss'],
                'side_txt': ['low-gain', 'low-loss', 'low-gain', 'low-loss'],
                'side_spc': ['low-gain', 'low-loss', 'low-gain', 'low-loss'],
                'side_snd': ['low-gain', 'low-loss', 'low-gain', 'low-loss']
            }
        }

        # auditory and visual switch stimuli, in and out of modality
        # (vis_* are image files; aud_* are texts, presumably spoken/synthesized)
        self.vis_switch_inmodality = 'switch.png'
        self.vis_switch_outmodality = 'switch-target.png'
        self.aud_switch_inmodality = 'Switch'
        self.aud_switch_outmodality = 'Hey, Switch NOW!'

        # === settings for the stimulus appearance ===

        # target layout for audio/visual blocks
        self.target_probability = 0.2  # overall probability of an event being a target in the a/v condition
        self.target_focus_prob_strong = 0.9  # probability of a given target appearing in the focused modality, if strong separation
        # (1 - this number) for a target appearing in the non-focused modality
        self.target_focus_prob_weak = 0.6  # probability of a given target appearing in the focused modality, if weak separation
        # (1 - this number) for a target appearing in the non-focused modality
        self.prob_salient = 0.2  # probability that a target appears at the salient location (center)
        self.prob_side1 = 0.5  # probability that a target appears at the first side location (side locations may be swapped from block to block)
        self.prob_side2 = 0.3  # probability that a target appears at the second side location

        # stimulus layout for audio/visual blocks
        self.av_stimulus_interval = lambda: random.uniform(
            0.5, 4)  # inter-stimulus interval (seconds) for the audio/visual condition

        # === settings for the rest & math tasks ===

        self.rest_duration = lambda: random.uniform(
            45, 75)  # the duration (seconds) of the rest condition
        self.math_params = {
            'difficulty':
            1,  # difficulty level of the problems (determines the size of involved numbers)
            'problem_interval': lambda: random.uniform(
                3, 12
            ),  # delay before a new problem appears after the previous one has been solved
            'response_timeout':
            10.0,  # time (seconds) within which the subject may respond to a problem
            'numpad_topleft': [1.1, -0.3],  # top-left corner of the numpad
            'numpad_gridspacing': [0.21, -0.21],  # spacing of the button grid
            'numpad_buttonsize': [1, 1]  # size of the buttons
        }

        # === settings for scoring ===

        # scoring parameters
        self.scoring_params = {
            'initial_score':
            250,  # the initial score at the beginning of the experiment
            'score_image_params': {
                'scale': 0.12,
                'pos': [-1.25, 0.5, 0.5],
                'clearafter': 2
            },  # properties of the score image
            'score_sound_params': {
                'direction': -0.7,
                'volume': 0.3
            },  # properties of the score sound source
            'score_responses': {
                'high-gain':
                [25, 'happy_star.png', 'xDingLing.wav'
                 ],  # [points, image, soundfile] for each of the ...
                'low-gain': [5, 'star.png',
                             'ding.wav'],  # ... possible scoring conditions
                'low-loss': [-5, 'worried_smiley.png', 'xBuzz01.wav'],
                'high-loss': [-25, 'sad_smiley.png', 'slap.wav']
            }
        }

        # === settings for miscellaneous parameters ===

        # response control
        self.response_window = 3  # response time window in seconds
        self.response_event = 'target-response'  # response event/message type
        # NOTE(review): `loader` is not defined in this file — presumably the
        # Panda3D global asset loader injected as a builtin; confirm.
        self.button_params = {
            'frameSize': (-3, 3, -0.5, 1),
            'pos': (-1.25, 0, -0.92),
            'text': "Target",
            'scale': .1,
            'text_font': loader.loadFont('arial.ttf')
        }  # parameters of the target button
        self.voiceindicator_params = {
            'pos': (0, 0, -0.925),
            'scale': 0.1,
            'color': [1, 1, 1, 1]  # RGBA
        }  # parameters of the voice indicator image
        self.allow_speech = False  # if true, spoken responses are permitted in addition to the button

        # misc parameters
        self.randseed = 34214  # initial randseed for the experiment (NOTE: should be random!)
        self.scroller_params = {
            'pos': [-1.8, -0.5],
            'width': 22,
            'clearafter': 4
        }  # a text box for debugging, output, etc
        self.movers_params = {
            'frame': [0.35, 0.65, 0.1,
                      0.5],  # parameters of the moving-items process
            'trials': 500,
            'target_probability': 0
        }

        self.developer = True  # if true, some time-consuming instructions are skipped