class LinearlyDecreasingN(LinearlyDecreasingAttribute):
    memory_decay_rate = traits.Tuple((-1., 0.5), desc="Initial and final value of the memory decay rate")
    memory_decay_rate_time = traits.Float(300, desc="Number of seconds to go from initial to final memory decay rate")

    def __init__(self, *args, **kwargs):
        super(LinearlyDecreasingN, self).__init__(*args, **kwargs)
        if 'memory_decay_rate' not in self.attrs:
            self.attrs.append('memory_decay_rate')
class LinearlyDecreasingHalfLife(LinearlyDecreasingAttribute):
    '''
    Specific case of LinearlyDecreasingAttribute for a linearly decreasing CLDA half-life
    '''
    half_life = traits.Tuple((450., 450.),
                             desc="Initial and final half life for CLDA")
    half_life_time = traits.Float(
        600, desc="Number of seconds to go from initial to final half life")

    def __init__(self, *args, **kwargs):
        super(LinearlyDecreasingHalfLife, self).__init__(*args, **kwargs)
        if 'half_life' not in self.attrs:
            self.attrs.append('half_life')
class LinearlyDecreasingAngAssist(LinearlyDecreasingAttribute):
    ''' 
    linearly decreasing angular assist -- for psi and ReHand
    '''
    rh_assist_level = traits.Tuple(
        (0.0, 0.0), desc='level of assist to apply to ang output')
    rh_assist_level_time = traits.Float(
        600,
        desc="Number of seconds to go from initial to minimum assist level")

    def __init__(self, *args, **kwargs):
        super(LinearlyDecreasingAngAssist, self).__init__(*args, **kwargs)
        if 'rh_assist_level' not in self.attrs:
            self.attrs.append('rh_assist_level')
class LinearlyDecreasingAssist(LinearlyDecreasingAttribute):
    '''
    Specific case of LinearlyDecreasingAttribute for a linearly decreasing assist parameter
    '''
    assist_level = traits.Tuple((0.0, 0.0),
                                desc="Level of assist to apply to BMI output")
    assist_level_time = traits.Float(
        600,
        desc="Number of seconds to go from initial to minimum assist level")

    def __init__(self, *args, **kwargs):
        super(LinearlyDecreasingAssist, self).__init__(*args, **kwargs)
        if 'assist_level' not in self.attrs:
            self.attrs.append('assist_level')
class RedGreen(Sequence, Pygame):
    status = dict(
        wait = dict(start_trial="pretrial", premature="penalty", stop=None),
        pretrial = dict(go="trial", premature="penalty"),
        trial = dict(correct="reward", timeout="penalty"),
        reward = dict(post_reward="wait"),
        penalty = dict(post_penalty="wait"),
    )

    colors = traits.Array(shape=(2, 3), value=[[255,0,0],[0,255,0]],
        desc="Pair of colors (c1, c2) where c* = [r,g,b] with values between 0 and 255")
    dot_radius = traits.Int(100, desc='dot size')
    delay_range = traits.Tuple((0.5, 5.), 
        desc='delay before switching to second color will be drawn from uniform distribution in this range')

    def _while_pretrial(self):
        import pygame
        self.surf.fill(self.background)
        right = [self.next_trial[0] + 1920, self.next_trial[1]]
        ts = time.time() - self.start_time
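        # init_dot, shrinklen and freq are assumed to be module-level constants:
        # the initial dot radius, the shrink duration (in s) and the blink period (in ms).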
        dotsize = (init_dot - self.dot_radius) * (shrinklen - min(ts, shrinklen)) + self.dot_radius
        if (np.mod(np.round(ts*1000),freq) < freq/2):
            pygame.draw.circle(self.surf, self.colors[0], self.next_trial, int(dotsize))
            pygame.draw.circle(self.surf, self.colors[0], right, int(dotsize))
        self.flip_wait()
    
    def _while_trial(self):
        import pygame
        self.surf.fill(self.background)
        right = [self.next_trial[0] + 1920, self.next_trial[1]]
        ts = time.time() - self.start_time
        if (np.mod(np.round(ts*1000),freq) < freq/2):
            pygame.draw.circle(self.surf, self.colors[1], self.next_trial, self.dot_radius)
            pygame.draw.circle(self.surf, self.colors[1], right, self.dot_radius)
        self.flip_wait()
    
    def _start_pretrial(self):
        self._wait_time = np.random.rand()*abs(self.delay_range[1]-self.delay_range[0]) + self.delay_range[0]
    
    def _test_correct(self, ts):
        return self.event is not None
    
    def _test_go(self, ts):
        return ts > self._wait_time + shrinklen

    def _test_premature(self, ts):
        return self.event is not None
class TestBoundary(Window):
    '''
    A very simple task that displays a marker at the specified screen locations.
    Useful for determining reasonable boundary values for targets.
    '''

    status = dict(wait=dict(stop=None))

    state = "wait"

    boundaries = traits.Tuple((-18, 18, -10, 10, -12, 12),
                              desc="x,y,z boundaries to display")

    def __init__(self, **kwargs):
        super(TestBoundary, self).__init__(**kwargs)
        # Create a small sphere for each of the 6 boundary marks
        self.xmin = Sphere(radius=.1, color=(.5, 0, .5, 1))
        self.add_model(self.xmin)
        self.xmax = Sphere(radius=.1, color=(.5, 0, .5, 1))
        self.add_model(self.xmax)
        self.ymin = Sphere(radius=.1, color=(.5, 0, .5, 1))
        self.add_model(self.ymin)
        self.ymax = Sphere(radius=.1, color=(.5, 0, .5, 1))
        self.add_model(self.ymax)
        self.zmin = Sphere(radius=.1, color=(.5, 0, .5, 1))
        self.add_model(self.zmin)
        self.zmax = Sphere(radius=.1, color=(.5, 0, .5, 1))
        self.add_model(self.zmax)

    def _start_wait(self):
        self.xmin.translate(self.boundaries[0], 0, 0, reset=True)
        self.xmin.attach()
        self.xmax.translate(self.boundaries[1], 0, 0, reset=True)
        self.xmax.attach()
        self.ymin.translate(0, self.boundaries[2], 0, reset=True)
        self.ymin.attach()
        self.ymax.translate(0, self.boundaries[3], 0, reset=True)
        self.ymax.attach()
        self.zmin.translate(0, 0, self.boundaries[4], reset=True)
        self.zmin.attach()
        self.zmax.translate(0, 0, self.boundaries[5], reset=True)
        self.zmax.attach()
        self.requeue()

    def _while_wait(self):
        self.draw_world()
class Autostart(traits.HasTraits):
    '''
    Automatically begins the trial from the wait state, 
    with a random interval drawn from `rand_start`
    '''
    rand_start = traits.Tuple((0., 0.), desc="Start interval")

    def _start_wait(self):
        '''
        At the start of the 'wait' state, determine how long to wait before starting the trial
        by drawing a sample from the rand_start interval
        '''
        s, e = self.rand_start
        self.wait_time = random.random() * (e - s) + s
        super(Autostart, self)._start_wait()

    def _test_start_trial(self, ts):
        '''
        Test if the required random wait time has passed
        '''
        return ts > self.wait_time and not self.pause
class MouseSpeller(LinearlyDecreasingAssist, BMILoop, Sequence, Window):
    ## Traits: runtime-configurable parameters
    reward_time = traits.Float(.5, desc="Length of juice reward")
    target_radius = traits.Float(1.3, desc="Radius of targets in cm")
    window_size = traits.Tuple((1920*2, 1080), desc='window size')
    hold_time = traits.Float(.2, desc="Length of hold required at targets")
    hold_penalty_time = traits.Float(1, desc="Length of penalty time for target hold error")
    timeout_time = traits.Float(10, desc="Time allowed to go between targets")
    timeout_penalty_time = traits.Float(1, desc="Length of penalty time for timeout error")
    max_attempts = traits.Int(10, desc='The number of attempts at a target before\
        skipping to the next one')
    max_error_penalty = traits.Float(3, desc='Max number of penalties (unrewarded backspaces) for false positive clicks')

    background = (0.5, 0.5, 0.5, 1)

    state = 'wait'

    sequence_generators = ['rand_key_seq_gen', 'mackenzie_soukoreff_corpus']

    status = dict(
        wait = dict(start_trial="target", stop=None),
        target = dict(enter_target="hold", timeout="timeout_penalty", stop=None, false_click="target"),
        hold = dict(hold_complete="targ_transition", timeout="timeout_penalty", leave_early="target"),
        targ_transition = dict(trial_complete="reward", trial_abort="wait", trial_incomplete="target"),
        timeout_penalty = dict(timeout_penalty_end="targ_transition"),
        hold_penalty = dict(hold_penalty_end="targ_transition"),
        reward = dict(reward_end="wait")
    )
    trial_end_states = ['reward', 'timeout_penalty', 'hold_penalty']

    def __init__(self, *args, **kwargs):
        kwargs['instantiate_targets'] = False
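        # `mouse` is assumed to be a plant instance defined at module level; the
        # commented-out expressions on the next line are alternative plant choices.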
        self.plant = mouse #plants.CursorPlant() #MouseFakeKF() #plantlist[self.plant_type]
        super(MouseSpeller, self).__init__(*args, **kwargs)

        ## instantiate the keyboard targets
        n_target_per_row = [5, 10, 9, 9, 6]
        self.targets = dict()
        for key in keyboard_spec:
            targ = KeyTarget(target_radius=self.target_radius, target_color=np.array([0.25, 0.25, 0.25, 1]))
            # targ = VirtualRectangularTarget(target_width=self.target_radius*2, target_color=np.array([0.25, 0.25, 0.25, 1]))
            self.targets[key] = targ
            
            p = keyboard_spec[key].pos
            p[1] += 2
            targ.move_to_position(p)

            for model in targ.graphics_models:
                self.add_model(model)

        # Add graphics models for the plant and targets to the window
        if hasattr(self.plant, 'graphics_models'):
            for model in self.plant.graphics_models:
                self.add_model(model)

        self.current_target = None

        for attr in self.plant.hdf_attrs:
            self.add_dtype(*attr)

        self.text_output = ''

    def init(self):
        self.add_dtype('target', 'f8', (3,))
        self.add_dtype('target_index', 'i', (1,))        
        super(MouseSpeller, self).init()

    def _cycle(self):
        '''
        Calls any update functions necessary and redraws screen. Runs 60x per second.
        '''
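        # Mark any key target that the cursor is currently inside as clicked
        # while the plant reports a click; otherwise un-click it.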
        
        for key, targ in self.targets.items():
            in_targ = targ.pt_inside(self.plant.get_endpoint_pos())
            if in_targ and self.plant.click_:
                targ.click()
            else:
                targ.unclick()

        if not (self.current_target is None):
            self.task_data['target'] = self.current_target.get_position()
        else:
            self.task_data['target'] = np.ones(3)*np.nan

        self.task_data['target_index'] = self.target_index

        self.move_plant()

        ## Save plant status to HDF file
        plant_data = self.plant.get_data_to_save()
        for key in plant_data:
            self.task_data[key] = plant_data[key]

        super(MouseSpeller, self)._cycle()

    def move_plant(self):
        super(MouseSpeller, self).move_plant()

    def _test_enter_target(self, ts):
        '''
        return true if the distance between center of cursor and target is smaller than the cursor radius
        '''
        ent_targ = self.current_target.pt_inside(self.plant.get_endpoint_pos())
        clicked = self.plant.click_
        return ent_targ and not clicked
        
    def _test_leave_early(self, ts):
        '''
        return true if cursor moves outside the exit radius
        '''
        outside = not self.current_target.pt_inside(self.plant.get_endpoint_pos())
        if outside:
            self.target_index -= 1
        return outside

    def _record_char(self, new_char):
        if new_char == 'space': new_char = ' '
        if new_char == 'enter': new_char = '\n'
        if new_char == 'shift': new_char = ''
        if new_char == 'backspace' and len(self.text_output) > 0:
            self.text_output = self.text_output[:-1]
        elif new_char == 'backspace':
            pass
        else:
            self.text_output += new_char


    def _test_hold_complete(self, ts):
        hold_complete = self.plant.click_ and self.current_target.pt_inside(self.plant.get_endpoint_pos())
        if hold_complete:
            self._record_char(self.targs[self.target_index])
        return hold_complete

    def update_report_stats(self):
        '''
        see experiment.Experiment.update_report_stats for docs
        '''
        super(MouseSpeller, self).update_report_stats()
        self.reportstats['Text output'] = self.text_output

    def _test_timeout(self, ts):
        return ts>self.timeout_time

    def _test_timeout_penalty_end(self, ts):
        return ts>self.timeout_penalty_time

    def _test_hold_penalty_end(self, ts):
        return ts>self.hold_penalty_time

    def _test_trial_complete(self, ts):
        return self.target_index == len(self.targs) - 1

    def _test_trial_incomplete(self, ts):
        return (not self._test_trial_complete(ts)) and (self.tries<self.max_attempts)

    def _test_trial_abort(self, ts):
        return (not self._test_trial_complete(ts)) and (self.tries==self.max_attempts)

    def _test_reward_end(self, ts):
        return ts>self.reward_time

    def _test_false_click(self, ts):
        if self.plant.click_posedge:
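            # A click edge inside any target other than the currently cued one
            # counts as a false positive: the clicked character is recorded and,
            # while the per-trial false-click count stays below max_error_penalty,
            # a 'backspace' target is inserted so the error has to be corrected
            # before the trial can complete.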
            for key, targ in self.targets.items():
                if targ == self.current_target:
                    return False
                in_targ = targ.pt_inside(self.plant.get_endpoint_pos())
                if in_targ:
                    # add backspace as a target
                    self.n_false_clicks_this_trial  += 1
                    if self.n_false_clicks_this_trial < self.max_error_penalty:
                        self.targs.insert(self.target_index, 'backspace')
                    
                    self.target_index -= 1
                    self.current_target.reset()
                    self._record_char(key)
                    return True
            # if the click was outside any of the targets
            return False
        else:
            return False


    #### STATE FUNCTIONS ####
    def _parse_next_trial(self):
        self.targs = list(self.next_trial)

    def _start_wait(self):
        super(MouseSpeller, self)._start_wait()
        self.n_false_clicks_this_trial = 0
        self.tries = 0
        self.target_index = -1
        #hide targets
        for key in self.targets:
            self.targets[key].reset()

        #get target locations for this trial
        self._parse_next_trial()
        # self.chain_length = len(self.targs)

    def _start_target(self):
        self.target_index += 1

        #move a target to current location (target1 and target2 alternate moving) and set location attribute
        target = self.targets[self.targs[self.target_index]]
        self.current_target = target
        # self.target_location = target.pos
        target.cue_trial_start()

    def _end_hold(self):
        # change current target color to green
        self.current_target.reset()

    def _start_hold_penalty(self):
        #hide targets
        for key in self.targets:
            self.targets[key].reset()

        self.tries += 1
        self.target_index = -1
    
    def _start_timeout_penalty(self):
        #hide targets
        for key in self.targets:
            self.targets[key].reset()

        self.tries += 1
        self.target_index = -1

    def _start_targ_transition(self):
        self.current_target.reset()

    def _start_reward(self):
        super(MouseSpeller, self)._start_reward()
        self.current_target.cue_trial_end_success()

    @staticmethod
    def rand_key_seq_gen(length=1000, seq_len=2):
        key_sequences = []
        keys = keyboard_spec.keys()
        n_keys = len(keys)

        for k in range(length):
            inds = np.random.randint(0, n_keys, seq_len)
            key_sequences.append([keys[m] for m in inds])

        return key_sequences

    @staticmethod
    def mackenzie_soukoreff_corpus(length=1000):
        trials = []
        fh = open('/storage/task_data/MouseSpeller/phrases2.txt')

        missing_chars = []
        for line in fh:
            for char in line:
                if char == ' ':

                    char = 'space'
                elif char in ['\n', '\r']:
                    char = 'enter'

                if char in keyboard_spec:
                    trials.append([char])
                elif chr(ord(char) + (ord('a') - ord('A'))) in keyboard_spec:
                    char = chr(ord(char) + (ord('a') - ord('A')))
                    trials.append(['shift', char])
                elif char not in keyboard_spec:
                    missing_chars.append(char)
        print "missing_chars", missing_chars
        return trials

    def create_assister(self):
        self.assister = FeedbackControllerAssist(mouse_motion_model, style='mixing')

    def create_goal_calculator(self):
        self.goal_calculator = goal_calculators.ZeroVelocityGoal(self.decoder.ssm)

    def get_target_BMI_state(self, *args):
        '''
        Run the goal calculator to determine the target state of the task
        '''
        if self.current_target is None:
            target_pos = np.zeros(2)
        else:
            target_pos = self.current_target.get_position()[[0,2]]
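        # The optimal click state is 1.0 while the task is in the 'hold' state
        # (i.e. the target state includes an active click) and 0.0 otherwise.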
        opt_click_state = float(self.state == 'hold')
        target_pos_state = np.hstack([target_pos, opt_click_state])
        data, solution_updated = self.goal_calculator(target_pos_state)
        target_state, error = data
        return np.tile(np.array(target_state).reshape(-1,1), [1, self.decoder.n_subbins])
class LFP_Mod(BMILoop, Sequence, Window):

    background = (0,0,0,1)
    
    plant_visible = traits.Bool(True, desc='Specifies whether entire plant is displayed or just endpoint')
    
    lfp_cursor_rad = traits.Float(.5, desc="length of LFP cursor")
    lfp_cursor_color = (.5,0,.5,.75)  
     
    lfp_plant_type_options = plantlist.keys()
    lfp_plant_type = traits.OptionsList(*plantlist, bmi3d_input_options=plantlist.keys())

    window_size = traits.Tuple((1920*2, 1080), desc='window size')

    lfp_frac_lims = traits.Tuple((0., 0.35), desc='fraction limits')
    xlfp_frac_lims = traits.Tuple((-.7, 1.7), desc = 'x dir fraction limits')
    lfp_control_band = traits.Tuple((25, 40), desc='beta power band limits')
    lfp_totalpw_band = traits.Tuple((1, 100), desc='total power band limits')
    xlfp_control_band = traits.Tuple((0, 5), desc = 'x direction band limits')
    n_steps = traits.Int(2, desc='moving average for decoder')


    powercap = traits.Float(1, desc="Total power threshold; exceeding it triggers the powercap penalty")

    zboundaries=(-12,12)

    status = dict(
        wait = dict(start_trial="lfp_target", stop=None),
        lfp_target = dict(enter_lfp_target="lfp_hold", powercap_penalty="powercap_penalty", stop=None),
        lfp_hold = dict(leave_early="lfp_target", lfp_hold_complete="reward", powercap_penalty="powercap_penalty"),
        powercap_penalty = dict(powercap_penalty_end="lfp_target"),
        reward = dict(reward_end="wait")
        )

    static_states = [] # states in which the decoder is not run
    trial_end_states = ['reward']
    lfp_cursor_on = ['lfp_target', 'lfp_hold']

    #initial state
    state = "wait"

    #create settable traits
    reward_time = traits.Float(.5, desc="Length of juice reward")

    lfp_target_rad = traits.Float(3.6, desc="Length of targets in cm")
    
    lfp_hold_time = traits.Float(.2, desc="Length of hold required at lfp targets")
    lfp_hold_var = traits.Float(.05, desc="Length of hold variance required at lfp targets")

    hold_penalty_time = traits.Float(1, desc="Length of penalty time for target hold error")
    
    powercap_penalty_time = traits.Float(1, desc="Length of penalty time for timeout error")

    # max_attempts = traits.Int(10, desc='The number of attempts at a target before\
    #     skipping to the next one')

    session_length = traits.Float(0, desc="Time until task automatically stops. Length of 0 means no auto stop.")

    #plant_hide_rate = traits.Float(0.0, desc='If the plant is visible, specifies a percentage of trials where it will be hidden')
    lfp_target_color = (123/256.,22/256.,201/256.,.5)
    mc_target_color = (1,0,0,.5)

    target_index = -1 # Helper variable to keep track of which target to display within a trial
    #tries = 0 # Helper variable to keep track of the number of failed attempts at a given trial.
    
    cursor_visible = False # Determines when to hide the cursor.
    no_data_count = 0 # Counter for number of missing data frames in a row
    
    sequence_generators = ['lfp_mod_4targ']
    
    def __init__(self, *args, **kwargs):
        super(LFP_Mod, self).__init__(*args, **kwargs)
        self.cursor_visible = True

        print 'INIT FRAC LIMS: ', self.lfp_frac_lims
        
        dec_params = dict(lfp_frac_lims = self.lfp_frac_lims,
                          xlfp_frac_lims = self.xlfp_frac_lims,
                          powercap = self.powercap,
                          zboundaries = self.zboundaries,
                          lfp_control_band = self.lfp_control_band,
                          lfp_totalpw_band = self.lfp_totalpw_band,
                          xlfp_control_band = self.xlfp_control_band,
                          n_steps = self.n_steps)

        self.decoder.filt.init_from_task(**dec_params)
        self.decoder.init_from_task(**dec_params)

        self.lfp_plant = plantlist[self.lfp_plant_type]
        if self.lfp_plant_type == 'inv_cursor_onedimLFP':
            print 'MAKE SURE INVERSE GENERATOR IS ON'
            
        self.plant_vis_prev = True

        self.current_assist_level = 0
        self.learn_flag = False

        if hasattr(self.lfp_plant, 'graphics_models'):
            for model in self.lfp_plant.graphics_models:
                self.add_model(model)

        # Instantiate the targets
        ''' 
        height and width on kinarm machine are 2.4. Here we make it 2.4/8*12 = 3.6
        '''
        lfp_target = VirtualSquareTarget(target_radius=self.lfp_target_rad, target_color=self.lfp_target_color)
        self.targets = [lfp_target]
        
        # Initialize target location variable
        self.target_location_lfp = np.array([-100, -100, -100])

        # Declare any plant attributes which must be saved to the HDF file at the _cycle rate
        for attr in self.lfp_plant.hdf_attrs:
            self.add_dtype(*attr) 

    def init(self):
        self.plant = DummyPlant()
        self.add_dtype('lfp_target', 'f8', (3,)) 
        self.add_dtype('target_index', 'i', (1,))
        self.add_dtype('powercap_flag', 'i',(1,))

        for target in self.targets:
            for model in target.graphics_models:
                self.add_model(model)

        super(LFP_Mod, self).init()

    def _cycle(self):
        '''
        Calls any update functions necessary and redraws screen. Runs 60x per second.
        '''
        self.task_data['loop_time'] = self.iter_time()
        self.task_data['lfp_target'] = self.target_location_lfp.copy()
        self.task_data['target_index'] = self.target_index
        #self.task_data['internal_decoder_state'] = self.decoder.filt.current_lfp_pos
        self.task_data['powercap_flag'] = self.decoder.filt.current_powercap_flag

        self.move_plant()

        ## Save plant status to HDF file, ###ADD BACK
        lfp_plant_data = self.lfp_plant.get_data_to_save()
        for key in lfp_plant_data:
            self.task_data[key] = lfp_plant_data[key]

        super(LFP_Mod, self)._cycle()

    def move_plant(self):
        feature_data = self.get_features()

        # Save the "neural features" (e.g. spike counts vector) to HDF file
        for key, val in feature_data.items():
            self.task_data[key] = val
        Bu = None
        assist_weight = 0
        target_state = np.zeros([self.decoder.n_states, self.decoder.n_subbins])

        ## Run the decoder
        if self.state not in self.static_states:
            neural_features = feature_data[self.extractor.feature_type]
            self.call_decoder(neural_features, target_state, Bu=Bu, assist_level=assist_weight, feature_type=self.extractor.feature_type)

        ## Drive the plant to the decoded state, if permitted by the constraints of the plant
        self.lfp_plant.drive(self.decoder)
        self.task_data['decoder_state'] = decoder_state = self.decoder.get_state(shape=(-1,1))
        return decoder_state     

    def run(self):
        '''
        See experiment.Experiment.run for documentation. 
        '''
        # Fire up the plant. For virtual/simulation plants, this does little/nothing.
        self.lfp_plant.start()
        try:
            super(LFP_Mod, self).run()
        finally:
            self.lfp_plant.stop()

    ##### HELPER AND UPDATE FUNCTIONS ####
    def update_cursor_visibility(self):
        ''' Update cursor visible flag to hide cursor if there has been no good data for more than 3 frames in a row'''
        prev = self.cursor_visible
        if self.no_data_count < 3:
            self.cursor_visible = True
            if prev != self.cursor_visible:
                self.show_object(self.cursor, show=True)
        else:
            self.cursor_visible = False
            if prev != self.cursor_visible:
                self.show_object(self.cursor, show=False)

    def update_report_stats(self):
        '''
        see experiment.Experiment.update_report_stats for docs
        '''
        super(LFP_Mod, self).update_report_stats()
        self.reportstats['Trial #'] = self.calc_trial_num()
        self.reportstats['Reward/min'] = np.round(self.calc_events_per_min('reward', 120), decimals=2)

    #### TEST FUNCTIONS ####
    def _test_powercap_penalty(self, ts):
        if self.decoder.filt.current_powercap_flag:
            #Turn off power cap flag:
            self.decoder.filt.current_powercap_flag = 0
            return True
        else:
            return False


    def _test_enter_lfp_target(self, ts):
        '''
        return true if the distance between center of cursor and target is smaller than the cursor radius in the x and z axis only
        '''
        cursor_pos = self.lfp_plant.get_endpoint_pos()
        dx = np.linalg.norm(cursor_pos[0] - self.target_location_lfp[0])
        dz = np.linalg.norm(cursor_pos[2] - self.target_location_lfp[2])
        in_targ = False
        if dx<= (self.lfp_target_rad/2.) and dz<= (self.lfp_target_rad/2.):
            in_targ = True

        return in_targ

        # #return d <= (self.lfp_target_rad - self.lfp_cursor_rad)

        # #If center of cursor enters target at all: 
        # return d <= (self.lfp_target_rad/2.)

        # #New version: 
        # cursor_pos = self.lfp_plant.get_endpoint_pos()
        # d = np.linalg.norm(cursor_pos[2] - self.target_location_lfp[2])
        # d <= (self.lfp_target_rad - self.lfp_cursor_rad)
        
    def _test_leave_early(self, ts):
        '''
        return true if cursor moves outside the exit radius
        '''
        cursor_pos = self.lfp_plant.get_endpoint_pos()
        dx = np.linalg.norm(cursor_pos[0] - self.target_location_lfp[0])
        dz = np.linalg.norm(cursor_pos[2] - self.target_location_lfp[2])
        out_of_targ = False
        if dx > (self.lfp_target_rad/2.) or dz > (self.lfp_target_rad/2.):
            out_of_targ = True
        #rad = self.lfp_target_rad - self.lfp_cursor_rad
        #return d > rad
        return out_of_targ

    def _test_lfp_hold_complete(self, ts):
        return ts>=self.lfp_hold_time_plus_var

    # def _test_lfp_timeout(self, ts):
    #     return ts>self.timeout_time

    def _test_powercap_penalty_end(self, ts):
        if ts>self.powercap_penalty_time:
            self.lfp_plant.turn_on()

        return ts>self.powercap_penalty_time

    def _test_reward_end(self, ts):
        return ts>self.reward_time

    def _test_stop(self, ts):
        if self.session_length > 0 and (self.get_time() - self.task_start_time) > self.session_length:
            self.end_task()
        return self.stop

    #### STATE FUNCTIONS ####
    def _parse_next_trial(self):
        self.targs = self.next_trial
        
    def _start_wait(self):
        super(LFP_Mod, self)._start_wait()
        self.tries = 0
        self.target_index = -1
        #hide targets
        for target in self.targets:
            target.hide()

        #get target locations for this trial
        self._parse_next_trial()
        self.chain_length = 1
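        # Jitter the required hold uniformly within +/- lfp_hold_var seconds of
        # lfp_hold_time so the hold duration is not fully predictable.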
        self.lfp_hold_time_plus_var = self.lfp_hold_time + np.random.uniform(low=-1,high=1)*self.lfp_hold_var

    def _start_lfp_target(self):
        self.target_index += 1
        self.target_index = 0

        #only 1 target: 
        target = self.targets[0]
        self.target_location_lfp = self.targs #Just one target. 
        
        target.move_to_position(self.target_location_lfp)
        target.cue_trial_start()

    def _start_lfp_hold(self):
        #make next target visible unless this is the final target in the trial
        idx = (self.target_index + 1)
        if idx < self.chain_length: 
            target = self.targets[idx % 2]
            target.move_to_position(self.targs[idx])
    
    def _end_lfp_hold(self):
        # change current target color to green
        self.targets[self.target_index % 2].cue_trial_end_success()
    
    def _start_timeout_penalty(self):
        #hide targets
        for target in self.targets:
            target.hide()

        self.tries += 1
        self.target_index = -1

    def _start_reward(self):
        super(LFP_Mod, self)._start_reward()
        #self.targets[self.target_index % 2].show()

    def _start_powercap_penalty(self):
        for target in self.targets:
            target.hide()
        self.lfp_plant.turn_off()

    @staticmethod
    def lfp_mod_4targ(nblocks=100, boundaries=(-18,18,-12,12), xaxis=-8):
        '''Mimics beta modulation task from Kinarm Rig:

        In Kinarm rig, the following linear transformations happen: 
            1. LFP cursor is calculated
            2. mapped from fraction limits [0, .35] to [-1, 1] (unit_coordinates)
            3. udp sent to kinarm machine and multiplied by 8
            4. translated upward in the Y direction by + 2.5

        This means, our targets which are at -8, [-0.75, 2.5, 5.75, 9.0]
        must be translated down by 2.5 to: -8, [-3.25,  0.  ,  3.25,  6.5]
        then divided by 8: -1, [-0.40625,  0.     ,  0.40625,  0.8125 ] in unit_coordinates

        The radius is 1.2, which is 0.15 in unit_coordinates

        Now, we map this to a new system: 
        - new_zero: (y1+y2) / 2
        - new_scale: (y2 - y1) / 2

         (([-0.40625,  0.     ,  0.40625,  0.8125 ]) * new_scale ) + new_zero
        
        new_zero = 0
        new_scale = 12

        12 * [-0.40625,  0.     ,  0.40625,  0.8125 ] 

        = array([-4.875,  0.   ,  4.875,  9.75 ])

        '''

        new_zero = (boundaries[3]+boundaries[2]) / 2.
        new_scale = (boundaries[3] - boundaries[2]) / 2.

        kin_targs = np.array([-0.40625,  0.     ,  0.40625,  0.8125 ])

        lfp_targ_y = (new_scale*kin_targs) + new_zero

        for i in range(nblocks):
            temp = lfp_targ_y.copy()
            np.random.shuffle(temp)
            if i==0:
                z = temp.copy()
            else:
                z = np.hstack((z, temp))

        #Fixed X axis: 
        x = np.tile(xaxis,(nblocks*4))
        y = np.zeros(nblocks*4)
        
        pairs = np.vstack([x, y, z]).T
        return pairs
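
# A minimal standalone check (hypothetical helper, not part of the original
# module) of the target mapping that the lfp_mod_4targ docstring walks through:
# with the default boundaries the four unit-coordinate y-values map to
# [-4.875, 0., 4.875, 9.75] cm. Call it manually, e.g. from a shell.
def _check_lfp_mod_4targ_mapping():
    targs = LFP_Mod.lfp_mod_4targ(nblocks=2, boundaries=(-18, 18, -12, 12), xaxis=-8)
    assert targs.shape == (8, 3)                                   # nblocks * 4 targets, each (x, y, z)
    assert np.all(targs[:, 0] == -8) and np.all(targs[:, 1] == 0)  # fixed x, y = 0
    assert np.allclose(np.unique(targs[:, 2]), [-4.875, 0., 4.875, 9.75])
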
class ApproachAvoidanceTask(Sequence, Window):
    '''
    This is for a free-choice task with two targets (left and right) presented to choose from.  
    The position of the targets may change along the x-axis, according to the target generator, 
    and each target has a varying probability of reward, also according to the target generator.
    The code as it is written is for a joystick.  

    Notes: want target_index to only write once per trial.  if so, can make instructed trials random.  else, make new state for instructed trial.
    '''

    background = (0,0,0,1)
    shoulder_anchor = np.array([2., 0., -15.]) # Coordinates of shoulder anchor point on screen
    
    arm_visible = traits.Bool(True, desc='Specifies whether entire arm is displayed or just endpoint')
    
    cursor_radius = traits.Float(.5, desc="Radius of cursor")
    cursor_color = (.5,0,.5,1)

    joystick_method = traits.Float(1,desc="1: Normal velocity, 0: Position control")
    joystick_speed = traits.Float(20, desc="Speed of cursor")

    plant_type_options = plantlist.keys()
    plant_type = traits.OptionsList(*plantlist, bmi3d_input_options=plantlist.keys())
    starting_pos = (5, 0, 5)
    # window_size = (1280*2, 1024)
    window_size = traits.Tuple((1366*2, 768), desc='window size')
    

    status = dict(
        #wait = dict(start_trial="target", stop=None),
        wait = dict(start_trial="center", stop=None),
        center = dict(enter_center="hold_center", timeout="timeout_penalty", stop=None),
        hold_center = dict(leave_early_center = "hold_penalty",hold_center_complete="target", timeout="timeout_penalty", stop=None),
        target = dict(enter_targetL="hold_targetL", enter_targetH = "hold_targetH", timeout="timeout_penalty", stop=None),
        hold_targetH = dict(leave_early_H="hold_penalty", hold_complete="targ_transition"),
        hold_targetL = dict(leave_early_L="hold_penalty", hold_complete="targ_transition"),
        targ_transition = dict(trial_complete="check_reward",trial_abort="wait", trial_incomplete="center"),
        check_reward = dict(avoid="reward",approach="reward_and_airpuff"),
        timeout_penalty = dict(timeout_penalty_end="targ_transition"),
        hold_penalty = dict(hold_penalty_end="targ_transition"),
        reward = dict(reward_end="wait"),
        reward_and_airpuff = dict(reward_and_airpuff_end="wait"),
    )
    #
    target_color = (.5,1,.5,0)

    #initial state
    state = "wait"

    #create settable traits
    reward_time_avoid = traits.Float(.2, desc="Length of juice reward for avoid decision")
    reward_time_approach_min = traits.Float(.2, desc="Min length of juice for approach decision")
    reward_time_approach_max = traits.Float(.8, desc="Max length of juice for approach decision")
    target_radius = traits.Float(1.5, desc="Radius of targets in cm")
    block_length = traits.Float(100, desc="Number of trials per block")  
    
    hold_time = traits.Float(.5, desc="Length of hold required at targets")
    hold_penalty_time = traits.Float(1, desc="Length of penalty time for target hold error")
    timeout_time = traits.Float(10, desc="Time allowed to go between targets")
    timeout_penalty_time = traits.Float(1, desc="Length of penalty time for timeout error")
    max_attempts = traits.Int(10, desc='The number of attempts at a target before\
        skipping to the next one')
    session_length = traits.Float(0, desc="Time until task automatically stops. Length of 0 means no auto stop.")
    marker_num = traits.Int(14, desc="The index of the motiontracker marker to use for cursor position")
   
    arm_hide_rate = traits.Float(0.0, desc='If the arm is visible, specifies a percentage of trials where it will be hidden')
    target_index = 0 # Helper variable to keep track of whether trial is instructed (1 = 1 choice) or free-choice (2 = 2 choices)
    target_selected = 'L'   # Helper variable to indicate which target was selected
    tries = 0 # Helper variable to keep track of the number of failed attempts at a given trial.
    timedout = False    # Helper variable to keep track if transitioning from timeout_penalty
    reward_counter = 0.0
    cursor_visible = False # Determines when to hide the cursor.
    no_data_count = 0 # Counter for number of missing data frames in a row
    scale_factor = 3.0 #scale factor for converting hand movement to screen movement (1cm hand movement = 3cm cursor movement)
    starting_dist = 10.0 # starting distance from center target
    #color_targets = np.random.randint(2)
    color_targets = 1   # 0: high-value target gets color2, low gets color1; 1: high gets color1, low gets color2
    stopped_center_hold = False   #keep track if center hold was released early
    
    limit2d = 1

    color1 = target_colors['purple']  			# approach color
    color2 = target_colors['lightsteelblue']  	# avoid color
    reward_color = target_colors['green'] 		# color of reward bar
    airpuff_color = target_colors['red']		# color of airpuff bar

    sequence_generators = ['colored_targets_with_probabilistic_reward','block_probabilistic_reward','colored_targets_with_randomwalk_reward','randomwalk_probabilistic_reward']
    
    def __init__(self, *args, **kwargs):
        super(ApproachAvoidanceTask, self).__init__(*args, **kwargs)
        self.cursor_visible = True

        # Add graphics models for the plant and targets to the window

        self.plant = plantlist[self.plant_type]
        self.arm_vis_prev = True

        # Add graphics models for the plant and targets to the window
        if hasattr(self.plant, 'graphics_models'):
            for model in self.plant.graphics_models:
                self.add_model(model)

        self.current_pt=np.zeros([3]) #keep track of current pt
        self.last_pt=np.zeros([3]) #keep track of last pt
        ## Declare cursor
        #self.dtype.append(('cursor', 'f8', (3,)))
        if 0: #hasattr(self.arm, 'endpt_cursor'):
            self.cursor = self.arm.endpt_cursor
        else:
            self.cursor = Sphere(radius=self.cursor_radius, color=self.cursor_color)
            self.add_model(self.cursor)
            self.cursor.translate(*self.get_arm_endpoint(), reset=True) 

        ## Instantiate the targets. Target 1 is center target, Target H is target with high probability of reward, Target L is target with low probability of reward.
        self.target1 = Sphere(radius=self.target_radius, color=self.target_color)           # center target
        self.add_model(self.target1)
        self.targetH = Sphere(radius=self.target_radius, color=self.target_color)           # high-value target
        self.add_model(self.targetH)
        self.targetL = Sphere(radius=self.target_radius, color=self.target_color)           # low-value target
        self.add_model(self.targetL)

        ###STOPPED HERE: should define Rect target here and then adapt length during task. Also, 
        ### be sure to change all targetH instantiations to targetR.

        # Initialize target location variable. 
        self.target_location1 = np.array([0,0,0])
        self.target_locationH = np.array([-self.starting_dist,0,0])
        self.target_locationL = np.array([self.starting_dist,0,0])

        self.target1.translate(*self.target_location1, reset=True)
        self.targetH.translate(*self.target_locationH, reset=True)
        self.targetL.translate(*self.target_locationL, reset=True)

        # Initialize colors for high probability and low probability target.  Color will not change.
        self.targetH.color = self.color_targets*self.color1 + (1 - self.color_targets)*self.color2 # high-value target is color1 if color_targets == 1, color2 otherwise
        self.targetL.color = (1 - self.color_targets)*self.color1 + self.color_targets*self.color2

        #set target colors 
        self.target1.color = (1,0,0,.5)      # center target red
        
        
        # Initialize target location variable
        self.target_location = np.array([0, 0, 0])

        # Declare any plant attributes which must be saved to the HDF file at the _cycle rate
        for attr in self.plant.hdf_attrs:
            self.add_dtype(*attr)  


    def init(self):
        self.add_dtype('targetH', 'f8', (3,))
        self.add_dtype('targetL','f8', (3,))
        self.add_dtype('reward_scheduleH','f8', (1,))
        self.add_dtype('reward_scheduleL','f8', (1,)) 
        self.add_dtype('target_index', 'i', (1,))
        super(ApproachAvoidanceTask, self).init()
        self.trial_allocation = np.zeros(1000)

    def _cycle(self):
        ''' Calls any update functions necessary and redraws screen. Runs 60x per second. '''

        ## Run graphics commands to show/hide the arm if the visibility has changed
        if self.plant_type != 'cursor_14x14':
            if self.arm_visible != self.arm_vis_prev:
                self.arm_vis_prev = self.arm_visible
                self.show_object(self.arm, show=self.arm_visible)

        self.move_arm()
        #self.move_plant()

        ## Save plant status to HDF file
        plant_data = self.plant.get_data_to_save()
        for key in plant_data:
            self.task_data[key] = plant_data[key]

        self.update_cursor()

        if self.plant_type != 'cursor_14x14':
            self.task_data['joint_angles'] = self.get_arm_joints()

        super(ApproachAvoidanceTask, self)._cycle()
        
    ## Plant functions
    def get_cursor_location(self):
        # arm returns it's position as if it was anchored at the origin, so have to translate it to the correct place
        return self.get_arm_endpoint()

    def get_arm_endpoint(self):
        return self.plant.get_endpoint_pos() 

    def set_arm_endpoint(self, pt, **kwargs):
        self.plant.set_endpoint_pos(pt, **kwargs) 

    def set_arm_joints(self, angles):
        self.arm.set_intrinsic_coordinates(angles)

    def get_arm_joints(self):
        return self.arm.get_intrinsic_coordinates()

    def update_cursor(self):
        '''
        Update the cursor's location and visibility status.
        '''
        pt = self.get_cursor_location()
        self.update_cursor_visibility()
        if pt is not None:
            self.move_cursor(pt)

    def move_cursor(self, pt):
        ''' Move the cursor object to the specified 3D location. '''
        # if not hasattr(self.arm, 'endpt_cursor'):
        self.cursor.translate(*pt[:3],reset=True)

    ##    


    ##### HELPER AND UPDATE FUNCTIONS ####

    def move_arm(self):
        ''' Returns the 3D coordinates of the cursor. For manual control, uses
        joystick data. If no joystick data available, returns None'''

        pt = self.joystick.get()
        if len(pt) > 0:
            pt = pt[-1][0]
            pt[0]=1-pt[0]; #Switch L / R axes
            calib = [0.497,0.517] #Sometimes zero point is subject to drift this is the value of the incoming joystick when at 'rest' 

            if self.joystick_method==0:                
                pos = np.array([(pt[0]-calib[0]), 0, calib[1]-pt[1]])
                pos[0] = pos[0]*36
                pos[2] = pos[2]*24
                self.current_pt = pos

            elif self.joystick_method==1:
                vel=np.array([(pt[0]-calib[0]), 0, calib[1]-pt[1]])
                epsilon = 2*(10**-2) #Define epsilon to stabilize cursor movement
                if sum((vel)**2) > epsilon:
                    self.current_pt=self.last_pt+self.joystick_speed*vel*(1/60) #60 Hz update rate, dt = 1/60
                else:
                    self.current_pt = self.last_pt

                if self.current_pt[0] < -25: self.current_pt[0] = -25
                if self.current_pt[0] > 25: self.current_pt[0] = 25
                if self.current_pt[-1] < -14: self.current_pt[-1] = -14
                if self.current_pt[-1] > 14: self.current_pt[-1] = 14

            self.set_arm_endpoint(self.current_pt)
            self.last_pt = self.current_pt.copy()

    def convert_to_cm(self, val):
        return val/10.0

    def update_cursor_visibility(self):
        ''' Update cursor visible flag to hide cursor if there has been no good data for more than 3 frames in a row'''
        prev = self.cursor_visible
        if self.no_data_count < 3:
            self.cursor_visible = True
            if prev != self.cursor_visible:
                self.show_object(self.cursor, show=True)
                self.requeue()
        else:
            self.cursor_visible = False
            if prev != self.cursor_visible:
                self.show_object(self.cursor, show=False)
                self.requeue()

    def calc_n_successfultrials(self):
        trialendtimes = np.array([state[1] for state in self.state_log if state[0]=='check_reward'])
        return len(trialendtimes)

    def calc_n_rewards(self):
        rewardtimes = np.array([state[1] for state in self.state_log if state[0]=='reward'])
        return len(rewardtimes)

    def calc_trial_num(self):
        '''Calculates the current trial count: completed + aborted trials'''
        trialtimes = [state[1] for state in self.state_log if state[0] in ['wait']]
        return len(trialtimes)-1

    def calc_targetH_num(self):
        '''Calculates the total number of times the high-value target was selected'''
        trialtimes = [state[1] for state in self.state_log if state[0] in ['hold_targetH']]
        return len(trialtimes) - 1

    def calc_rewards_per_min(self, window):
        '''Calculates the Rewards/min for the most recent window of specified number of seconds in the past'''
        rewardtimes = np.array([state[1] for state in self.state_log if state[0]=='reward'])
        if (self.get_time() - self.task_start_time) < window:
            divideby = (self.get_time() - self.task_start_time)/sec_per_min
        else:
            divideby = window/sec_per_min
        return np.sum(rewardtimes >= (self.get_time() - window))/divideby

    def calc_success_rate(self, window):
        '''Calculates the rewarded trials/initiated trials for the most recent window of specified length in sec'''
        trialtimes = np.array([state[1] for state in self.state_log if state[0] in ['reward', 'timeout_penalty', 'hold_penalty']])
        rewardtimes = np.array([state[1] for state in self.state_log if state[0]=='reward'])
        if len(trialtimes) == 0:
            return 0.0
        else:
            return float(np.sum(rewardtimes >= (self.get_time() - window)))/np.sum(trialtimes >= (self.get_time() - window))

    def update_report_stats(self):
        '''Function to update any relevant report stats for the task. Values are saved in self.reportstats,
        an ordered dictionary. Keys are strings that will be displayed as the label for the stat in the web interface,
        values can be numbers or strings. Called every time task state changes.'''
        super(ApproachAvoidanceTask, self).update_report_stats()
        self.reportstats['Trial #'] = self.calc_trial_num()
        self.reportstats['Reward/min'] = np.round(self.calc_rewards_per_min(120),decimals=2)
        self.reportstats['High-value target selections'] = self.calc_targetH_num()
        #self.reportstats['Success rate'] = str(np.round(self.calc_success_rate(120)*100.0,decimals=2)) + '%'
        start_time = self.state_log[0][1]
        rewardtimes=np.array([state[1] for state in self.state_log if state[0]=='reward'])
        if len(rewardtimes):
            rt = rewardtimes[-1]-start_time
        else:
            rt= np.float64("0.0")

        sec = str(np.int(np.mod(rt,60)))
        if len(sec) < 2:
            sec = '0'+sec
        self.reportstats['Time Of Last Reward'] = str(np.int(np.floor(rt/60))) + ':' + sec



    #### TEST FUNCTIONS ####
    def _test_enter_center(self, ts):
        #return true if the distance between center of cursor and target is smaller than the cursor radius

        d = np.sqrt((self.cursor.xfm.move[0]-self.target_location1[0])**2 + (self.cursor.xfm.move[1]-self.target_location1[1])**2 + (self.cursor.xfm.move[2]-self.target_location1[2])**2)
        #print 'TARGET SELECTED', self.target_selected
        return d <= self.target_radius - self.cursor_radius

    def _test_enter_targetL(self, ts):
        if self.target_index == 1 and self.LH_target_on[0]==0:
            #return false if instructed trial and this target is not on
            return False
        else:
            #return true if the distance between center of cursor and target is smaller than the cursor radius
            d = np.sqrt((self.cursor.xfm.move[0]-self.target_locationL[0])**2 + (self.cursor.xfm.move[1]-self.target_locationL[1])**2 + (self.cursor.xfm.move[2]-self.target_locationL[2])**2)
            self.target_selected = 'L'
            #print 'TARGET SELECTED', self.target_selected
            return d <= self.target_radius - self.cursor_radius

    def _test_enter_targetH(self, ts):
        if self.target_index ==1 and self.LH_target_on[1]==0:
            return False
        else:
            #return true if the distance between center of cursor and target is smaller than the cursor radius
            d = np.sqrt((self.cursor.xfm.move[0]-self.target_locationH[0])**2 + (self.cursor.xfm.move[1]-self.target_locationH[1])**2 + (self.cursor.xfm.move[2]-self.target_locationH[2])**2)
            self.target_selected = 'H'
            #print 'TARGET SELECTED', self.target_selected
            return d <= self.target_radius - self.cursor_radius
    def _test_leave_early_center(self, ts):
        # return true if cursor moves outside the exit radius (gives a bit of slack around the edge of target once cursor is inside)
        d = np.sqrt((self.cursor.xfm.move[0]-self.target_location1[0])**2 + (self.cursor.xfm.move[1]-self.target_location1[1])**2 + (self.cursor.xfm.move[2]-self.target_location1[2])**2)
        rad = self.target_radius - self.cursor_radius
        return d > rad

    def _test_leave_early_L(self, ts):
        # return true if cursor moves outside the exit radius (gives a bit of slack around the edge of target once cursor is inside)
        d = np.sqrt((self.cursor.xfm.move[0]-self.target_locationL[0])**2 + (self.cursor.xfm.move[1]-self.target_locationL[1])**2 + (self.cursor.xfm.move[2]-self.target_locationL[2])**2)
        rad = self.target_radius - self.cursor_radius
        return d > rad

    def _test_leave_early_H(self, ts):
        # return true if cursor moves outside the exit radius (gives a bit of slack around the edge of target once cursor is inside)
        d = np.sqrt((self.cursor.xfm.move[0]-self.target_locationH[0])**2 + (self.cursor.xfm.move[1]-self.target_locationH[1])**2 + (self.cursor.xfm.move[2]-self.target_locationH[2])**2)
        rad = self.target_radius - self.cursor_radius
        return d > rad

    def _test_hold_center_complete(self, ts):
        return ts>=self.hold_time
    
    def _test_hold_complete(self, ts):
        return ts>=self.hold_time

    def _test_timeout(self, ts):
        return ts>self.timeout_time

    def _test_timeout_penalty_end(self, ts):
        return ts>self.timeout_penalty_time

    def _test_hold_penalty_end(self, ts):
        return ts>self.hold_penalty_time

    def _test_trial_complete(self, ts):
        #return self.target_index==self.chain_length-1
        return not self.timedout

    def _test_trial_incomplete(self, ts):
        return (not self._test_trial_complete(ts)) and (self.tries<self.max_attempts)

    def _test_trial_abort(self, ts):
        return (not self._test_trial_complete(ts)) and (self.tries==self.max_attempts)

    def _test_yes_reward(self,ts):
        if self.target_selected == 'H':
            #reward_assigned = self.targs[0,1]
            reward_assigned = self.rewardH
        else:
            #reward_assigned = self.targs[1,1]
            reward_assigned = self.rewardL
        if self.reward_SmallLarge==1:
            self.reward_time = reward_assigned*self.reward_time_large + (1 - reward_assigned)*self.reward_time_small   # update reward time if using Small/large schedule
            reward_assigned = 1    # always rewarded
        return bool(reward_assigned)

    def _test_no_reward(self,ts):
        if self.target_selected == 'H':
            #reward_assigned = self.targs[0,1]
            reward_assigned = self.rewardH
        else:
            #reward_assigned = self.targs[1,1]
            reward_assigned = self.rewardL
        if self.reward_SmallLarge==True:
            self.reward_time = reward_assigned*self.reward_time_large + (1 - reward_assigned)*self.reward_time_small   # update reward time if using Small/large schedule
            reward_assigned = 1    # always rewarded
        return bool(not reward_assigned)

    def _test_reward_end(self, ts):
        time_ended = (ts > self.reward_time)
        self.reward_counter = self.reward_counter + 1
        return time_ended

    def _test_stop(self, ts):
        if self.session_length > 0 and (time.time() - self.task_start_time) > self.session_length:
            self.end_task()
        return self.stop

    #### STATE FUNCTIONS ####

    def show_object(self, obj, show=False):
        '''
        Show or hide an object
        '''
        if show:
            obj.attach()
        else:
            obj.detach()
        self.requeue()


    def _start_wait(self):
        super(ApproachAvoidanceTask, self)._start_wait()
        self.tries = 0
        self.target_index = 0     # indicator for instructed or free-choice trial
        #hide targets
        self.show_object(self.target1, False)
        self.show_object(self.targetL, False)
        self.show_object(self.targetH, False)


        #get target positions and reward assignments for this trial
        self.targs = self.next_trial
        if self.plant_type != 'cursor_14x14' and np.random.rand() < self.arm_hide_rate:
            self.arm_visible = False
        else:
            self.arm_visible = True
        #self.chain_length = self.targs.shape[0] #Number of sequential targets in a single trial

        #self.task_data['target'] = self.target_locationH.copy()
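        # Bernoulli draw per target: a reward is scheduled when the target's
        # reward probability (in percent, from the generator) exceeds a uniform
        # integer drawn from [0, 100).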
        assign_reward = np.random.randint(0,100,size=2)
        self.rewardH = np.greater(self.targs[0,1],assign_reward[0])
        #print 'high value target reward prob', self.targs[0,1]
        self.rewardL = np.greater(self.targs[1,1],assign_reward[1])

        
        #print 'TARGET GENERATOR', self.targs[0,]
        self.task_data['targetH'] = self.targs[0,].copy()
        self.task_data['reward_scheduleH'] = self.rewardH.copy()
        self.task_data['targetL'] = self.targs[1,].copy()
        self.task_data['reward_scheduleL'] = self.rewardL.copy()
        
        self.requeue()

    def _start_center(self):

        #self.target_index += 1

        
        self.show_object(self.target1, True)
        self.show_object(self.cursor, True)
        
        # Third argument in self.targs determines if target is on left or right
        # First argument in self.targs determines if location is offset to farther distances
        offsetH = (2*self.targs[0,2] - 1)*(self.starting_dist + self.location_offset_allowed*self.targs[0,0]*4.0)
        moveH = np.array([offsetH,0,0]) 
        offsetL = (2*self.targs[1,2] - 1)*(self.starting_dist + self.location_offset_allowed*self.targs[1,0]*4.0)
        moveL = np.array([offsetL,0,0])

        self.targetL.translate(*moveL, reset=True) 
        #self.targetL.move_to_position(*moveL, reset=True)           
        ##self.targetL.translate(*self.targs[self.target_index], reset=True)
        self.show_object(self.targetL, True)
        self.target_locationL = self.targetL.xfm.move

        self.targetH.translate(*moveH, reset=True)
        #self.targetR.move_to_position(*moveR, reset=True)
        ##self.targetR.translate(*self.targs[self.target_index], reset=True)
        self.show_object(self.targetH, True)
        self.target_locationH = self.targetH.xfm.move


        # Insert instructed trials within free choice trials
        if self.trial_allocation[self.calc_trial_num()] == 1:
        #if (self.calc_trial_num() % 10) < (self.percentage_instructed_trials/10):
            self.target_index = 1    # instructed trial
            leftright_coinflip = np.random.randint(0,2)
            if leftright_coinflip == 0:
                self.show_object(self.targetL, False)
                self.LH_target_on = (0, 1)
            else:
                self.show_object(self.targetH, False)
                self.LR_coinflip = 0
                self.LH_target_on = (1, 0)
        else:
            self.target_index = 2   # free-choice trial

        self.cursor_visible = True
        self.task_data['target_index'] = self.target_index
        self.requeue()

    def _start_target(self):

        #self.target_index += 1

        #move targets to current location and set location attribute.  Target1 (center target) position does not change.                    
        
        self.show_object(self.target1, False)
        #self.target_location1 = self.target1.xfm.move
        self.show_object(self.cursor, True)
       
        self.update_cursor()
        self.requeue()

    def _start_hold_center(self):
        self.show_object(self.target1, True)
        self.timedout = False
        self.requeue()

    def _start_hold_targetL(self):
        #make next target visible unless this is the final target in the trial
        #if 1 < self.chain_length:
            #self.targetL.translate(*self.targs[self.target_index+1], reset=True)
         #   self.show_object(self.targetL, True)
         #   self.requeue()
        self.show_object(self.targetL, True)
        self.timedout = False
        self.requeue()

    def _start_hold_targetH(self):
        #make next target visible unless this is the final target in the trial
        #if 1 < self.chain_length:
            #self.targetR.translate(*self.targs[self.target_index+1], reset=True)
         #   self.show_object(self.targetR, True)
          #  self.requeue()
        self.show_object(self.targetH, True)
        self.timedout = False
        self.requeue()

    def _end_hold_center(self):
        self.target1.radius = 0.7*self.target_radius    # shrink center target to indicate hold completion
    
    def _end_hold_targetL(self):
        self.targetL.color = (0,1,0,0.5)    # color target green

    def _end_hold_targetH(self):
        self.targetH.color = (0,1,0,0.5)    # color target green

    def _start_hold_penalty(self):
        #hide targets
        self.show_object(self.target1, False)
        self.show_object(self.targetL, False)
        self.show_object(self.targetH, False)
        self.timedout = True
        self.requeue()
        self.tries += 1
        #self.target_index = -1
    
    def _start_timeout_penalty(self):
        #hide targets
        self.show_object(self.target1, False)
        self.show_object(self.targetL, False)
        self.show_object(self.targetH, False)
        self.timedout = True
        self.requeue()
        self.tries += 1
        #self.target_index = -1


    def _start_targ_transition(self):
        #hide targets

        self.show_object(self.target1, False)
        self.show_object(self.targetL, False)
        self.show_object(self.targetH, False)
        self.requeue()

    def _start_check_reward(self):
        #hide targets
        self.show_object(self.target1, False)
        self.show_object(self.targetL, False)
        self.show_object(self.targetH, False)
        self.requeue()

    def _start_reward(self):
        #super(ApproachAvoidanceTask, self)._start_reward()
        if self.target_selected == 'L':
            self.show_object(self.targetL, True)  
            #reward_assigned = self.targs[1,1]
        else:
            self.show_object(self.targetH, True)
            #reward_assigned = self.targs[0,1]
        #self.reward_counter = self.reward_counter + float(reward_assigned)
        self.requeue()

    @staticmethod
    def colored_targets_with_probabilistic_reward(length=1000, boundaries=(-18,18,-10,10,-15,15),reward_high_prob=80,reward_low_prob=40):

        """
        Generator should return array of ntrials x 2 x 3. The second dimension is for each target.
        For example, first is the target with high probability of reward, and the second 
        entry is for the target with low probability of reward.  The third dimension holds three variables indicating 
        position offset (yes/no), reward probability (fixed in this case), and location (binary returned where the
        ouput indicates either left or right).

        UPDATE: CHANGED SO THAT THE SECOND DIMENSION CARRIES THE REWARD PROBABILITY RATHER THAN THE REWARD SCHEDULE
        """

        position_offsetH = np.random.randint(2,size=(1,length))
        position_offsetL = np.random.randint(2,size=(1,length))
        location_int = np.random.randint(2,size=(1,length))

        # coin flips for reward schedules, want this to be elementwise comparison
        #assign_rewardH = np.random.randint(0,100,size=(1,length))
        #assign_rewardL = np.random.randint(0,100,size=(1,length))
        high_prob = reward_high_prob*np.ones((1,length))
        low_prob = reward_low_prob*np.ones((1,length))
        
        #reward_high = np.greater(high_prob,assign_rewardH)
        #reward_low = np.greater(low_prob,assign_rewardL)

        pairs = np.zeros([length,2,3])
        pairs[:,0,0] = position_offsetH
        #pairs[:,0,1] = reward_high
        pairs[:,0,1] = high_prob
        pairs[:,0,2] = location_int

        pairs[:,1,0] = position_offsetL
        #pairs[:,1,1] = reward_low
        pairs[:,1,1] = low_prob
        pairs[:,1,2] = 1 - location_int

        return pairs
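
    # Hedged usage sketch (illustration only, not part of the original task code):
    # each entry of the generated array is consumed once per trial, and the reward
    # outcome is decided by an independent coin flip against the stored probability:
    #   targs = pairs[i]                           # 2 x 3 entry for trial i
    #   coin = np.random.randint(0, 100, size=2)
    #   rewardH = targs[0, 1] > coin[0]            # high-value target rewarded?
    #   rewardL = targs[1, 1] > coin[1]            # low-value target rewarded?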

    @staticmethod
    def block_probabilistic_reward(length=1000, boundaries=(-18,18,-10,10,-15,15),reward_high_prob=80,reward_low_prob=40):
        # NOTE: a staticmethod cannot call a sibling staticmethod unqualified; the enclosing
        # class name (ApproachAvoidanceTask, per the commented-out super() call above) is assumed here.
        pairs = ApproachAvoidanceTask.colored_targets_with_probabilistic_reward(length=length, boundaries=boundaries, reward_high_prob=reward_high_prob, reward_low_prob=reward_low_prob)
        return pairs

    @staticmethod
    def colored_targets_with_randomwalk_reward(length=1000,reward_high_prob=80,reward_low_prob=40,reward_high_span = 20, reward_low_span = 20,step_size_mean = 0, step_size_var = 1):

        """
        Generator should return array of ntrials x 2 x 3. The second dimension is for each target.
        For example, first is the target with high probability of reward, and the second 
        entry is for the target with low probability of reward.  The third dimension holds three variables indicating 
        position offset (yes/no), reward probability, and location (binary returned where the
        ouput indicates either left or right).  The variables reward_high_span and reward_low_span indicate the width
        of the range that the high or low reward probability are allowed to span respectively, e.g. if reward_high_prob
        is 80 and reward_high_span is 20, then the reward probability for the high value target will be bounded
        between 60 and 100 percent.
        """

        position_offsetH = np.random.randint(2,size=(1,length))
        position_offsetL = np.random.randint(2,size=(1,length))
        location_int = np.random.randint(2,size=(1,length))

        # define variables for increments: amount of increment and in which direction (i.e. increasing or decreasing)
        assign_rewardH = np.random.randn(1,length)
        assign_rewardL = np.random.randn(1,length)
        assign_rewardH_direction = np.random.randn(1,length)
        assign_rewardL_direction = np.random.randn(1,length)

        r_0_high = reward_high_prob
        r_0_low = reward_low_prob
        r_lowerbound_high = r_0_high - (reward_high_span/2)
        r_upperbound_high = r_0_high + (reward_high_span/2)
        r_lowerbound_low = r_0_low - (reward_low_span/2)
        r_upperbound_low = r_0_low + (reward_low_span/2)
        
        reward_high = np.zeros(length)
        reward_low = np.zeros(length)
        reward_high[0] = r_0_high
        reward_low[0] = r_0_low

        eps_high = assign_rewardH*step_size_mean + (2*(assign_rewardH_direction > 0) - 1)*step_size_var
        eps_low = assign_rewardL*step_size_mean + (2*(assign_rewardL_direction > 0) - 1)*step_size_var

        eps_high = eps_high.ravel()
        eps_low = eps_low.ravel()

        for i in range(1,length):
            '''
            assign_rewardH_direction = np.random.randn(1)
            assign_rewardL_direction = np.random.randn(1)
            assign_rewardH = np.random.randn(1)
            if assign_rewardH_direction[i-1,] < 0:
                eps_high = step_size_mean*assign_rewardH[i-1] - step_size_var
            else:
                eps_high = step_size_mean*assign_rewardH[i-1] + step_size_var

            if assign_rewardL_direction[i] < 0:
                eps_low = step_size_mean*assign_rewardL[i] - step_size_var
            else:
                eps_low = step_size_mean*assign_rewardL[i] + step_size_var
            '''
            reward_high[i] = reward_high[i-1] + eps_high[i-1]
            reward_low[i] = reward_low[i-1] + eps_low[i-1]

            reward_high[i] = (r_lowerbound_high < reward_high[i] < r_upperbound_high)*reward_high[i] + (r_lowerbound_high > reward_high[i])*(r_lowerbound_high+ eps_high[i-1]) + (r_upperbound_high < reward_high[i])*(r_upperbound_high - eps_high[i-1])
            reward_low[i] = (r_lowerbound_low < reward_low[i] < r_upperbound_low)*reward_low[i] + (r_lowerbound_low > reward_low[i])*(r_lowerbound_low+ eps_low[i-1]) + (r_upperbound_low < reward_low[i])*(r_upperbound_low - eps_low[i-1])

        pairs = np.zeros([length,2,3])
        pairs[:,0,0] = position_offsetH
        pairs[:,0,1] = reward_high
        pairs[:,0,2] = location_int

        pairs[:,1,0] = position_offsetL
        pairs[:,1,1] = reward_low
        pairs[:,1,2] = 1 - location_int

        return pairs
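
    # Hedged sketch (illustration only): the per-trial update above is a bounded
    # random walk. Conceptually, each step is
    #   eps  = step_size_mean * N(0, 1) + sign(N(0, 1)) * step_size_var
    #   p[i] = p[i-1] + eps
    # and p[i] is pushed back inside [p0 - span/2, p0 + span/2] whenever the step
    # would leave that band.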

    @staticmethod
    def randomwalk_probabilistic_reward(length=1000,reward_high_prob=80,reward_low_prob=40,reward_high_span = 20, reward_low_span = 20,step_size_mean = 0, step_size_var = 1):
        # NOTE: as above, the enclosing class name (ApproachAvoidanceTask) is assumed here.
        pairs = ApproachAvoidanceTask.colored_targets_with_randomwalk_reward(length=length, reward_high_prob=reward_high_prob, reward_low_prob=reward_low_prob, reward_high_span=reward_high_span, reward_low_span=reward_low_span, step_size_mean=step_size_mean, step_size_var=step_size_var)
        return pairs
Ejemplo n.º 11
0
class Window(LogExperiment):
    '''
    Generic stereo window
    '''
    status = dict(draw=dict(stop=None))
    state = "draw"
    stop = False

    # XPS computer
    # window_size = (1920*2, 1080)
    window_size = traits.Tuple(monitor_res['monitor_2D'],
                               desc='Window size, in pixels')
    background = traits.Tuple((0., 0., 0., 1.),
                              desc="Background color (R,G,B,A)")
    fullscreen = traits.Bool(True, desc="Fullscreen window")

    #Screen parameters, all in centimeters -- adjust for monkey
    screen_dist = traits.Float(44.5 + 3, desc="Screen to eye distance (cm)")
    screen_half_height = traits.Float(10.75, desc="Screen half height (cm)")
    iod = traits.Float(
        2.5, desc="Intraocular distance (cm)")  # intraocular distance

    show_environment = traits.Int(0,
                                  desc="Show wireframe box around environment")

    hidden_traits = [
        'screen_dist', 'screen_half_height', 'iod', 'show_environment',
        'background'
    ]

    def __init__(self, *args, **kwargs):
        self.display_start_pos = kwargs.pop('display_start_pos', "0,0")
        super(Window, self).__init__(*args, **kwargs)

        self.models = []
        self.world = None
        self.event = None

        # os.popen('sudo vbetool dpms on')
        self.fov = np.degrees(
            np.arctan(self.screen_half_height / self.screen_dist)) * 2
        self.screen_cm = [
            2 * self.screen_half_height * self.window_size[0] /
            self.window_size[1], 2 * self.screen_half_height
        ]

        if self.show_environment:
            self.add_model(Box())
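
        # Hedged worked example (numbers are the trait defaults above; the 16:9
        # aspect ratio is an assumption): with screen_half_height = 10.75 cm and
        # screen_dist = 47.5 cm,
        #   fov       = 2 * degrees(arctan(10.75 / 47.5)) ~= 25.5 degrees
        #   screen_cm = [2 * 10.75 * (16/9), 2 * 10.75]   ~= [38.2, 21.5] cm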

    def set_os_params(self):
        os.environ['SDL_VIDEO_WINDOW_POS'] = self.display_start_pos
        os.environ['SDL_VIDEO_X11_WMCLASS'] = "monkey_experiment"

    def screen_init(self):
        self.set_os_params()
        pygame.init()

        pygame.display.gl_set_attribute(pygame.GL_DEPTH_SIZE, 24)
        flags = pygame.DOUBLEBUF | pygame.HWSURFACE | pygame.OPENGL | pygame.NOFRAME
        if self.fullscreen:
            flags = flags | pygame.FULLSCREEN
        try:
            pygame.display.gl_set_attribute(pygame.GL_MULTISAMPLEBUFFERS, 1)
            self.screen = pygame.display.set_mode(self.window_size, flags)
        except:
            pygame.display.gl_set_attribute(pygame.GL_MULTISAMPLEBUFFERS, 0)
            self.screen = pygame.display.set_mode(self.window_size, flags)

        glEnable(GL_BLEND)
        glDepthFunc(GL_LESS)
        glEnable(GL_DEPTH_TEST)
        glEnable(GL_TEXTURE_2D)
        glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
        glClearColor(*self.background)
        glClearDepth(1.0)
        glDepthMask(GL_TRUE)

        self.renderer = self._get_renderer()

        #this effectively determines the modelview matrix
        self.world = Group(self.models)
        self.world.init()

        #up vector is always (0,0,1), why would I ever need to roll the camera?!
        self.set_eye((0, -self.screen_dist, 0), (0, 0))

        pygame.mouse.set_visible(False)

    def _get_renderer(self):
        near = 1
        far = 1024
        return stereo.MirrorDisplay(self.window_size, self.fov, near, far,
                                    self.screen_dist, self.iod)

    def set_eye(self, pos, vec, reset=True):
        '''Set the eye's position and direction. Camera starts at (0,0,0), pointing towards positive y'''
        self.world.translate(pos[0], pos[2], pos[1], reset=True).rotate_x(-90)
        self.world.rotate_y(vec[0]).rotate_x(vec[1])

    def add_model(self, model):
        if self.world is None:
            #world doesn't exist yet, add the model to cache
            self.models.append(model)
        else:
            #We're already running, initialize the model and add it to the world
            model.init()
            self.world.add(model)

    def show_object(self, obj, show=False):
        '''
        Show or hide an object. This function is an abstraction so that tasks don't need to know about attach/detach
        '''
        if show:
            obj.attach()
        else:
            obj.detach()

    def draw_world(self):
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        self.renderer.draw(self.world)
        pygame.display.flip()
        self.renderer.draw_done()

    def _get_event(self):
        for e in pygame.event.get(pygame.KEYDOWN):
            return (e.key, e.type)

    def _start_None(self):
        pygame.display.quit()

    def _test_stop(self, ts):
        '''
        Stop the task if the escape key is pressed, or if the super _test_stop instructs a stop
        '''
        super_stop = super(Window, self)._test_stop(ts)
        from pygame import K_ESCAPE
        key_stop = self.event is not None and self.event[0] == K_ESCAPE
        if key_stop:
            print("Window closed. Stopping task")
        return super_stop or key_stop

    def update_report_stats(self):
        self.reportstats['FPS'] = round(self.clock.get_fps(), 2)
        return super().update_report_stats()

    def requeue(self):
        self.renderer._queue_render(self.world)

    def _cycle(self):
        self.requeue()
        self.draw_world()
        super(Window, self)._cycle(
        )  # is this order intentional? why not cycle first then draw the screen?
        self.event = self._get_event()
Ejemplo n.º 12
0
class ScreenSync(NIDAQSync):
    '''Adds a square in one corner that switches color with every flip.'''

    sync_position = {
        'TopLeft': (-1, 1),
        'TopRight': (1, 1),
        'BottomLeft': (-1, -1),
        'BottomRight': (1, -1)
    }
    sync_position_2D = {
        'TopLeft': (-1, -1),
        'TopRight': (1, -1),
        'BottomLeft': (-1, 1),
        'BottomRight': (1, 1)
    }
    sync_corner = traits.OptionsList(tuple(sync_position.keys()),
                                     desc="Position of sync square")
    sync_size = traits.Float(1, desc="Sync square size (cm)")
    sync_color_off = traits.Tuple((0., 0., 0., 1.),
                                  desc="Sync off color (R,G,B,A)")
    sync_color_on = traits.Tuple((1., 1., 1., 1.),
                                 desc="Sync on color (R,G,B,A)")
    sync_state_duration = 1  # How long to delay the start of the experiment (seconds)

    def __init__(self, *args, **kwargs):

        # Create a new "sync" state at the beginning of the experiment
        if isinstance(self.status, dict):
            self.status["sync"] = dict(start_experiment="wait",
                                       stoppable=False)
        else:
            from riglib.fsm.fsm import StateTransitions
            self.status.states["sync"] = StateTransitions(
                start_experiment="wait", stoppable=False)
        self.state = "sync"

        super().__init__(*args, **kwargs)
        self.sync_state = False
        if hasattr(self, 'is_pygame_display'):
            screen_center = np.divide(self.window_size, 2)
            sync_size_pix = self.sync_size * self.window_size[
                0] / self.screen_cm[0]
            sync_center = [sync_size_pix / 2, sync_size_pix / 2]
            from_center = np.multiply(self.sync_position_2D[self.sync_corner],
                                      np.subtract(screen_center, sync_center))
            top_left = screen_center + from_center - sync_center
            self.sync_rect = pygame.Rect(top_left, np.multiply(sync_center, 2))
        else:
            from_center = np.multiply(
                self.sync_position[self.sync_corner],
                np.subtract(self.screen_cm, self.sync_size))
            pos = np.array(
                [from_center[0] / 2, self.screen_dist, from_center[1] / 2])
            self.sync_square = VirtualRectangularTarget(
                target_width=self.sync_size,
                target_height=self.sync_size,
                target_color=self.sync_color_off,
                starting_pos=pos)
            # self.sync_square = VirtualCircularTarget(target_radius=self.sync_size, target_color=self.sync_color_off, starting_pos=pos)
            for model in self.sync_square.graphics_models:
                self.add_model(model)

    def screen_init(self):
        super().screen_init()
        if hasattr(self, 'is_pygame_display'):
            self.sync = pygame.Surface(self.window_size)
            self.sync.fill(TRANSPARENT)
            self.sync.set_colorkey(TRANSPARENT)

    def _draw_other(self):
        # For pygame display
        color = self.sync_color_on if self.sync_state else self.sync_color_off
        self.sync.fill(255 * np.array(color), rect=self.sync_rect)
        self.screen.blit(self.sync, (0, 0))

    def init(self):
        self.add_dtype('sync_square', bool, (1, ))
        super().init()

    def _while_sync(self):
        '''
        Deliberate "startup sequence":
            1. Send a clock pulse to denote the start of the FSM loop
            2. Turn off the clock and send a single, longer, impulse
                to enable measurement of the screen latency
            3. Turn the clock back on
        '''

        # Turn off the clock after the first cycle is synced
        if self.cycle_count == 1:
            self.sync_every_cycle = False

        # Send an impulse to measure latency halfway through the sync state
        key_cycle = int(self.fps * self.sync_state_duration / 2)
        impulse_duration = 5  # cycles, to make sure it appears on the screen
        if self.cycle_count == key_cycle:
            self.sync_every_cycle = True
        elif self.cycle_count == key_cycle + 1:
            self.sync_every_cycle = False
        elif self.cycle_count == key_cycle + impulse_duration:
            self.sync_every_cycle = True
        elif self.cycle_count == key_cycle + impulse_duration + 1:
            self.sync_every_cycle = False
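
        # Hedged timing sketch (assumes fps = 60 and the default 1 s sync state):
        #   cycle 0   first FSM cycle, sync square toggles (clock pulse)
        #   cycle 1   sync_every_cycle turned off
        #   cycle 30  key_cycle: square toggles once to start the latency impulse
        #   cycle 35  square toggles back (impulse_duration = 5 cycles)
        # The clock then stays off until _end_sync re-enables it for the wait state.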

    def _end_sync(self):
        self.sync_every_cycle = True

    def _test_start_experiment(self, ts):
        return ts > self.sync_state_duration

    def _cycle(self):
        super()._cycle()

        # Update the sync state
        if self.sync_every_cycle:
            self.sync_state = not self.sync_state
        self.task_data['sync_square'] = copy.deepcopy(self.sync_state)

        # For OpenGL display, update the graphics
        if not hasattr(self, 'is_pygame_display'):
            color = self.sync_color_on if self.sync_state else self.sync_color_off
            self.sync_square.cube.color = color
Ejemplo n.º 13
0
class MachineControlExoJointVel_w_Positioner(BMIControlExoJointVel, PositionerTaskController):
    '''
    Task to automatically go between different joint configurations with the target positioner following
    '''
    sequence_generators = ['exo_joint_space_targets']
    current_assist_level = 1

    status = FSMTable(
        wait=StateTransitions(start_trial='move'),
        move=StateTransitions(reached_config='reward'),
        reward=StateTransitions(time_expired='wait'),
    )

    state = 'wait'

    reward_time = traits.Float(3, desc='Time that reward solenoid is open')
    config_tolerances_deg = traits.Tuple((10, 10, 10, 7.5, 10), desc='Per-joint tolerance, in degrees, for reaching a target configuration')
    
    def __init__(self, *args, **kwargs):
        super(MachineControlExoJointVel_w_Positioner, self).__init__(*args, **kwargs)
        self.config_tolerances = np.deg2rad(np.array(self.config_tolerances_deg))
        self.config_tolerances[-1] = np.inf

    @staticmethod 
    def exo_joint_space_targets(n_blocks=10):
        # neutral_config = np.array([-0.64732247,  0.79,  0.19634043,  0.97628754, -0.02114062])
        target1 = np.array([-1.05, 0.7, 0.4, 1, 0]) 
        target2 = np.array([-0.25, 0.7, 0.4, 1, 0])
        target3 = np.array([-0.85, 1.3 , 0.4, 1, 0])
        target4 = np.array([-0.65, 1.3, 0.4, 0.2, 0])



        trial_target_ls = []
        for k in range(n_blocks):
            configs = np.random.permutation([target1, target2, target3, target4]) # generate the target sequence for each block
            for config in configs:
                trial_target_ls.append(dict(target_config=config))
        return trial_target_ls
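
    # Hedged usage note (illustration only): with the default n_blocks=10 this
    # generator yields 40 trials, each a dict of the form
    #   {'target_config': array([-1.05, 0.7, 0.4, 1, 0])}
    # with the four configurations shuffled independently within every block.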


    def _test_reached_config(self, *args, **kwargs):
        target_endpt = self.plant.kin_chain.endpoint_pos(self._gen_target_config)
        current_endpt = self.plant.kin_chain.endpoint_pos(self.plant.joint_angles)
        pos_diff = np.linalg.norm(current_endpt - target_endpt)
        joint_diff = np.abs(self.plant.joint_angles - self._gen_target_config)
        # print pos_diff 
        # print np.round(np.rad2deg(joint_diff), decimals=2)
        # print 
        return (pos_diff < 3) or np.all(joint_diff < self.config_tolerances)
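
    # Hedged note (units assumed from the surrounding code): the configuration
    # counts as reached either when the endpoint is within 3 cm of the target
    # endpoint or when every joint error is inside config_tolerances; the last
    # tolerance is set to infinity in __init__, so that joint is ignored.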
        

    def load_decoder(self):
        self.ssm = StateSpaceJointVelocityActiveExo()
        A, B, W = self.ssm.get_ssm_matrices()
        filt = MachineOnlyFilter(A, W)
        units = []
        self.decoder = Decoder(filt, units, self.ssm, binlen=0.1)
        self.decoder.n_features = 1

    def create_feature_extractor(self):
        from riglib.bmi.extractor import DummyExtractor

        self.extractor = DummyExtractor()
        self._add_feature_extractor_dtype()

    def _start_reward(self):
        print "trial complete!"
Ejemplo n.º 14
0
class BMIJointPerturb(BMIControlMulti):
    '''
    Task where the virtual plant starts in configuration sampled from a discrete set and resets every trial
    '''
    status = dict(wait=dict(start_trial="premove", stop=None),
                  premove=dict(premove_complete="target"),
                  target=dict(enter_target="hold",
                              timeout="timeout_penalty",
                              stop=None),
                  hold=dict(leave_early="hold_penalty",
                            hold_complete="targ_transition"),
                  targ_transition=dict(trial_complete="reward",
                                       trial_abort="wait",
                                       trial_incomplete="target",
                                       trial_restart="premove"),
                  timeout_penalty=dict(timeout_penalty_end="targ_transition"),
                  hold_penalty=dict(hold_penalty_end="targ_transition"),
                  reward=dict(reward_end="wait"))

    sequence_generators = BMIControlMulti.sequence_generators + [
        'tentacle_multi_start_config'
    ]

    pert_angles = traits.Tuple(
        (np.pi, -3 * np.pi / 4, 3 * np.pi / 4),
        desc="Possible wrist angles for perturbed configurations")
    # pert_angles = traits.Tuple((np.pi, -7*np.pi/8, 7*np.pi/8, -3*np.pi/4, 3*np.pi/4), desc="Possible wrist angles for perturbed configurations")
    # pert_angles = traits.Tuple((-3*np.pi/4), desc="Possible wrist angles for perturbed configurations")
    #np.pi,
    #tuple(np.linspace(-np.pi, np.pi, 8)
    premove_time = traits.Float(
        .1, desc='Time before subject must start doing BMI control')

    # static_states = ['premove'] # states in which the decoder is not run

    def __init__(self, *args, **kwargs):
        super(BMIJointPerturb, self).__init__(*args, **kwargs)

    def _parse_next_trial(self):
        self.targs, self.curr_pert_angle = self.next_trial
        # self.targs, self.curr_pert_angle, self.arm_visible = self.next_trial

    # def _start_wait(self):
    #     self.curr_pert_angle = self.pert_angles[np.random.randint(0, high=len(self.pert_angles))]
    #     super(BMIJointPerturb, self)._start_wait()

    def _while_premove(self):
        self.decoder.filt.state.mean = self.calc_perturbed_ik(self.targs[0])

    def _end_timeout_penalty(self):
        pass

    def calc_perturbed_ik(self, endpoint_pos):
        distal_angles = np.array([self.curr_pert_angle, -np.pi / 20])
        # second angle above used to be self.init_decoder_mean[3,0] for center out version of task
        joints = self.plant.perform_ik(endpoint_pos,
                                       distal_angles=-distal_angles)
        return np.mat(np.hstack([joints, np.zeros(4), 1]).reshape(-1, 1))

    def _test_premove_complete(self, ts):
        return ts >= self.premove_time

    def _test_hold_complete(self, ts):
        if self.target_index == 0:
            return True
        else:
            return ts >= self.hold_time

    def _test_trial_incomplete(self, ts):
        return (self.target_index < self.chain_length - 1) and (
            self.target_index != -1) and (self.tries < self.max_attempts)

    def _test_trial_restart(self, ts):
        return (self.target_index == -1) and (self.tries < self.max_attempts)

    @staticmethod
    def tentacle_multi_start_config(nblocks=100,
                                    ntargets=4,
                                    distance=8,
                                    startangle=45):
        elbow_angles = np.array([
            135, 180, 225
        ]) * np.pi / 180  # TODO make this a function argument!
        startangle = 45 * np.pi / 180
        n_configs_per_target = len(elbow_angles)
        target_angles = np.arange(startangle, startangle + (2 * np.pi),
                                  2 * np.pi / ntargets)
        targets = distance * np.vstack(
            [np.cos(target_angles), 0 * target_angles,
             np.sin(target_angles)])

        seq = []
        import random
        for i in range(nblocks):
            target_inds = np.tile(np.arange(ntargets),
                                  (n_configs_per_target, 1)).T.ravel()
            config_inds = np.tile(np.arange(n_configs_per_target), ntargets)

            sub_seq = []
            inds = np.arange(n_configs_per_target * ntargets)
            random.shuffle(inds)
            for k in inds:
                targ_ind = target_inds[k]
                config_ind = config_inds[k]

                seq_item = (np.vstack([targets[:, targ_ind],
                                       np.zeros(3)]), elbow_angles[config_ind])
                seq.append(seq_item)

        return seq
class CLDAManipulatedFB(BMIControlManipulatedFB):
    '''
    BMI task that periodically refits the decoder parameters based on intended
    movements toward the targets. Inherits directly from BMIControl. Can be made
    to automatically linearly decrease assist level over set time period, or
    to provide constant assistance by setting assist_level and assist_min equal.
    '''

    batch_time = traits.Float(80.0, desc='The length of the batch in seconds')
    half_life = traits.Tuple((120., 120.0), desc='Half life of the adaptation in seconds')
    decoder_sequence = traits.String('test', desc='signifier to group together sequences of decoders')
    #assist_min = traits.Float(0, desc="Assist level to end task at")
    #half_life_final = traits.Float(120.0, desc='Half life of the adaptation in seconds')
    half_life_decay_time = traits.Float(900.0, desc="Time to go from initial half life to final")


    def __init__(self, *args, **kwargs):
        super(CLDAManipulatedFB, self).__init__(*args, **kwargs)
        #self.assist_start = self.assist_level
        self.learn_flag = True

    def init(self):
        '''
        Secondary init function. Decoder has already been created by inclusion
        of the 'bmi' feature in the task. Create the 'learner' and 'updater'
        components of the CLDA algorithm
        '''
        # Add CLDA-specific data to save to HDF file 
        self.dtype.append(('half_life', 'f8', (1,)))

        super(CLDAManipulatedFB, self).init()

        self.batch_size = int(self.batch_time/self.decoder.binlen)
        self.create_learner()

        # Create the updater second b/c the update algorithm might need to force
        # a particular batch size for the learner
        self.create_updater()

        # Create the BMI system which combines the decoder, learner, and updater
        self.bmi_system = riglib.bmi.BMISystem(self.decoder, self.learner,
            self.updater)

        

    def create_learner(self):
        self.learner = clda.CursorGoalLearner(self.batch_size)

        # Start "learn flag" at True
        self.learn_flag = True
        homedir = os.getenv('HOME')
        f = open(os.path.join(homedir, 'learn_flag_file'), 'w')
        f.write('1')
        f.close()

    def create_updater(self):
        clda_input_queue = mp.Queue()
        clda_output_queue = mp.Queue()
        half_life_start, half_life_end = self.half_life
        self.updater = clda.KFSmoothbatch(clda_input_queue, clda_output_queue,self.batch_time, half_life_start)

    def update_learn_flag(self):
        # Tell the adaptive BMI when to learn (skip parts of the task where we
        # assume the subject is not trying to move toward the target)
        prev_learn_flag = self.learn_flag

        # Open file to read learn flag
        try:
            homedir = os.getenv('HOME')
            f = open(os.path.join(homedir, 'learn_flag_file'))
            new_learn_flag = bool(int(f.readline().rstrip('\n')))
        except:
            new_learn_flag = True

        if new_learn_flag and not prev_learn_flag:
            print "CLDA enabled"
        elif prev_learn_flag and not new_learn_flag:
            try:
                print "CLDA disabled after %d successful trials" % self.calc_n_rewards()
            except:
                print "CLDA disabled"
        self.learn_flag = new_learn_flag

    def call_decoder(self, spike_counts):
        half_life_start, half_life_end = self.half_life
        current_half_life = self._linear_change(half_life_start, half_life_end, self.half_life_decay_time)
        self.task_data['half_life'] = current_half_life

        # Get the decoder output
        decoder_output, uf =  self.bmi_system(spike_counts, self.target_location,
            self.state, task_data=self.task_data, assist_level=self.current_assist_level,
            target_radius=self.target_radius, speed=self.assist_speed*self.decoder.binlen, 
            learn_flag=self.learn_flag, half_life=current_half_life)
        if uf:
            #send msg to hdf file to indicate decoder update
            self.hdf.sendMsg("update_bmi")
        return decoder_output #self.decoder['hand_px', 'hand_py', 'hand_pz']
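
    # Hedged sketch (assumed behavior; _linear_change is defined in a parent
    # class and is not shown here): the current half life is typically a linear
    # interpolation over the decay time, roughly
    #   frac = min(elapsed / self.half_life_decay_time, 1.0)
    #   current_half_life = half_life_start + frac * (half_life_end - half_life_start)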

    def _cycle(self):
        self.update_learn_flag()
        super(CLDAManipulatedFB, self)._cycle()

    def cleanup(self, database, saveid, **kwargs):
        super(CLDAManipulatedFB, self).cleanup(database, saveid, **kwargs)
        import tempfile, pickle, traceback, datetime

        # Open a log file in case of error b/c errors not visible to console
        # at this point
        f = open(os.path.join(os.getenv('HOME'), 'Desktop/log'), 'a')
        f.write('Opening log file\n')
        
        # save out the parameter history and new decoder unless task was stopped
        # before 1st update
        try:
            f.write('# of parameter updates: %d\n' % len(self.bmi_system.param_hist))
            if len(self.bmi_system.param_hist) > 0:
                f.write('Starting to save parameter hist\n')
                tf = tempfile.NamedTemporaryFile()
                # Get the update history for C and Q matrices and save them
                #C, Q, m, sd, intended_kin, spike_counts = zip(*self.bmi_system.param_hist)
                #np.savez(tf, C=C, Q=Q, mean=m, std=sd, intended_kin=intended_kin, spike_counts=spike_counts)
                pickle.dump(self.bmi_system.param_hist, tf)
                tf.flush()
                # Add the parameter history file to the database entry for this
                # session
                database.save_data(tf.name, "bmi_params", saveid)
                f.write('Finished saving parameter hist\n')

                # Save the final state of the decoder as a new decoder
                tf2 = tempfile.NamedTemporaryFile(delete=False) 
                pickle.dump(self.decoder, tf2)
                tf2.flush()
                # create suffix for new decoder that has the sequence and the current day
                # and time. This suffix will be appended to the name of the
                # decoder that we started with and saved as a new decoder.
                now = datetime.datetime.now()
                decoder_name = self.decoder_sequence + '%02d%02d%02d%02d' % (now.month, now.day, now.hour, now.minute)
                database.save_bmi(decoder_name, saveid, tf2.name)
        except:
            traceback.print_exc(file=f)
        f.close()
class RatBMI(BMILoop, LogExperiment):
    status = dict(wait=dict(start_trial='feedback_on', stop=None),
                  feedback_on=dict(baseline_hit='periph_targets', stop=None),
                  periph_targets=dict(target_hit='check_reward',
                                      timeout='noise_burst',
                                      stop=None),
                  check_reward=dict(rewarded_target='reward',
                                    unrewarded_target='feedback_pause'),
                  feedback_pause=dict(end_feedback_pause='wait'),
                  reward=dict(reward_end='wait'),
                  noise_burst=dict(noise_burst_end='noise_burst_timeout'),
                  noise_burst_timeout=dict(noise_burst_timeout_end='wait'))

    #Flag for feedback on or not
    feedback = False
    prev_targ_hit = 't1'
    timeout_time = traits.Float(30.)
    noise_burst_time = traits.Float(3.)
    noise_burst_timeout_time = traits.Float(1.)
    reward_time = traits.Float(1., desc='reward time')
    #Frequency range:
    aud_freq_range = traits.Tuple((1000., 20000.))
    plant_type = traits.OptionsList(*plantlist,
                                    desc='',
                                    bmi3d_input_options=list(plantlist.keys()))

    #Time to average over:
    nsteps = traits.Float(10.)
    feedback_pause = traits.Float(3.)

    def __init__(self, *args, **kwargs):
        super(RatBMI, self).__init__(*args, **kwargs)

        if hasattr(self, 'decoder'):
            print(self.decoder)
        else:
            self.decoder = kwargs['decoder']
        dec_params = dict(nsteps=self.nsteps, freq_lim=self.aud_freq_range)
        for k, (key, val) in enumerate(dec_params.items()):
            print(key, val, self.decoder.filt.dec_params[key])
            assert self.decoder.filt.dec_params[key] == val
        self.decoder.filt.init_from_task(self.decoder.n_units, **dec_params)
        self.plant = plantlist[self.plant_type]

    def init(self, *args, **kwargs):
        self.add_dtype('cursor', 'f8', (2, ))
        self.add_dtype('freq', 'f8', (2, ))
        super(RatBMI, self).init()
        self.decoder.count_max = self.feature_accumulator.count_max

    def _cycle(self):
        self.rat_cursor = self.decoder.filt.get_mean()
        self.task_data['cursor'] = self.rat_cursor
        self.task_data['freq'] = self.decoder.filt.F
        self.decoder.cnt = self.feature_accumulator.count
        self.decoder.feedback = self.feedback
        super(RatBMI, self)._cycle()

    # def move_plant(self):
    #     if self.feature_accumulator.count == self.feature_accumulator.count_max:
    #         print 'self.plant.drive from task.py'
    #         self.plant.drive(self.decoder)
    def _start_wait(self):
        return True

    def _test_start_trial(self, ts):
        return True

    def _test_rewarded_target(self, ts):
        if self.prev_targ_hit == 't1':
            return False
        elif self.prev_targ_hit == 't2':
            return True

    def _test_unrewarded_target(self, ts):
        if self.prev_targ_hit == 't1':
            return True
        elif self.prev_targ_hit == 't2':
            return False

    def _start_feedback_pause(self):
        self.feedback = False

    def _test_end_feedback_pause(self, ts):
        return ts > self.feedback_pause

    def _start_reward(self):
        print('reward!')

    def _start_feedback_on(self):
        self.feedback = True

    def _test_baseline_hit(self, ts):
        if self.prev_targ_hit == 't1':
            #Must go below baseline:
            return self.rat_cursor <= self.decoder.filt.mid
        elif self.prev_targ_hit == 't2':
            #Must rise above baseline:
            return self.rat_cursor >= self.decoder.filt.mid
        else:
            return False

    def _test_target_hit(self, ts):
        if self.rat_cursor >= self.decoder.filt.t1:
            self.prev_targ_hit = 't1'
            self.feedback = False
            return True
        elif self.rat_cursor <= self.decoder.filt.t2:
            self.prev_targ_hit = 't2'
            self.feedback = False
            return True
        else:
            return False

    def _test_timeout(self, ts):
        return ts > self.timeout_time

    def _test_noise_burst_end(self, ts):
        return ts > self.noise_burst_time

    def _test_noise_burst_timeout_end(self, ts):
        return ts > self.noise_burst_timeout_time

    def _start_noise_burst(self):
        self.feedback = False
        self.plant.play_white_noise()

    def move_plant(self):
        super(RatBMI, self).move_plant()

    def get_current_assist_level(self):
        return 0.
Ejemplo n.º 17
0
class BMIControlExoEndpt(ExoAssist, BMIControlExoJointVel, PositionerTaskController):
    status = dict(
        go_to_origin = dict(microcontroller_done='wait', stop=None),
        wait = dict(start_trial='init_exo', stop=None),
        init_exo = dict(exo_ready='move_target', stop=None),
        move_target = dict(microcontroller_done='pause'),
        pause = dict(time_expired='reach', stop=None),
        reach = dict(force_applied='reward', new_target_set_remotely='move_target', skip='wait', stop=None),
        reward = dict(time_expired='wait', stop=None),
    )
    state = 'go_to_origin'
    pause_time = 2
    

    trial_end_states = ['reward']

    sequence_generators = ['exo_endpt_targets']

    min_force_on_target = traits.Float(1., desc='Force that needs to be applied, in Newtons')
    reward_time = traits.Float(3., desc='reward time for solenoid')
    config_tolerances_deg = traits.Tuple((10, 10, 10, 7.5, 10), desc='Per-joint tolerance, in degrees, for reaching a target configuration')


    def __init__(self, *args, **kwargs):
        super(BMIControlExoEndpt, self).__init__(*args, **kwargs)
        self.config_tolerances = np.deg2rad(np.array(self.config_tolerances_deg))
        self.config_tolerances[-1] = np.inf

        # fixed target location, for now
        self._gen_int_target_pos = np.array([ 27.3264, 47.383, -22.79])
        #36.4222245 ,  26.27462972, -11.38728596
        self.neutral_config = np.array([-0.64732247,  0.79,  0.19634043,  0.97628754, -0.02114062])

        self._gen_target_config = np.array([-0.50579373,  1.28357092,  0.66706522,  0.6, -0.02114062])
        self.current_assist_level = 1

    def init(self):
        self.add_dtype('starting_config', 'f8', (5,))
        super(BMIControlExoEndpt, self).init()

    def init_decoder_state(self):
        pass

    @staticmethod 
    def exo_endpt_targets(n_blocks=1):
        final_config = np.array([-0.50579373,  1.28357092,  0.66706522,  0.6, -0.02114062])
        configs = [final_config]
        trial_target_ls = []

        init_configs = [
            np.array([-0.64732247,  0.79,  0.19634043,  0.97628754, -0.02114062]),
            np.array([-0.64732247,  1.5,  0.19634043,  0.97628754, -0.02114062]),                
            np.array([-1.05,  0.79,  0.19634043,  0.97628754, -0.02114062]),
            np.array([-1.05,  0.79,  -0.07,  0.97628754, -0.02114062]),            
        ]

        for k in range(n_blocks):
            for config in init_configs:
                trial_target_ls.append(dict(target_config=final_config, init_config=config))
        return trial_target_ls        

    def get_target_BMI_state(self, *args):
        '''
        Run the goal calculator to determine the target state of the task
        '''
        if self.state in ['reach', 'reward']:
            target_config = self._gen_target_config
        elif hasattr(self, '_gen_init_config'):
            target_config = self._gen_init_config
        else: # before the wait state
            target_config = self.neutral_config
            
        target_state = np.hstack([target_config, np.zeros_like(target_config), np.zeros_like(target_config), 1])
        target_state = target_state.reshape(-1, 1)

        self.print_to_terminal(self.decoder.get_state())
        self.print_to_terminal(self.plant.joint_angles)
        self.print_to_terminal(target_state[0:5,0].ravel())
        self.print_to_terminal('')
        return target_state
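
    # Hedged note (shape inferred from the code above): the target state stacks
    # the 5-element target configuration with two 5-element blocks of zeros and
    # a trailing 1, giving a 16 x 1 column vector that is assumed to match the
    # decoder's state-space dimension.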

    def _test_exo_ready(self, *args, **kwargs):
        target_endpt = self.plant.kin_chain.endpoint_pos(self._gen_init_config)
        current_endpt = self.plant.kin_chain.endpoint_pos(self.plant.joint_angles)
        pos_diff = np.linalg.norm(current_endpt - target_endpt)
        joint_diff = np.abs(self.plant.joint_angles - self._gen_init_config)
        return (pos_diff < 3) or np.all(joint_diff < self.config_tolerances)

    def _cycle(self):
        if hasattr(self, '_gen_init_config'):
            starting_config = self._gen_init_config
        else: # before the wait state
            starting_config = self.neutral_config

        self.task_data['starting_config'] = starting_config
        super(BMIControlExoEndpt, self)._cycle()

    def _end_go_to_origin(self):
        steps_actuated = self.pos_uctrl_iface.end_continuous_move(stiff=True)

        self.loc = np.zeros(3)
        self.steps_from_origin = np.zeros(3)

    def _end_move_target(self):
        # send command to kill motors
        steps_actuated = self.pos_uctrl_iface.end_continuous_move(stiff=True)
        self._integrate_steps(steps_actuated, self.pos_uctrl_iface.motor_dir)

    def _test_force_applied(self, *args, **kwargs):
        return self.plant.force_N > self.min_force_on_target
Ejemplo n.º 18
0
class Window(LogExperiment):
    '''
    Generic stereo window 
    '''
    status = dict(draw=dict(stop=None))
    state = "draw"
    stop = False

    window_size = traits.Tuple((1920 * 2, 1080),
                               desc='window size, in pixels')
    # window_size = (1920*2, 1080)
    background = (0, 0, 0, 1)

    #Screen parameters, all in centimeters -- adjust for monkey
    fov = np.degrees(np.arctan(14.65 / (44.5 + 3))) * 2
    screen_dist = 44.5 + 3
    iod = 2.5  # intraocular distance

    show_environment = traits.Int(0)

    def __init__(self, *args, **kwargs):
        super(Window, self).__init__(*args, **kwargs)

        self.models = []
        self.world = None
        self.event = None

        # os.popen('sudo vbetool dpms on')

        if self.show_environment:
            self.add_model(Box())

    def set_os_params(self):
        os.environ['SDL_VIDEO_WINDOW_POS'] = config.display_start_pos
        os.environ['SDL_VIDEO_X11_WMCLASS'] = "monkey_experiment"

    def screen_init(self):
        self.set_os_params()
        pygame.init()

        pygame.display.gl_set_attribute(pygame.GL_DEPTH_SIZE, 24)
        flags = pygame.DOUBLEBUF | pygame.HWSURFACE | pygame.OPENGL | pygame.NOFRAME
        try:
            pygame.display.gl_set_attribute(pygame.GL_MULTISAMPLEBUFFERS, 1)
            self.surf = pygame.display.set_mode(self.window_size, flags)
        except:
            pygame.display.gl_set_attribute(pygame.GL_MULTISAMPLEBUFFERS, 0)
            self.surf = pygame.display.set_mode(self.window_size, flags)

        glEnable(GL_BLEND)
        glDepthFunc(GL_LESS)
        glEnable(GL_DEPTH_TEST)
        glEnable(GL_TEXTURE_2D)
        glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
        glClearColor(*self.background)
        glClearDepth(1.0)
        glDepthMask(GL_TRUE)

        self.renderer = self._get_renderer()

        #this effectively determines the modelview matrix
        self.world = Group(self.models)
        self.world.init()

        #up vector is always (0,0,1), why would I ever need to roll the camera?!
        self.set_eye((0, -self.screen_dist, 0), (0, 0))

    def _get_renderer(self):
        near = 1
        far = 1024
        return stereo.MirrorDisplay(self.window_size, self.fov, near, far,
                                    self.screen_dist, self.iod)

    def set_eye(self, pos, vec, reset=True):
        '''Set the eye's position and direction. Camera starts at (0,0,0), pointing towards positive y'''
        self.world.translate(pos[0], pos[2], pos[1], reset=True).rotate_x(-90)
        self.world.rotate_y(vec[0]).rotate_x(vec[1])

    def add_model(self, model):
        if self.world is None:
            #world doesn't exist yet, add the model to cache
            self.models.append(model)
        else:
            #We're already running, initialize the model and add it to the world
            model.init()
            self.world.add(model)

    def show_object(self, obj, show=False):
        '''
        Show or hide an object. This function is an abstraction so that tasks don't need to know about attach/detach
        '''
        if show:
            obj.attach()
        else:
            obj.detach()

    def draw_world(self):
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        self.renderer.draw(self.world)
        pygame.display.flip()
        self.renderer.draw_done()

    def _get_event(self):
        for e in pygame.event.get(pygame.KEYDOWN):
            return (e.key, e.type)

    def _start_None(self):
        pygame.display.quit()

    def _test_stop(self, ts):
        '''
        Stop the task if the escape key is pressed, or if the super _test_stop instructs a stop
        '''
        super_stop = super(Window, self)._test_stop(ts)
        from pygame import K_ESCAPE
        return super_stop or self.event is not None and self.event[
            0] == K_ESCAPE

    def requeue(self):
        self.renderer._queue_render(self.world)

    def _cycle(self):
        self.requeue()
        self.draw_world()
        super(Window, self)._cycle()
        self.event = self._get_event()
class MovementTraining(Window):
    status = dict(wait=dict(stop=None, move_start="movement"),
                  movement=dict(move_end="reward", move_stop="wait",
                                stop=None),
                  reward=dict(reward_end="wait"))
    log_exclude = set((("wait", "move_start"), ("movement", "move_stop")))

    #initial state
    state = "wait"

    path = [[0, 0, 0]]
    speed = 0
    frame_offset = 2
    over = 0
    inside = 0

    #settable traits
    movement_distance = traits.Float(
        1, desc="Minimum movement distance to trigger reward")
    speed_range = traits.Tuple(
        (20, 30), desc="Range of movement speed in cm/s to trigger reward")
    reward_time = traits.Float(14)

    #initialize
    def __init__(self, **kwargs):
        super(MovementTraining, self).__init__(**kwargs)
        self.cursor = Sphere(radius=.5, color=(.5, 0, .5, 1))
        self.add_model(self.cursor)

    def update_cursor(self):
        #get data from the hand marker (#14) on the motion tracker - take the average of all data points since the last poll
        pt = self.motiondata.get()
        if len(pt) > 0:
            pt = pt[:, 14, :]
            # NOTE!!! The marker on the hand was changed from #0 to #14 on
            # 5/19/13 after LED #0 broke. All data files saved before this date
            # have LED #0 controlling the cursor.
            pt = pt[~np.isnan(pt).any(1)]
        if len(pt) > 0:
            pt = pt.mean(0)
            self.path.append(pt)
            #ignore y direction
            t = pt * .25
            t[1] = 0
            #move cursor to marker location
            self.cursor.translate(*t[:3], reset=True)
        else:
            self.path.append(self.path[-1])
        if len(self.path) > self.frame_offset:
            self.path.pop(0)
            d = np.sqrt((self.path[-1][0] - self.path[0][0])**2 +
                        (self.path[-1][1] - self.path[0][1])**2 +
                        (self.path[-1][2] - self.path[0][2])**2)
            self.speed = d / (self.frame_offset / 60)
            if self.speed > self.speed_range[0]:
                self.over += 1
            if self.speed_range[0] < self.speed < self.speed_range[1]:
                self.inside += 1
        #write to screen
        self.draw_world()
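
        # Hedged worked example (using the 60 Hz rate assumed above): with
        # frame_offset = 2, the speed is the straight-line distance covered over
        # the last two frames divided by 2/60 s, so a 1 cm displacement over that
        # window gives speed = 1 / (2/60) = 30 cm/s.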

    def _start_wait(self):
        self.over = 0
        self.inside = 0

    def _while_wait(self):
        self.update_cursor()

    def _while_movement(self):
        self.update_cursor()

    def _while_reward(self):
        self.update_cursor()

    def _test_move_start(self, ts):
        return self.over > self.frame_offset

    def _test_move_end(self, ts):
        return ts > self.movement_distance / self.speed_range[0]

    def _test_move_stop(self, ts):
        return self.inside > self.frame_offset

    def _test_reward_end(self, ts):
        return ts > self.reward_time
class BMIResettingObstacles(BMIResetting):

    status = dict(
        wait=dict(start_trial="premove", stop=None),
        premove=dict(premove_complete="target"),
        target=dict(enter_target="hold",
                    timeout="timeout_penalty",
                    enter_obstacle="obstacle_penalty",
                    stop=None),
        hold=dict(leave_early="hold_penalty", hold_complete="targ_transition"),
        targ_transition=dict(trial_complete="reward",
                             trial_abort="wait",
                             trial_incomplete="target",
                             trial_restart="premove"),
        timeout_penalty=dict(timeout_penalty_end="targ_transition"),
        hold_penalty=dict(hold_penalty_end="targ_transition"),
        obstacle_penalty=dict(obstacle_penalty_end="targ_transition"),
        reward=dict(reward_end="wait"))

    sequence_generators = [
        'centerout_2D_discrete_w_obstacle', 'centerout_2D_discrete'
    ]
    obstacle_sizes = traits.Tuple((2, 3), desc='must match generator sizes!')
    is_bmi_seed = True

    def __init__(self, *args, **kwargs):
        super(BMIResettingObstacles, self).__init__(*args, **kwargs)
        self.add_obstacles()

    def add_obstacles(self):
        import target_graphics
        #Add obstacle
        self.obstacle_list = []
        self.obstacle_dict = {}
        for i in self.obstacle_sizes:
            obstacle = target_graphics.VirtualRectangularTarget(
                target_width=i,
                target_height=i,
                target_color=(0, 0, 1, .5),
                starting_pos=np.zeros(3))
            self.obstacle_list.append(obstacle)
            self.obstacle_dict[i] = len(self.obstacle_list) - 1
            for model in obstacle.graphics_models:
                self.add_model(model)

    def init(self, *args, **kwargs):
        self.add_dtype('obstacle_size', 'f8', (1, ))
        self.add_dtype('obstacle_location', 'f8', (3, ))
        super(BMIResettingObstacles, self).init(*args, **kwargs)

    def create_goal_calculator(self):
        self.goal_calculator = goal_calculators.Obs_Goal_Calc(self.decoder.ssm)

    def create_assister(self, *args, **kwargs):
        self.assister = OFCEndpointAssister()
        #self.assister = ObstacleAssist(self.decoder.ssm)

    def _start_wait(self):
        for obs in self.obstacle_list:
            obs.hide()
        super(BMIResetting, self)._start_wait()

    def _start_premove(self):
        super(BMIResettingObstacles, self)._start_premove()

    def _start_target(self):
        print('start target BMIRes')
        self.goal_calculator.clear()
        super(BMIResettingObstacles, self)._start_target()

    # def _start_target(self):
    #     super(BMIResettingObstacles, self)._start_target()

    def _parse_next_trial(self):
        self.targs = self.next_trial[0]
        #Width and height of obstacle
        self.obstacle_size = self.next_trial[1]
        self.obstacle = self.obstacle_list[self.obstacle_dict[
            self.obstacle_size]]
        self.obstacle_location = self.next_trial[2]

    def _start_target(self):
        super(BMIResettingObstacles, self)._start_target()
        self.obstacle.move_to_position(self.obstacle_location)
        self.obstacle.cube.color = (0., 0., 1., .5)
        self.obstacle.show()
        # print 'targ loc: ', self.target_location.astype(int)
        # print 'obstacle_location: ', self.obstacle_location.astype(int)
        # print 'self.targs: ', self.targs.astype(int)

    def _test_enter_obstacle(self, ts):
        cursor_pos = self.plant.get_endpoint_pos()
        centered_cursor_pos = np.abs(cursor_pos - self.obstacle_location)
        return np.all(centered_cursor_pos < self.obstacle_size / 2.)

    def _test_obstacle_penalty_end(self, ts):
        self.obstacle.cube.color = (1., 1., 0., .5)
        return ts >= self.timeout_penalty_time

    def _start_obstacle_penalty(self):
        #hide targets
        for target in self.targets:
            target.hide()

        self.tries += 1
        self.target_index = -1

    def _cycle(self):
        self.add_obstacle_data()
        super(BMIResettingObstacles, self)._cycle()

    def add_obstacle_data(self):
        self.task_data['obstacle_size'] = self.obstacle_size
        self.task_data['obstacle_location'] = self.obstacle_location

    @staticmethod
    def centerout_2D_discrete_w_obstacle(nblocks=100,
                                         ntargets=8,
                                         boundaries=(-18, 18, -12, 12),
                                         distance=10,
                                         obstacle_sizes=(2, 3)):
        '''

        Generates a sequence of 2D (x and z) target pairs with the first target
        always at the origin.

        Parameters
        ----------
        nblocks : int
            The number of blocks in the sequence; each block contains one
            presentation of every target direction for every obstacle size.
        ntargets : int
            Number of equally spaced peripheral target directions.
        boundaries : 4 element Tuple
            The limits of the allowed target locations (-x, x, -z, z)
        distance : float
            The distance in cm between the center and the peripheral target.
        obstacle_sizes : tuple of obstacle widths/heights to interleave

        Returns
        -------
        list of (targets, obstacle_size, obstacle_location) tuples, where targets
        is a 2 x 3 array with the origin first and the peripheral target second


        '''

        # Choose a random sequence of points on the edge of a circle of radius
        # "distance"

        theta = []
        for i in range(nblocks):
            temp_master = []
            for o in obstacle_sizes:
                angs = np.arange(0, 2 * np.pi, 2 * np.pi / ntargets)
                obs_sz = [o] * ntargets
                temp = np.vstack((angs, obs_sz)).T
                np.random.shuffle(temp)
                temp_master.append(temp)
            x = np.vstack((temp_master))
            np.random.shuffle(x)
            theta = theta + [x]
        theta = np.vstack(theta)

        x = distance * np.cos(theta[:, 0])
        y = np.zeros(len(theta[:, 0]))
        z = distance * np.sin(theta[:, 0])

        pairs = np.zeros([len(theta), 2, 3])
        pairs[:, 1, :] = np.vstack([x, y, z]).T

        obstacle_location = (pairs[:, 1, :] - pairs[:, 0, :]) * 0.5

        return list(zip(pairs, theta[:, 1], obstacle_location))
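
    # Hedged usage note (illustration only): each element of the returned list
    # unpacks in _parse_next_trial as
    #   targs, obstacle_size, obstacle_location = seq[i]
    # where targs is a 2 x 3 array (origin first, peripheral target second) and
    # the obstacle sits halfway between the two targets.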
Ejemplo n.º 21
0
class ScreenTargetCapture(TargetCapture, Window):
    """Concrete implementation of TargetCapture task where targets
    are acquired by "holding" a cursor in an on-screen target"""

    limit2d = 1

    sequence_generators = [
        'out_2D',
        'centerout_2D',
        'centeroutback_2D',
        'rand_target_chain_2D',
        'rand_target_chain_3D',
    ]

    hidden_traits = [
        'cursor_color', 'target_color', 'cursor_bounds', 'cursor_radius',
        'plant_hide_rate', 'starting_pos'
    ]

    is_bmi_seed = True

    # Runtime settable traits
    target_radius = traits.Float(2, desc="Radius of targets in cm")
    target_color = traits.OptionsList("yellow",
                                      *target_colors,
                                      desc="Color of the target",
                                      bmi3d_input_options=list(
                                          target_colors.keys()))
    plant_hide_rate = traits.Float(
        0.0,
        desc=
        'If the plant is visible, specifies a percentage of trials where it will be hidden'
    )
    plant_type = traits.OptionsList(*plantlist,
                                    bmi3d_input_options=list(plantlist.keys()))
    plant_visible = traits.Bool(
        True,
        desc='Specifies whether entire plant is displayed or just endpoint')
    cursor_radius = traits.Float(.5, desc='Radius of cursor in cm')
    cursor_color = traits.OptionsList("pink",
                                      *target_colors,
                                      desc='Color of cursor endpoint',
                                      bmi3d_input_options=list(
                                          target_colors.keys()))
    cursor_bounds = traits.Tuple(
        (-10., 10., 0., 0., -10., 10.),
        desc='(x min, x max, y min, y max, z min, z max)')
    starting_pos = traits.Tuple((5., 0., 5.),
                                desc='Where to initialize the cursor')

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # Initialize the plant
        if not hasattr(self, 'plant'):
            self.plant = plantlist[self.plant_type]
        self.plant.set_endpoint_pos(np.array(self.starting_pos))
        self.plant.set_bounds(np.array(self.cursor_bounds))
        self.plant.set_color(target_colors[self.cursor_color])
        self.plant.set_cursor_radius(self.cursor_radius)
        self.plant_vis_prev = True
        self.cursor_vis_prev = True

        # Add graphics models for the plant and targets to the window
        if hasattr(self.plant, 'graphics_models'):
            for model in self.plant.graphics_models:
                self.add_model(model)

        # Instantiate the targets
        instantiate_targets = kwargs.pop('instantiate_targets', True)
        if instantiate_targets:

            # Need two targets to have the ability for delayed holds
            target1 = VirtualCircularTarget(
                target_radius=self.target_radius,
                target_color=target_colors[self.target_color])
            target2 = VirtualCircularTarget(
                target_radius=self.target_radius,
                target_color=target_colors[self.target_color])

            self.targets = [target1, target2]

        # Declare any plant attributes which must be saved to the HDF file at the _cycle rate
        for attr in self.plant.hdf_attrs:
            self.add_dtype(*attr)

    def init(self):
        self.add_dtype('trial', 'u4', (1, ))
        self.add_dtype('plant_visible', '?', (1, ))
        super().init()

    def _cycle(self):
        '''
        Calls any update functions necessary and redraws screen
        '''
        self.move_effector()

        ## Run graphics commands to show/hide the plant if the visibility has changed
        self.update_plant_visibility()
        self.task_data['plant_visible'] = self.plant_visible

        ## Save plant status to HDF file
        plant_data = self.plant.get_data_to_save()
        for key in plant_data:
            self.task_data[key] = plant_data[key]

        # Update the trial index
        self.task_data['trial'] = self.calc_trial_num()

        super()._cycle()

    def move_effector(self):
        '''Move the end effector, if a robot or similar is being controlled'''
        pass

    def run(self):
        '''
        See experiment.Experiment.run for documentation.
        '''
        # Fire up the plant. For virtual/simulation plants, this does little/nothing.
        self.plant.start()

        # Include some cleanup in case the parent class has errors
        try:
            super().run()
        finally:
            self.plant.stop()

    ##### HELPER AND UPDATE FUNCTIONS ####
    def update_plant_visibility(self):
        ''' Update plant visibility'''
        if self.plant_visible != self.plant_vis_prev:
            self.plant_vis_prev = self.plant_visible
            self.plant.set_visibility(self.plant_visible)

    #### TEST FUNCTIONS ####
    def _test_enter_target(self, ts):
        '''
        Return true once the cursor is entirely contained within the target,
        i.e. the distance between the cursor center and the target center is at
        most (target_radius - cursor_radius). For example, with a 2 cm target
        and a 0.5 cm cursor, the cursor center must come within 1.5 cm of the
        target center.
        '''
        cursor_pos = self.plant.get_endpoint_pos()
        d = np.linalg.norm(cursor_pos - self.targs[self.target_index])
        return d <= (self.target_radius - self.cursor_radius)

    def _test_leave_target(self, ts):
        '''
        Return true once the cursor moves outside the exit radius of the current
        target, where the exit radius is (target_radius - cursor_radius).
        '''
        cursor_pos = self.plant.get_endpoint_pos()
        d = np.linalg.norm(cursor_pos - self.targs[self.target_index])
        rad = self.target_radius - self.cursor_radius
        return d > rad

    #### STATE FUNCTIONS ####
    def _start_wait(self):
        super()._start_wait()

        if self.calc_trial_num() == 0:

            # Instantiate the targets here so they don't show up in any states that might come before "wait"
            for target in self.targets:
                for model in target.graphics_models:
                    self.add_model(model)
                    target.hide()

    def _start_target(self):
        super()._start_target()

        # Show target if it is hidden (this is the first target, or previous state was a penalty)
        target = self.targets[self.target_index % 2]
        if self.target_index == 0:
            target.move_to_position(self.targs[self.target_index])
            target.show()
            self.sync_event('TARGET_ON', self.gen_indices[self.target_index])

    def _start_hold(self):
        super()._start_hold()
        self.sync_event('CURSOR_ENTER_TARGET',
                        self.gen_indices[self.target_index])

    def _start_delay(self):
        super()._start_delay()

        # Make next target visible unless this is the final target in the trial
        next_idx = (self.target_index + 1)
        if next_idx < self.chain_length:
            target = self.targets[next_idx % 2]
            target.move_to_position(self.targs[next_idx])
            target.show()
            self.sync_event('TARGET_ON', self.gen_indices[next_idx])
        else:
            # This delay state should only last 1 cycle, don't sync anything
            pass

    def _start_targ_transition(self):
        super()._start_targ_transition()
        if self.target_index == -1:

            # Came from a penalty state
            pass
        elif self.target_index + 1 < self.chain_length:

            # Hide the current target if there are more
            self.targets[self.target_index % 2].hide()
            self.sync_event('TARGET_OFF', self.gen_indices[self.target_index])

    def _start_hold_penalty(self):
        self.sync_event('HOLD_PENALTY')
        super()._start_hold_penalty()
        # Hide targets
        for target in self.targets:
            target.hide()
            target.reset()

    def _end_hold_penalty(self):
        super()._end_hold_penalty()
        self.sync_event('TRIAL_END')

    def _start_delay_penalty(self):
        self.sync_event('DELAY_PENALTY')
        super()._start_delay_penalty()
        # Hide targets
        for target in self.targets:
            target.hide()
            target.reset()

    def _end_delay_penalty(self):
        super()._end_delay_penalty()
        self.sync_event('TRIAL_END')

    def _start_timeout_penalty(self):
        self.sync_event('TIMEOUT_PENALTY')
        super()._start_timeout_penalty()
        # Hide targets
        for target in self.targets:
            target.hide()
            target.reset()

    def _end_timeout_penalty(self):
        super()._end_timeout_penalty()
        self.sync_event('TRIAL_END')

    def _start_reward(self):
        self.targets[self.target_index % 2].cue_trial_end_success()
        self.sync_event('REWARD')

    def _end_reward(self):
        super()._end_reward()
        self.sync_event('TRIAL_END')

        # Hide targets
        for target in self.targets:
            target.hide()
            target.reset()

    #### Generator functions ####
    '''
    Note to self: because of the way these get into the database, the parameters don't
    have human-readable descriptions like the other traits. So it is useful to define
    the descriptions elsewhere, in models.py under Generator.to_json().

    Ideally someone should take the time to reimplement generators as their own classes
    rather than static methods that belong to a task.
    '''

    @staticmethod
    def static(pos=(0, 0, 0), ntrials=0):
        '''Single location, finite (ntrials!=0) or infinite (ntrials==0)'''
        if ntrials == 0:
            while True:
                yield [0], np.array(pos)
        else:
            for _ in range(ntrials):
                yield [0], np.array(pos)

    @staticmethod
    def out_2D(nblocks=100, ntargets=8, distance=10, origin=(0, 0, 0)):
        '''
        Generates a sequence of 2D (x and z) targets at a given distance from the origin

        Parameters
        ----------
        nblocks : int
            The number of blocks in the sequence; each block contains every
            target once, in a shuffled order.
        ntargets : int
            The number of equally spaced targets
        distance : float
            The distance in cm between the center and peripheral targets.
        origin : 3-tuple
            Location of the central target around which the peripheral targets are arranged

        Returns
        -------
        Yields nblocks*ntargets tuples of (target index, [1 x 3] target coordinates)

        '''
        rng = np.random.default_rng()
        for _ in range(nblocks):
            order = np.arange(ntargets) + 1  # target indices, starting from 1
            rng.shuffle(order)
            for t in range(ntargets):
                idx = order[t]
                theta = 2 * np.pi * idx / ntargets
                pos = np.array(
                    [distance * np.cos(theta), 0, distance * np.sin(theta)]).T
                yield [idx], [pos + origin]
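
    # Illustrative sketch only (not part of the original task): assuming numpy is
    # imported as np and this class is defined as above, out_2D yields one
    # ([target index], [target position]) pair per trial, with each position on a
    # circle of radius `distance` around `origin`:
    #
    #     gen = ScreenTargetCapture.out_2D(nblocks=1, ntargets=8, distance=10)
    #     for idx, pos in gen:
    #         assert 1 <= idx[0] <= 8
    #         assert np.isclose(np.linalg.norm(pos[0]), 10)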

    @staticmethod
    def centerout_2D(nblocks=100, ntargets=8, distance=10, origin=(0, 0, 0)):
        '''
        Pairs of central targets at the origin and peripheral targets centered around the origin

        Returns
        -------
        Yields nblocks*ntargets tuples of ([2 x 1] target indices, [2 x 3] target coordinates)
        '''
        gen = ScreenTargetCapture.out_2D(nblocks, ntargets, distance, origin)
        for _ in range(nblocks * ntargets):
            idx, pos = next(gen)
            targs = np.zeros([2, 3]) + origin
            targs[1, :] = pos[0]
            indices = np.zeros([2, 1])
            indices[1] = idx
            yield indices, targs
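
    # Illustrative sketch only (not part of the original task): centerout_2D yields
    # one (indices, targs) tuple per trial, where targs is a [2 x 3] array whose
    # row 0 is the center target at `origin` and row 1 is the peripheral target:
    #
    #     indices, targs = next(ScreenTargetCapture.centerout_2D(nblocks=1, ntargets=8))
    #     assert np.allclose(targs[0], (0, 0, 0))          # center target at the origin
    #     assert np.isclose(np.linalg.norm(targs[1]), 10)  # peripheral target 10 cm out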

    @staticmethod
    def centeroutback_2D(nblocks=100,
                         ntargets=8,
                         distance=10,
                         origin=(0, 0, 0)):
        '''
        Triplets of central targets, peripheral targets, and central targets

        Returns
        -------
        Yields nblocks*ntargets tuples of ([3 x 1] target indices, [3 x 3] target coordinates)
        '''
        gen = ScreenTargetCapture.out_2D(nblocks, ntargets, distance, origin)
        for _ in range(nblocks * ntargets):
            idx, pos = next(gen)
            targs = np.zeros([3, 3]) + origin
            targs[1, :] = pos[0]
            indices = np.zeros([3, 1])
            indices[1] = idx
            yield indices, targs

    @staticmethod
    def rand_target_chain_2D(ntrials=100,
                             chain_length=1,
                             boundaries=(-12, 12, -12, 12)):
        '''
        Generates a sequence of 2D (x and z) target chains within the given boundaries.

        Parameters
        ----------
        ntrials : int
            The number of target chains in the sequence.
        chain_length : int
            The number of targets in each chain
        boundaries: 4 element Tuple
            The limits of the allowed target locations (-x, x, -z, z)

        Returns
        -------
        Yields ntrials tuples of (target indices, [chain_length x 3] array of target coordinates)
        '''
        rng = np.random.default_rng()
        idx = 0
        for t in range(ntrials):

            # Choose a random sequence of points within the boundaries
            pts = rng.uniform(size=(chain_length, 3)) * (
                (boundaries[1] - boundaries[0]), 0,
                (boundaries[3] - boundaries[2]))
            pts = pts + (boundaries[0], 0, boundaries[2])
            yield idx + np.arange(chain_length), pts
            idx += chain_length
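
    # Bounds-check sketch (illustrative only, not part of the original task):
    # rand_target_chain_2D rescales uniform [0, 1) samples into the (x, z)
    # boundaries while pinning y to 0, so with the default boundaries every
    # generated point satisfies:
    #
    #     for idx, pts in ScreenTargetCapture.rand_target_chain_2D(ntrials=10, chain_length=2):
    #         assert np.all((pts[:, 0] >= -12) & (pts[:, 0] < 12))   # x within bounds
    #         assert np.all(pts[:, 1] == 0)                          # y pinned to 0
    #         assert np.all((pts[:, 2] >= -12) & (pts[:, 2] < 12))   # z within bounds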

    @staticmethod
    def rand_target_chain_3D(ntrials=100,
                             chain_length=1,
                             boundaries=(-12, 12, -10, 10, -12, 12)):
        '''
        Generates a sequence of 3D target chains within the given boundaries.

        Parameters
        ----------
        ntrials : int
            The number of target chains in the sequence.
        chain_length : int
            The number of targets in each chain
        boundaries: 6 element Tuple
            The limits of the allowed target locations (-x, x, -y, y, -z, z)

        Returns
        -------
        Yields ntrials tuples of (target indices, [chain_length x 3] array of target coordinates)
        '''
        rng = np.random.default_rng()
        idx = 0
        for t in range(ntrials):

            # Choose a random sequence of points within the boundaries
            pts = rng.uniform(size=(chain_length, 3)) * (
                (boundaries[1] - boundaries[0]),
                (boundaries[3] - boundaries[2]),
                (boundaries[5] - boundaries[4]))
            pts = pts + (boundaries[0], boundaries[2], boundaries[4])
            yield idx + np.arange(chain_length), pts
            idx += chain_length