Example #1
class SimCLDAControlMultiDispl2D_PPF(SimTime, Autostart, WindowDispl2D,
                                     SimCosineTunedPointProc,
                                     SimPPFDecoderCursorShuffled,
                                     cursor_clda_tasks.CLDAControlPPFContAdapt
                                     ):
    win_res = (250, 140)
    tau = traits.Float(2.7, desc="Magic parameter for speed of OFC.")
    param_noise_scale = traits.Float(1.0, desc="Scale factor for the assumed parameter noise in the PPF updater")
    half_life = (0, 0)
    half_life_time = 1

    def __init__(self, *args, **kwargs):
        from riglib.bmi.state_space_models import StateSpaceEndptVel2D
        ssm = StateSpaceEndptVel2D()
        A, B, W = ssm.get_ssm_matrices(update_rate=1. / 180)
        Q = np.mat(np.diag([1., 1, 1, 0, 0, 0, 0]))
        R = 100 * np.mat(np.diag([1., 1., 1.]))
        self.fb_ctrl = LQRController(A, B, Q, R)

        self.ssm = ssm

        super(SimCLDAControlMultiDispl2D_PPF, self).__init__(*args, **kwargs)
        self.batch_time = 1. / 10  # TODO: 10 Hz running seems to be hardcoded somewhere
        self.assist_level = 0., 0.
        self.assist_level_time = 120.
        self.last_get_spike_counts_time = -1. / 60
        self.learn_flag = True
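
The `LQRController(A, B, Q, R)` built above is standard infinite-horizon LQR machinery. As a minimal sketch of what such a gain computation involves (the function below is illustrative, not riglib's `feedback_controllers` implementation), the discrete-time Riccati recursion can be iterated to a fixed point:

import numpy as np

def dlqr_gain(A, B, Q, R, n_iter=1000, tol=1e-10):
    """Iterate the discrete-time Riccati recursion to convergence and
    return the state-feedback gain F, so that u_t = -F x_t."""
    A, B, Q, R = map(np.asarray, (A, B, Q, R))
    P = Q
    for _ in range(n_iter):
        F = np.linalg.solve(R + B.T @ P @ B, B.T @ P @ A)
        P_next = Q + A.T @ P @ (A - B @ F)
        if np.abs(P_next - P).max() < tol:
            break
        P = P_next
    return np.linalg.solve(R + B.T @ P @ B, B.T @ P @ A)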
Example #2
class ButtonTask(LogExperiment):
    side = traits.String("left",
                         desc='Use "left" for one side, "right" for the other')
    reward_time = traits.Float(5, desc='Amount of reward (in seconds)')
    penalty_time = traits.Float(5, desc='Amount of penalty (in seconds)')

    status = dict(
        left=dict(left_correct="reward", left_incorrect="penalty", stop=None),
        right=dict(right_correct="reward",
                   right_incorrect="penalty",
                   stop=None),
        reward=dict(post_reward="picktrial"),
        penalty=dict(post_penalty="picktrial"),
    )

    state = "picktrial"

    def __init__(self, **kwargs):
        from riglib import button
        super(ButtonTask, self).__init__(**kwargs)
        self.button = button.Button()

    def _start_picktrial(self):
        self.set_state(self.side)

    def _get_event(self):
        if self.button is not None:
            return self.button.pressed()
        return None

    def _while_left(self):
        self.event = self._get_event()

    def _while_right(self):
        self.event = self._get_event()

    def _test_left_correct(self, ts):
        return self.event is not None and self.event in [1, 2]

    def _test_left_incorrect(self, ts):
        return self.event is not None and self.event in [8, 4]

    def _test_right_correct(self, ts):
        return self.event is not None and self.event in [8, 4]

    def _test_right_incorrect(self, ts):
        return self.event is not None and self.event in [1, 2]

    def _test_post_reward(self, ts):
        return ts > self.reward_time

    def _test_post_penalty(self, ts):
        return ts > self.penalty_time

    def _test_both_correct(self, ts):
        return self.event is not None

    def _start_None(self):
        pass
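
`ButtonTask` only declares states and tests; the `LogExperiment` event loop does the dispatch. The convention in this codebase is that `status` maps each state to `{event: next_state}`, and the runtime calls `_start_<state>` on entry, `_while_<state>` every cycle, and `_test_<event>(ts)` with the time spent in the current state. A toy dispatcher illustrating that convention (an assumption-level sketch, not the actual riglib loop):

import time

class MiniFSM(object):
    """Toy event loop for the status/_test_*/_start_*/_while_* convention."""
    status = {}     # state name -> {event name: next state (None = stop)}
    state = None    # initial state name

    def run(self):
        t0 = time.time()
        getattr(self, '_start_%s' % self.state, lambda: None)()
        while self.state is not None:
            ts = time.time() - t0
            getattr(self, '_while_%s' % self.state, lambda: None)()
            for event, next_state in self.status[self.state].items():
                if getattr(self, '_test_%s' % event, lambda ts: False)(ts):
                    self.state = next_state
                    t0 = time.time()
                    # '%s' % None gives 'None', which is why ButtonTask
                    # above can define a _start_None hook for the stop event
                    getattr(self, '_start_%s' % next_state, lambda: None)()
                    break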
class FixationStart(CalibratedEyeData):
    '''Triggers the start_trial event whenever fixation exceeds *fixation_length*'''
    fixation_length = traits.Float(
        2., desc="Length of fixation required to start the task")
    fixation_dist = traits.Float(
        50., desc="Distance from center that is considered a broken fixation")

    def __init__(self, *args, **kwargs):
        '''Register the fixation_break event on the wait state and exclude it from the state log.'''
        super(FixationStart, self).__init__(*args, **kwargs)
        self.status['wait']['fixation_break'] = "wait"
        self.log_exclude.add(("wait", "fixation_break"))

    def _start_wait(self):
        '''Flush any queued eye data before starting the wait state.'''
        self.eyedata.get()
        super(FixationStart, self)._start_wait()

    def _test_fixation_break(self, ts):
        '''Return True if any buffered eye sample is farther than fixation_dist from the center.'''
        return (np.sqrt(
            (self.eyedata.get()**2).sum(1)) > self.fixation_dist).any()

    def _test_start_trial(self, ts):
        '''Return True once fixation has been held for fixation_length seconds.'''
        return ts > self.fixation_length
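
The fixation-break test reduces to a per-sample Euclidean norm over the buffered eye samples. Assuming `eyedata.get()` returns an (n, 2) array of gaze coordinates (an assumption; the actual shape comes from `CalibratedEyeData`), the check behaves like:

import numpy as np

eye = np.array([[1., 2.],        # near the center: norm ~2.24
                [30., 45.]])     # norm ~54.08, past fixation_dist = 50
dist = np.sqrt((eye ** 2).sum(1))    # distance of each sample from center
print((dist > 50.).any())            # True -> fixation_break fires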
Example #4
class Conditions(Sequence):

    status = dict(
        wait=dict(start_trial="trial"),
        trial=dict(end_trial="wait", stoppable=False, end_state=True),
    )
    
    wait_time = traits.Float(5.0, desc="Inter-trial interval (s)")
    trial_time = traits.Float(1.0, desc="Trial duration (s)")
    sequence_generators = ['null_sequence']

    def init(self):
        self.trial_dtype = np.dtype([('trial', 'u4'), ('index', 'u4')])
        super().init()

    def _parse_next_trial(self):
        self.trial_index = self.next_trial

        # Send record of trial to sinks
        self.trial_record['trial'] = self.calc_trial_num()
        self.trial_record['index'] = self.trial_index
        self.sinks.send("trials", self.trial_record)

    def _test_start_trial(self, ts):
        return ts > self.wait_time and not self.pause
    
    def _test_end_trial(self, ts):
        return ts > self.trial_time

    def _start_trial(self):
        self.sync_event('TRIAL_START', self.trial_index)

    def _end_trial(self):
        self.sync_event('TRIAL_END')

    @staticmethod
    def gen_random_conditions(nreps, *args, replace=False):
        '''Generate a random sequence of all combinations of the given arguments.'''
        unique = list(itertools.product(*args))
        conds = np.random.choice(nreps*len(unique), nreps*len(unique), replace=replace)
        seq = [[i % len(unique)] + list(unique[i % len(unique)]) for i in conds] # list of [index, arg1, arg2, ..., argn]
        return tuple(zip(*seq))

    @staticmethod
    def gen_conditions(nreps, *args, ascend=True):
        '''Generate an ordered sequence of all combinations of the given arguments.'''
        unique = list(itertools.product(*args))
        conds = np.tile(range(len(unique)), nreps)
        if not ascend: # descending
            conds = np.flipud(conds)
        seq = [[i % len(unique)] + list(unique[i % len(unique)]) for i in conds] # list of [index, arg1, arg2, ..., argn]
        return tuple(zip(*seq))

    @staticmethod
    def null_sequence(ntrials=100):
        return [0 for _ in range(ntrials)]
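
Both generators return the trial list transposed: a tuple of condition indices followed by one tuple per argument. For example, with two repetitions of two two-valued factors (values chosen here purely for illustration):

idx, first, second = Conditions.gen_conditions(2, [0, 1], ['a', 'b'])
# idx    == (0, 1, 2, 3, 0, 1, 2, 3)   condition index for each trial
# first  == (0, 0, 1, 1, 0, 0, 1, 1)   first factor per trial
# second == ('a', 'b', 'a', 'b', 'a', 'b', 'a', 'b')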
class BMIControlCursorElip(BMIControlMulti):
    w_aspect_ratio = traits.Float(2., desc="aspect ratio for W matrix")
    w_zero_scale = traits.Float(0.5, desc="scale for the W matrix at zero speed")
    v_max = traits.Float(20, desc="maximum velocity used in the W matrix scaling")

    def init(self):
        super(BMIControlCursorElip, self).init()
        self.decoder.filt.w_aspect_ratio = self.w_aspect_ratio
        self.decoder.filt.w_zero_scale = self.w_zero_scale
        self.decoder.filt.v_max = self.v_max
class BMIControlCursorHold(BMIControlMulti):
    '''
    A cursor BMI task where reward is proportional to length of hold
    '''
    _target_color = (0, 0, 1, .5)
    status = dict(wait=dict(start_trial="target", stop=None),
                  target=dict(enter_target="hold",
                              timeout="timeout_penalty",
                              stop=None),
                  hold=dict(hold_complete="targ_transition"),
                  targ_transition=dict(trial_complete="reward",
                                       trial_abort="wait",
                                       trial_incomplete="target"),
                  timeout_penalty=dict(timeout_penalty_end="targ_transition"),
                  reward=dict(reward_end="wait"))
    reward_mult_factor = traits.Float(
        1.0, desc="reward time = reward_mult_factor * hold_time")
    max_hold = traits.Float(2.0, desc="max time for rewarded hold")

    def __init__(self, *args, **kwargs):
        super(BMIControlCursorHold, self).__init__(*args, **kwargs)
        self.hold_start_time = np.nan

    def init(self):
        self.add_dtype('reward_time', 'f8', (1, ))
        super(BMIControlCursorHold, self).init()

    def _cycle(self):
        self.task_data['reward_time'] = self.reward_time
        super(BMIControlCursorHold, self)._cycle()

    def _start_hold(self):
        self.hold_start_time = self.get_time()

    def _test_hold_complete(self, ts):
        cursor_pos = self.plant.get_endpoint_pos()
        d = np.linalg.norm(cursor_pos - self.target_location)
        rad = self.target_radius - self.cursor_radius

        outside_targ = d > rad
        compl = (ts > self.max_hold) or outside_targ
        if compl:
            self.reward_time += ts  #(ts - self.hold_start_time)
        return compl

    def _test_trial_abort(self, ts):
        abort = super(BMIControlCursorHold, self)._test_trial_abort(ts)
        if abort:
            self.reward_time = 0
        return abort

    def _start_wait(self):
        self.reward_time = 0
        super(BMIControlCursorHold, self)._start_wait()
Example #7
class JoystickDrivenCursorOPSBiased(JoystickDrivenCursorOPS):
    bias_angle = traits.Float(0,
                              desc="Angle to bias cursor velocity, in degrees")
    bias_gain = traits.Float(
        0, desc="Gain of directional velocity bias in cm/sec")

    def load_decoder(self):
        super(JoystickDrivenCursorOPSBiased, self).load_decoder()
        assert isinstance(self.decoder, kfdecoder.KFDecoder)
        self.decoder.filt.A[3, -1] += self.bias_gain * np.cos(
            self.bias_angle * np.pi / 180)
        self.decoder.filt.A[5, -1] += self.bias_gain * np.sin(
            self.bias_angle * np.pi / 180)
        print(self.decoder.filt.A)
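
The bias adds a constant drift to the velocity rows of the Kalman filter's state-transition matrix; in this state layout, rows 3 and 5 are the x- and z-velocities and the last column is the constant offset state (that indexing is inferred from the code above, not documented here). Worked numbers for the trigonometry:

import numpy as np

bias_gain, bias_angle = 2.0, 30.0                # illustrative values: cm/s, degrees
dx = bias_gain * np.cos(np.deg2rad(bias_angle))  # ~1.732, added to A[3, -1] (x velocity)
dz = bias_gain * np.sin(np.deg2rad(bias_angle))  # 1.0, added to A[5, -1] (z velocity)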
class CLDAControlPPFContAdapt(CLDAControlMulti):
    tau = traits.Float(2.7, desc="Magic parameter for speed of OFC.")
    param_noise_scale = traits.Float(1.0, desc="Scale factor for the assumed parameter noise in the PPF updater")
    exclude_parent_traits = ['half_life', 'half_life_decay_time', 'batch_time']

    ordered_traits = [
        'session_length', 'assist_level', 'assist_time', 'tau',
        'param_noise_scale'
    ]

    def create_learner(self):
        self.learn_flag = True

        kwargs = dict()
        dt = kwargs.pop('dt', 1. / 180)
        use_tau_unNat = self.tau
        print("learner cost fn param: %g" % use_tau_unNat)
        tau_scale = 28 * use_tau_unNat / 1000
        bin_num_ms = (dt / 0.001)
        w_r = 3 * tau_scale**2 / 2 * (bin_num_ms)**2 * 26.61

        I = np.eye(3)
        zero_col = np.zeros([3, 1])
        zero_row = np.zeros([1, 3])
        zero = np.zeros([1, 1])
        one = np.ones([1, 1])
        A = np.bmat([[I, dt * I, zero_col], [0 * I, 0 * I, zero_col],
                     [zero_row, zero_row, one]])
        B = np.bmat([[0 * I], [dt / 1e-3 * I], [zero_row]])
        Q = np.mat(np.diag([1., 1, 1, 0, 0, 0, 0]))
        R = np.mat(np.diag([w_r, w_r, w_r]))

        F = feedback_controllers.LQRController.dlqr(A, B, Q, R)
        F_dict = dict(target=F, hold=F)

        fb_ctrl = feedback_controllers.MultiModalLFC(A=A, B=B, F_dict=F_dict)

        batch_size = 1

        self.learner = clda.OFCLearner(batch_size, A, B, F_dict)

        # Tell BMISystem that this learner wants the most recent output
        # of the decoder rather than the second most recent, to match MATLAB
        self.learner.input_state_index = 0

    def create_updater(self):
        self.updater = clda.PPFContinuousBayesianUpdater(
            self.decoder, param_noise_scale=self.param_noise_scale)
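
The cost weight `w_r` computed in `create_learner` can be checked numerically. With the default `tau = 2.7` and the 180 Hz update rate:

tau = 2.7
dt = 1. / 180
tau_scale = 28 * tau / 1000      # 0.0756
bin_num_ms = dt / 0.001          # ~5.556 ms per update
w_r = 3 * tau_scale ** 2 / 2 * bin_num_ms ** 2 * 26.61
print(w_r)                       # ~7.04, the diagonal weight in R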
class BMIControlManipulatedFB(bmimultitasks.BMIControlMulti):

    feedback_rate = traits.Float(60, desc="Rate in hz that cursor position is updated on screen (best if factor of 60)")
    task_update_rate = traits.Float(60, desc="Rate in hz that decoded cursor position is updated within task (best if factor of 60)")
    ordered_traits = ['session_length', 'assist_level', 'assist_time', 'feedback_rate', 'task_update_rate']

    def __init__(self, *args, **kwargs):
        super(BMIControlManipulatedFB, self).__init__(*args, **kwargs)
        self.visible_cursor = Sphere(radius=self.cursor_radius, color=(1,1,1,1))
        self.add_model(self.visible_cursor)
        self.cursor_visible = True

    def init(self):
        self.dtype.append(('visible_cursor','f8',3))
        super(BMIControlManipulatedFB, self).init()
        
        self.feedback_num = int(60.0/self.feedback_rate)
        self.task_update_num = int(60.0/self.task_update_rate)
        self.loopcount = 0

    def update_cursor(self):
        ''' Update the cursor's location and visibility status.'''
        pt = self.get_cursor_location()
        prev = self.cursor_visible
        self.cursor_visible = False
        if prev != self.cursor_visible:
            self.show_object(self.cursor, show=False) #self.cursor.detach()
            self.requeue()
        # update the "real" cursor location only at the specified task update rate
        if self.loopcount % self.task_update_num == 0:
            if pt is not None:
                self.move_cursor(pt)
        # update the visible cursor location only at the specified feedback rate
        if self.loopcount % self.feedback_num == 0:
            loc = self.cursor.xfm.move
            self.visible_cursor.translate(*loc, reset=True)

    def _cycle(self):
        ''' Overwriting parent methods since this one works differently'''
        self.update_assist_level()
        self.task_data['assist_level'] = self.current_assist_level
        self.update_cursor()
        self.task_data['cursor'] = self.cursor.xfm.move.copy()
        self.task_data['target'] = self.target_location.copy()
        self.task_data['target_index'] = self.target_index
        self.task_data['visible_cursor'] = self.visible_cursor.xfm.move.copy()
        self.loopcount += 1
        #write to screen
        self.draw_world()
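
Both rates work by decimating the 60 Hz event loop: a rate of r Hz updates on every `int(60 / r)`-th cycle, which is why the trait descriptions ask for factors of 60. For example (values illustrative):

feedback_rate = 20.0                         # Hz; a factor of 60
feedback_num = int(60.0 / feedback_rate)     # update every 3rd cycle
updated = [n for n in range(10) if n % feedback_num == 0]
print(updated)                               # [0, 3, 6, 9]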
class FixationTraining(Window):
    status = dict(wait=dict(start_trial="reward", stop=None),
                  reward=dict(reward_end="wait"))

    #initial state
    state = "wait"

    #settable traits
    reward_time = traits.Float(.5, desc="Length of juice reward")

    #initialize and create fixation point object
    def __init__(self, **kwargs):
        super(FixationTraining, self).__init__(**kwargs)
        self.fixation_point = Sphere(radius=.1, color=(1, 0, 0, 1))
        #keep fixation point hidden for now
        #self.add_model(self.fixation_point)

    def _get_renderer(self):
        return stereo.MirrorDisplay(self.window_size, self.fov, 1, 1024,
                                    self.screen_dist, self.iod)

    def _test_reward_end(self, ts):
        return ts > self.reward_time

    def _while_wait(self):
        self.draw_world()

    def _while_reward(self):
        self.draw_world()
Example #11
    def test_norm_trait(self):
        from db.tracker import json_param
        from riglib.experiment import traits

        t = traits.Float(1, desc='test trait')
        t1 = json_param.norm_trait(t, 1.0)
        self.assertEqual(t1, 1.0)
class CLDAControlMulti(BMIControlMulti, LinearlyDecreasingHalfLife):
    '''
    BMI task that periodically refits the decoder parameters based on intended
    movements toward the targets. Inherits directly from BMIControl. Can be made
    to automatically linearly decrease assist level over set time period, or
    to provide constant assistance by setting assist_level and assist_min equal.
    '''

    batch_time = traits.Float(80.0, desc='The length of the batch in seconds')
    decoder_sequence = traits.String(
        'test', desc='signifier to group together sequences of decoders')

    ordered_traits = [
        'session_length', 'assist_level', 'assist_level_time', 'batch_time',
        'half_life', 'half_life_time'
    ]

    def __init__(self, *args, **kwargs):
        super(CLDAControlMulti, self).__init__(*args, **kwargs)
        self.learn_flag = True

    def create_learner(self):
        self.batch_size = int(self.batch_time / self.decoder.binlen)
        self.learner = CursorGoalLearner2(self.batch_size)
        self.learn_flag = True

    def create_updater(self):
        half_life_start, half_life_end = self.half_life
        self.updater = clda.KFSmoothbatch(self.batch_time, half_life_start)

    def call_decoder(self, *args, **kwargs):
        kwargs['half_life'] = self.current_half_life
        return super(CLDAControlMulti, self).call_decoder(*args, **kwargs)
Example #13
class RewardSystem(traits.HasTraits):
    '''
    Feature for the Crist solenoid reward system
    '''
    trials_per_reward = traits.Float(
        1, desc='Number of successful trials before solenoid is opened')

    def __init__(self, *args, **kwargs):
        from riglib import reward
        super(RewardSystem, self).__init__(*args, **kwargs)
        self.reward = reward.open()

    def _start_reward(self):
        self.reward_start = self.get_time()
        if self.reward is not None:
            self.reportstats['Reward #'] += 1
            if self.reportstats['Reward #'] % self.trials_per_reward == 0:
                self.reward.reward(self.reward_time * 1000.)
        super(RewardSystem, self)._start_reward()

    def _test_reward_end(self, ts):
        if self.reportstats['Reward #'] % self.trials_per_reward == 0:
            return ts > self.reward_time
        else:
            return True
Example #14
class CLDATentacleRL(CLDAControlTentacle):
    n_trial_ofc_learner = traits.Float(
        16.0,
        desc=
        'Number of rewards before switching from continuous adaptation to trial-based adaptation'
    )
    batch_size = 0.1

    def create_learner(self):
        A, B, _ = self.decoder.ssm.get_ssm_matrices()

        kin_chain = self.plant.kin_chain
        Q = np.mat(np.diag(np.hstack([kin_chain.link_lengths, np.zeros(5)])))
        R = 100000 * np.mat(np.eye(B.shape[1]))
        self.ofc_learner = OFCLearnerTentacle(
            1,
            A,
            B,
            Q,
            R,
            done_states=['reward', 'hold_penalty'],
            reset_states=['timeout_penalty'])
        self.rl_learner = TentacleValueLearner(
            np.inf,
            done_states=['reward', 'hold_penalty'],
            reset_states=['timeout_penalty'],
            kin_chain=self.plant.kin_chain)
        self.learner = self.ofc_learner

    def _cycle(self):
        super(CLDATentacleRL, self)._cycle()
        if (self.calc_state_occurrences('reward') > self.n_trial_ofc_learner
            ) and self.state not in ['reward', 'hold_penalty']:
            # switch learner to the trial-based learner
            self.bmi_system.learner = self.rl_learner
class LinearlyDecreasingN(LinearlyDecreasingAttribute):
    memory_decay_rate = traits.Tuple((-1., 0.5), desc="Initial and final memory decay rate")
    memory_decay_rate_time = traits.Float(300, desc="Number of seconds to go from initial to final memory decay rate")

    def __init__(self, *args, **kwargs):
        super(LinearlyDecreasingN, self).__init__(*args, **kwargs)
        if 'memory_decay_rate' not in self.attrs:
            self.attrs.append('memory_decay_rate')
class CLDACursorAdaptRscale(CLDAControlMulti):
    r_scale = traits.Float(20.)

    def init(self):
        self.decoder.filt.r_scale = self.r_scale
        super(CLDACursorAdaptRscale, self).init()

    def create_updater(self):
        self.updater = RScaleUpdater(self.batch_time, self.half_life[0])
Example #17
class KinarmFreeChoice(manualcontrolmultitasks.JoystickSpeedFreeChoice):
    sequence_generators = manualcontrolmultitasks.JoystickSpeedFreeChoice.sequence_generators
    pre_choice_pause_time = traits.Float(
        3., desc='Time (s) before the subject is allowed to make a choice')
    status = dict(wait=dict(start_trial="targ_transition", stop=None),
                  pre_choice_orig=dict(enter_orig='choice_target',
                                       timeout='timeout_penalty',
                                       stop=None),
                  choice_target=dict(enter_choice_target='targ_transition',
                                     timeout='timeout_penalty',
                                     stop=None),
                  target=dict(enter_target="hold",
                              timeout="timeout_penalty",
                              stop=None),
                  hold=dict(leave_early="hold_penalty",
                            hold_complete="targ_transition"),
                  targ_transition=dict(trial_complete="reward",
                                       trial_abort="wait",
                                       trial_incomplete="target",
                                       make_choice='pre_choice_orig'),
                  timeout_penalty=dict(timeout_penalty_end="targ_transition"),
                  hold_penalty=dict(hold_penalty_end="targ_transition"),
                  reward=dict(reward_end="wait"))

    def move_effector(self):
        self = kinarm_move_effector(self)

        self.current_pt = self.current_pt + (
            np.array([np.random.rand() - 0.5, 0.,
                      np.random.rand() - 0.5]) * self.joystick_speed)
        self.plant.set_endpoint_pos(self.current_pt)
        self.last_pt = self.current_pt.copy()

    def _test_enter_choice_target(self, ts):
        cursor_pos = self.plant.get_endpoint_pos()
        enter_targ = 0
        for ic, c in enumerate(self.choice_locs):
            d = np.linalg.norm(cursor_pos - c)
            if d <= self.choice_target_rad:  # NOTE: counts as entered if the CENTER of the cursor is in the target (not the entire cursor)
                enter_targ += 1

                #Set chosen as new input:
                self.chosen_input_ix = ic
                self.joystick_speed = self.input_type_dict[ic]
                print('trial:', self.choice_instructed, self.joystick_speed)

                #Declare that choice has been made:
                self.choice_made = 1

                #Change color of cursor:
                sph = self.plant.graphics_models[0]
                sph.color = self.input_type_dict[ic, 'color']
        if ts > self.pre_choice_pause_time:
            return enter_targ > 0
        else:
            return False
Example #18
class LinearlyDecreasingReachAngle(LinearlyDecreasingAssist):
    '''
    For the reach direction task, decrease the maximum reach angle linearly
    '''
    reach_angle_time = traits.Float(
        600, desc="Number of seconds to go from initial to final reach angle")

    def __init__(self, *args, **kwargs):
        super(LinearlyDecreasingReachAngle, self).__init__(*args, **kwargs)
        if 'reach_angle' not in self.attrs:
            self.attrs.append('reach_angle')
Example #19
class Dots(TrialTypes, Pygame):
    trial_types = ["flat", "depth"]
    saturation = traits.Float(1.)

    def init(self):
        super(Dots, self).init()

        self.width, self.height = self.surf.get_size()
        mask = squaremask()
        mid = self.height // 2 - mask.shape[0] // 2
        lc = self.width // 4 - mask.shape[1] // 2
        rc = 3 * self.width // 4 - mask.shape[1] // 2

        self.mask = mask
        self.coords = (lc, mid), (rc, mid)

    def _start_depth(self):
        c = 255 * (1 - self.saturation)
        left, right, flat = generate(self.mask)
        sleft = pygame.surfarray.make_surface(left.T)
        sright = pygame.surfarray.make_surface(right.T)
        sleft.set_palette([(0, 0, 0, 255), (c, c, 255, 255), (c, 255, c, 255)])
        sright.set_palette([(0, 0, 0, 255), (c, c, 255, 255),
                            (c, 255, c, 255)])
        self.sleft, self.sright = sleft, sright

    def _start_flat(self):
        left, right, flat = generate(self.mask)
        sflat = pygame.surfarray.make_surface(flat.T)
        sflat.set_palette([(0, 0, 0, 255), (255, 255, 255, 255)])
        self.sflat = sflat

    def _while_depth(self):
        self.surf.blit(self.sleft, self.coords[0])
        self.surf.blit(self.sright, self.coords[1])
        self.flip_wait()

    def _while_flat(self):
        self.surf.blit(self.sflat, self.coords[0])
        self.surf.blit(self.sflat, self.coords[1])
        self.flip_wait()

    def _test_flat_correct(self, ts):
        return self.event in [1, 2]

    def _test_flat_incorrect(self, ts):
        return self.event in [4, 8]

    def _test_depth_correct(self, ts):
        return self.event in [4, 8]

    def _test_depth_incorrect(self, ts):
        return self.event in [1, 2]
Example #20
class VisualFeedbackMulti(manualcontrolmultitasks.ManualControlMulti):
    '''
    Displays task to subject but cursor moves automatically to targets with some
    noise added. Subject still gets reward when cursor hits targets.
    '''
    background = (.5, .5, .5, 1)  # Set the screen background color to grey
    noise_level = traits.Float(
        0.5, desc="Percent noise to add to straight line movements.")
    smoothparam = 10  # number of frames to move in one direction before switching
    smoothcounter = 9
    velnoise = np.array([0, 0, 0])
    ordered_traits = ['session_length']
    exclude_parent_traits = ['marker_count', 'marker_num']

    def __init__(self, *args, **kwargs):
        self.cursor_visible = True
        super(VisualFeedbackMulti, self).__init__(*args, **kwargs)
        self.prev_cursor = self.plant.get_endpoint_pos()

    def move_effector(self):
        ''' 
        Returns the 3D coordinates of the cursor.
        '''
        # calculate straight line x and z velocities
        targetvec = self.target_location - self.prev_cursor
        vecmag = np.sqrt(targetvec[0]**2 + targetvec[1]**2 + targetvec[2]**2)
        if vecmag < .05:
            velideal = np.array([0, 0, 0])
        else:
            direction = targetvec / vecmag
            velideal = direction * .1  # constant velocity for now, maybe change to bell shaped curve later??
        if self.smoothcounter == (self.smoothparam - 1):
            # create random noise x and z velocities
            self.velnoise = np.array(
                [np.random.randn(1)[0], 0,
                 np.random.randn(1)[0]]) * .1
            self.smoothcounter = 0
        else:
            self.smoothcounter += 1
        # combine ideal velocity with noise
        vel = velideal * (1 -
                          self.noise_level) + self.velnoise * self.noise_level

        # calculate new cursor position
        #self.set_arm_endpoint(self.prev_cursor + vel, time_limit=0.012)
        self.plant.set_endpoint_pos(self.prev_cursor + vel)

    def update_cursor(self):
        ''' Update the cursor's location and visibility status.'''
        pt = self.get_cursor_location()
        self.move_cursor(pt)
        self.prev_cursor = pt.copy()
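
`move_effector` is a convex blend of an ideal straight-line velocity toward the target and a noise velocity that is resampled every `smoothparam` frames, with `noise_level` as the mixing weight. One step with assumed values:

import numpy as np

noise_level = 0.5                             # trait default above
velideal = np.array([0.1, 0., 0.])            # straight toward the target
velnoise = np.array([0.02, 0., -0.05])        # held fixed for smoothparam frames
vel = velideal * (1 - noise_level) + velnoise * noise_level
print(vel)                                    # [ 0.06   0.    -0.025]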
class CLDA_BMIResettingObstacles(BMIResettingObstacles,
                                 LinearlyDecreasingHalfLife):
    sequence_generators = [
        'centerout_2D_discrete', 'centerout_2D_discrete_w_obstacle'
    ]
    batch_time = traits.Float(0.1, desc='The length of the batch in seconds')
    decoder_sequence = traits.String(
        'test', desc='signifier to group together sequences of decoders')
    memory_decay_rate = traits.Float(0.5, desc="")
    ordered_traits = [
        'session_length', 'assist_level', 'assist_level_time', 'batch_time',
        'half_life', 'half_life_time'
    ]

    def __init__(self, *args, **kwargs):
        super(CLDA_BMIResettingObstacles, self).__init__(*args, **kwargs)
        self.learn_flag = True

    def call_decoder(self, *args, **kwargs):
        kwargs['half_life'] = self.current_half_life
        return super(CLDA_BMIResettingObstacles,
                     self).call_decoder(*args, **kwargs)

    def create_updater(self):
        self.updater = clda.KFRML(self.batch_time, self.half_life[0])
        self.updater.default_gain = self.memory_decay_rate

    def create_learner(self):
        # self.batch_size = int(self.batch_time/self.decoder.binlen)
        # self.learner = ObstacleLearner(self.batch_size)
        # self.learn_flag = True

        self.batch_size = int(self.batch_time / self.decoder.binlen)
        A, B, _ = self.decoder.ssm.get_ssm_matrices()
        Q = np.mat(np.diag([10, 10, 10, 5, 5, 5, 0]))
        R = 10**6 * np.mat(np.eye(B.shape[1]))
        from tentaclebmitasks import OFCLearnerTentacle
        self.learner = OFCLearnerTentacle(self.batch_size, A, B, Q, R)
        self.learn_flag = True
Example #22
class LinearlyDecreasingHalfLife(LinearlyDecreasingAttribute):
    '''
    Specific case of LinearlyDecreasingAttribute for a linearly decreasing CLDA half-life
    '''
    half_life = traits.Tuple((450., 450.),
                             desc="Initial and final half life for CLDA")
    half_life_time = traits.Float(
        600, desc="Number of seconds to go from initial to final half life")

    def __init__(self, *args, **kwargs):
        super(LinearlyDecreasingHalfLife, self).__init__(*args, **kwargs)
        if 'half_life' not in self.attrs:
            self.attrs.append('half_life')
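
All of the `LinearlyDecreasing*` mixins in this listing follow the same pattern: register an attribute name in `self.attrs`, and let `LinearlyDecreasingAttribute` ramp it from the first to the second tuple element over the matching `*_time` trait. The interpolation presumably reduces to the following rule (a sketch under that assumption; names illustrative):

def linear_ramp(t, start, end, duration):
    """Value of a linearly decreasing attribute after t elapsed seconds."""
    frac = min(t / duration, 1.0)     # clamp at the final value
    return start + (end - start) * frac

print(linear_ramp(300., 450., 100., 600.))   # 275.0, halfway through the ramp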
Example #23
class CLDAControlTentaclePPF(CLDAControlTentacle):
    param_noise_scale = traits.Float(1.0, desc="Scale factor for the assumed parameter noise in the PPF updater")

    def create_updater(self):
        vel_gain = 1e-8
        # vel_gain *= self.param_noise_scale
        const_var = 1e-4 * 0.06 / 50
        vel_var = vel_gain * 0.13
        param_noise_variances = np.array([
            vel_var / 225, vel_var / 225, vel_var / 5, vel_var / 5, const_var
        ])
        self.updater = clda.PPFContinuousBayesianUpdater(
            self.decoder, param_noise_variances=param_noise_variances)
class CLDAControlPPFContAdapt2(CLDAControlMulti):
    exclude_parent_traits = ['half_life', 'half_life_decay_time', 'batch_time']
    param_noise_scale = traits.Float(1.0, desc="Scale factor for the assumed parameter noise in the PPF updater")
    cost_fn_scale = traits.Float(10000, desc="Scale factor for the velocity cost term in the OFC cost function")
    ordered_traits = [
        'session_length', 'assist_level', 'assist_time', 'param_noise_scale'
    ]

    def create_learner(self):
        self.batch_size = 1
        A, B, _ = self.decoder.ssm.get_ssm_matrices(
            update_rate=self.decoder.binlen)

        Q = np.mat(np.diag([1., 1, 1, 0, 0, 0, 0]))
        R = self.cost_fn_scale * np.mat(np.eye(B.shape[1]))
        from tasks.tentaclebmitasks import OFCLearnerTentacle
        self.learner = OFCLearnerTentacle(self.batch_size, A, B, Q, R)
        self.learn_flag = True

    def create_updater(self):
        self.updater = clda.PPFContinuousBayesianUpdater(
            self.decoder, param_noise_scale=self.param_noise_scale)
Example #25
class EndpointManualControl(ExoBase, PositionerTaskController):
    status = dict(
        go_to_origin=dict(microcontroller_done='wait', stop=None),
        wait=dict(start_trial='move_target', stop=None),
        move_target=dict(microcontroller_done='reach'),
        reach=dict(force_applied='reward',
                   new_target_set_remotely='move_target',
                   skip='wait',
                   stop=None),
        reward=dict(time_expired='wait', stop=None),
    )

    trial_end_states = ['reward']

    min_force_on_target = traits.Float(1., desc='Force that needs to be applied, in Newtons')
    reward_time = traits.Float(3., desc='reward time for solenoid')
    

    def move_plant(self):
        self.plant._get_sensor_data()

    def _test_force_applied(self, *args, **kwargs):
        return self.plant.force_N > self.min_force_on_target

    def _end_move_target(self):
        # send command to kill motors
        steps_actuated = self.pos_uctrl_iface.end_continuous_move(stiff=True)
        self._integrate_steps(steps_actuated, self.pos_uctrl_iface.motor_dir)

    def _cycle(self):
        # print "_cycle"
        super(EndpointManualControl, self)._cycle()

    def init(self):
        import pygame
        pygame.init()
        super(EndpointManualControl, self).init()

    def _test_skip(self, *args, **kwargs):
        import pygame
        keys = pygame.key.get_pressed()
        return keys[pygame.K_RIGHT]
Example #26
class LinearlyDecreasingAssist(LinearlyDecreasingAttribute):
    '''
    Specific case of LinearlyDecreasingAttribute for a linearly decreasing assist parameter
    '''
    assist_level = traits.Tuple((0.0, 0.0),
                                desc="Level of assist to apply to BMI output")
    assist_level_time = traits.Float(
        600,
        desc="Number of seconds to go from initial to minimum assist level")

    def __init__(self, *args, **kwargs):
        super(LinearlyDecreasingAssist, self).__init__(*args, **kwargs)
        if 'assist_level' not in self.attrs:
            self.attrs.append('assist_level')
Example #27
class LinearlyDecreasingAngAssist(LinearlyDecreasingAttribute):
    ''' 
    linearly decreasing angular assist -- for psi and ReHand
    '''
    rh_assist_level = traits.Tuple(
        (0.0, 0.0), desc='level of assist to apply to ang output')
    rh_assist_level_time = traits.Float(
        600,
        desc="Number of seconds to go from initial to minimum assist level")

    def __init__(self, *args, **kwargs):
        super(LinearlyDecreasingAngAssist, self).__init__(*args, **kwargs)
        if 'rh_assist_level' not in self.attrs:
            self.attrs.append('rh_assist_level')
class CLDAControlKFCGRML(CLDAControlMulti):
    memory_decay_rate = traits.Float(
        0.45, desc='Shape parameter for the impulse response of the KF')
    ordered_traits = [
        'session_length', 'assist_level', 'assist_time', 'batch_time',
        'half_life', 'half_life_decay_time', 'memory_decay_rate'
    ]

    def create_learner(self):
        self.batch_size = int(self.batch_time / self.decoder.binlen)
        self.learner = CursorGoalLearner2(self.batch_size,
                                          int_speed_type='decoded_speed')
        self.learn_flag = True

    def create_updater(self):
        self.updater = clda.KFRML(self.batch_time, self.half_life[0])
Example #29
class CLDAControlExoEndpt(BMIControlExoEndpt, LinearlyDecreasingHalfLife):
    batch_time = traits.Float(0.1, desc='The length of the batch in seconds')
    decoder_sequence = traits.String('exo', desc='signifier to group together sequences of decoders')

    def create_updater(self):
        self.updater = clda.KFRML(self.batch_time, self.half_life[0])

    def init(self):
        super(CLDAControlExoEndpt, self).init()
        self.batch_time = self.decoder.binlen
        self.updater.init(self.decoder)        

    def create_learner(self):
        self.batch_size = int(self.batch_time / self.decoder.binlen)
        self.learner = clda.FeedbackControllerLearner(
            self.batch_size, joint_vel_fb_ctrl,
            reset_states=['go_to_origin', 'wait', 'init_exo', 'move_target', 'pause', 'reward'])
        self.learn_flag = True
class BMICursorKinematicCurlField(BMIResetting):
    rot_factor = traits.Float(
        10., desc='scaling factor from speed to rotation angle in degrees')

    def load_decoder(self):
        super(BMICursorKinematicCurlField, self).load_decoder()
        # Convert the KF to a curl-field-generating KF
        dec = self.decoder
        filt = CurlFieldKalmanFilter(A=self.decoder.filt.A,
                                     W=self.decoder.filt.W,
                                     C=dec.filt.C,
                                     Q=dec.filt.Q,
                                     is_stochastic=dec.filt.is_stochastic)
        filt.C_xpose_Q_inv = dec.filt.C_xpose_Q_inv
        filt.C_xpose_Q_inv_C = dec.filt.C_xpose_Q_inv_C
        filt.rot_factor = self.rot_factor
        self.decoder.filt = filt
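
`CurlFieldKalmanFilter` itself is not shown in this listing, but a curl field of this sort rotates the decoded velocity by an angle proportional to the cursor speed. A minimal sketch of that rotation in the x–z plane, assuming `rot_factor` is in degrees per unit of speed as the trait description suggests:

import numpy as np

def curl_rotate(vel_xz, rot_factor):
    """Rotate a 2D velocity by rot_factor degrees per unit of speed."""
    speed = np.linalg.norm(vel_xz)
    theta = np.deg2rad(rot_factor * speed)
    rot = np.array([[np.cos(theta), -np.sin(theta)],
                    [np.sin(theta),  np.cos(theta)]])
    return rot @ vel_xz

print(curl_rotate(np.array([1., 0.]), 10.))   # ~[0.985, 0.174]: rotated 10 degrees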