Example #1
import random
import select

import numpy as np

# NOTE: framework names used in these examples (Sequence, LogExperiment, FSMTable,
# StateTransitions, traits, the Positioner serial interface, and the BMI/decoder classes)
# are assumed to come from the surrounding experiment codebase and are not imported here.


class PositionerTaskController(Sequence):
    '''
    Interface between the positioner and the task interface. The positioner should run asynchronously
    so that the task event loop does not have to wait for a serial port response from the microcontroller.
    '''
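    # Asynchronous pattern: movement commands are issued in the _start_* handlers, the
    # _test_microcontroller_done() transition test polls the serial port for a response
    # each cycle, and the matching _end_* handler reads back the actuated step count.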

    status = FSMTable(
        go_to_origin=StateTransitions(microcontroller_done='wait'),
        wait=StateTransitions(start_trial='move_target'),
        move_target=StateTransitions(microcontroller_done='reach',
                                     stoppable=False),
        reach=StateTransitions(time_expired='reward',
                               new_target_set_remotely='move_target'),
        reward=StateTransitions(time_expired='wait'),
    )
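    # Each event name in the table above corresponds to a _test_<event>() method (defined
    # here or in a parent class), and each state to optional _start_/_while_/_end_<state>
    # handlers.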
    # status = dict(
    #     go_to_origin = dict(microcontroller_done='wait', stop=None),
    #     wait = dict(start_trial='move_target', stop=None),
    #     move_target = dict(microcontroller_done='reach'),
    #     reach = dict(time_expired='reward', stop=None),
    #     reward = dict(time_expired='wait'),
    # )

    state = 'go_to_origin'

    sequence_generators = ['random_target_calibration', 'xy_sweep']
    reward_time = 1
    reach_time = 1

    @staticmethod
    def random_target_calibration(n_blocks=10):
        #     # constants selected approximately from one subject's ROM
        #     targets = [
        #         (x_min, y_min, z_min),
        #         (x_max, y_min, z_min),
        #         (x_min, y_max, z_min),
        #         (x_min, y_min, z_max),
        #         (x_max, y_max, z_min),
        #         (x_max, y_min, z_max),
        #         (x_min, y_max, z_max),
        #         (x_max, y_max, z_max),
        #     ]

        #     trial_target_ls = []
        #     for k in range(n_blocks):
        #         random.shuffle(targets)
        #         for targ in targets:
        #             trial_target_ls.append(dict(int_target_pos=targ))

        #     # set the last target to be the origin since the purpose of this generator is to measure the drift in # of steps
        #     trial_target_ls.append(dict(int_target_pos=np.zeros(3)))
        #     return trial_target_ls

        # @staticmethod
        # def calibration_targets(nblocks=1):
        targets = [
            (45, 34, 0),
            (50, 38, -25),
            (40, 35, 0),
            (40, 35, -25),
            (30, 29, 0),
            (30, 29, -25),
            (20, 35, 0),
            (20, 35, -25),
            # (10, 38, 0), # reachable?
            # (10, 38, -25), # reachable?
        ]
        trial_target_ls = []
        for k in range(n_blocks):
            random.shuffle(targets)
            for targ in targets:
                trial_target_ls.append(dict(int_target_pos=targ))

        # set the last target to be the origin since the purpose of this generator is to measure the drift in # of steps
        trial_target_ls.append(dict(int_target_pos=np.zeros(3)))
        return trial_target_ls

    @staticmethod
    def xy_sweep(z_min=-25, z_max=0, zpts=6):

        xy_target_locs = np.vstack([
            [8.20564516129, 37.6302083333],
            [9.61693548387, 34.1145833333],
            [15.1209677419, 31.1848958333],
            [15.5443548387, 34.5703125],
            [18.2258064516, 36.5234375],
            [23.4475806452, 34.7005208333],
            [22.8830645161, 32.3567708333],
            [23.0241935484, 29.1666666667],
            [28.9516129032, 34.8307291667],
            [28.9516129032, 32.2265625],
            [29.2338709677, 30.1432291667],
            [33.3266129032, 35.4166666667],
            [33.8911290323, 33.1380208333],
            [30.5040322581, 30.078125],
            [20.4838709677, 28.1901041667],
            [35.5846774194, 36.5885416667],
            [39.2540322581, 33.5286458333],
            [41.5120967742, 38.5416666667],
            [47.439516129, 37.6953125],
        ])

        trial_target_ls = []
        z_range = np.linspace(z_min, z_max, zpts)
        for zpt in z_range:
            for xy_targ in xy_target_locs:
                trial_target_ls.append(
                    dict(int_target_pos=np.hstack([xy_targ, zpt])))

        return trial_target_ls

    def __init__(self, *args, **kwargs):
        '''
        Constructor for PositionerTaskController.

        The positioner settings below are currently hardcoded in the constructor (see the
        TODO); they are documented here as the intended keyword arguments.

        Parameters
        ----------
        dev : str, optional, default='/dev/arduino_positioner'
            Serial port used to communicate with the Arduino controller
        x_cm_per_rev : float, optional, default=12.4
            cm traveled for one full revolution of the stepper motors in the x-dimension
        y_cm_per_rev : float, optional, default=12.4
            cm traveled for one full revolution of the stepper motors in the y-dimension
        z_cm_per_rev : float, optional, default=8.0
            cm traveled for one full revolution of the stepper motors in the z-dimension
        x_step_size : float, optional, default=0.25
            Microstepping mode in the x-dimension
        y_step_size : float, optional, default=0.25
            Microstepping mode in the y-dimension
        z_step_size : float, optional, default=0.25
            Microstepping mode in the z-dimension

        Returns
        -------
        PositionerTaskController instance
        '''

        # TODO make these input arguments
        positioner_dev = '/dev/arduino_positioner'

        # cm/rev based on measured data
        x_cm_per_rev = 12.4
        y_cm_per_rev = 12.4
        z_cm_per_rev = 8.0

        x_step_size = 1. / 4
        y_step_size = 1. / 4
        z_step_size = 1. / 4

        # position of the target relative to the origin is unknown until the origin limit switches are hit
        self.loc = np.ones(3) * np.nan
        self.pos_uctrl_iface = Positioner(dev=positioner_dev)
        # self.pos_uctrl_iface.sleep_motors()

        # cumulative number of steps taken from the origin; unknown until the origin limit switches are hit
        self.steps_from_origin = np.ones(3) * np.nan
        self.step_size = np.array([x_step_size, y_step_size, z_step_size],
                                  dtype=np.float64)
        self.cm_per_rev = np.array([x_cm_per_rev, y_cm_per_rev, z_cm_per_rev],
                                   dtype=np.float64)

        self.full_steps_per_rev = 200.

        super(PositionerTaskController, self).__init__(*args, **kwargs)

    def init(self):
        self.add_dtype('positioner_loc', np.float64, (3, ))
        self.add_dtype('positioner_steps_from_origin', np.float64, (3, ))
        super(PositionerTaskController, self).init()

        # open an rx_socket for reading new commands
        import socket
        self.rx_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.rx_sock.bind(('localhost', 60005))

    def terminate(self):
        # close the rx socket used for reading remote commands
        super(PositionerTaskController, self).terminate()
        self.rx_sock.close()

    ##### Helper functions #####
    def _calc_steps_to_pos(self, target_pos):
        displ_cm = target_pos - self.loc

        # compute the number of steps needed to travel the desired displacement
        displ_rev = displ_cm / self.cm_per_rev
        displ_full_steps = displ_rev * self.full_steps_per_rev
        displ_microsteps = displ_full_steps / self.step_size
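        # Worked example with the constants above: a 12.4 cm move along x is
        # 12.4 / 12.4 = 1 revolution = 200 full steps = 200 / 0.25 = 800 microsteps.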
        displ_microsteps = displ_microsteps.astype(int)
        return displ_microsteps

    def _steps_to_cm(self, n_steps_actuated):
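        # Note: this also accumulates the (unsigned) step count into self.steps_from_origin;
        # unlike _integrate_steps() below, no motor direction signs are applied here.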
        steps_moved = n_steps_actuated * self.step_size
        self.steps_from_origin += steps_moved
        return steps_moved / self.full_steps_per_rev * self.cm_per_rev

    def _integrate_steps(self, n_steps_actuated, signs):
        steps_moved = n_steps_actuated * signs * self.step_size
        self.steps_from_origin += steps_moved
        self.loc += steps_moved / self.full_steps_per_rev * self.cm_per_rev

    def _test_microcontroller_done(self, *args, **kwargs):
        # check if any data has returned from the microcontroller interface
        # self.print_to_terminal("starting to check for data")
        bytes_avail = self.pos_uctrl_iface.data_available()

        # self.print_to_terminal(bytes_avail)

        # remember to actually read the data out of the buffer in an '_end' function
        return bytes_avail > 0

    # def update_report_stats(self, *args, **kwargs):
    #     super(PositionerTaskController, self).update_report_stats()
    #     self.reportstats['resp_bytes'] = self.pos_uctrl_iface.data_available()

    def _test_new_target_set_remotely(self, *args, **kwargs):
        # print "checking for new target set"
        socket_list = [self.rx_sock]
        # Get the list sockets which are readable
        read_sockets, write_sockets, error_sockets = select.select(
            socket_list, [], [], 0)
        if self.rx_sock in read_sockets:
            raw_data = self.rx_sock.recv(8 * 3)
            import struct
            new_pos = struct.unpack('ddd', raw_data)
            print "received new position!"
            print new_pos
            self._gen_int_target_pos = new_pos
            return True
        else:
            return False

    ##### State transition functions #####
    def _start_go_to_origin(self):
        print "_start_go_to_origin"
        self.pos_uctrl_iface.start_continuous_move(1000, 1000, -1000)

    def _start_go_to_target(self, num_x, num_y, num_z):
        # AY modification: _start_go_to_origin sends the positioner to a predetermined location, so we need a way
        # to send it different target locations. Not currently working (and not yet added to the task list).
        print("_start_go_to_target")
        self.pos_uctrl_iface.start_continuous_move(num_x, num_y, num_z)

    def _start_reward(self):
        pass

    def _end_go_to_origin(self):
        steps_actuated = self.pos_uctrl_iface.end_continuous_move(stiff=True)

        self.loc = np.zeros(3)
        self.steps_from_origin = np.zeros(3)

    def _start_move_target(self):
        # calc number of steps from current pos to target pos
        displ_microsteps = self._calc_steps_to_pos(self._gen_int_target_pos)

        # send command to initiate movement
        self.pos_uctrl_iface.start_continuous_move(*displ_microsteps)

    def _end_move_target(self):
        # send command to kill motors
        steps_actuated = self.pos_uctrl_iface.end_continuous_move()
        self._integrate_steps(steps_actuated, self.pos_uctrl_iface.motor_dir)

    def _cycle(self):
        self.task_data['positioner_loc'] = self.loc
        self.task_data['positioner_steps_from_origin'] = self.steps_from_origin
        super(PositionerTaskController, self)._cycle()

    ### Old functions ###
    def go_to_origin(self):
        '''
        Tap the origin limit switches so the absolute position of the target can be estimated. 
        Run at initialization and/or periodically to correct for any accumulating errors.
        '''
        steps_moved = np.array(
            self.pos_uctrl_iface.continuous_move(-10000, -10000, 10000))
        step_signs = np.array([-1, -1, 1], dtype=np.float64) * self.step_size

        if not np.any(np.isnan(
                self.steps_from_origin)):  # error accumulation correction
            self.steps_from_origin += step_signs * steps_moved

            # if no position errors were accumulated, then self.steps_from_origin should all be 0
            acc_error = self.steps_from_origin
            print "accumulated step errors"
            print acc_error

        self.loc = np.zeros(3)
        self.steps_from_origin = np.zeros(3)

    def go_to_position(self, target_pos):
        if np.any(np.isnan(self.loc)):
            raise Exception(
                "System must be 'zeroed' before it can go to an absolute location!"
            )

        displ_cm = target_pos - self.loc

        # compute the number of steps needed to travel the desired displacement
        displ_rev = displ_cm / self.cm_per_rev
        displ_full_steps = displ_rev * self.full_steps_per_rev
        displ_microsteps = (displ_full_steps / self.step_size).astype(int)  # match _calc_steps_to_pos

        steps_moved = np.array(
            self.pos_uctrl_iface.continuous_move(*displ_microsteps))
        steps_moved = steps_moved * np.sign(displ_microsteps) * self.step_size

        self.steps_from_origin += steps_moved
        self.loc += steps_moved / self.full_steps_per_rev * self.cm_per_rev

    def run(self):
        '''
        Tell the positioner motors to turn off when the task ends
        '''
        try:
            super(PositionerTaskController, self).run()
        finally:
            self.pos_uctrl_iface.sleep_motors()


class MockLogExperiment(LogExperiment):
    status = FSMTable(
        state1=StateTransitions(event1to2='state2', event1to3='state3'),
        state2=StateTransitions(event2to3='state3', event2to1='state1'),
        state3=StateTransitions(event3to2='state2', event3to1='state1'),
    )
    state = 'state1'

    def __init__(self, *args, **kwargs):
        self.iter_idx = 0
        super(MockLogExperiment, self).__init__(*args, **kwargs)

    def _cycle(self):
        self.iter_idx += 1
        super(MockLogExperiment, self)._cycle()

    def _start_state3(self):
        pass

    def _while_state3(self):
        pass

    def _end_state3(self):
        pass

    def _start_state2(self):
        pass

    def _while_state2(self):
        pass

    def _end_state2(self):
        pass

    def _start_state1(self):
        pass

    def _while_state1(self):
        pass

    def _end_state1(self):
        pass

    ################## State transition test functions ##################
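    # NOTE: the event1to2 ... event3to2 lists indexed below are assumed to be defined at
    # module level alongside this test class (one boolean per cycle); compare MockSequence
    # further down, which stores the same lists as class attributes.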
    def _test_event3to1(self, time_in_state):
        return event3to1[self.iter_idx]

    def _test_event3to2(self, time_in_state):
        return event3to2[self.iter_idx]

    def _test_event2to3(self, time_in_state):
        return event2to3[self.iter_idx]

    def _test_event2to1(self, time_in_state):
        return event2to1[self.iter_idx]

    def _test_event1to3(self, time_in_state):
        return event1to3[self.iter_idx]

    def _test_event1to2(self, time_in_state):
        return event1to2[self.iter_idx]

    def _test_stop(self, time_in_state):
        return self.iter_idx >= len(event1to2) - 1

    def get_time(self):
        return self.iter_idx
Example #3
class MachineControlExoJointVel_w_Positioner(BMIControlExoJointVel, PositionerTaskController):
    '''
    Task to automatically go between different joint configurations with the target positioner following
    '''
    sequence_generators = ['exo_joint_space_targets']
    current_assist_level = 1

    status = FSMTable(
        wait=StateTransitions(start_trial='move'),
        move=StateTransitions(reached_config='reward'),
        reward=StateTransitions(time_expired='wait'),
    )

    state = 'wait'

    reward_time = traits.Float(3, desc='Time that reward solenoid is open')
    config_tolerances_deg = traits.Tuple((10, 10, 10, 7.5, 10), desc='Joint-angle tolerance (deg) for each DOF when testing whether the target configuration has been reached')
    
    def __init__(self, *args, **kwargs):
        super(MachineControlExoJointVel_w_Positioner, self).__init__(*args, **kwargs)
        self.config_tolerances = np.deg2rad(np.array(self.config_tolerances_deg))
        self.config_tolerances[-1] = np.inf

    @staticmethod 
    def exo_joint_space_targets(n_blocks=10):
        # neutral_config = np.array([-0.64732247,  0.79,  0.19634043,  0.97628754, -0.02114062])
        target1 = np.array([-1.05, 0.7, 0.4, 1, 0]) 
        target2 = np.array([-0.25, 0.7, 0.4, 1, 0])
        target3 = np.array([-0.85, 1.3 , 0.4, 1, 0])
        target4 = np.array([-0.65, 1.3, 0.4, 0.2, 0])

        trial_target_ls = []
        for k in range(n_blocks):
            configs = np.random.permutation([target1, target2, target3, target4]) # generate the target sequence for each block
            for config in configs:
                trial_target_ls.append(dict(target_config=config))
        return trial_target_ls


    def _test_reached_config(self, *args, **kwargs):
        target_endpt = self.plant.kin_chain.endpoint_pos(self._gen_target_config)
        current_endpt = self.plant.kin_chain.endpoint_pos(self.plant.joint_angles)
        pos_diff = np.linalg.norm(current_endpt - target_endpt)
        joint_diff = np.abs(self.plant.joint_angles - self._gen_target_config)
        # print pos_diff 
        # print np.round(np.rad2deg(joint_diff), decimals=2)
        # print 
        return (pos_diff < 3) or np.all(joint_diff < self.config_tolerances)
        

    def load_decoder(self):
        self.ssm = StateSpaceJointVelocityActiveExo()
        A, B, W = self.ssm.get_ssm_matrices()
        filt = MachineOnlyFilter(A, W)
        units = []
        self.decoder = Decoder(filt, units, self.ssm, binlen=0.1)
        self.decoder.n_features = 1

    def create_feature_extractor(self):
        from riglib.bmi.extractor import DummyExtractor

        self.extractor = DummyExtractor()
        self._add_feature_extractor_dtype()

    def _start_reward(self):
        print "trial complete!"


class MockSequence(Sequence):
    event1to2 = [
        False, True, False, True, False, True, False, True, False, True, False
    ]
    event1to3 = [
        False, False, False, False, False, False, False, False, False, False,
        False
    ]
    event2to3 = [
        False, False, False, False, False, False, False, False, False, False,
        False
    ]
    event2to1 = [
        False, False, True, False, True, False, True, False, True, False, False
    ]
    event3to2 = [
        False, False, False, False, False, False, False, False, False, False,
        False
    ]
    event3to1 = [
        False, False, False, False, False, False, False, False, False, False,
        False
    ]

    status = FSMTable(
        wait=StateTransitions(event1to2='state2', event1to3='state3'),
        state2=StateTransitions(event2to3='state3', event2to1='wait'),
        state3=StateTransitions(event3to2='state2', event3to1='wait'),
    )
    state = 'wait'

    def __init__(self, *args, **kwargs):
        self.iter_idx = 0
        self.target_history = []
        super(MockSequence, self).__init__(*args, **kwargs)

    def _cycle(self):
        self.iter_idx += 1
        super(MockSequence, self)._cycle()

    def _start_state3(self):
        pass

    def _while_state3(self):
        pass

    def _end_state3(self):
        pass

    def _start_state2(self):
        pass

    def _while_state2(self):
        pass

    def _end_state2(self):
        pass

    def _start_state1(self):
        pass

    def _while_state1(self):
        pass

    def _end_state1(self):
        pass

    ################## State transition test functions ##################
    def _test_event3to1(self, time_in_state):
        return self.event3to1[self.iter_idx]

    def _test_event3to2(self, time_in_state):
        return self.event3to2[self.iter_idx]

    def _test_event2to3(self, time_in_state):
        return self.event2to3[self.iter_idx]

    def _test_event2to1(self, time_in_state):
        return self.event2to1[self.iter_idx]

    def _test_event1to3(self, time_in_state):
        return self.event1to3[self.iter_idx]

    def _test_event1to2(self, time_in_state):
        return self.event1to2[self.iter_idx]

    def _test_stop(self, time_in_state):
        return self.iter_idx >= len(self.event1to2) - 1

    def get_time(self):
        return self.iter_idx

    def _start_wait(self):
        super(MockSequence, self)._start_wait()
        self.target_history.append(self.next_trial)


class TargetCapture(Sequence):
    '''
    This is a generic cued target capture skeleton, to form as a common ancestor to the most
    common type of motor control task.
    '''
    status = FSMTable(
        wait=StateTransitions(start_trial="target"),
        target=StateTransitions(enter_target="hold",
                                timeout="timeout_penalty"),
        hold=StateTransitions(leave_early="hold_penalty",
                              hold_complete="targ_transition"),
        targ_transition=StateTransitions(trial_complete="reward",
                                         trial_abort="wait",
                                         trial_incomplete="target"),
        timeout_penalty=StateTransitions(timeout_penalty_end="targ_transition",
                                         end_state=True),
        hold_penalty=StateTransitions(hold_penalty_end="targ_transition",
                                      end_state=True),
        reward=StateTransitions(reward_end="wait",
                                stoppable=False,
                                end_state=True))

    trial_end_states = ['reward', 'timeout_penalty', 'hold_penalty']

    # initial state
    state = "wait"

    target_index = -1  # Helper variable to keep track of which target to display within a trial
    tries = 0  # Helper variable to keep track of the number of failed attempts at a given trial.

    sequence_generators = []

    reward_time = traits.Float(.5, desc="Length of reward dispensation")
    hold_time = traits.Float(.2, desc="Length of hold required at targets")
    hold_penalty_time = traits.Float(
        1, desc="Length of penalty time for target hold error")
    timeout_time = traits.Float(10, desc="Time allowed to go between targets")
    timeout_penalty_time = traits.Float(
        1, desc="Length of penalty time for timeout error")
    max_attempts = traits.Int(10,
                              desc='The number of attempts at a target before\
        skipping to the next one')

    def _start_wait(self):
        # Call parent method to draw the next target capture sequence from the generator
        super()._start_wait()

        # number of times this sequence of targets has been attempted
        self.tries = 0

        # index of current target presented to subject
        self.target_index = -1

        # number of targets to be acquired in this trial
        self.chain_length = len(self.targs)

    def _parse_next_trial(self):
        '''Check that the generator has the required data'''
        self.targs = self.next_trial

        # TODO error checking

    def _start_target(self):
        self.target_index += 1
        self.target_location = self.targs[self.target_index]

    def _end_target(self):
        '''Nothing generic to do.'''
        pass

    def _start_hold(self):
        '''Nothing generic to do.'''
        pass

    def _while_hold(self):
        '''Nothing generic to do.'''
        pass

    def _end_hold(self):
        '''Nothing generic to do.'''
        pass

    def _start_targ_transition(self):
        '''Nothing generic to do. Child class might show/hide targets'''
        pass

    def _while_targ_transition(self):
        '''Nothing generic to do.'''
        pass

    def _end_targ_transition(self):
        '''Nothing generic to do.'''
        pass

    def _start_timeout_penalty(self):
        self.tries += 1
        self.target_index = -1

    def _while_timeout_penalty(self):
        '''Nothing generic to do.'''
        pass

    def _end_timeout_penalty(self):
        '''Nothing generic to do.'''
        pass

    def _start_hold_penalty(self):
        self.tries += 1
        self.target_index = -1

    def _while_hold_penalty(self):
        '''Nothing generic to do.'''
        pass

    def _end_hold_penalty(self):
        '''Nothing generic to do.'''
        pass

    def _start_reward(self):
        '''Nothing generic to do.'''
        pass

    def _while_reward(self):
        '''Nothing generic to do.'''
        pass

    def _end_reward(self):
        '''Nothing generic to do.'''
        pass

    ################## State transition test functions ##################
    def _test_start_trial(self, time_in_state):
        '''Start next trial automatically. You may want this to instead be
            - a random delay
            - require some initiation action
        '''
        return True

    def _test_timeout(self, time_in_state):
        return time_in_state > self.timeout_time

    def _test_hold_complete(self, time_in_state):
        '''
        Test whether the target is held long enough to declare the
        trial a success

        Possible options
            - Target held for the minimum required time (implemented here)
            - Sensorized object moved by a certain amount
            - Sensorized object moved to the required location
            - Manually triggered by experimenter
        '''
        return time_in_state > self.hold_time

    def _test_trial_complete(self, time_in_state):
        '''Test whether all targets in sequence have been acquired'''
        return self.target_index == self.chain_length - 1

    def _test_trial_abort(self, time_in_state):
        '''Test whether the target capture sequence should just be skipped due to too many failures'''
        return (not self._test_trial_complete(time_in_state)) and (
            self.tries == self.max_attempts)

    def _test_trial_incomplete(self, time_in_state):
        '''Test whether the target capture sequence needs to be restarted'''
        return (not self._test_trial_complete(time_in_state)) and (
            self.tries < self.max_attempts)

    def _test_timeout_penalty_end(self, time_in_state):
        return time_in_state > self.timeout_penalty_time

    def _test_hold_penalty_end(self, time_in_state):
        return time_in_state > self.hold_penalty_time

    def _test_reward_end(self, time_in_state):
        return time_in_state > self.reward_time

    def _test_enter_target(self, time_in_state):
        '''This function is task-specific and not much can be done generically'''
        return False

    def _test_leave_early(self, time_in_state):
        '''This function is task-specific and not much can be done generically'''
        return False

    def update_report_stats(self):
        '''
        see experiment.Experiment.update_report_stats for docs
        '''
        super().update_report_stats()
        self.reportstats['Trial #'] = self.calc_trial_num()
        self.reportstats['Reward/min'] = np.round(self.calc_events_per_min(
            'reward', 120.),
                                                  decimals=2)

    @classmethod
    def get_desc(cls, params, report):
        '''Used by the database infrastructure to generate summary stats on this task'''
        duration = report[-1][-1] - report[0][-1]
        reward_count = 0
        for item in report:
            if item[0] == "reward":
                reward_count += 1
        return "{} rewarded trials in {} min".format(reward_count, duration)