class MotionData(traits.HasTraits):
    '''Enable reading of raw motiontracker data from the Phasespace system.'''

    # Number of markers the motiontracker source should return
    marker_count = traits.Int(8, desc="Number of markers to return")

    def init(self):
        '''
        Secondary init function. See riglib.experiment.Experiment.init().

        Prior to starting the task, set up the DataSource for interacting
        with the motion tracker system and register it with the SinkRegister
        so that the data gets saved to file as it is collected.
        '''
        from riglib import sink, source

        source_cls, source_kwargs = self.source_class
        self.motiondata = source.DataSource(source_cls, **source_kwargs)

        self.sinks = sink.sinks
        self.sinks.register(self.motiondata)
        super(MotionData, self).init()

    @property
    def source_class(self):
        '''
        Specify the source class as a property so that future descendant
        classes can substitute a different type of source.
        '''
        from riglib import motiontracker
        return motiontracker.make(self.marker_count), dict()

    def run(self):
        '''
        Start the motiontracker source immediately before the task FSM begins
        executing, and stop it whether the FSM exits normally or with an
        error. See riglib.experiment.Experiment.run().
        '''
        self.motiondata.start()
        try:
            super(MotionData, self).run()
        finally:
            self.motiondata.stop()

    def join(self):
        '''
        Re-join the 'motiondata' source process before cleaning up the
        experiment thread. See riglib.experiment.Experiment.join().
        '''
        self.motiondata.join()
        super(MotionData, self).join()

    def _start_None(self):
        '''Stop the motiondata source when the 'None' state starts (task stop).'''
        self.motiondata.stop()
        super(MotionData, self)._start_None()
class CrystaLaser(traits.HasTraits):
    '''Adds an arduino-controlled crystalaser to self.lasers.'''

    # laser_serial_port = traits.Str(desc="Serial port used to communicate with arduino")
    crystalaser_pin = traits.Int(12, desc="Pin number for laser")

    def __init__(self, *args, **kwargs):
        # Create the laser list before chained constructors run
        self.lasers = []
        super().__init__(*args, **kwargs)

    def init(self, *args, **kwargs):
        # Set up the GPIO pin that drives the laser and expose it on self.lasers
        gpio = ArduinoGPIO('/dev/crystalaser')
        gpio.name = 'crystalaser'
        gpio.port = self.crystalaser_pin
        self.lasers.append(gpio)
        super().init(*args, **kwargs)
class RedGreen(Sequence, Pygame):
    '''
    Pygame task: a dot is drawn in the first color during 'pretrial', then
    switches to the second color ('trial') after a random delay; responding
    before the switch is a premature error.
    '''
    status = dict(
        wait = dict(start_trial="pretrial", premature="penalty", stop=None),
        pretrial = dict(go="trial", premature="penalty"),
        trial = dict(correct="reward", timeout="penalty"),
        reward = dict(post_reward="wait"),
        penalty = dict(post_penalty="wait"),
    )

    # BUGFIX(doc): the desc previously said "between 0 and 1", contradicting
    # the 0-255 default values actually used with pygame.draw.circle.
    colors = traits.Array(shape=(2, 3), value=[[255,0,0],[0,255,0]], desc="Tuple of colors (c1, c2) where c* = [r,g,b] between 0 and 255")
    dot_radius = traits.Int(100, desc='dot size')
    delay_range = traits.Tuple((0.5, 5.), desc='delay before switching to second color will be drawn from uniform distribution in this range')

    def _while_pretrial(self):
        '''Draw the shrinking, blinking color-1 dot on both screen halves.'''
        import pygame
        self.surf.fill(self.background)
        # Duplicate the dot 1920 px to the right for the second display half
        right = [self.next_trial[0] + 1920, self.next_trial[1]]
        ts = time.time() - self.start_time
        # Shrink from init_dot down to dot_radius; init_dot/shrinklen/freq
        # are module-level constants defined elsewhere in this file
        dotsize = (init_dot - self.dot_radius) * (shrinklen - min(ts, shrinklen)) + self.dot_radius
        # Blink with period `freq` ms: draw only during the first half of each period
        if (np.mod(np.round(ts*1000), freq) < freq/2):
            pygame.draw.circle(self.surf, self.colors[0], self.next_trial, int(dotsize))
            pygame.draw.circle(self.surf, self.colors[0], right, int(dotsize))
        self.flip_wait()

    def _while_trial(self):
        '''Draw the blinking color-2 dot at fixed radius on both screen halves.'''
        import pygame
        self.surf.fill(self.background)
        right = [self.next_trial[0] + 1920, self.next_trial[1]]
        ts = time.time() - self.start_time
        if (np.mod(np.round(ts*1000), freq) < freq/2):
            pygame.draw.circle(self.surf, self.colors[1], self.next_trial, self.dot_radius)
            pygame.draw.circle(self.surf, self.colors[1], right, self.dot_radius)
        self.flip_wait()

    def _start_pretrial(self):
        # Uniform random delay within delay_range before the color switch
        self._wait_time = np.random.rand()*abs(self.delay_range[1]-self.delay_range[0]) + self.delay_range[0]

    def _test_correct(self, ts):
        # Any response event during 'trial' counts as correct
        return self.event is not None

    def _test_go(self, ts):
        # Switch colors after the shrink animation plus the random delay
        return ts > self._wait_time + shrinklen

    def _test_premature(self, ts):
        # Any response event before the 'go' transition is premature
        return self.event is not None
class BMICursorBiasCatch(BMICursorBias):
    '''
    BMI cursor task that injects a velocity bias on periodic "catch" trials.
    Every `catch_interval` trials, a bias of +/- `bias_magnitude` (random
    sign) is added to the decoded velocity state during the 'target' state.
    '''
    catch_interval = traits.Int(10, desc='Number of trials between bias catch trials')
    bias_magnitude = traits.Float(1, desc='Strength of velocity bias in cm/sec')

    catch_flag = False   # True while the current trial is a catch trial
    catch_count = 0      # trials elapsed since the last catch trial
    bias = 0.0           # signed bias applied on catch trials (cm/sec)

    def init(self):
        # Record the applied bias on every cycle in the HDF task data
        self.add_dtype('bias', 'f8', 1)
        super(BMICursorBiasCatch, self).init()

    def _start_wait(self):
        if self.catch_count == self.catch_interval:
            # Catch trial: choose the bias sign at random
            self.catch_flag = True
            self.catch_count = 0
            if np.random.rand(1) > .5:
                self.bias = -1*self.bias_magnitude
            else:
                self.bias = self.bias_magnitude
            # BUGFIX: converted py2 print statement to a call; this file
            # already uses py3-only no-arg super() elsewhere
            print("bias", self.bias)
        else:
            self.catch_flag = False
            self.catch_count += 1
        # NOTE(review): this calls super() on BMICursorBias, deliberately(?)
        # skipping BMICursorBias._start_wait -- confirm this is intended
        super(BMICursorBias, self)._start_wait()

    def _cycle(self):
        if self.catch_flag and self.state == 'target':
            # Add the bias to decoder state element [3,0] (a velocity term);
            # /6 presumably converts cm/sec to per-subbin units -- TODO confirm
            self.decoder.filt.state.mean[3, 0] += self.bias/6
        if self.catch_flag:
            self.task_data['bias'] = self.bias
        else:
            self.task_data['bias'] = 0.0
        super(BMICursorBiasCatch, self)._cycle()
class Window(LogExperiment):
    '''Generic stereo window'''
    status = dict(draw=dict(stop=None))
    state = "draw"
    stop = False

    window_size = traits.Tuple((1920 * 2, 1080), descr='window size, in pixels')
    # window_size = (1920*2, 1080)
    background = (0, 0, 0, 1)

    # Screen parameters, all in centimeters -- adjust for monkey
    fov = np.degrees(np.arctan(14.65 / (44.5 + 3))) * 2
    screen_dist = 44.5 + 3
    iod = 2.5  # intraocular distance

    show_environment = traits.Int(0)

    def __init__(self, *args, **kwargs):
        super(Window, self).__init__(*args, **kwargs)
        self.models = []
        self.world = None
        self.event = None

        # os.popen('sudo vbetool dpms on')

        if self.show_environment:
            self.add_model(Box())

    def set_os_params(self):
        '''Set SDL environment variables before pygame initializes the window.'''
        os.environ['SDL_VIDEO_WINDOW_POS'] = config.display_start_pos
        os.environ['SDL_VIDEO_X11_WMCLASS'] = "monkey_experiment"

    def screen_init(self):
        '''Create the GL window and set up the render state, camera, and world.'''
        self.set_os_params()
        pygame.init()
        pygame.display.gl_set_attribute(pygame.GL_DEPTH_SIZE, 24)
        flags = pygame.DOUBLEBUF | pygame.HWSURFACE | pygame.OPENGL | pygame.NOFRAME
        try:
            # Prefer a multisampled (antialiased) GL context
            pygame.display.gl_set_attribute(pygame.GL_MULTISAMPLEBUFFERS, 1)
            self.surf = pygame.display.set_mode(self.window_size, flags)
        except pygame.error:
            # BUGFIX: was a bare `except:`; narrowed to pygame.error, which
            # set_mode raises when the requested context is unavailable.
            # Fall back to a non-multisampled context.
            pygame.display.gl_set_attribute(pygame.GL_MULTISAMPLEBUFFERS, 0)
            self.surf = pygame.display.set_mode(self.window_size, flags)

        glEnable(GL_BLEND)
        glDepthFunc(GL_LESS)
        glEnable(GL_DEPTH_TEST)
        glEnable(GL_TEXTURE_2D)
        glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
        glClearColor(*self.background)
        glClearDepth(1.0)
        glDepthMask(GL_TRUE)

        self.renderer = self._get_renderer()

        # this effectively determines the modelview matrix
        self.world = Group(self.models)
        self.world.init()

        # up vector is always (0,0,1), why would I ever need to roll the camera?!
        self.set_eye((0, -self.screen_dist, 0), (0, 0))

    def _get_renderer(self):
        '''Construct the stereo renderer with fixed near/far clip planes.'''
        near = 1
        far = 1024
        return stereo.MirrorDisplay(self.window_size, self.fov, near, far, self.screen_dist, self.iod)

    def set_eye(self, pos, vec, reset=True):
        '''Set the eye's position and direction.
        Camera starts at (0,0,0), pointing towards positive y'''
        # BUGFIX: `reset` was previously hard-coded to True here, silently
        # ignoring the caller's argument; pass it through.
        # NOTE(review): pos components are passed as (x, z, y) -- presumably
        # intentional axis remapping for the renderer; confirm before changing.
        self.world.translate(pos[0], pos[2], pos[1], reset=reset).rotate_x(-90)
        self.world.rotate_y(vec[0]).rotate_x(vec[1])

    def add_model(self, model):
        if self.world is None:
            # world doesn't exist yet, add the model to cache
            self.models.append(model)
        else:
            # We're already running, initialize the model and add it to the world
            model.init()
            self.world.add(model)

    def show_object(self, obj, show=False):
        '''
        Show or hide an object. This function is an abstraction so that
        tasks don't need to know about attach/detach
        '''
        if show:
            obj.attach()
        else:
            obj.detach()

    def draw_world(self):
        '''Clear, render the world, and flip the display buffers.'''
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        self.renderer.draw(self.world)
        pygame.display.flip()
        self.renderer.draw_done()

    def _get_event(self):
        # Return the first pending KEYDOWN event as (key, type), or None
        for e in pygame.event.get(pygame.KEYDOWN):
            return (e.key, e.type)

    def _start_None(self):
        pygame.display.quit()

    def _test_stop(self, ts):
        '''
        Stop the task if the escape key is pressed, or if the super
        _test_stop instructs a stop
        '''
        super_stop = super(Window, self)._test_stop(ts)
        from pygame import K_ESCAPE
        return super_stop or self.event is not None and self.event[0] == K_ESCAPE

    def requeue(self):
        self.renderer._queue_render(self.world)

    def _cycle(self):
        self.requeue()
        self.draw_world()
        super(Window, self)._cycle()
        self.event = self._get_event()
class MouseSpeller(LinearlyDecreasingAssist, BMILoop, Sequence, Window):
    '''
    On-screen keyboard speller driven by a mouse-like plant. The subject
    acquires and click-holds a sequence of key targets; clicks on wrong keys
    insert a 'backspace' target into the remaining sequence.
    '''
    ## Traits: runtime-configurable parameters
    reward_time = traits.Float(.5, desc="Length of juice reward")
    target_radius = traits.Float(1.3, desc="Radius of targets in cm")
    window_size = traits.Tuple((1920*2, 1080), descr='window size')
    hold_time = traits.Float(.2, desc="Length of hold required at targets")
    hold_penalty_time = traits.Float(1, desc="Length of penalty time for target hold error")
    timeout_time = traits.Float(10, desc="Time allowed to go between targets")
    timeout_penalty_time = traits.Float(1, desc="Length of penalty time for timeout error")
    max_attempts = traits.Int(10, desc='The number of attempts at a target before skipping to the next one')
    max_error_penalty = traits.Float(3, desc='Max number of penalties (unrewarded backspaces) for false positive clicks')
    background = (0.5, 0.5, 0.5, 1)
    state = 'wait'

    sequence_generators = ['rand_key_seq_gen', 'mackenzie_soukoreff_corpus']

    status = dict(
        wait = dict(start_trial="target", stop=None),
        target = dict(enter_target="hold", timeout="timeout_penalty", stop=None, false_click="target"),
        hold = dict(hold_complete="targ_transition", timeout="timeout_penalty", leave_early="target"),
        targ_transition = dict(trial_complete="reward", trial_abort="wait", trial_incomplete="target"),
        timeout_penalty = dict(timeout_penalty_end="targ_transition"),
        hold_penalty = dict(hold_penalty_end="targ_transition"),
        reward = dict(reward_end="wait")
    )
    trial_end_states = ['reward', 'timeout_penalty', 'hold_penalty']

    def __init__(self, *args, **kwargs):
        kwargs['instantiate_targets'] = False
        self.plant = mouse  # plants.CursorPlant() #MouseFakeKF() #plantlist[self.plant_type]
        super(MouseSpeller, self).__init__(*args, **kwargs)

        ## instantiate the keyboard targets
        self.targets = dict()
        for key in keyboard_spec:
            targ = KeyTarget(target_radius=self.target_radius, target_color=np.array([0.25, 0.25, 0.25, 1]))
            # targ = VirtualRectangularTarget(target_width=self.target_radius*2, target_color=np.array([0.25, 0.25, 0.25, 1]))
            self.targets[key] = targ
            p = keyboard_spec[key].pos
            p[1] += 2
            targ.move_to_position(p)
            for model in targ.graphics_models:
                self.add_model(model)

        # Add graphics models for the plant and targets to the window
        if hasattr(self.plant, 'graphics_models'):
            for model in self.plant.graphics_models:
                self.add_model(model)

        self.current_target = None

        for attr in self.plant.hdf_attrs:
            self.add_dtype(*attr)

        self.text_output = ''

    def init(self):
        self.add_dtype('target', 'f8', (3,))
        self.add_dtype('target_index', 'i', (1,))
        super(MouseSpeller, self).init()

    def _cycle(self):
        '''
        Calls any update functions necessary and redraws screen. Runs 60x per second.
        '''
        # Visually depress any key the cursor is clicking inside
        for key, targ in self.targets.items():
            in_targ = targ.pt_inside(self.plant.get_endpoint_pos())
            if in_targ and self.plant.click_:
                targ.click()
            else:
                targ.unclick()

        if not (self.current_target is None):
            self.task_data['target'] = self.current_target.get_position()
        else:
            self.task_data['target'] = np.ones(3)*np.nan
        self.task_data['target_index'] = self.target_index

        self.move_plant()

        ## Save plant status to HDF file
        plant_data = self.plant.get_data_to_save()
        for key in plant_data:
            self.task_data[key] = plant_data[key]

        super(MouseSpeller, self)._cycle()

    def move_plant(self):
        super(MouseSpeller, self).move_plant()

    def _test_enter_target(self, ts):
        '''
        return true if the distance between center of cursor and target is
        smaller than the cursor radius
        '''
        ent_targ = self.current_target.pt_inside(self.plant.get_endpoint_pos())
        clicked = self.plant.click_
        return ent_targ and not clicked

    def _test_leave_early(self, ts):
        '''
        return true if cursor moves outside the exit radius
        '''
        outside = not self.current_target.pt_inside(self.plant.get_endpoint_pos())
        if outside:
            self.target_index -= 1
        return outside

    def _record_char(self, new_char):
        '''Append the character corresponding to `new_char` to the text output.'''
        # BUGFIX: previously the parameter was immediately overwritten with
        # self.targs[self.target_index], so the key passed by
        # _test_false_click was silently ignored.
        if new_char == 'space':
            new_char = ' '
        if new_char == 'enter':
            new_char = '\n'
        if new_char == 'shift':
            new_char = ''
        if new_char == 'backspace' and len(self.text_output) > 0:
            self.text_output = self.text_output[:-1]
        elif new_char == 'backspace':
            pass
        else:
            self.text_output += new_char

    def _test_hold_complete(self, ts):
        hold_complete = self.plant.click_ and self.current_target.pt_inside(self.plant.get_endpoint_pos())
        if hold_complete:
            self._record_char(self.targs[self.target_index])
        return hold_complete

    def update_report_stats(self):
        '''
        see experiment.Experiment.update_report_stats for docs
        '''
        super(MouseSpeller, self).update_report_stats()
        self.reportstats['Text output'] = self.text_output

    def _test_timeout(self, ts):
        return ts > self.timeout_time

    def _test_timeout_penalty_end(self, ts):
        return ts > self.timeout_penalty_time

    def _test_hold_penalty_end(self, ts):
        return ts > self.hold_penalty_time

    def _test_trial_complete(self, ts):
        return self.target_index == len(self.targs) - 1

    def _test_trial_incomplete(self, ts):
        return (not self._test_trial_complete(ts)) and (self.tries < self.max_attempts)

    def _test_trial_abort(self, ts):
        return (not self._test_trial_complete(ts)) and (self.tries == self.max_attempts)

    def _test_reward_end(self, ts):
        return ts > self.reward_time

    def _test_false_click(self, ts):
        '''True on a click rising edge inside any key other than the current target.'''
        if not self.plant.click_posedge:
            return False
        for key, targ in self.targets.items():
            if targ == self.current_target:
                # BUGFIX: was `return False`, which stopped scanning the
                # remaining keys as soon as iteration reached the current
                # target (dict-order dependent); skip it instead.
                continue
            in_targ = targ.pt_inside(self.plant.get_endpoint_pos())
            if in_targ:
                # add backspace as a target
                self.n_false_clicks_this_trial += 1
                if self.n_false_clicks_this_trial < self.max_error_penalty:
                    self.targs.insert(self.target_index, 'backspace')
                    self.target_index -= 1
                self.current_target.reset()
                self._record_char(key)
                return True
        # the click was outside any of the targets
        return False

    #### STATE FUNCTIONS ####
    def _parse_next_trial(self):
        self.targs = list(self.next_trial)

    def _start_wait(self):
        super(MouseSpeller, self)._start_wait()
        self.n_false_clicks_this_trial = 0
        self.tries = 0
        self.target_index = -1
        # hide targets
        for key in self.targets:
            self.targets[key].reset()

        # get target locations for this trial
        self._parse_next_trial()
        # self.chain_length = len(self.targs)

    def _start_target(self):
        self.target_index += 1
        # move a target to current location (target1 and target2 alternate moving) and set location attribute
        target = self.targets[self.targs[self.target_index]]
        self.current_target = target
        # self.target_location = target.pos
        target.cue_trial_start()

    def _end_hold(self):
        # change current target color to green
        self.current_target.reset()

    def _start_hold_penalty(self):
        # hide targets
        for key in self.targets:
            self.targets[key].reset()
        self.tries += 1
        self.target_index = -1

    def _start_timeout_penalty(self):
        # hide targets
        for key in self.targets:
            self.targets[key].reset()
        self.tries += 1
        self.target_index = -1

    def _start_targ_transition(self):
        self.current_target.reset()

    def _start_reward(self):
        super(MouseSpeller, self)._start_reward()
        self.current_target.cue_trial_end_success()

    @staticmethod
    def rand_key_seq_gen(length=1000, seq_len=2):
        '''Generate `length` random sequences of `seq_len` keys each.'''
        key_sequences = []
        # BUGFIX: list() needed -- py3 dict views are not indexable
        keys = list(keyboard_spec.keys())
        n_keys = len(keys)
        for k in range(length):
            inds = np.random.randint(0, n_keys, seq_len)
            key_sequences.append([keys[m] for m in inds])
        return key_sequences

    @staticmethod
    def mackenzie_soukoreff_corpus(length=1000):
        '''Build trials from the MacKenzie-Soukoreff phrase corpus file.'''
        trials = []
        missing_chars = []
        # BUGFIX: file handle was never closed; use a context manager
        with open('/storage/task_data/MouseSpeller/phrases2.txt') as fh:
            for line in fh:
                for char in line:
                    if char == ' ':
                        char = 'space'
                    elif char in ['\n', '\r']:
                        char = 'enter'

                    if char in keyboard_spec:
                        trials.append([char])
                    elif chr(ord(char) + (ord('a') - ord('A'))) in keyboard_spec:
                        # uppercase letter: lowercase it and prepend 'shift'
                        char = chr(ord(char) + (ord('a') - ord('A')))
                        trials.append(['shift', char])
                    elif char not in keyboard_spec:
                        missing_chars.append(char)
        # BUGFIX: converted py2 print statement to a call
        print("missing_chars", missing_chars)
        return trials

    def create_assister(self):
        self.assister = FeedbackControllerAssist(mouse_motion_model, style='mixing')

    def create_goal_calculator(self):
        self.goal_calculator = goal_calculators.ZeroVelocityGoal(self.decoder.ssm)

    def get_target_BMI_state(self, *args):
        '''
        Run the goal calculator to determine the target state of the task
        '''
        if self.current_target is None:
            target_pos = np.zeros(2)
        else:
            # planar position: x and z components only
            target_pos = self.current_target.get_position()[[0, 2]]

        opt_click_state = float(self.state == 'hold')
        target_pos_state = np.hstack([target_pos, opt_click_state])
        data, solution_updated = self.goal_calculator(target_pos_state)
        target_state, error = data
        return np.tile(np.array(target_state).reshape(-1, 1), [1, self.decoder.n_subbins])
class LFP_Mod(BMILoop, Sequence, Window):
    '''
    LFP modulation task: an LFP-power-driven cursor must be held inside a
    single square target; trials are aborted if total power exceeds the
    power cap.
    '''
    background = (0, 0, 0, 1)

    plant_visible = traits.Bool(True, desc='Specifies whether entire plant is displayed or just endpoint')
    lfp_cursor_rad = traits.Float(.5, desc="length of LFP cursor")
    lfp_cursor_color = (.5, 0, .5, .75)
    lfp_plant_type_options = plantlist.keys()
    lfp_plant_type = traits.OptionsList(*plantlist, bmi3d_input_options=plantlist.keys())
    window_size = traits.Tuple((1920*2, 1080), desc='window size')
    lfp_frac_lims = traits.Tuple((0., 0.35), desc='fraction limits')
    xlfp_frac_lims = traits.Tuple((-.7, 1.7), desc='x dir fraction limits')
    lfp_control_band = traits.Tuple((25, 40), desc='beta power band limits')
    lfp_totalpw_band = traits.Tuple((1, 100), desc='total power band limits')
    xlfp_control_band = traits.Tuple((0, 5), desc='x direction band limits')
    n_steps = traits.Int(2, desc='moving average for decoder')
    powercap = traits.Float(1, desc="Timeout for total power above this")
    zboundaries = (-12, 12)

    status = dict(
        wait = dict(start_trial="lfp_target", stop=None),
        lfp_target = dict(enter_lfp_target="lfp_hold", powercap_penalty="powercap_penalty", stop=None),
        lfp_hold = dict(leave_early="lfp_target", lfp_hold_complete="reward", powercap_penalty="powercap_penalty"),
        powercap_penalty = dict(powercap_penalty_end="lfp_target"),
        reward = dict(reward_end="wait")
    )

    static_states = []  # states in which the decoder is not run
    trial_end_states = ['reward']
    lfp_cursor_on = ['lfp_target', 'lfp_hold']

    # initial state
    state = "wait"

    # create settable traits
    reward_time = traits.Float(.5, desc="Length of juice reward")
    lfp_target_rad = traits.Float(3.6, desc="Length of targets in cm")
    lfp_hold_time = traits.Float(.2, desc="Length of hold required at lfp targets")
    lfp_hold_var = traits.Float(.05, desc="Length of hold variance required at lfp targets")
    hold_penalty_time = traits.Float(1, desc="Length of penalty time for target hold error")
    powercap_penalty_time = traits.Float(1, desc="Length of penalty time for timeout error")
    # max_attempts = traits.Int(10, desc='The number of attempts at a target before\
    #     skipping to the next one')
    session_length = traits.Float(0, desc="Time until task automatically stops. Length of 0 means no auto stop.")
    # plant_hide_rate = traits.Float(0.0, desc='If the plant is visible, specifies a percentage of trials where it will be hidden')

    lfp_target_color = (123/256., 22/256., 201/256., .5)
    mc_target_color = (1, 0, 0, .5)

    target_index = -1  # Helper variable to keep track of which target to display within a trial
    # tries = 0        # Helper variable to keep track of the number of failed attempts at a given trial.

    cursor_visible = False  # Determines when to hide the cursor.
    no_data_count = 0       # Counter for number of missing data frames in a row

    sequence_generators = ['lfp_mod_4targ']

    def __init__(self, *args, **kwargs):
        super(LFP_Mod, self).__init__(*args, **kwargs)
        self.cursor_visible = True
        # BUGFIX: converted py2 print statements to calls; this file already
        # uses py3-only no-arg super() elsewhere
        print('INIT FRAC LIMS: ', self.lfp_frac_lims)

        # Pass all decoder-relevant traits through to the decoder/filter
        dec_params = dict(lfp_frac_lims = self.lfp_frac_lims,
                          xlfp_frac_lims = self.xlfp_frac_lims,
                          powercap = self.powercap,
                          zboundaries = self.zboundaries,
                          lfp_control_band = self.lfp_control_band,
                          lfp_totalpw_band = self.lfp_totalpw_band,
                          xlfp_control_band = self.xlfp_control_band,
                          n_steps = self.n_steps)
        self.decoder.filt.init_from_task(**dec_params)
        self.decoder.init_from_task(**dec_params)

        self.lfp_plant = plantlist[self.lfp_plant_type]
        if self.lfp_plant_type == 'inv_cursor_onedimLFP':
            print('MAKE SURE INVERSE GENERATOR IS ON')

        self.plant_vis_prev = True
        self.current_assist_level = 0
        self.learn_flag = False

        if hasattr(self.lfp_plant, 'graphics_models'):
            for model in self.lfp_plant.graphics_models:
                self.add_model(model)

        # Instantiate the targets
        ''' height and width on kinarm machine are 2.4.
        Here we make it 2.4/8*12 = 3.6 '''
        lfp_target = VirtualSquareTarget(target_radius=self.lfp_target_rad, target_color=self.lfp_target_color)
        self.targets = [lfp_target]

        # Initialize target location variable
        self.target_location_lfp = np.array([-100, -100, -100])

        # Declare any plant attributes which must be saved to the HDF file at the _cycle rate
        for attr in self.lfp_plant.hdf_attrs:
            self.add_dtype(*attr)

    def init(self):
        self.plant = DummyPlant()
        self.add_dtype('lfp_target', 'f8', (3,))
        self.add_dtype('target_index', 'i', (1,))
        self.add_dtype('powercap_flag', 'i', (1,))
        for target in self.targets:
            for model in target.graphics_models:
                self.add_model(model)
        super(LFP_Mod, self).init()

    def _cycle(self):
        '''
        Calls any update functions necessary and redraws screen. Runs 60x per second.
        '''
        self.task_data['loop_time'] = self.iter_time()
        self.task_data['lfp_target'] = self.target_location_lfp.copy()
        self.task_data['target_index'] = self.target_index
        # self.task_data['internal_decoder_state'] = self.decoder.filt.current_lfp_pos
        self.task_data['powercap_flag'] = self.decoder.filt.current_powercap_flag
        self.move_plant()

        ## Save plant status to HDF file
        lfp_plant_data = self.lfp_plant.get_data_to_save()
        for key in lfp_plant_data:
            self.task_data[key] = lfp_plant_data[key]

        super(LFP_Mod, self)._cycle()

    def move_plant(self):
        '''Run the decoder on the latest neural features and drive the LFP plant.'''
        feature_data = self.get_features()

        # Save the "neural features" (e.g. spike counts vector) to HDF file
        for key, val in feature_data.items():
            self.task_data[key] = val

        Bu = None
        assist_weight = 0
        target_state = np.zeros([self.decoder.n_states, self.decoder.n_subbins])

        ## Run the decoder
        if self.state not in self.static_states:
            neural_features = feature_data[self.extractor.feature_type]
            self.call_decoder(neural_features, target_state, Bu=Bu, assist_level=assist_weight, feature_type=self.extractor.feature_type)

        ## Drive the plant to the decoded state, if permitted by the constraints of the plant
        self.lfp_plant.drive(self.decoder)
        self.task_data['decoder_state'] = decoder_state = self.decoder.get_state(shape=(-1, 1))
        return decoder_state

    def run(self):
        '''
        See experiment.Experiment.run for documentation.
        '''
        # Fire up the plant. For virtual/simulation plants, this does little/nothing.
        self.lfp_plant.start()
        try:
            super(LFP_Mod, self).run()
        finally:
            self.lfp_plant.stop()

    ##### HELPER AND UPDATE FUNCTIONS ####
    def update_cursor_visibility(self):
        '''Update cursor visible flag to hide cursor if there has been no good data for more than 3 frames in a row'''
        prev = self.cursor_visible
        if self.no_data_count < 3:
            self.cursor_visible = True
            if prev != self.cursor_visible:
                self.show_object(self.cursor, show=True)
        else:
            self.cursor_visible = False
            if prev != self.cursor_visible:
                self.show_object(self.cursor, show=False)

    def update_report_stats(self):
        '''
        see experiment.Experiment.update_report_stats for docs
        '''
        super(LFP_Mod, self).update_report_stats()
        self.reportstats['Trial #'] = self.calc_trial_num()
        self.reportstats['Reward/min'] = np.round(self.calc_events_per_min('reward', 120), decimals=2)

    #### TEST FUNCTIONS ####
    def _test_powercap_penalty(self, ts):
        if self.decoder.filt.current_powercap_flag:
            # Turn off power cap flag:
            self.decoder.filt.current_powercap_flag = 0
            return True
        else:
            return False

    def _test_enter_lfp_target(self, ts):
        '''
        return true if the distance between center of cursor and target is
        smaller than the cursor radius in the x and z axis only
        '''
        cursor_pos = self.lfp_plant.get_endpoint_pos()
        dx = np.linalg.norm(cursor_pos[0] - self.target_location_lfp[0])
        dz = np.linalg.norm(cursor_pos[2] - self.target_location_lfp[2])
        in_targ = False
        if dx <= (self.lfp_target_rad/2.) and dz <= (self.lfp_target_rad/2.):
            in_targ = True
        return in_targ

    def _test_leave_early(self, ts):
        '''
        return true if cursor moves outside the exit radius
        '''
        cursor_pos = self.lfp_plant.get_endpoint_pos()
        dx = np.linalg.norm(cursor_pos[0] - self.target_location_lfp[0])
        dz = np.linalg.norm(cursor_pos[2] - self.target_location_lfp[2])
        out_of_targ = False
        if dx > (self.lfp_target_rad/2.) or dz > (self.lfp_target_rad/2.):
            out_of_targ = True
        return out_of_targ

    def _test_lfp_hold_complete(self, ts):
        return ts >= self.lfp_hold_time_plus_var

    def _test_powercap_penalty_end(self, ts):
        if ts > self.powercap_penalty_time:
            self.lfp_plant.turn_on()
        return ts > self.powercap_penalty_time

    def _test_reward_end(self, ts):
        return ts > self.reward_time

    def _test_stop(self, ts):
        if self.session_length > 0 and (self.get_time() - self.task_start_time) > self.session_length:
            self.end_task()
        return self.stop

    #### STATE FUNCTIONS ####
    def _parse_next_trial(self):
        self.targs = self.next_trial

    def _start_wait(self):
        super(LFP_Mod, self)._start_wait()
        self.tries = 0
        self.target_index = -1
        # hide targets
        for target in self.targets:
            target.hide()

        # get target locations for this trial
        self._parse_next_trial()
        self.chain_length = 1
        # Randomize the hold time within +/- lfp_hold_var of the nominal hold time
        self.lfp_hold_time_plus_var = self.lfp_hold_time + np.random.uniform(low=-1, high=1)*self.lfp_hold_var

    def _start_lfp_target(self):
        self.target_index += 1
        self.target_index = 0  # only 1 target

        target = self.targets[0]
        self.target_location_lfp = self.targs  # Just one target.
        target.move_to_position(self.target_location_lfp)
        target.cue_trial_start()

    def _start_lfp_hold(self):
        # make next target visible unless this is the final target in the trial
        # NOTE(review): with chain_length fixed at 1 this branch never runs here
        idx = (self.target_index + 1)
        if idx < self.chain_length:
            target = self.targets[idx % 2]
            target.move_to_position(self.targs[idx])

    def _end_lfp_hold(self):
        # change current target color to green
        self.targets[self.target_index % 2].cue_trial_end_success()

    def _start_timeout_penalty(self):
        # hide targets
        for target in self.targets:
            target.hide()
        self.tries += 1
        self.target_index = -1

    def _start_reward(self):
        super(LFP_Mod, self)._start_reward()
        # self.targets[self.target_index % 2].show()

    def _start_powercap_penalty(self):
        for target in self.targets:
            target.hide()
        self.lfp_plant.turn_off()

    @staticmethod
    def lfp_mod_4targ(nblocks=100, boundaries=(-18, 18, -12, 12), xaxis=-8):
        '''Mimics beta modulation task from Kinarm Rig:

        In Kinarm rig, the following linear transformations happen:
        1. LFP cursor is calculated
        2. mapped from fraction limits [0, .35] to [-1, 1] (unit_coordinates)
        3. udp sent to kinarm machine and multiplied by 8
        4. translated upward in the Y direction by + 2.5

        This means, our targets which are at -8, [-0.75, 2.5, 5.75, 9.0]
        must be translated down by 2.5 to: -8, [-3.25, 0. , 3.25, 6.5]
        then divided by 8: -1, [-0.40625, 0. , 0.40625, 0.8125 ] in unit_coordinates

        The radius is 1.2, which is 0.15 in unit_coordinates

        Now, we map this to a new system:
        - new_zero: (y1+y2) / 2
        - new_scale: (y2 - y1) / 2

        (([-0.40625, 0. , 0.40625, 0.8125 ]) * new_scale ) + new_zero

        new_zero = 0
        new_scale = 12
        12 * [-0.40625, 0. , 0.40625, 0.8125 ] = array([-4.875, 0. , 4.875, 9.75 ])
        '''
        new_zero = (boundaries[3]+boundaries[2]) / 2.
        new_scale = (boundaries[3] - boundaries[2]) / 2.
        kin_targs = np.array([-0.40625, 0., 0.40625, 0.8125])
        lfp_targ_y = (new_scale*kin_targs) + new_zero

        # Shuffle the 4 target heights within each block
        for i in range(nblocks):
            temp = lfp_targ_y.copy()
            np.random.shuffle(temp)
            if i == 0:
                z = temp.copy()
            else:
                z = np.hstack((z, temp))

        # Fixed X axis:
        x = np.tile(xaxis, (nblocks*4))
        y = np.zeros(nblocks*4)
        pairs = np.vstack([x, y, z]).T
        return pairs
class LFP_Mod_plus_MC_reach(LFP_Mod_plus_MC_hold):
    '''
    LFP modulation hold followed by a manual-control reach: after the LFP
    hold completes, a peripheral motor-control target must be acquired and
    held with the MC plant.
    '''
    mc_cursor_radius = traits.Float(.5, desc="Radius of cursor")
    mc_target_radius = traits.Float(3, desc="Radius of MC target")
    mc_cursor_color = (.5, 0, .5, 1)

    mc_plant_type_options = plantlist.keys()
    mc_plant_type = traits.OptionsList(*plantlist, bmi3d_input_options=plantlist.keys())
    origin_hold_time = traits.Float(.2, desc="Hold time in center")
    mc_periph_holdtime = traits.Float(.2, desc="Hold time in center")
    mc_timeout_time = traits.Float(10, desc="Time allowed to go between targets")

    exclude_parent_traits = ['goal_cache_block']

    # redefine this to NOT include marker_num, marker_count
    marker_num = traits.Int(14, desc='Index')
    marker_count = traits.Int(16, desc='Num of markers')

    scale_factor = 3.0  # scale factor for converting hand movement to screen movement (1cm hand movement = 3.5cm cursor movement)
    wait_flag = 1

    # NOTE!!! The marker on the hand was changed from #0 to #14 on
    # 5/19/13 after LED #0 broke. All data files saved before this date
    # have LED #0 controlling the cursor.
    limit2d = 1

    # state_file = open("/home/helene/preeya/tot_pw.txt","w")
    state_cnt = 0

    status = dict(
        wait = dict(start_trial="origin", stop=None),
        origin = dict(enter_origin="origin_hold", stop=None),
        origin_hold = dict(origin_hold_complete="lfp_target", leave_origin="hold_penalty", stop=None),
        lfp_target = dict(enter_lfp_target="lfp_hold", leave_origin="hold_penalty", powercap_penalty="powercap_penalty", stop=None),
        lfp_hold = dict(leave_early="lfp_target", lfp_hold_complete="mc_target", leave_origin="hold_penalty", powercap_penalty="powercap_penalty"),
        mc_target = dict(enter_mc_target='mc_hold', mc_timeout="timeout_penalty", stop=None),
        mc_hold = dict(leave_periph_early='hold_penalty', mc_hold_complete="reward"),
        powercap_penalty = dict(powercap_penalty_end="origin"),
        timeout_penalty = dict(timeout_penalty_end="wait"),
        hold_penalty = dict(hold_penalty_end="origin"),
        reward = dict(reward_end="wait"),
    )

    static_states = ['origin']  # states in which the decoder is not run
    trial_end_states = ['reward', 'timeout_penalty']
    lfp_cursor_on = ['lfp_target', 'lfp_hold', 'reward']

    sequence_generators = ['lfp_mod_plus_MC_reach', 'lfp_mod_plus_MC_reach_INV']

    def __init__(self, *args, **kwargs):
        # import pickle
        # decoder = pickle.load(open('/storage/decoders/cart20141216_03_cart_new2015_2.pkl'))
        # self.decoder = decoder
        super(LFP_Mod_plus_MC_reach, self).__init__(*args, **kwargs)

        mc_origin = VirtualCircularTarget(target_radius=self.mc_target_radius, target_color=RED)
        mc_periph = VirtualCircularTarget(target_radius=self.mc_target_radius, target_color=RED)
        lfp_target = VirtualSquareTarget(target_radius=self.lfp_target_rad, target_color=self.lfp_target_color)
        self.targets = [lfp_target, mc_origin, mc_periph]

        # #Should be unnecessary:
        # for target in self.targets:
        #     for model in target.graphics_models:
        #         self.add_model(model)
        # self.lfp_plant = plantlist[self.lfp_plant_type]
        # if hasattr(self.lfp_plant, 'graphics_models'):
        #     for model in self.lfp_plant.graphics_models:
        #         self.add_model(model)
        # self.mc_plant = plantlist[self.mc_plant_type]
        # if hasattr(self.mc_plant, 'graphics_models'):
        #     for model in self.mc_plant.graphics_models:
        #         self.add_model(model)

    def _parse_next_trial(self):
        t = self.next_trial
        self.lfp_targ = t['lfp']
        self.mc_targ_orig = t['origin']
        self.mc_targ_periph = t['periph']

    def _start_mc_target(self):
        # Turn off LFP things
        self.lfp_plant.turn_off()
        self.targets[0].hide()
        self.targets[1].hide()

        target = self.targets[2]  # MC target
        self.target_location_mc = self.mc_targ_periph
        target.move_to_position(self.target_location_mc)
        target.cue_trial_start()

    def _test_enter_mc_target(self, ts):
        cursor_pos = self.mc_plant.get_endpoint_pos()
        d = np.linalg.norm(cursor_pos - self.target_location_mc)
        return d <= (self.mc_target_radius - self.mc_cursor_radius)

    def _test_mc_timeout(self, ts):
        return ts > self.mc_timeout_time

    def _test_leave_periph_early(self, ts):
        cursor_pos = self.mc_plant.get_endpoint_pos()
        d = np.linalg.norm(cursor_pos - self.target_location_mc)
        rad = self.mc_target_radius - self.mc_cursor_radius
        return d > rad

    def _test_mc_hold_complete(self, ts):
        return ts > self.mc_periph_holdtime

    def _test_timeout_penalty_end(self, ts):
        # BUGFIX: was named `_timeout_penalty_end`; the FSM looks up
        # `_test_<event>` for the 'timeout_penalty_end' event declared in
        # `status`, so the method was never found under the old name.
        # Also converted the py2 print statement to a call.
        print('timeout', ts)
        # return ts > 1.
        return True

    # Backward-compatible alias for any caller using the old (typo'd) name
    _timeout_penalty_end = _test_timeout_penalty_end

    def _end_mc_hold(self):
        self.targets[2].cue_trial_end_success()

    # def _cycle(self):
    #     if self.state_cnt < 3600*3:
    #         self.state_cnt += 1
    #         s = "%s\n" % self.state
    #         self.state_file.write(str(s))
    #     if self.state_cnt == 3600*3:
    #         self.state_file.close()
    #     super(LFP_Mod_plus_MC_reach, self)._cycle()

    def _start_reward(self):
        super(LFP_Mod_plus_MC_reach, self)._start_reward()
        lfp_targ = self.targets[0]
        mc_orig = self.targets[1]
        lfp_targ.hide()
        mc_orig.hide()

    @staticmethod
    def lfp_mod_plus_MC_reach(nblocks=100, boundaries=(-18, 18, -12, 12), xaxis=-8, target_distance=6,
                              n_mc_targets=4, mc_target_angle_offset=0, **kwargs):
        '''Generate (lfp, origin, periph) trial dicts: shuffled LFP target
        heights plus peripheral MC targets on a circle of `target_distance`.'''
        new_zero = (boundaries[3]+boundaries[2]) / 2.
        new_scale = (boundaries[3] - boundaries[2]) / 2.
        kin_targs = np.array([-0.40625, 0., 0.40625, 0.8125])
        lfp_targ_y = (new_scale*kin_targs) + new_zero

        # Shuffle the 4 LFP target heights within each block
        for i in range(nblocks):
            temp = lfp_targ_y.copy()
            np.random.shuffle(temp)
            if i == 0:
                z = temp.copy()
            else:
                z = np.hstack((z, temp))

        # Fixed X axis:
        x = np.tile(xaxis, (nblocks*4))
        y = np.zeros(nblocks*4)
        lfp = np.vstack([x, y, z]).T

        origin = np.zeros((lfp.shape))

        # Shuffled peripheral target angles, n_mc_targets per block
        theta = []
        for i in range(nblocks*4):
            temp = np.arange(0, 2*np.pi, 2*np.pi/float(n_mc_targets))
            np.random.shuffle(temp)
            theta = theta + [temp]
        theta = np.hstack(theta)
        theta = theta + (mc_target_angle_offset*(np.pi/180.))

        x = target_distance*np.cos(theta)
        y = np.zeros(len(theta))
        z = target_distance*np.sin(theta)
        periph = np.vstack([x, y, z]).T

        it = iter([dict(lfp=lfp[i, :], origin=origin[i, :], periph=periph[i, :]) for i in range(lfp.shape[0])])

        if ('return_arrays' in kwargs.keys()) and kwargs['return_arrays'] == True:
            return lfp, origin, periph
        else:
            return it

    @staticmethod
    def lfp_mod_plus_MC_reach_INV(nblocks=100, boundaries=(-18, 18, -12, 12), xaxis=-8, target_distance=6,
                                  n_mc_targets=4, mc_target_angle_offset=0):
        '''Same as lfp_mod_plus_MC_reach but with the LFP z-targets inverted.'''
        kw = dict(return_arrays=True)
        lfp, origin, periph = LFP_Mod_plus_MC_reach.lfp_mod_plus_MC_reach(nblocks=nblocks, boundaries=boundaries,
            xaxis=xaxis, target_distance=target_distance, n_mc_targets=n_mc_targets,
            mc_target_angle_offset=mc_target_angle_offset, **kw)

        # Invert LFP:
        lfp[:, 2] = -1.0*lfp[:, 2]
        it = iter([dict(lfp=lfp[i, :], origin=origin[i, :], periph=periph[i, :]) for i in range(lfp.shape[0])])
        return it
class LFP_Mod_plus_MC_hold(LFP_Mod):
    '''
    LFP modulation task with a manual-control origin hold: the subject must hold
    an MC cursor in a center target before (and while) acquiring the LFP target.
    '''
    mc_cursor_radius = traits.Float(.5, desc="Radius of cursor")
    mc_target_radius = traits.Float(3, desc="Radius of MC target")
    mc_cursor_color = (.5, 0, .5, 1)
    mc_plant_type_options = plantlist.keys()
    mc_plant_type = traits.OptionsList(*plantlist, bmi3d_input_options=plantlist.keys())
    origin_hold_time = traits.Float(.2, desc="Hold time in center")

    exclude_parent_traits = ['goal_cache_block']

    # Redefine these so they do NOT come from the parent's marker_num/marker_count.
    # NOTE: the marker on the hand was changed from #0 to #14 on 5/19/13 after
    # LED #0 broke; data files before that date use LED #0 for the cursor.
    marker_num = traits.Int(14, desc='Index')
    marker_count = traits.Int(16, desc='Num of markers')

    joystick_method = traits.Float(1, desc="1: Normal velocity, 0: Position control")
    joystick_speed = traits.Float(20, desc="Radius of cursor")
    move_while_in_center = traits.Float(1, desc="1 = update plant while in lfp_target, lfp_hold, 0 = don't update in these states")

    # Scale factor for converting hand movement to screen movement
    # (1 cm hand movement = 3 cm cursor movement).
    scale_factor = 3.0
    wait_flag = 1
    limit2d = 1

    status = dict(
        wait=dict(start_trial="origin", stop=None),
        origin=dict(enter_origin="origin_hold", stop=None),
        origin_hold=dict(origin_hold_complete="lfp_target", leave_origin="hold_penalty", stop=None),
        lfp_target=dict(enter_lfp_target="lfp_hold", leave_origin="hold_penalty",
                        powercap_penalty="powercap_penalty", stop=None),
        lfp_hold=dict(leave_early="lfp_target", lfp_hold_complete="reward",
                      leave_origin="hold_penalty", powercap_penalty="powercap_penalty", stop=None),
        powercap_penalty=dict(powercap_penalty_end="origin"),
        hold_penalty=dict(hold_penalty_end="origin", stop=None),
        reward=dict(reward_end="wait"),
    )

    static_states = ['origin']  # states in which the decoder is not run
    trial_end_states = ['reward']
    lfp_cursor_on = ['lfp_target', 'lfp_hold', 'reward']

    sequence_generators = ['lfp_mod_4targ_plus_mc_orig']

    def __init__(self, *args, **kwargs):
        super(LFP_Mod_plus_MC_hold, self).__init__(*args, **kwargs)

        if self.move_while_in_center > 0:
            self.no_plant_update_states = []
        else:
            self.no_plant_update_states = ['lfp_target', 'lfp_hold']

        mc_origin = VirtualCircularTarget(target_radius=self.mc_target_radius, target_color=RED)
        lfp_target = VirtualSquareTarget(target_radius=self.lfp_target_rad, target_color=self.lfp_target_color)
        self.targets = [lfp_target, mc_origin]

        self.mc_plant = plantlist[self.mc_plant_type]
        if hasattr(self.mc_plant, 'graphics_models'):
            for model in self.mc_plant.graphics_models:
                self.add_model(model)

        # Declare any plant attributes which must be saved to the HDF file at the _cycle rate
        for attr in self.mc_plant.hdf_attrs:
            self.add_dtype(*attr)

        self.target_location_mc = np.array([-100, -100, -100])
        self.manual_control_type = None  # resolved lazily in move_plant()
        self.current_pt = np.zeros([3])  # keep track of current pt
        self.last_pt = np.zeros([3])

    def init(self):
        self.add_dtype('mc_targ', 'f8', (3,))
        super(LFP_Mod_plus_MC_hold, self).init()

    def _cycle(self):
        '''
        Calls any update functions necessary and redraws screen. Runs 60x per second.
        '''
        self.task_data['mc_targ'] = self.target_location_mc.copy()

        mc_plant_data = self.mc_plant.get_data_to_save()
        for key in mc_plant_data:
            self.task_data[key] = mc_plant_data[key]

        super(LFP_Mod_plus_MC_hold, self)._cycle()

    def _parse_next_trial(self):
        t = self.next_trial
        self.lfp_targ = t['lfp']
        self.mc_targ_orig = t['origin']

    def _start_origin(self):
        # The very first origin hold of a session is lengthened to 3 s; the
        # configured hold time is restored on all subsequent trials.
        if self.wait_flag:
            self.origin_hold_time_store = self.origin_hold_time
            self.origin_hold_time = 3
            self.wait_flag = 0
        else:
            self.origin_hold_time = self.origin_hold_time_store

        # only 1 target:
        target = self.targets[1]  # Origin
        self.target_location_mc = self.mc_targ_orig  # Origin
        target.move_to_position(self.target_location_mc)
        target.cue_trial_start()

        # Turn off lfp things
        self.lfp_plant.turn_off()
        self.targets[0].hide()

    def _start_lfp_target(self):
        # only 1 target:
        target = self.targets[0]  # LFP target
        self.target_location_lfp = self.lfp_targ  # LFP target
        target.move_to_position(self.target_location_lfp)
        target.cue_trial_start()
        self.lfp_plant.turn_on()

    def _start_lfp_hold(self):
        # make next target visible unless this is the final target in the trial
        pass

    def _start_hold_penalty(self):
        # hide targets
        for target in self.targets:
            target.hide()

        self.tries += 1
        self.target_index = -1

        # Turn off lfp things
        self.lfp_plant.turn_off()
        self.targets[0].hide()

    def _end_origin(self):
        self.targets[1].cue_trial_end_success()

    def _test_enter_origin(self, ts):
        cursor_pos = self.mc_plant.get_endpoint_pos()
        d = np.linalg.norm(cursor_pos - self.target_location_mc)
        return d <= (self.mc_target_radius - self.mc_cursor_radius)

    def _test_leave_origin(self, ts):
        # A joystick touch-sensor reading below 0.5 counts as leaving the origin.
        if self.manual_control_type == 'joystick':
            if hasattr(self, 'touch'):
                if self.touch < 0.5:
                    self.last_touch_zero_event = time.time()
                    return True
        cursor_pos = self.mc_plant.get_endpoint_pos()
        d = np.linalg.norm(cursor_pos - self.target_location_mc)
        return d > (self.mc_target_radius - self.mc_cursor_radius)

    def _test_origin_hold_complete(self, ts):
        return ts >= self.origin_hold_time

    def _test_hold_penalty_end(self, ts):
        return ts > self.hold_penalty_time

    def _end_lfp_hold(self):
        # change current target color to green
        self.targets[0].cue_trial_end_success()

    def move_plant(self):
        '''
        Drive both plants: the LFP plant from the decoder (only while the LFP
        cursor is on), and the MC plant from whichever manual-control source
        (motiontracker or joystick) is available.
        '''
        if self.state in self.lfp_cursor_on:
            feature_data = self.get_features()

            # Save the "neural features" (e.g. spike counts vector) to HDF file
            for key, val in feature_data.items():
                self.task_data[key] = val

            Bu = None
            assist_weight = 0
            target_state = np.zeros([self.decoder.n_states, self.decoder.n_subbins])

            # Run the decoder
            neural_features = feature_data[self.extractor.feature_type]
            self.call_decoder(neural_features, target_state, Bu=Bu,
                              assist_level=assist_weight,
                              feature_type=self.extractor.feature_type)

            # Drive the plant to the decoded state, if permitted by the
            # constraints of the plant
            self.lfp_plant.drive(self.decoder)
            self.task_data['decoder_state'] = decoder_state = self.decoder.get_state(shape=(-1, 1))

        # Sets the MC plant configuration based on motiontracker (or joystick)
        # data. If no data is available, the plant is left where it is.
        if self.state in self.no_plant_update_states:
            pt = np.array([0, 0, 0])
            print('no update')  # fixed py2 print statement
        else:
            if self.manual_control_type == 'motiondata':
                # get data from motion tracker - average all points since last poll
                pt = self.motiondata.get()
                if len(pt) > 0:
                    pt = pt[:, self.marker_num, :]
                    conds = pt[:, 3]
                    inds = np.nonzero((conds >= 0) & (conds != 4))[0]
                    if len(inds) > 0:
                        pt = pt[inds, :3]

                        # scale actual movement to desired amount of screen movement
                        pt = pt.mean(0) * self.scale_factor

                        # Set y coordinate to 0 for 2D tasks (z takes y's value first)
                        if self.limit2d:
                            pt[2] = pt[1].copy()
                            pt[1] = 0
                        pt[1] = pt[1] * 2

                        # Return cursor location
                        self.no_data_count = 0
                        pt = pt * mm_per_cm  # convert to cm
                    else:  # if no usable data
                        self.no_data_count += 1
                        pt = None
                else:  # if no new data
                    self.no_data_count += 1
                    pt = None
            elif self.manual_control_type == 'joystick':
                pt = self.joystick.get()
                # If the touch sensor is present, record it; narrow except so we
                # don't swallow KeyboardInterrupt (was a bare `except:`).
                try:
                    self.touch = pt[-1][0][2]
                except Exception:
                    pass
                if len(pt) > 0:
                    pt = pt[-1][0]
                    pt[0] = 1 - pt[0]  # Switch L / R axes
                    # Zero point is subject to drift; this is the joystick value at 'rest'.
                    calib = [0.497, 0.517]

                    if self.joystick_method == 0:
                        # Position control
                        pos = np.array([(pt[0] - calib[0]), 0, calib[1] - pt[1]])
                        pos[0] = pos[0] * 36
                        pos[2] = pos[2] * 24
                        self.current_pt = pos
                    elif self.joystick_method == 1:
                        # Velocity control at the 60 Hz update rate (dt = 1/60)
                        vel = np.array([(pt[0] - calib[0]), 0, calib[1] - pt[1]])
                        epsilon = 2 * (10 ** -2)  # deadband to stabilize cursor movement
                        if sum((vel) ** 2) > epsilon:
                            self.current_pt = self.last_pt + self.joystick_speed * vel * (1. / 60)
                        else:
                            self.current_pt = self.last_pt

                        # Clamp the cursor to the workspace.
                        if self.current_pt[0] < -25:
                            self.current_pt[0] = -25
                        if self.current_pt[0] > 25:
                            self.current_pt[0] = 25
                        if self.current_pt[-1] < -14:
                            self.current_pt[-1] = -14
                        if self.current_pt[-1] > 14:
                            self.current_pt[-1] = 14

                    pt = self.current_pt
                    self.last_pt = self.current_pt.copy()
            elif self.manual_control_type is None:
                # Source not yet determined: probe motiontracker first, then joystick.
                pt = None
                try:
                    pt0 = self.motiondata.get()
                    self.manual_control_type = 'motiondata'
                except Exception:
                    print('not motiondata')  # fixed py2 print statement
                    try:
                        pt0 = self.joystick.get()
                        self.manual_control_type = 'joystick'
                    except Exception:
                        print('not joystick data')  # fixed py2 print statement

        # Set the plant's endpoint to the position determined above, unless
        # there is no data available
        if self.manual_control_type is not None:
            if pt is not None and len(pt) > 0:
                self.mc_plant.set_endpoint_pos(pt)

    @staticmethod
    def lfp_mod_4targ_plus_mc_orig(nblocks=100, boundaries=(-18, 18, -12, 12), xaxis=-8):
        '''
        See lfp_mod_4targ for lfp target explanation. Pairs each LFP target
        with an MC origin at the workspace center.
        '''
        new_zero = (boundaries[3] + boundaries[2]) / 2.
        new_scale = (boundaries[3] - boundaries[2]) / 2.
        kin_targs = np.array([-0.40625, 0., 0.40625, 0.8125])
        lfp_targ_y = (new_scale * kin_targs) + new_zero

        for i in range(nblocks):
            temp = lfp_targ_y.copy()
            np.random.shuffle(temp)
            if i == 0:
                z = temp.copy()
            else:
                z = np.hstack((z, temp))

        # Fixed X axis:
        x = np.tile(xaxis, (nblocks * 4))
        y = np.zeros(nblocks * 4)
        lfp = np.vstack([x, y, z]).T
        origin = np.zeros((lfp.shape))

        it = iter([dict(lfp=lfp[i, :], origin=origin[i, :]) for i in range(lfp.shape[0])])
        return it
class Optitrack(traits.HasTraits):
    '''
    Enable reading of raw motiontracker data from Optitrack system
    Requires the natnet library from https://github.com/leoscholl/python_natnet
    To be used as a feature with the ManualControl task for the time being. However,
    ideally this would be implemented as a decoder :)
    '''
    optitrack_feature = traits.OptionsList(("rigid body", "skeleton", "marker"))
    smooth_features = traits.Int(1, desc="How many features to average")
    scale = traits.Float(DEFAULT_SCALE, desc="Control scale factor")
    offset = traits.Array(value=DEFAULT_OFFSET, desc="Control offset")

    hidden_traits = ['optitrack_feature', 'smooth_features']

    def init(self):
        '''
        Secondary init function. See riglib.experiment.Experiment.init()
        Prior to starting the task, this 'init' sets up the DataSource for interacting
        with the motion tracker system and registers the source with the SinkRegister
        so that the data gets saved to file as it is collected.
        '''
        # Start the natnet client and recording
        import natnet
        now = datetime.now()
        local_path = "C:/Users/Orsborn Lab/Documents"
        session = "OptiTrack/Session " + now.strftime("%Y-%m-%d")
        take = now.strftime("Take %Y-%m-%d %H:%M:%S")
        logger = Logger(take)
        try:
            client = natnet.Client.connect(logger=logger)
            if self.saveid is not None:
                take += " (%d)" % self.saveid
            client.set_session(os.path.join(local_path, session))
            client.set_take(take)
            self.filename = os.path.join(session, take + '.tak')
            client._send_command_and_wait("LiveMode")
            time.sleep(0.1)
            if client.start_recording():
                self.optitrack_status = 'recording'
            else:
                self.optitrack_status = 'streaming'
        except natnet.DiscoveryError:
            # Motive isn't reachable; fall back to a simulated client so the
            # task can still be constructed, and surface the error in run().
            self.optitrack_status = 'Optitrack couldn\'t be started, make sure Motive is open!'
            client = optitrack.SimulatedClient()
        self.client = client

        # Create a source to buffer the motion tracking data
        from riglib import source
        self.motiondata = source.DataSource(
            optitrack.make(optitrack.System, self.client, self.optitrack_feature, 1))

        # Save to the sink
        from riglib import sink
        sink_manager = sink.SinkManager.get_instance()
        sink_manager.register(self.motiondata)
        super().init()

    def run(self):
        '''
        Code to execute immediately prior to the beginning of the task FSM executing,
        or after the FSM has finished running. See riglib.experiment.Experiment.run().
        This 'run' method starts the motiondata source and stops it after the FSM has
        finished running
        '''
        # Idiom fix: `x not in ...` instead of `not x in ...`
        if self.optitrack_status not in ['recording', 'streaming']:
            import io
            self.terminated_in_error = True
            self.termination_err = io.StringIO()
            self.termination_err.write(self.optitrack_status)
            self.termination_err.seek(0)
            self.state = None
            super().run()
        else:
            self.motiondata.start()
            try:
                super().run()
            finally:
                print("Stopping optitrack")
                self.client.stop_recording()
                self.motiondata.stop()

    def _start_None(self):
        '''
        Code to run before the 'None' state starts (i.e., the task stops)
        '''
        # self.client.stop_recording()
        self.motiondata.stop()
        super()._start_None()

    def join(self):
        '''
        See riglib.experiment.Experiment.join(). Re-join the motiondata source
        process before cleaning up the experiment thread
        '''
        print("Joining optitrack datasource")
        self.motiondata.join()
        super().join()

    def cleanup(self, database, saveid, **kwargs):
        '''
        Save the optitrack recorded file into the database
        '''
        super_result = super().cleanup(database, saveid, **kwargs)
        print("Saving optitrack file to database...")
        try:
            # Make sure you actually have an "optitrack" system added!
            database.save_data(self.filename, "optitrack", saveid, False, False)
        except Exception as e:
            print(e)
            return False
        print("...done.")
        return super_result

    def _get_manual_position(self):
        ''' Overridden method to get input coordinates based on motion data'''
        # Get data from optitrack datasource
        data = self.motiondata.get()  # List of (list of features)
        if len(data) == 0:  # Data is not being streamed
            return
        recent = data[-self.smooth_features:]  # How many recent coordinates to average
        averaged = np.nanmean(recent, axis=0)  # List of averaged features
        if np.isnan(averaged).any():  # No usable coords
            return
        return averaged * 100  # convert meters to centimeters
class RDS(TrialTypes, Pygame):
    '''
    Random-dot stereogram task: a virtual sphere of dots is rendered twice
    (left/right offsets with horizontal disparity) and rotated cw or ccw;
    the subject reports the rotation direction.
    '''
    ndots = traits.Int(250, desc="Number of dots on sphere")
    sphere_radius = traits.Float(250, desc="Radius of virtual sphere")
    dot_radius = traits.Int(5, desc="Radius of dots drawn on screen")
    sphere_speed = traits.Float(0.005 * np.pi, desc="Speed at which the virtual sphere turns")
    disparity = traits.Float(.05, desc="Amount of disparity")

    trial_types = ["cw", "ccw"]

    def init(self):
        super(RDS, self).init()
        self.screen_init()
        self.width, self.height = self.surf.get_size()
        # Left/right eye viewport centers (side-by-side stereo).
        self.loff = self.width / 4., self.height / 2.
        self.roff = self.width * 0.75, self.height / 2.

    def _init_sphere(self):
        # Uniform sampling on a sphere: theta ~ U(0, 2pi), phi = arccos(2v - 1).
        u, v = np.random.rand(2, self.ndots)
        self._sphere = np.array([2 * np.pi * u, np.arccos(2 * v - 1)])

    def _project_sphere(self, offset=True):
        '''Project the dot sphere to 2D (x, z) with +/- disparity from depth (y).'''
        theta, phi = self._sphere
        x = self.sphere_radius * np.cos(theta) * np.sin(phi)
        y = self.sphere_radius * np.sin(theta) * np.sin(phi)
        z = self.sphere_radius * np.cos(phi)
        d = y * self.disparity
        return np.array([x + d * (-1, 1)[offset], z]).T

    def _draw_sphere(self):
        import pygame
        self.surf.fill(self.background)
        for pt in (self.loff + self._project_sphere(True)).astype(int):
            pygame.draw.circle(self.surf, (255, 255, 255), pt, self.dot_radius)
        for pt in (self.roff + self._project_sphere(False)).astype(int):
            pygame.draw.circle(self.surf, (255, 255, 255), pt, self.dot_radius)
        self.flip_wait()

    def _start_cw(self):
        # Fixed Python-2 print statement -> print() call (rest of file is Python 3).
        print('cw')
        self._init_sphere()

    def _start_ccw(self):
        print('ccw')
        self._init_sphere()

    def _while_cw(self):
        self._sphere[0] += self.sphere_speed
        self._draw_sphere()

    def _while_ccw(self):
        self._sphere[0] -= self.sphere_speed
        self._draw_sphere()

    # Response events: 4/8 indicate one direction, 1/2 the other.
    def _test_cw_correct(self, ts):
        return self.event in [4, 8]

    def _test_cw_incorrect(self, ts):
        return self.event in [1, 2]

    def _test_ccw_correct(self, ts):
        return self.event in [1, 2]

    def _test_ccw_incorrect(self, ts):
        return self.event in [4, 8]
class TargetCapture(Sequence):
    '''
    This is a generic cued target capture skeleton, to form as a common ancestor to the most
    common type of motor control task.
    '''
    status = FSMTable(
        wait=StateTransitions(start_trial="target"),
        target=StateTransitions(enter_target="hold", timeout="timeout_penalty"),
        hold=StateTransitions(leave_early="hold_penalty", hold_complete="targ_transition"),
        targ_transition=StateTransitions(trial_complete="reward",
                                         trial_abort="wait",
                                         trial_incomplete="target"),
        timeout_penalty=StateTransitions(timeout_penalty_end="targ_transition", end_state=True),
        hold_penalty=StateTransitions(hold_penalty_end="targ_transition", end_state=True),
        reward=StateTransitions(reward_end="wait", stoppable=False, end_state=True))

    trial_end_states = ['reward', 'timeout_penalty', 'hold_penalty']

    # initial state
    state = "wait"

    target_index = -1  # Helper variable to keep track of which target to display within a trial
    tries = 0  # Helper variable to keep track of the number of failed attempts at a given trial.

    sequence_generators = []

    reward_time = traits.Float(.5, desc="Length of reward dispensation")
    hold_time = traits.Float(.2, desc="Length of hold required at targets")
    hold_penalty_time = traits.Float(
        1, desc="Length of penalty time for target hold error")
    timeout_time = traits.Float(10, desc="Time allowed to go between targets")
    timeout_penalty_time = traits.Float(
        1, desc="Length of penalty time for timeout error")
    max_attempts = traits.Int(10, desc='The number of attempts at a target before\
        skipping to the next one')

    def _start_wait(self):
        # Call parent method to draw the next target capture sequence from the generator
        super()._start_wait()

        # number of times this sequence of targets has been attempted
        self.tries = 0

        # index of current target presented to subject
        self.target_index = -1

        # number of targets to be acquired in this trial
        self.chain_length = len(self.targs)

    def _parse_next_trial(self):
        '''Check that the generator has the required data'''
        self.targs = self.next_trial
        # TODO error checking

    def _start_target(self):
        self.target_index += 1
        self.target_location = self.targs[self.target_index]

    def _end_target(self):
        '''Nothing generic to do.'''
        pass

    def _start_hold(self):
        '''Nothing generic to do.'''
        pass

    def _while_hold(self):
        '''Nothing generic to do.'''
        pass

    def _end_hold(self):
        '''Nothing generic to do.'''
        pass

    def _start_targ_transition(self):
        '''Nothing generic to do. Child class might show/hide targets'''
        pass

    def _while_targ_transition(self):
        '''Nothing generic to do.'''
        pass

    def _end_targ_transition(self):
        '''Nothing generic to do.'''
        pass

    def _start_timeout_penalty(self):
        self.tries += 1
        self.target_index = -1

    def _while_timeout_penalty(self):
        '''Nothing generic to do.'''
        pass

    def _end_timeout_penalty(self):
        '''Nothing generic to do.'''
        pass

    def _start_hold_penalty(self):
        self.tries += 1
        self.target_index = -1

    def _while_hold_penalty(self):
        '''Nothing generic to do.'''
        pass

    def _end_hold_penalty(self):
        '''Nothing generic to do.'''
        pass

    def _start_reward(self):
        '''Nothing generic to do.'''
        pass

    def _while_reward(self):
        '''Nothing generic to do.'''
        pass

    def _end_reward(self):
        '''Nothing generic to do.'''
        pass

    ################## State transition test functions ##################
    def _test_start_trial(self, time_in_state):
        '''Start next trial automatically. You may want this to instead be
            - a random delay
            - require some initiation action
        '''
        return True

    def _test_timeout(self, time_in_state):
        return time_in_state > self.timeout_time

    def _test_hold_complete(self, time_in_state):
        '''
        Test whether the target is held long enough to declare the
        trial a success

        Possible options
            - Target held for the minimum requred time (implemented here)
            - Sensorized object moved by a certain amount
            - Sensorized object moved to the required location
            - Manually triggered by experimenter
        '''
        return time_in_state > self.hold_time

    def _test_trial_complete(self, time_in_state):
        '''Test whether all targets in sequence have been acquired'''
        return self.target_index == self.chain_length - 1

    def _test_trial_abort(self, time_in_state):
        '''Test whether the target capture sequence should just be skipped due to too many failures'''
        return (not self._test_trial_complete(time_in_state)) and (
            self.tries == self.max_attempts)

    def _test_trial_incomplete(self, time_in_state):
        '''Test whether the target capture sequence needs to be restarted'''
        return (not self._test_trial_complete(time_in_state)) and (
            self.tries < self.max_attempts)

    def _test_timeout_penalty_end(self, time_in_state):
        return time_in_state > self.timeout_penalty_time

    def _test_hold_penalty_end(self, time_in_state):
        return time_in_state > self.hold_penalty_time

    def _test_reward_end(self, time_in_state):
        return time_in_state > self.reward_time

    def _test_enter_target(self, time_in_state):
        '''This function is task-specific and not much can be done generically'''
        return False

    def _test_leave_early(self, time_in_state):
        '''This function is task-specific and not much can be done generically'''
        return False

    def update_report_stats(self):
        '''
        see experiment.Experiment.update_report_stats for docs
        '''
        super().update_report_stats()
        self.reportstats['Trial #'] = self.calc_trial_num()
        self.reportstats['Reward/min'] = np.round(
            self.calc_events_per_min('reward', 120.), decimals=2)

    @classmethod
    def get_desc(cls, params, report):
        '''Used by the database infrastructure to generate summary stats on this task'''
        duration = report[-1][-1] - report[0][-1]
        # Idiom fix: count reward events with a generator expression instead of
        # a manual counter loop.
        reward_count = sum(1 for item in report if item[0] == "reward")
        return "{} rewarded trials in {} min".format(reward_count, duration)
class ScreenTargetCapture(TargetCapture, Window):
    """Concrete implementation of TargetCapture task where targets
    are acquired by "holding" a cursor in an on-screen target"""

    background = (0, 0, 0, 1)
    cursor_color = (.5, 0, .5, 1)

    starting_pos = (5, 0, 5)
    target_color = (1, 0, 0, .5)

    cursor_visible = False  # Determines when to hide the cursor.
    no_data_count = 0  # Counter for number of missing data frames in a row

    # Scale factor for converting hand movement to screen movement
    # (1 cm hand movement = 3 cm cursor movement).
    scale_factor = 3.0

    limit2d = 1

    sequence_generators = ['centerout_2D_discrete']
    is_bmi_seed = True
    _target_color = RED

    # Runtime settable traits
    reward_time = traits.Float(.5, desc="Length of juice reward")
    target_radius = traits.Float(2, desc="Radius of targets in cm")
    hold_time = traits.Float(.2, desc="Length of hold required at targets")
    hold_penalty_time = traits.Float(
        1, desc="Length of penalty time for target hold error")
    timeout_time = traits.Float(10, desc="Time allowed to go between targets")
    timeout_penalty_time = traits.Float(
        1, desc="Length of penalty time for timeout error")
    max_attempts = traits.Int(10, desc='The number of attempts at a target before\
        skipping to the next one')
    plant_hide_rate = traits.Float(
        0.0,
        desc='If the plant is visible, specifies a percentage of trials where it will be hidden'
    )
    plant_type_options = list(plantlist.keys())
    # Bug fix: plant_type was declared twice (once near the top of the class and
    # again here); the second definition silently overrode the first. Only the
    # effective declaration is kept.
    plant_type = traits.OptionsList(*plantlist,
                                    bmi3d_input_options=list(plantlist.keys()))
    plant_visible = traits.Bool(
        True, desc='Specifies whether entire plant is displayed or just endpoint')
    cursor_radius = traits.Float(.5, desc="Radius of cursor")

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.cursor_visible = True

        # Initialize the plant
        if not hasattr(self, 'plant'):
            self.plant = plantlist[self.plant_type]
        self.plant_vis_prev = True

        # Add graphics models for the plant and targets to the window
        if hasattr(self.plant, 'graphics_models'):
            for model in self.plant.graphics_models:
                self.add_model(model)

        # Instantiate the targets
        instantiate_targets = kwargs.pop('instantiate_targets', True)
        if instantiate_targets:
            target1 = VirtualCircularTarget(target_radius=self.target_radius,
                                            target_color=self._target_color)
            target2 = VirtualCircularTarget(target_radius=self.target_radius,
                                            target_color=self._target_color)

            self.targets = [target1, target2]
            for target in self.targets:
                for model in target.graphics_models:
                    self.add_model(model)

        # Initialize target location variable
        self.target_location = np.array([0, 0, 0])

        # Declare any plant attributes which must be saved to the HDF file at the _cycle rate
        for attr in self.plant.hdf_attrs:
            self.add_dtype(*attr)

    def init(self):
        self.add_dtype('target', 'f8', (3, ))
        self.add_dtype('target_index', 'i', (1, ))
        super().init()

    def _cycle(self):
        '''
        Calls any update functions necessary and redraws screen. Runs 60x per second.
        '''
        self.task_data['target'] = self.target_location.copy()
        self.task_data['target_index'] = self.target_index

        # Run graphics commands to show/hide the plant if the visibility has changed
        if self.plant_type != 'CursorPlant':
            if self.plant_visible != self.plant_vis_prev:
                self.plant_vis_prev = self.plant_visible
                self.plant.set_visibility(self.plant_visible)
                # self.show_object(self.plant, show=self.plant_visible)

        self.move_effector()

        # Save plant status to HDF file
        plant_data = self.plant.get_data_to_save()
        for key in plant_data:
            self.task_data[key] = plant_data[key]

        super()._cycle()

    def move_effector(self):
        '''Move the end effector, if a robot or similar is being controlled'''
        pass

    def run(self):
        '''
        See experiment.Experiment.run for documentation.
        '''
        # Fire up the plant. For virtual/simulation plants, this does little/nothing.
        self.plant.start()
        try:
            super().run()
        finally:
            self.plant.stop()

    ##### HELPER AND UPDATE FUNCTIONS ####
    def update_cursor_visibility(self):
        ''' Update cursor visible flag to hide cursor if there has been no good
        data for more than 3 frames in a row'''
        prev = self.cursor_visible
        if self.no_data_count < 3:
            self.cursor_visible = True
            if prev != self.cursor_visible:
                self.show_object(self.cursor, show=True)
        else:
            self.cursor_visible = False
            if prev != self.cursor_visible:
                self.show_object(self.cursor, show=False)

    #### TEST FUNCTIONS ####
    def _test_enter_target(self, ts):
        '''
        return true if the distance between center of cursor and target is
        smaller than the cursor radius
        '''
        cursor_pos = self.plant.get_endpoint_pos()
        d = np.linalg.norm(cursor_pos - self.target_location)
        return d <= (self.target_radius - self.cursor_radius)

    def _test_leave_early(self, ts):
        '''
        return true if cursor moves outside the exit radius
        '''
        cursor_pos = self.plant.get_endpoint_pos()
        d = np.linalg.norm(cursor_pos - self.target_location)
        rad = self.target_radius - self.cursor_radius
        return d > rad

    #### STATE FUNCTIONS ####
    def _start_wait(self):
        super()._start_wait()
        # hide targets
        for target in self.targets:
            target.hide()

    def _start_target(self):
        super()._start_target()

        # move one of the two targets to the new target location
        target = self.targets[self.target_index % 2]
        target.move_to_position(self.target_location)
        target.cue_trial_start()

    def _start_hold(self):
        # make next target visible unless this is the final target in the trial
        idx = (self.target_index + 1)
        if idx < self.chain_length:
            target = self.targets[idx % 2]
            target.move_to_position(self.targs[idx])

    def _end_hold(self):
        # change current target color to green
        self.targets[self.target_index % 2].cue_trial_end_success()

    def _start_hold_penalty(self):
        super()._start_hold_penalty()
        # hide targets
        for target in self.targets:
            target.hide()

    def _start_timeout_penalty(self):
        super()._start_timeout_penalty()
        # hide targets
        for target in self.targets:
            target.hide()

    def _start_targ_transition(self):
        # hide targets
        for target in self.targets:
            target.hide()

    def _start_reward(self):
        self.targets[self.target_index % 2].show()

    #### Generator functions ####
    @staticmethod
    def centerout_2D_discrete(nblocks=100, ntargets=8,
                              boundaries=(-18, 18, -12, 12), distance=10):
        '''
        Generates a sequence of 2D (x and z) target pairs with the first target
        always at the origin.

        Parameters
        ----------
        nblocks : int
            The number of blocks of shuffled targets in the sequence.
        ntargets : int
            The number of equally spaced peripheral targets per block.
        boundaries : 4 element Tuple
            The limits of the allowed target locations (-x, x, -z, z)
            (currently unused; kept for interface compatibility).
        distance : float
            The distance in cm between the targets in a pair.

        Returns
        -------
        pairs : [nblocks*ntargets x 2 x 3] array of pairs of target locations
        '''
        # Choose a random sequence of points on the edge of a circle of radius
        # "distance"
        theta = []
        for i in range(nblocks):
            temp = np.arange(0, 2 * np.pi, 2 * np.pi / ntargets)
            np.random.shuffle(temp)
            theta = theta + [temp]
        theta = np.hstack(theta)

        x = distance * np.cos(theta)
        y = np.zeros(len(theta))
        z = distance * np.sin(theta)

        pairs = np.zeros([len(theta), 2, 3])
        pairs[:, 1, :] = np.vstack([x, y, z]).T

        return pairs
class MotionData(traits.HasTraits):
    '''
    Enable reading of raw motiontracker data from Phasespace system
    '''
    marker_count = traits.Int(8, desc="Number of markers to return")
    marker_num = traits.Int(1, desc="Which marker to use")

    def init(self):
        '''
        Secondary init function. See riglib.experiment.Experiment.init()
        Prior to starting the task, this 'init' sets up the DataSource for
        interacting with the motion tracker system and registers the source
        with the SinkRegister so that the data gets saved to file as it is
        collected.
        '''
        from riglib import source
        source_cls, source_kwargs = self.source_class
        self.motiondata = source.DataSource(source_cls, **source_kwargs)

        from riglib import sink
        sink.SinkManager.get_instance().register(self.motiondata)

        super().init()

    @property
    def source_class(self):
        '''
        Specify the source class as a function in case future descendant
        classes want to use a different type of source
        '''
        from riglib import motiontracker
        return motiontracker.make(self.marker_count), dict()

    def run(self):
        '''
        Code to execute immediately prior to the beginning of the task FSM
        executing, or after the FSM has finished running. See
        riglib.experiment.Experiment.run(). This 'run' method starts the
        motiontracker source prior to starting the experiment's main
        thread/process, and handles any errors by stopping the source
        '''
        self.motiondata.start()
        try:
            super().run()
        finally:
            self.motiondata.stop()

    def join(self):
        '''
        See riglib.experiment.Experiment.join(). Re-join the 'motiondata'
        source process before cleaning up the experiment thread
        '''
        self.motiondata.join()
        super().join()

    def _start_None(self):
        '''
        Code to run before the 'None' state starts (i.e., the task stops)
        '''
        self.motiondata.stop()
        super()._start_None()

    def _get_manual_position(self):
        '''
        Return the cursor position from motiontracker data, averaging all
        samples received since the last poll. Returns None when no (usable)
        data is available.
        '''
        samples = self.motiondata.get()
        if len(samples) == 0:
            # no new data since the last poll
            return None

        samples = samples[:, self.marker_num, :]
        quality = samples[:, 3]
        # keep only samples whose condition flag is valid (>= 0 and != 4)
        good = np.nonzero((quality >= 0) & (quality != 4))[0]
        if len(good) == 0:
            # no usable data in this batch
            return None

        mean_pos = samples[good, :3].mean(0)
        return mean_pos * mm_per_cm  # convert to cm
class Window(LogExperiment):
    '''
    Generic stereo window: pygame/OpenGL rendering surface plus the camera,
    model-management and keyboard-polling glue used by graphical tasks.
    '''
    # Single-state FSM: stay in 'draw' until stopped.
    status = dict(draw=dict(stop=None))
    state = "draw"
    stop = False

    # XPS computer
    # window_size = (1920*2, 1080)
    window_size = traits.Tuple(monitor_res['monitor_2D'], desc='Window size, in pixels')
    background = traits.Tuple((0., 0., 0., 1.), desc="Background color (R,G,B,A)")
    fullscreen = traits.Bool(True, desc="Fullscreen window")

    # Screen parameters, all in centimeters -- adjust for monkey
    screen_dist = traits.Float(44.5 + 3, desc="Screen to eye distance (cm)")
    screen_half_height = traits.Float(10.75, desc="Screen half height (cm)")
    iod = traits.Float(2.5, desc="Intraocular distance (cm)")  # intraocular distance
    show_environment = traits.Int(0, desc="Show wireframe box around environment")

    # Traits hidden from the web interface by default.
    hidden_traits = ['screen_dist', 'screen_half_height', 'iod', 'show_environment', 'background']

    def __init__(self, *args, **kwargs):
        '''
        Cache graphics models until screen_init() builds the world, and
        compute the vertical field of view (degrees) and physical screen
        extent (cm) from the configured screen geometry.
        '''
        self.display_start_pos = kwargs.pop('display_start_pos', "0,0")
        super(Window, self).__init__(*args, **kwargs)

        self.models = []   # models added before the world exists (see add_model)
        self.world = None  # Group of models; created in screen_init()
        self.event = None  # last polled keydown event, updated each _cycle

        # os.popen('sudo vbetool dpms on')

        # Full vertical FOV in degrees from half-height and eye distance.
        self.fov = np.degrees(np.arctan(self.screen_half_height / self.screen_dist)) * 2
        # Physical [width, height] of the screen in cm; width inferred from
        # the pixel aspect ratio.
        self.screen_cm = [2 * self.screen_half_height * self.window_size[0] / self.window_size[1], 2 * self.screen_half_height]

        if self.show_environment:
            self.add_model(Box())

    def set_os_params(self):
        '''Set SDL/X11 environment variables before pygame creates the window.'''
        os.environ['SDL_VIDEO_WINDOW_POS'] = self.display_start_pos
        os.environ['SDL_VIDEO_X11_WMCLASS'] = "monkey_experiment"

    def screen_init(self):
        '''Create the pygame/OpenGL window, configure GL state, build the world.'''
        self.set_os_params()
        pygame.init()

        pygame.display.gl_set_attribute(pygame.GL_DEPTH_SIZE, 24)
        flags = pygame.DOUBLEBUF | pygame.HWSURFACE | pygame.OPENGL | pygame.NOFRAME
        if self.fullscreen:
            flags = flags | pygame.FULLSCREEN

        # Try a multisampled (antialiased) GL context first; fall back to a
        # non-multisampled one if the driver refuses.
        try:
            pygame.display.gl_set_attribute(pygame.GL_MULTISAMPLEBUFFERS, 1)
            self.screen = pygame.display.set_mode(self.window_size, flags)
        except:
            pygame.display.gl_set_attribute(pygame.GL_MULTISAMPLEBUFFERS, 0)
            self.screen = pygame.display.set_mode(self.window_size, flags)

        glEnable(GL_BLEND)
        glDepthFunc(GL_LESS)
        glEnable(GL_DEPTH_TEST)
        glEnable(GL_TEXTURE_2D)
        glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
        glClearColor(*self.background)
        glClearDepth(1.0)
        glDepthMask(GL_TRUE)

        self.renderer = self._get_renderer()

        # this effectively determines the modelview matrix
        self.world = Group(self.models)
        self.world.init()

        # up vector is always (0,0,1), why would I ever need to roll the camera?!
        self.set_eye((0, -self.screen_dist, 0), (0, 0))

        pygame.mouse.set_visible(False)

    def _get_renderer(self):
        '''Construct the stereo renderer; override in subclasses to swap displays.'''
        near = 1
        far = 1024
        return stereo.MirrorDisplay(self.window_size, self.fov, near, far, self.screen_dist, self.iod)

    def set_eye(self, pos, vec, reset=True):
        '''
        Set the eye's position and direction. Camera starts at (0,0,0),
        pointing towards positive y.

        NOTE(review): the `reset` parameter is ignored -- translate() is
        always called with reset=True. Confirm whether that is intentional.
        '''
        # y/z components of pos are swapped into the world's coordinate frame.
        self.world.translate(pos[0], pos[2], pos[1], reset=True).rotate_x(-90)
        self.world.rotate_y(vec[0]).rotate_x(vec[1])

    def add_model(self, model):
        '''Add a graphics model, deferring world insertion until the world exists.'''
        if self.world is None:
            # world doesn't exist yet, add the model to cache
            self.models.append(model)
        else:
            # We're already running, initialize the model and add it to the world
            model.init()
            self.world.add(model)

    def show_object(self, obj, show=False):
        '''
        Show or hide an object. This function is an abstraction so that tasks
        don't need to know about attach/detach
        '''
        if show:
            obj.attach()
        else:
            obj.detach()

    def draw_world(self):
        '''Clear the framebuffer, render the world, and swap buffers.'''
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        self.renderer.draw(self.world)
        pygame.display.flip()
        self.renderer.draw_done()

    def _get_event(self):
        '''Return (key, type) of the first pending KEYDOWN event, or None.'''
        for e in pygame.event.get(pygame.KEYDOWN):
            return (e.key, e.type)

    def _start_None(self):
        # Task is stopping; tear down the display.
        pygame.display.quit()

    def _test_stop(self, ts):
        '''
        Stop the task if the escape key is pressed, or if the super _test_stop
        instructs a stop
        '''
        super_stop = super(Window, self)._test_stop(ts)

        from pygame import K_ESCAPE
        key_stop = self.event is not None and self.event[0] == K_ESCAPE
        if key_stop:
            print("Window closed. Stopping task")
        return super_stop or key_stop

    def update_report_stats(self):
        '''Add the rendering frame rate to the web-interface report stats.'''
        self.reportstats['FPS'] = round(self.clock.get_fps(), 2)
        return super().update_report_stats()

    def requeue(self):
        '''Re-queue the whole world for rendering (e.g., after visibility changes).'''
        self.renderer._queue_render(self.world)

    def _cycle(self):
        '''Render one frame, run the FSM cycle, then poll keyboard input.'''
        self.requeue()
        self.draw_world()
        super(Window, self)._cycle()  # is this order intentional? why not cycle first then draw the screen?
        self.event = self._get_event()
class ManualControlMulti_memory(manualcontrolmulti_COtasks.ManualControlMulti_plusvar):
    '''
    Memory-guided center-out task: the subject holds at an origin target,
    a peripheral target flashes briefly, then after a memory hold the
    subject must pick the remembered target out of a set of choice targets.
    '''
    target_flash_time = traits.Float(0.2, desc="Time for target to flash")
    flash_target_radius = traits.Float(4., desc="Radius of flahs target cue")
    hold2_time = traits.Float(1.0, desc="hold #2, memory hold")
    hold2_var = traits.Float(.1, desc="hold #2, memory hold")
    ntargets = traits.Int(2)

    # FSM: wait -> origin -> hold -> targ_flash -> hold2 -> choice_targets
    # -> periph_hold -> reward, with penalty states returning to 'wait'.
    status = dict(
        wait=dict(start_trial="origin", stop=None),
        origin=dict(enter_origin="hold", timeout="timeout_penalty", stop=None),
        hold=dict(leave_early="hold_penalty", hold_complete="targ_flash"),
        targ_flash=dict(targ_flash_done="hold2", leave_orig='hold_penalty'),
        hold2=dict(leave_early="hold_penalty", hold_complete='choice_targets'),
        choice_targets=dict(enter_target="periph_hold", enter_wrong_target="wrong_target_penalty", timeout="timeout_penalty"),
        periph_hold=dict(hold_complete="reward", leave_periph_early="hold_penalty"),
        timeout_penalty=dict(timeout_penalty_end="wait"),
        hold_penalty=dict(hold_penalty_end="wait"),
        # NOTE(review): this state reuses the 'timeout_penalty_end' test to
        # exit -- confirm the penalty duration is the intended one.
        wrong_target_penalty=dict(timeout_penalty_end="wait"),
        reward=dict(reward_end="wait"))

    sequence_generators = ['twoD_choice_CO']

    def __init__(self, *args, **kwargs):
        '''Set up the plant, the origin/flash/choice targets, and HDF dtypes.'''
        super(ManualControlMulti_memory, self).__init__(*args, **kwargs)
        self.hold_time_pls_var = self.hold_time  #+ np.random.uniform(low=-1,high=1)*self.orig_hold_variance
        self.cursor_visible = True

        # Initialize the plant
        if not hasattr(self, 'plant'):
            self.plant = plantlist[self.plant_type]
        self.plant_vis_prev = True

        # Add graphics models for the plant and targets to the window
        if hasattr(self.plant, 'graphics_models'):
            for model in self.plant.graphics_models:
                self.add_model(model)

        # Instantiate the targets: origin (target1), flash cue (target2),
        # and ntargets choice targets.
        target1 = VirtualCircularTarget(target_radius=self.target_radius, target_color=RED)
        target2 = VirtualCircularTarget(target_radius=self.target_radius, target_color=RED)

        choice_targets = []
        for i in range(self.ntargets):
            choice_targets.extend([VirtualCircularTarget(target_radius=self.target_radius, target_color=RED)])

        self.target_dict = dict()
        self.target_dict['mc_orig'] = target1
        self.target_dict['mc_targ'] = target2
        self.target_dict['choice_targets'] = choice_targets

        self.targets = list(choice_targets + [target1, target2])
        for target in self.targets:
            for model in target.graphics_models:
                self.add_model(model)

        # Initialize target location variable
        self.target_location = np.array([0, 0, 0])

        # Declare any plant attributes which must be saved to the HDF file at the _cycle rate
        # print 'PKK: ', self.dtype
        # for attr in self.plant.hdf_attrs:
        #     print 'PKKKKK: ', attr
        #     self.add_dtype(*attr)
        self.add_dtype('choice_on', 'f8', (1, ))
        self.choice_on = 0
        self.twoD_cursor_pos = np.zeros((3, ))

    def init(self):
        super(ManualControlMulti_memory, self).init()

    def _cycle(self):
        '''
        Calls any update functions necessary and redraws screen. Runs 60x per second.
        '''
        self.task_data['target'] = self.target_location.copy()
        #self.task_data['target_index'] = self.target_index
        self.task_data['choice_on'] = self.choice_on

        ## Run graphics commands to show/hide the plant if the visibility has changed
        if self.plant_type != 'CursorPlant':
            if self.plant_visible != self.plant_vis_prev:
                self.plant_vis_prev = self.plant_visible
                self.plant.set_visibility(self.plant_visible)
                # self.show_object(self.plant, show=self.plant_visible)
        self.move_plant()

        ## Save plant status to HDF file
        plant_data = self.plant.get_data_to_save()
        for key in plant_data:
            self.task_data[key] = plant_data[key]

        # NOTE(review): both branches are identical, so planar_hand currently
        # has no effect here -- confirm whether a projection was intended.
        pos = self.plant.get_endpoint_pos()
        if self.planar_hand:
            self.twoD_cursor_pos = pos.copy()
        else:
            self.twoD_cursor_pos = pos.copy()

        super(ManualControlMulti_memory, self)._cycle()

    def _parse_next_trial(self):
        '''Unpack the next generator entry into target locations and the label.'''
        t = self.next_trial
        self.mc_targ_loc = t['mc_targ']
        # NOTE(review): mc_label comes out of the generator as a float;
        # it is later used as a numpy index (see _test_enter_target) --
        # confirm it is converted/valid on the numpy version in use.
        self.mc_label = t['mc_label'][0, 0]
        #print 'MC LABEL: ', self.mc_label
        #print 'MC TARG LOC', self.mc_targ_loc
        self.choice_targ_loc = t['choice_targ']
        self.mc_orig_loc = t['mc_orig']
        self.targs = np.vstack((self.mc_orig_loc, self.mc_targ_loc))

    def _start_origin(self):
        # move a target to current location (target1 and target2 alternate moving) and set location attribute
        orig = self.target_dict['mc_orig']
        self.target_location = self.mc_orig_loc[0, :]
        orig.move_to_position(self.target_location)
        orig.cue_trial_start()

    def _start_targ_flash(self):
        # Turn on flash
        periph = self.target_dict['mc_targ']
        self.target_location = self.mc_targ_loc[0, :]
        periph.move_to_position(self.target_location)
        periph.cue_trial_start()

    def _start_hold2(self):
        # Turn off flash; randomize the memory-hold duration around hold2_time.
        periph = self.target_dict['mc_targ']
        periph.hide()
        self.hold_time_pls_var = self.hold2_time + np.random.uniform(low=-1, high=1) * self.hold2_var
        self.target_location = self.mc_orig_loc[0, :]

    def _start_choice_targets(self):
        # Hide everything, cue 'go' on the origin, then show the choice targets.
        for t in self.targets:
            t.hide()
        # Turn on Go cue:
        orig = self.target_dict['mc_orig']
        orig.cue_trial_end_success()  #turn green
        # orig.sphere.color = GREEN
        orig.show()
        #orig.hide()

        # Turn on choice targets
        choice_targs = self.target_dict['choice_targets']
        #print 'choice targ loc', self.choice_targ_loc
        #print 'choice shpae: ', choice_targs
        for i, c in enumerate(choice_targs):
            #print i, c
            c.move_to_position(self.choice_targ_loc[i, :])
            c.cue_trial_start()

    def _start_periph_hold(self):
        # Hide the origin and restore the normal hold duration.
        self.target_dict['mc_orig'].hide()
        #self.target_dict['mc_targ'].hide()
        #for i, c in enumerate( self.target_dict['choice_targets']):
        #c.hide()
        self.hold_time_pls_var = self.hold_time

    def _start_wrong_target_penalty(self):
        # Reposition then hide the choice targets and the origin.
        choice_targs = self.target_dict['choice_targets']
        #print 'choice targ loc', self.choice_targ_loc
        #print 'choice shpae: ', choice_targs
        for i, c in enumerate(choice_targs):
            #print i, c
            c.move_to_position(self.choice_targ_loc[i, :])
            c.hide()
        orig = self.target_dict['mc_orig']
        orig.hide()

    def _start_reward(self):
        # Hide all targets, then show the remembered target in green as feedback.
        super(ManualControlMulti_memory, self)._start_reward()
        for i, c in enumerate(self.target_dict['choice_targets']):
            c.hide()
        orig = self.target_dict['mc_orig']
        orig.hide()
        for t in self.targets:
            t.hide()
        self.target_dict['mc_targ'].sphere.color = GREEN
        self.target_dict['mc_targ'].show()
        # self.target_dict['choice_targets'][int(self.mc_label)].show()

    ########################
    ### TEST FUNCTIONS #####
    ########################
    def _test_leave_orig(self, ts):
        '''True when the cursor has left the origin target's hold zone.'''
        cursor_pos = self.twoD_cursor_pos
        d = np.linalg.norm(cursor_pos - self.mc_orig_loc[0, :])
        rad = self.target_radius - self.cursor_radius
        return d > rad

    def _test_enter_origin(self, ts):
        '''True when the cursor is fully inside the current target location.'''
        cursor_pos = self.twoD_cursor_pos
        d = np.linalg.norm(cursor_pos - self.target_location)
        return d <= (self.target_radius - self.cursor_radius)

    def _test_targ_flash_done(self, ts):
        '''True once the flash has been shown for target_flash_time seconds.'''
        return ts > self.target_flash_time

    def _test_enter_target(self, ts):
        '''True when the cursor enters the correct (remembered) choice target.'''
        correct_target_loc = self.choice_targ_loc[self.mc_label]
        cursor_pos = self.twoD_cursor_pos
        d = np.linalg.norm(cursor_pos - correct_target_loc)
        return d <= (self.target_radius - self.cursor_radius)

    def _test_enter_wrong_target(self, ts):
        '''True when the cursor enters any choice target other than the correct one.'''
        enter_wrong_targ = False
        cursor_pos = self.twoD_cursor_pos
        for i in range(len(self.choice_targ_loc)):
            if abs(i - self.mc_label) > 0:
                target_loc = self.choice_targ_loc[i]
                d = np.linalg.norm(cursor_pos - target_loc)
                if d <= (self.target_radius - self.cursor_radius):
                    enter_wrong_targ = True
                    #print "WRONG TARGET: ", self.choice_targ_loc[int(self.mc_label)], target_loc, self.mc_label, cursor_pos, d, self.target_radius-self.cursor_radius
        return enter_wrong_targ

    ##############################
    ######## Generators ########
    ##############################
    @staticmethod
    def twoD_choice_CO(nblocks=100, boundaries=(-18, 18, -12, 12), target_distance=6, ntargets=4, mc_target_angle_offset=0):
        '''
        Trial generator. Yields one dict per trial with keys:
        'mc_targ' (1x3 flashed-target position), 'mc_label' (1x1 index of the
        correct choice target), 'choice_targ' (ntargets x 3 choice positions),
        and 'mc_orig' (1x3 origin position, always the center).
        Targets lie on a circle of radius target_distance in the x-z plane;
        within each block the ntargets angles are presented in shuffled order.
        NOTE(review): the 'boundaries' parameter is unused here.
        '''
        theta = []
        label = []
        for i in range(nblocks):
            # Evenly spaced angles, rotated by the offset (degrees -> radians),
            # shuffled jointly with their integer labels.
            temp = np.arange(0, 2 * np.pi, 2 * np.pi / ntargets)
            temp = temp + (mc_target_angle_offset * np.pi / 180)
            temp2 = np.vstack((temp, np.arange(ntargets)))
            np.random.shuffle(temp2.T)
            theta = theta + list(temp2[0, :])
            label = label + list(temp2[1, :])
        theta = np.hstack(theta)
        label = np.hstack(label)

        x = target_distance * np.cos(theta)
        y = np.zeros(len(theta))
        z = target_distance * np.sin(theta)

        mc_targ_flash = np.vstack([x, y, z]).T
        mc_targ_flash = mc_targ_flash[:, np.newaxis, :]

        mc_label = np.array(label)
        mc_label = mc_label[:, np.newaxis, np.newaxis]

        # Fixed (unshuffled) choice-target ring, replicated for every trial.
        temp = np.arange(0, 2 * np.pi, 2 * np.pi / ntargets) + (mc_target_angle_offset * np.pi / 180)
        x = target_distance * np.cos(temp)
        y = np.zeros(len(temp))
        z = target_distance * np.sin(temp)
        choice_targ = np.vstack([x, y, z]).T
        choice_targ = np.tile(choice_targ, (nblocks * ntargets, 1, 1))

        mc_orig = np.zeros((mc_targ_flash.shape))

        it = iter([dict(mc_targ=mc_targ_flash[i, :, :], mc_label=mc_label[i, :, :], choice_targ=choice_targ[i, :, :], mc_orig=mc_orig[i, :, :]) for i in range(len(label))])
        return it
class ApproachAvoidanceTask(Sequence, Window): ''' This is for a free-choice task with two targets (left and right) presented to choose from. The position of the targets may change along the x-axis, according to the target generator, and each target has a varying probability of reward, also according to the target generator. The code as it is written is for a joystick. Notes: want target_index to only write once per trial. if so, can make instructed trials random. else, make new state for instructed trial. ''' background = (0,0,0,1) shoulder_anchor = np.array([2., 0., -15.]) # Coordinates of shoulder anchor point on screen arm_visible = traits.Bool(True, desc='Specifies whether entire arm is displayed or just endpoint') cursor_radius = traits.Float(.5, desc="Radius of cursor") cursor_color = (.5,0,.5,1) joystick_method = traits.Float(1,desc="1: Normal velocity, 0: Position control") joystick_speed = traits.Float(20, desc="Speed of cursor") plant_type_options = plantlist.keys() plant_type = traits.OptionsList(*plantlist, bmi3d_input_options=plantlist.keys()) starting_pos = (5, 0, 5) # window_size = (1280*2, 1024) window_size = traits.Tuple((1366*2, 768), desc='window size') status = dict( #wait = dict(start_trial="target", stop=None), wait = dict(start_trial="center", stop=None), center = dict(enter_center="hold_center", timeout="timeout_penalty", stop=None), hold_center = dict(leave_early_center = "hold_penalty",hold_center_complete="target", timeout="timeout_penalty", stop=None), target = dict(enter_targetL="hold_targetL", enter_targetH = "hold_targetH", timeout="timeout_penalty", stop=None), hold_targetR = dict(leave_early_R="hold_penalty", hold_complete="targ_transition"), hold_targetL = dict(leave_early_L="hold_penalty", hold_complete="targ_transition"), targ_transition = dict(trial_complete="check_reward",trial_abort="wait", trial_incomplete="center"), check_reward = dict(avoid="reward",approach="reward_and_airpuff"), timeout_penalty = 
dict(timeout_penalty_end="targ_transition"), hold_penalty = dict(hold_penalty_end="targ_transition"), reward = dict(reward_end="wait"), reward_and_airpuff = dict(reward_and_airpuff_end="wait"), ) # target_color = (.5,1,.5,0) #initial state state = "wait" #create settable traits reward_time_avoid = traits.Float(.2, desc="Length of juice reward for avoid decision") reward_time_approach_min = traits.Float(.2, desc="Min length of juice for approach decision") reward_time_approach_max = traits.Float(.8, desc="Max length of juice for approach decision") target_radius = traits.Float(1.5, desc="Radius of targets in cm") block_length = traits.Float(100, desc="Number of trials per block") hold_time = traits.Float(.5, desc="Length of hold required at targets") hold_penalty_time = traits.Float(1, desc="Length of penalty time for target hold error") timeout_time = traits.Float(10, desc="Time allowed to go between targets") timeout_penalty_time = traits.Float(1, desc="Length of penalty time for timeout error") max_attempts = traits.Int(10, desc='The number of attempts at a target before\ skipping to the next one') session_length = traits.Float(0, desc="Time until task automatically stops. Length of 0 means no auto stop.") marker_num = traits.Int(14, desc="The index of the motiontracker marker to use for cursor position") arm_hide_rate = traits.Float(0.0, desc='If the arm is visible, specifies a percentage of trials where it will be hidden') target_index = 0 # Helper variable to keep track of whether trial is instructed (1 = 1 choice) or free-choice (2 = 2 choices) target_selected = 'L' # Helper variable to indicate which target was selected tries = 0 # Helper variable to keep track of the number of failed attempts at a given trial. timedout = False # Helper variable to keep track if transitioning from timeout_penalty reward_counter = 0.0 cursor_visible = False # Determines when to hide the cursor. 
no_data_count = 0 # Counter for number of missing data frames in a row scale_factor = 3.0 #scale factor for converting hand movement to screen movement (1cm hand movement = 3.5cm cursor movement) starting_dist = 10.0 # starting distance from center target #color_targets = np.random.randint(2) color_targets = 1 # 0: yellow low, blue high; 1: blue low, yellow high stopped_center_hold = False #keep track if center hold was released early limit2d = 1 color1 = target_colors['purple'] # approach color color2 = target_colors['lightsteelblue'] # avoid color reward_color = target_colors['green'] # color of reward bar airpuff_color = target_colors['red'] # color of airpuff bar sequence_generators = ['colored_targets_with_probabilistic_reward','block_probabilistic_reward','colored_targets_with_randomwalk_reward','randomwalk_probabilistic_reward'] def __init__(self, *args, **kwargs): super(ApproachAvoidanceTask, self).__init__(*args, **kwargs) self.cursor_visible = True # Add graphics models for the plant and targets to the window self.plant = plantlist[self.plant_type] self.plant_vis_prev = True # Add graphics models for the plant and targets to the window if hasattr(self.plant, 'graphics_models'): for model in self.plant.graphics_models: self.add_model(model) self.current_pt=np.zeros([3]) #keep track of current pt self.last_pt=np.zeros([3]) #kee ## Declare cursor #self.dtype.append(('cursor', 'f8', (3,))) if 0: #hasattr(self.arm, 'endpt_cursor'): self.cursor = self.arm.endpt_cursor else: self.cursor = Sphere(radius=self.cursor_radius, color=self.cursor_color) self.add_model(self.cursor) self.cursor.translate(*self.get_arm_endpoint(), reset=True) ## Instantiate the targets. Target 1 is center target, Target H is target with high probability of reward, Target L is target with low probability of reward. 
self.target1 = Sphere(radius=self.target_radius, color=self.target_color) # center target self.add_model(self.target1) self.targetR = Sphere(radius=self.target_radius, color=self.target_color) # left target self.add_model(self.targetH) self.targetL = Sphere(radius=self.target_radius, color=self.target_color) # right target self.add_model(self.targetL) ###STOPPED HERE: should define Rect target here and then adapt length during task. Also, ### be sure to change all targetH instantiations to targetR. # Initialize target location variable. self.target_location1 = np.array([0,0,0]) self.target_locationR = np.array([-self.starting_dist,0,0]) self.target_locationL = np.array([self.starting_dist,0,0]) self.target1.translate(*self.target_location1, reset=True) self.targetH.translate(*self.target_locationR, reset=True) self.targetL.translate(*self.target_locationL, reset=True) # Initialize colors for high probability and low probability target. Color will not change. self.targetH.color = self.color_targets*self.color1 + (1 - self.color_targets)*self.color2 # high is magenta if color_targets = 1, juicyorange otherwise self.targetL.color = (1 - self.color_targets)*self.color1 + self.color_targets*self.color2 #set target colors self.target1.color = (1,0,0,.5) # center target red # Initialize target location variable self.target_location = np.array([0, 0, 0]) # Declare any plant attributes which must be saved to the HDF file at the _cycle rate for attr in self.plant.hdf_attrs: self.add_dtype(*attr) def init(self): self.add_dtype('targetR', 'f8', (3,)) self.add_dtype('targetL','f8', (3,)) self.add_dtype('reward_scheduleR','f8', (1,)) self.add_dtype('reward_scheduleL','f8', (1,)) self.add_dtype('target_index', 'i', (1,)) super(ApproachAvoidanceTask, self).init() self.trial_allocation = np.zeros(1000) def _cycle(self): ''' Calls any update functions necessary and redraws screen. Runs 60x per second. 
''' ## Run graphics commands to show/hide the arm if the visibility has changed if self.plant_type != 'cursor_14x14': if self.arm_visible != self.arm_vis_prev: self.arm_vis_prev = self.arm_visible self.show_object(self.arm, show=self.arm_visible) self.move_arm() #self.move_plant() ## Save plant status to HDF file plant_data = self.plant.get_data_to_save() for key in plant_data: self.task_data[key] = plant_data[key] self.update_cursor() if self.plant_type != 'cursor_14x14': self.task_data['joint_angles'] = self.get_arm_joints() super(ApproachAvoidanceTask, self)._cycle() ## Plant functions def get_cursor_location(self): # arm returns it's position as if it was anchored at the origin, so have to translate it to the correct place return self.get_arm_endpoint() def get_arm_endpoint(self): return self.plant.get_endpoint_pos() def set_arm_endpoint(self, pt, **kwargs): self.plant.set_endpoint_pos(pt, **kwargs) def set_arm_joints(self, angles): self.arm.set_intrinsic_coordinates(angles) def get_arm_joints(self): return self.arm.get_intrinsic_coordinates() def update_cursor(self): ''' Update the cursor's location and visibility status. ''' pt = self.get_cursor_location() self.update_cursor_visibility() if pt is not None: self.move_cursor(pt) def move_cursor(self, pt): ''' Move the cursor object to the specified 3D location. ''' # if not hasattr(self.arm, 'endpt_cursor'): self.cursor.translate(*pt[:3],reset=True) ## ##### HELPER AND UPDATE FUNCTIONS #### def move_arm(self): ''' Returns the 3D coordinates of the cursor. For manual control, uses joystick data. 
If no joystick data available, returns None''' pt = self.joystick.get() if len(pt) > 0: pt = pt[-1][0] pt[0]=1-pt[0]; #Switch L / R axes calib = [0.497,0.517] #Sometimes zero point is subject to drift this is the value of the incoming joystick when at 'rest' if self.joystick_method==0: pos = np.array([(pt[0]-calib[0]), 0, calib[1]-pt[1]]) pos[0] = pos[0]*36 pos[2] = pos[2]*24 self.current_pt = pos elif self.joystick_method==1: vel=np.array([(pt[0]-calib[0]), 0, calib[1]-pt[1]]) epsilon = 2*(10**-2) #Define epsilon to stabilize cursor movement if sum((vel)**2) > epsilon: self.current_pt=self.last_pt+self.joystick_speed*vel*(1/60) #60 Hz update rate, dt = 1/60 else: self.current_pt = self.last_pt if self.current_pt[0] < -25: self.current_pt[0] = -25 if self.current_pt[0] > 25: self.current_pt[0] = 25 if self.current_pt[-1] < -14: self.current_pt[-1] = -14 if self.current_pt[-1] > 14: self.current_pt[-1] = 14 self.set_arm_endpoint(self.current_pt) self.last_pt = self.current_pt.copy() def convert_to_cm(self, val): return val/10.0 def update_cursor_visibility(self): ''' Update cursor visible flag to hide cursor if there has been no good data for more than 3 frames in a row''' prev = self.cursor_visible if self.no_data_count < 3: self.cursor_visible = True if prev != self.cursor_visible: self.show_object(self.cursor, show=True) self.requeue() else: self.cursor_visible = False if prev != self.cursor_visible: self.show_object(self.cursor, show=False) self.requeue() def calc_n_successfultrials(self): trialendtimes = np.array([state[1] for state in self.state_log if state[0]=='check_reward']) return len(trialendtimes) def calc_n_rewards(self): rewardtimes = np.array([state[1] for state in self.state_log if state[0]=='reward']) return len(rewardtimes) def calc_trial_num(self): '''Calculates the current trial count: completed + aborted trials''' trialtimes = [state[1] for state in self.state_log if state[0] in ['wait']] return len(trialtimes)-1 def calc_targetH_num(self): 
'''Calculates the total number of times the high-value target was selected''' trialtimes = [state[1] for state in self.state_log if state[0] in ['hold_targetH']] return len(trialtimes) - 1 def calc_rewards_per_min(self, window): '''Calculates the Rewards/min for the most recent window of specified number of seconds in the past''' rewardtimes = np.array([state[1] for state in self.state_log if state[0]=='reward']) if (self.get_time() - self.task_start_time) < window: divideby = (self.get_time() - self.task_start_time)/sec_per_min else: divideby = window/sec_per_min return np.sum(rewardtimes >= (self.get_time() - window))/divideby def calc_success_rate(self, window): '''Calculates the rewarded trials/initiated trials for the most recent window of specified length in sec''' trialtimes = np.array([state[1] for state in self.state_log if state[0] in ['reward', 'timeout_penalty', 'hold_penalty']]) rewardtimes = np.array([state[1] for state in self.state_log if state[0]=='reward']) if len(trialtimes) == 0: return 0.0 else: return float(np.sum(rewardtimes >= (self.get_time() - window)))/np.sum(trialtimes >= (self.get_time() - window)) def update_report_stats(self): '''Function to update any relevant report stats for the task. Values are saved in self.reportstats, an ordered dictionary. Keys are strings that will be displayed as the label for the stat in the web interface, values can be numbers or strings. 
Called every time task state changes.''' super(ApproachAvoidanceTask, self).update_report_stats() self.reportstats['Trial #'] = self.calc_trial_num() self.reportstats['Reward/min'] = np.round(self.calc_rewards_per_min(120),decimals=2) self.reportstats['High-value target selections'] = self.calc_targetH_num() #self.reportstats['Success rate'] = str(np.round(self.calc_success_rate(120)*100.0,decimals=2)) + '%' start_time = self.state_log[0][1] rewardtimes=np.array([state[1] for state in self.state_log if state[0]=='reward']) if len(rewardtimes): rt = rewardtimes[-1]-start_time else: rt= np.float64("0.0") sec = str(np.int(np.mod(rt,60))) if len(sec) < 2: sec = '0'+sec self.reportstats['Time Of Last Reward'] = str(np.int(np.floor(rt/60))) + ':' + sec #### TEST FUNCTIONS #### def _test_enter_center(self, ts): #return true if the distance between center of cursor and target is smaller than the cursor radius d = np.sqrt((self.cursor.xfm.move[0]-self.target_location1[0])**2 + (self.cursor.xfm.move[1]-self.target_location1[1])**2 + (self.cursor.xfm.move[2]-self.target_location1[2])**2) #print 'TARGET SELECTED', self.target_selected return d <= self.target_radius - self.cursor_radius def _test_enter_targetL(self, ts): if self.target_index == 1 and self.LH_target_on[0]==0: #return false if instructed trial and this target is not on return False else: #return true if the distance between center of cursor and target is smaller than the cursor radius d = np.sqrt((self.cursor.xfm.move[0]-self.target_locationL[0])**2 + (self.cursor.xfm.move[1]-self.target_locationL[1])**2 + (self.cursor.xfm.move[2]-self.target_locationL[2])**2) self.target_selected = 'L' #print 'TARGET SELECTED', self.target_selected return d <= self.target_radius - self.cursor_radius def _test_enter_targetH(self, ts): if self.target_index ==1 and self.LH_target_on[1]==0: return False else: #return true if the distance between center of cursor and target is smaller than the cursor radius d = 
np.sqrt((self.cursor.xfm.move[0]-self.target_locationH[0])**2 + (self.cursor.xfm.move[1]-self.target_locationH[1])**2 + (self.cursor.xfm.move[2]-self.target_locationH[2])**2) self.target_selected = 'H' #print 'TARGET SELECTED', self.target_selected return d <= self.target_radius - self.cursor_radius def _test_leave_early_center(self, ts): # return true if cursor moves outside the exit radius (gives a bit of slack around the edge of target once cursor is inside) d = np.sqrt((self.cursor.xfm.move[0]-self.target_location1[0])**2 + (self.cursor.xfm.move[1]-self.target_location1[1])**2 + (self.cursor.xfm.move[2]-self.target_location1[2])**2) rad = self.target_radius - self.cursor_radius return d > rad def _test_leave_early_L(self, ts): # return true if cursor moves outside the exit radius (gives a bit of slack around the edge of target once cursor is inside) d = np.sqrt((self.cursor.xfm.move[0]-self.target_locationL[0])**2 + (self.cursor.xfm.move[1]-self.target_locationL[1])**2 + (self.cursor.xfm.move[2]-self.target_locationL[2])**2) rad = self.target_radius - self.cursor_radius return d > rad def _test_leave_early_H(self, ts): # return true if cursor moves outside the exit radius (gives a bit of slack around the edge of target once cursor is inside) d = np.sqrt((self.cursor.xfm.move[0]-self.target_locationH[0])**2 + (self.cursor.xfm.move[1]-self.target_locationH[1])**2 + (self.cursor.xfm.move[2]-self.target_locationH[2])**2) rad = self.target_radius - self.cursor_radius return d > rad def _test_hold_center_complete(self, ts): return ts>=self.hold_time def _test_hold_complete(self, ts): return ts>=self.hold_time def _test_timeout(self, ts): return ts>self.timeout_time def _test_timeout_penalty_end(self, ts): return ts>self.timeout_penalty_time def _test_hold_penalty_end(self, ts): return ts>self.hold_penalty_time def _test_trial_complete(self, ts): #return self.target_index==self.chain_length-1 return not self.timedout def _test_trial_incomplete(self, ts): return (not 
self._test_trial_complete(ts)) and (self.tries<self.max_attempts) def _test_trial_abort(self, ts): return (not self._test_trial_complete(ts)) and (self.tries==self.max_attempts) def _test_yes_reward(self,ts): if self.target_selected == 'H': #reward_assigned = self.targs[0,1] reward_assigned = self.rewardH else: #reward_assigned = self.targs[1,1] reward_assigned = self.rewardL if self.reward_SmallLarge==1: self.reward_time = reward_assigned*self.reward_time_large + (1 - reward_assigned)*self.reward_time_small # update reward time if using Small/large schedule reward_assigned = 1 # always rewarded return bool(reward_assigned) def _test_no_reward(self,ts): if self.target_selected == 'H': #reward_assigned = self.targs[0,1] reward_assigned = self.rewardH else: #reward_assigned = self.targs[1,1] reward_assigned = self.rewardL if self.reward_SmallLarge==True: self.reward_time = reward_assigned*self.reward_time_large + (1 - reward_assigned)*self.reward_time_small # update reward time if using Small/large schedule reward_assigned = 1 # always rewarded return bool(not reward_assigned) def _test_reward_end(self, ts): time_ended = (ts > self.reward_time) self.reward_counter = self.reward_counter + 1 return time_ended def _test_stop(self, ts): if self.session_length > 0 and (time.time() - self.task_start_time) > self.session_length: self.end_task() return self.stop #### STATE FUNCTIONS #### def show_object(self, obj, show=False): ''' Show or hide an object ''' if show: obj.attach() else: obj.detach() self.requeue() def _start_wait(self): super(ApproachAvoidanceTask, self)._start_wait() self.tries = 0 self.target_index = 0 # indicator for instructed or free-choice trial #hide targets self.show_object(self.target1, False) self.show_object(self.targetL, False) self.show_object(self.targetH, False) #get target positions and reward assignments for this trial self.targs = self.next_trial if self.plant_type != 'cursor_14x14' and np.random.rand() < self.arm_hide_rate: self.arm_visible 
= False else: self.arm_visible = True #self.chain_length = self.targs.shape[0] #Number of sequential targets in a single trial #self.task_data['target'] = self.target_locationH.copy() assign_reward = np.random.randint(0,100,size=2) self.rewardH = np.greater(self.targs[0,1],assign_reward[0]) #print 'high value target reward prob', self.targs[0,1] self.rewardL = np.greater(self.targs[1,1],assign_reward[1]) #print 'TARGET GENERATOR', self.targs[0,] self.task_data['targetH'] = self.targs[0,].copy() self.task_data['reward_scheduleH'] = self.rewardH.copy() self.task_data['targetL'] = self.targs[1,].copy() self.task_data['reward_scheduleL'] = self.rewardL.copy() self.requeue() def _start_center(self): #self.target_index += 1 self.show_object(self.target1, True) self.show_object(self.cursor, True) # Third argument in self.targs determines if target is on left or right # First argument in self.targs determines if location is offset to farther distances offsetH = (2*self.targs[0,2] - 1)*(self.starting_dist + self.location_offset_allowed*self.targs[0,0]*4.0) moveH = np.array([offsetH,0,0]) offsetL = (2*self.targs[1,2] - 1)*(self.starting_dist + self.location_offset_allowed*self.targs[1,0]*4.0) moveL = np.array([offsetL,0,0]) self.targetL.translate(*moveL, reset=True) #self.targetL.move_to_position(*moveL, reset=True) ##self.targetL.translate(*self.targs[self.target_index], reset=True) self.show_object(self.targetL, True) self.target_locationL = self.targetL.xfm.move self.targetH.translate(*moveH, reset=True) #self.targetR.move_to_position(*moveR, reset=True) ##self.targetR.translate(*self.targs[self.target_index], reset=True) self.show_object(self.targetH, True) self.target_locationH = self.targetH.xfm.move # Insert instructed trials within free choice trials if self.trial_allocation[self.calc_trial_num()] == 1: #if (self.calc_trial_num() % 10) < (self.percentage_instructed_trials/10): self.target_index = 1 # instructed trial leftright_coinflip = np.random.randint(0,2) if 
leftright_coinflip == 0: self.show_object(self.targetL, False) self.LH_target_on = (0, 1) else: self.show_object(self.targetH, False) self.LR_coinflip = 0 self.LH_target_on = (1, 0) else: self.target_index = 2 # free-choice trial self.cursor_visible = True self.task_data['target_index'] = self.target_index self.requeue() def _start_target(self): #self.target_index += 1 #move targets to current location and set location attribute. Target1 (center target) position does not change. self.show_object(self.target1, False) #self.target_location1 = self.target1.xfm.move self.show_object(self.cursor, True) self.update_cursor() self.requeue() def _start_hold_center(self): self.show_object(self.target1, True) self.timedout = False self.requeue() def _start_hold_targetL(self): #make next target visible unless this is the final target in the trial #if 1 < self.chain_length: #self.targetL.translate(*self.targs[self.target_index+1], reset=True) # self.show_object(self.targetL, True) # self.requeue() self.show_object(self.targetL, True) self.timedout = False self.requeue() def _start_hold_targetH(self): #make next target visible unless this is the final target in the trial #if 1 < self.chain_length: #self.targetR.translate(*self.targs[self.target_index+1], reset=True) # self.show_object(self.targetR, True) # self.requeue() self.show_object(self.targetH, True) self.timedout = False self.requeue() def _end_hold_center(self): self.target1.radius = 0.7*self.target_radius # color target green def _end_hold_targetL(self): self.targetL.color = (0,1,0,0.5) # color target green def _end_hold_targetH(self): self.targetH.color = (0,1,0,0.5) # color target green def _start_hold_penalty(self): #hide targets self.show_object(self.target1, False) self.show_object(self.targetL, False) self.show_object(self.targetH, False) self.timedout = True self.requeue() self.tries += 1 #self.target_index = -1 def _start_timeout_penalty(self): #hide targets self.show_object(self.target1, False) 
self.show_object(self.targetL, False) self.show_object(self.targetH, False) self.timedout = True self.requeue() self.tries += 1 #self.target_index = -1 def _start_targ_transition(self): #hide targets self.show_object(self.target1, False) self.show_object(self.targetL, False) self.show_object(self.targetH, False) self.requeue() def _start_check_reward(self): #hide targets self.show_object(self.target1, False) self.show_object(self.targetL, False) self.show_object(self.targetH, False) self.requeue() def _start_reward(self): #super(ApproachAvoidanceTask, self)._start_reward() if self.target_selected == 'L': self.show_object(self.targetL, True) #reward_assigned = self.targs[1,1] else: self.show_object(self.targetH, True) #reward_assigned = self.targs[0,1] #self.reward_counter = self.reward_counter + float(reward_assigned) self.requeue() @staticmethod def colored_targets_with_probabilistic_reward(length=1000, boundaries=(-18,18,-10,10,-15,15),reward_high_prob=80,reward_low_prob=40): """ Generator should return array of ntrials x 2 x 3. The second dimension is for each target. For example, first is the target with high probability of reward, and the second entry is for the target with low probability of reward. The third dimension holds three variables indicating position offset (yes/no), reward probability (fixed in this case), and location (binary returned where the ouput indicates either left or right). 
UPDATE: CHANGED SO THAT THE SECOND DIMENSION CARRIES THE REWARD PROBABILITY RATHER THAN THE REWARD SCHEDULE """ position_offsetH = np.random.randint(2,size=(1,length)) position_offsetL = np.random.randint(2,size=(1,length)) location_int = np.random.randint(2,size=(1,length)) # coin flips for reward schedules, want this to be elementwise comparison #assign_rewardH = np.random.randint(0,100,size=(1,length)) #assign_rewardL = np.random.randint(0,100,size=(1,length)) high_prob = reward_high_prob*np.ones((1,length)) low_prob = reward_low_prob*np.ones((1,length)) #reward_high = np.greater(high_prob,assign_rewardH) #reward_low = np.greater(low_prob,assign_rewardL) pairs = np.zeros([length,2,3]) pairs[:,0,0] = position_offsetH #pairs[:,0,1] = reward_high pairs[:,0,1] = high_prob pairs[:,0,2] = location_int pairs[:,1,0] = position_offsetL #pairs[:,1,1] = reward_low pairs[:,1,1] = low_prob pairs[:,1,2] = 1 - location_int return pairs @staticmethod def block_probabilistic_reward(length=1000, boundaries=(-18,18,-10,10,-15,15),reward_high_prob=80,reward_low_prob=40): pairs = colored_targets_with_probabilistic_reward(length=length, boundaries=boundaries,reward_high_prob=reward_high_prob,reward_low_prob=reward_low_prob) return pairs @staticmethod def colored_targets_with_randomwalk_reward(length=1000,reward_high_prob=80,reward_low_prob=40,reward_high_span = 20, reward_low_span = 20,step_size_mean = 0, step_size_var = 1): """ Generator should return array of ntrials x 2 x 3. The second dimension is for each target. For example, first is the target with high probability of reward, and the second entry is for the target with low probability of reward. The third dimension holds three variables indicating position offset (yes/no), reward probability, and location (binary returned where the ouput indicates either left or right). 
The variables reward_high_span and reward_low_span indicate the width of the range that the high or low reward probability are allowed to span respectively, e.g. if reward_high_prob is 80 and reward_high_span is 20, then the reward probability for the high value target will be bounded between 60 and 100 percent. """ position_offsetH = np.random.randint(2,size=(1,length)) position_offsetL = np.random.randint(2,size=(1,length)) location_int = np.random.randint(2,size=(1,length)) # define variables for increments: amount of increment and in which direction (i.e. increasing or decreasing) assign_rewardH = np.random.randn(1,length) assign_rewardL = np.random.randn(1,length) assign_rewardH_direction = np.random.randn(1,length) assign_rewardL_direction = np.random.randn(1,length) r_0_high = reward_high_prob r_0_low = reward_low_prob r_lowerbound_high = r_0_high - (reward_high_span/2) r_upperbound_high = r_0_high + (reward_high_span/2) r_lowerbound_low = r_0_low - (reward_low_span/2) r_upperbound_low = r_0_low + (reward_low_span/2) reward_high = np.zeros(length) reward_low = np.zeros(length) reward_high[0] = r_0_high reward_low[0] = r_0_low eps_high = assign_rewardH*step_size_mean + [2*(assign_rewardH_direction > 0) - 1]*step_size_var eps_low = assign_rewardL*step_size_mean + [2*(assign_rewardL_direction > 0) - 1]*step_size_var eps_high = eps_high.ravel() eps_low = eps_low.ravel() for i in range(1,length): ''' assign_rewardH_direction = np.random.randn(1) assign_rewardL_direction = np.random.randn(1) assign_rewardH = np.random.randn(1) if assign_rewardH_direction[i-1,] < 0: eps_high = step_size_mean*assign_rewardH[i-1] - step_size_var else: eps_high = step_size_mean*assign_rewardH[i-1] + step_size_var if assign_rewardL_direction[i] < 0: eps_low = step_size_mean*assign_rewardL[i] - step_size_var else: eps_low = step_size_mean*assign_rewardL[i] + step_size_var ''' reward_high[i] = reward_high[i-1] + eps_high[i-1] reward_low[i] = reward_low[i-1] + eps_low[i-1] reward_high[i] 
= (r_lowerbound_high < reward_high[i] < r_upperbound_high)*reward_high[i] + (r_lowerbound_high > reward_high[i])*(r_lowerbound_high+ eps_high[i-1]) + (r_upperbound_high < reward_high[i])*(r_upperbound_high - eps_high[i-1]) reward_low[i] = (r_lowerbound_low < reward_low[i] < r_upperbound_low)*reward_low[i] + (r_lowerbound_low > reward_low[i])*(r_lowerbound_low+ eps_low[i-1]) + (r_upperbound_low < reward_low[i])*(r_upperbound_low - eps_low[i-1]) pairs = np.zeros([length,2,3]) pairs[:,0,0] = position_offsetH pairs[:,0,1] = reward_high pairs[:,0,2] = location_int pairs[:,1,0] = position_offsetL pairs[:,1,1] = reward_low pairs[:,1,2] = 1 - location_int return pairs @staticmethod def randomwalk_probabilistic_reward(length=1000,reward_high_prob=80,reward_low_prob=40,reward_high_span = 20, reward_low_span = 20,step_size_mean = 0, step_size_var = 1): pairs = colored_targets_with_randomwalk_reward(length=length,reward_high_prob=reward_high_prob,reward_low_prob=reward_low_prob,reward_high_span = reward_high_span, reward_low_span = reward_low_span,step_size_mean = step_size_mean, step_size_var = step_size_var) return pairs
class BMIControlMulti(BMILoop, LinearlyDecreasingAssist, manualcontrolmultitasks.ManualControlMulti):
    '''
    Target capture task with cursor position controlled by BMI output.
    Cursor movement can be assisted toward target by setting assist_level > 0.
    '''
    background = (.5, .5, .5, 1)        # Set the screen background color to grey
    reset = traits.Int(0, desc='reset the decoder state to the starting configuration')
    ordered_traits = ['session_length', 'assist_level', 'assist_level_time', 'reward_time', 'timeout_time', 'timeout_penalty_time']
    exclude_parent_traits = ['marker_count', 'marker_num', 'goal_cache_block']
    static_states = []  # states in which the decoder is not run
    hidden_traits = ['arm_hide_rate', 'arm_visible', 'hold_penalty_time', 'rand_start', 'reset', 'target_radius', 'window_size']
    is_bmi_seed = False
    cursor_color_adjust = traits.OptionsList(*target_colors.keys(), bmi3d_input_options=target_colors.keys())

    def __init__(self, *args, **kwargs):
        super(BMIControlMulti, self).__init__(*args, **kwargs)

    def init(self, *args, **kwargs):
        '''Apply the selected cursor color and radius to the plant graphics, then run parent init.'''
        sph = self.plant.graphics_models[0]
        sph.color = target_colors[self.cursor_color_adjust]
        sph.radius = self.cursor_radius
        self.plant.cursor_radius = self.cursor_radius
        self.plant.cursor.radius = self.cursor_radius
        super(BMIControlMulti, self).init(*args, **kwargs)

    def move_effector(self, *args, **kwargs):
        '''No-op: in BMI control the decoder, not manual input, drives the effector.'''
        # BUGFIX: this method was defined twice in the class body (once with
        # *args/**kwargs and once without, the later silently shadowing the
        # former); a single permissive definition is kept.
        pass

    def create_assister(self):
        '''
        Instantiate the assister object appropriate for the decoder's state
        space. Raises NotImplementedError for state spaces with no assister.
        '''
        start_level, end_level = self.assist_level
        kwargs = dict(decoder_binlen=self.decoder.binlen, target_radius=self.target_radius)
        if hasattr(self, 'assist_speed'):
            kwargs['assist_speed'] = self.assist_speed

        from db import namelist
        if self.decoder.ssm == namelist.endpt_2D_state_space and isinstance(self.decoder, ppfdecoder.PPFDecoder):
            self.assister = OFCEndpointAssister()
        elif self.decoder.ssm == namelist.endpt_2D_state_space:
            self.assister = SimpleEndpointAssister(**kwargs)
        elif (self.decoder.ssm == namelist.tentacle_2D_state_space) or (self.decoder.ssm == namelist.joint_2D_state_space):
            self.assister = TentacleAssist(ssm=self.decoder.ssm, kin_chain=self.plant.kin_chain, update_rate=self.decoder.binlen)
        else:
            raise NotImplementedError("Cannot assist for this type of statespace: %r" % self.decoder.ssm)
        # BUGFIX: 'print self.assister' was Python 2 statement syntax; the
        # rest of the file uses Python 3 (e.g. argument-free super()).
        print(self.assister)

    def create_goal_calculator(self):
        '''Choose the goal calculator matching the decoder's state space.'''
        from db import namelist
        if self.decoder.ssm == namelist.endpt_2D_state_space:
            self.goal_calculator = goal_calculators.ZeroVelocityGoal(self.decoder.ssm)
        elif self.decoder.ssm == namelist.joint_2D_state_space:
            self.goal_calculator = goal_calculators.PlanarMultiLinkJointGoal(self.decoder.ssm, self.plant.base_loc, self.plant.kin_chain, multiproc=False, init_resp=None)
        elif self.decoder.ssm == namelist.tentacle_2D_state_space:
            shoulder_anchor = self.plant.base_loc
            chain = self.plant.kin_chain
            q_start = self.plant.get_intrinsic_coordinates()
            x_init = np.hstack([q_start, np.zeros_like(q_start), 1])
            x_init = np.mat(x_init).reshape(-1, 1)

            cached = True
            if cached:
                goal_calc_class = goal_calculators.PlanarMultiLinkJointGoalCached
                multiproc = False
            else:
                goal_calc_class = goal_calculators.PlanarMultiLinkJointGoal
                multiproc = True

            self.goal_calculator = goal_calc_class(namelist.tentacle_2D_state_space, shoulder_anchor, chain, multiproc=multiproc, init_resp=x_init)
        else:
            raise ValueError("Unrecognized decoder state space!")

    def get_target_BMI_state(self, *args):
        '''
        Run the goal calculator to determine the target state of the task
        '''
        if isinstance(self.goal_calculator, goal_calculators.PlanarMultiLinkJointGoalCached):
            task_eps = np.inf
        else:
            task_eps = 0.5
        ik_eps = task_eps / 10
        data, solution_updated = self.goal_calculator(self.target_location, verbose=False, n_particles=500, eps=ik_eps, n_iter=10, q_start=self.plant.get_intrinsic_coordinates())
        target_state, error = data

        # IK solution drifted too far from the task goal: recompute next time
        if isinstance(self.goal_calculator, goal_calculators.PlanarMultiLinkJointGoal) and error > task_eps and solution_updated:
            self.goal_calculator.reset()

        return np.array(target_state).reshape(-1, 1)

    def _end_timeout_penalty(self):
        '''Optionally reset the decoder state to its starting configuration after a timeout.'''
        if self.reset:
            self.decoder.filt.state.mean = self.init_decoder_mean
            self.hdf.sendMsg("reset")
class TargetCapture(Sequence):
    '''
    Generic cued target-capture skeleton, serving as the common ancestor for
    the most common type of motor control task. Subclasses provide geometry
    and the enter/leave-target tests.
    '''
    # Finite-state-machine transition table: state -> {event: next state}
    status = {
        "wait":            {"start_trial": "target"},
        "target":          {"enter_target": "hold", "timeout": "timeout_penalty"},
        "hold":            {"leave_target": "hold_penalty", "hold_complete": "delay"},
        "delay":           {"leave_target": "delay_penalty", "delay_complete": "targ_transition"},
        "targ_transition": {"trial_complete": "reward", "trial_abort": "wait", "trial_incomplete": "target"},
        "timeout_penalty": {"timeout_penalty_end": "targ_transition", "end_state": True},
        "hold_penalty":    {"hold_penalty_end": "targ_transition", "end_state": True},
        "delay_penalty":   {"delay_penalty_end": "targ_transition", "end_state": True},
        "reward":          {"reward_end": "wait", "stoppable": False, "end_state": True},
    }

    state = "wait"      # initial state of the FSM
    target_index = -1   # Helper variable to keep track of which target to display within a trial
    tries = 0           # Helper variable to keep track of the number of failed attempts at a given trial.
    sequence_generators = []

    # GUI-exposed timing parameters (all durations are in seconds)
    reward_time = traits.Float(.5, desc="Length of reward dispensation")
    hold_time = traits.Float(
        .2, desc="Length of hold required at targets before next target appears")
    hold_penalty_time = traits.Float(
        1, desc="Length of penalty time for target hold error")
    delay_time = traits.Float(
        0,
        desc=
        "Length of time after a hold while the next target is on before the go cue"
    )
    delay_penalty_time = traits.Float(
        1, desc="Length of penalty time for delay error")
    timeout_time = traits.Float(10, desc="Time allowed to go between targets")
    timeout_penalty_time = traits.Float(
        1, desc="Length of penalty time for timeout error")
    max_attempts = traits.Int(10, desc='The number of attempts at a target before\
 skipping to the next one')

    def init(self):
        # Per-presentation record sent to the "trials" sink: trial number,
        # generator index, and 3D target position.
        self.trial_dtype = np.dtype([('trial', 'u4'), ('index', 'u4'),
                                     ('target', 'f8', (3, ))])
        super().init()

    def _start_wait(self):
        # Call parent method to draw the next target capture sequence from the generator
        super()._start_wait()

        # number of times this sequence of targets has been attempted
        self.tries = 0

        # index of current target presented to subject
        self.target_index = -1

        # number of targets to be acquired in this trial
        self.chain_length = len(self.targs)

    def _parse_next_trial(self):
        '''Check that the generator has the required data'''
        self.gen_indices, self.targs = self.next_trial
        # TODO error checking

        # Update the data sinks with trial information
        # NOTE(review): self.trial_record is presumably allocated by a parent
        # class from self.trial_dtype -- confirm; it is not created here.
        self.trial_record['trial'] = self.calc_trial_num()
        for i in range(len(self.gen_indices)):
            self.trial_record['index'] = self.gen_indices[i]
            self.trial_record['target'] = self.targs[i]
            self.sinks.send("trials", self.trial_record)

    def _start_target(self):
        # advance to the next target in the chain
        self.target_index += 1

    def _end_target(self):
        '''Nothing generic to do.'''
        pass

    def _start_hold(self):
        '''Nothing generic to do.'''
        pass

    def _while_hold(self):
        '''Nothing generic to do.'''
        pass

    def _end_hold(self):
        '''Nothing generic to do.'''
        pass

    def _start_delay(self):
        '''Nothing generic to do.'''
        pass

    def _while_delay(self):
        '''Nothing generic to do.'''
        pass

    def _end_delay(self):
        '''Nothing generic to do.'''
        pass

    def _start_targ_transition(self):
        '''Nothing generic to do. Child class might show/hide targets'''
        pass

    def _while_targ_transition(self):
        '''Nothing generic to do.'''
        pass

    def _end_targ_transition(self):
        '''Nothing generic to do.'''
        pass

    def _increment_tries(self):
        # Shared bookkeeping for all three penalty states: count the failed
        # attempt and rewind to the start of the target chain.
        self.tries += 1
        self.target_index = -1

        if self.tries < self.max_attempts:
            # The sequence will be retried: log it under a new trial number
            self.trial_record['trial'] += 1
            for i in range(len(self.gen_indices)):
                self.trial_record['index'] = self.gen_indices[i]
                self.trial_record['target'] = self.targs[i]
                self.sinks.send("trials", self.trial_record)

    def _start_timeout_penalty(self):
        self._increment_tries()

    def _while_timeout_penalty(self):
        '''Nothing generic to do.'''
        pass

    def _end_timeout_penalty(self):
        '''Nothing generic to do.'''
        pass

    def _start_hold_penalty(self):
        self._increment_tries()

    def _while_hold_penalty(self):
        '''Nothing generic to do.'''
        pass

    def _end_hold_penalty(self):
        '''Nothing generic to do.'''
        pass

    def _start_delay_penalty(self):
        self._increment_tries()

    def _while_delay_penalty(self):
        '''Nothing generic to do.'''
        pass

    def _end_delay_penalty(self):
        '''Nothing generic to do.'''
        pass

    def _start_reward(self):
        '''Nothing generic to do.'''
        pass

    def _while_reward(self):
        '''Nothing generic to do.'''
        pass

    def _end_reward(self):
        '''Nothing generic to do.'''
        pass

    ################## State transition test functions ##################
    def _test_start_trial(self, time_in_state):
        '''Start next trial automatically. You may want this to instead be
            - a random delay
            - require some initiation action
        '''
        return True

    def _test_timeout(self, time_in_state):
        return time_in_state > self.timeout_time

    def _test_hold_complete(self, time_in_state):
        '''
        Test whether the target is held long enough to declare the
        trial a success

        Possible options
            - Target held for the minimum requred time (implemented here)
            - Sensorized object moved by a certain amount
            - Sensorized object moved to the required location
            - Manually triggered by experimenter
        '''
        return time_in_state > self.hold_time

    def _test_delay_complete(self, time_in_state):
        '''
        Test whether the delay period, when the cursor must stay in place
        while another target is being presented, is over. There should be
        no delay on the last target in a chain.
        '''
        return self.target_index + 1 == self.chain_length or time_in_state > self.delay_time

    def _test_trial_complete(self, time_in_state):
        '''Test whether all targets in sequence have been acquired'''
        return self.target_index == self.chain_length - 1

    def _test_trial_abort(self, time_in_state):
        '''Test whether the target capture sequence should just be skipped due to too many failures'''
        return (not self._test_trial_complete(time_in_state)) and (
            self.tries == self.max_attempts)

    def _test_trial_incomplete(self, time_in_state):
        '''Test whether the target capture sequence needs to be restarted'''
        return (not self._test_trial_complete(time_in_state)) and (
            self.tries < self.max_attempts) and not self.pause

    def _test_timeout_penalty_end(self, time_in_state):
        return time_in_state > self.timeout_penalty_time

    def _test_hold_penalty_end(self, time_in_state):
        return time_in_state > self.hold_penalty_time

    def _test_delay_penalty_end(self, time_in_state):
        return time_in_state > self.delay_penalty_time

    def _test_reward_end(self, time_in_state):
        return time_in_state > self.reward_time

    def _test_enter_target(self, time_in_state):
        '''This function is task-specific and not much can be done generically'''
        return False
def _test_leave_target(self, time_in_state): '''This function is task-specific and not much can be done generically''' return False def update_report_stats(self): ''' see experiment.Experiment.update_report_stats for docs ''' super().update_report_stats() self.reportstats['Trial #'] = self.calc_trial_num() self.reportstats['Reward/min'] = np.round(self.calc_events_per_min( 'reward', 120.), decimals=2)