def make_pc():
    """Pre-generate (position, contrast) pairs and block lengths for a session."""
    contrasts = [1., 0.25, 0.125, 0.0625, 0.0]
    len_block = [90]
    # First block: 90 trials, half left (-35) and half right (+35)
    pos = [-35] * int(len_block[0] / 2) + [35] * int(len_block[0] / 2)
    # 10 repeats of each of the 5 contrasts, sorted descending, with the
    # 5 trailing zeros dropped -> 45 values; cont + cont gives 90 to match pos
    cont = np.sort(contrasts * 10)[::-1][:-5].tolist()
    pc = np.array([pos, cont + cont]).T
    np.random.shuffle(pc)  # only shuffles on the first dimension
    prob_left = 0.8 if blocks.draw_position([-35, 35], 0.5) < 0 else 0.2
    while len(pc) < 2001:
        len_block.append(blocks.get_block_len(60, min_=20, max_=100))
        for x in range(len_block[-1]):
            p = blocks.draw_position([-35, 35], prob_left)
            c = misc.draw_contrast(contrasts)
            pc = np.append(pc, np.array([[p, c]]), axis=0)  # do this in PC space
        # Flip the left/right bias for the next block
        prob_left = np.round(np.abs(1 - prob_left), 1)
    return pc, len_block
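# make_pc() pre-builds the whole session up front: a (>= 2001, 2) array of
# (position, contrast) rows plus the list of block lengths. Its block lengths
# come from blocks.get_block_len(60, min_=20, max_=100), whose body is not
# shown in this section. A minimal sketch of such a truncated-exponential
# draw, consistent with the call site but purely illustrative:
import numpy as np

def _get_block_len_sketch(factor, min_=20, max_=100):
    # Redraw from an exponential with the given scale until the sample
    # falls inside [min_, max_]
    while True:
        draw = int(np.random.exponential(factor))
        if min_ <= draw <= max_:
            return draw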
def next_trial(self):
    # First trial exception
    if self.trial_num == 0:
        self.trial_num += 1
        self.block_num += 1
        self.block_rew_num += 1
        self.block_trial_num += 1
        # Send next trial info to Bonsai
        bonsai.send_current_trial_info(self)
        return

    self.data_file = str(self.data_file)
    # Increment trial number
    self.trial_num += 1
    # Update quiescent period
    self.quiescent_period = self.quiescent_period_base + misc.texp()
    # Update stimulus phase
    self.stim_phase = random.uniform(0, math.pi)
    # Update block
    self = blocks.update_block_params(self)
    # Update stim probability left
    self.stim_probability_left = blocks.update_probability_left(self)
    # Update position
    self.position = blocks.draw_position(self.position_set,
                                         self.stim_probability_left)
    # Update contrast
    self.contrast = misc.draw_contrast(
        self.contrast_set, prob_type=self.contrast_set_probability_type)
    self.signed_contrast = self.contrast * np.sign(self.position)
    # Update state machine events
    self.event_error = self.threshold_events_dict[self.position]
    self.event_correct = self.threshold_events_dict[-self.position]  # need to check this
    # @alejandro: this determines whether an event is rewarded
    self.rewarded = blocks_rew.draw_reward(self.position, self.rew_set,
                                           self.rew_probability_left)
    # Reset outcome variables for next trial
    self.trial_correct_rewarded = None
    self.trial_correct_unrewarded = None
    # Open the data file to append the next trial
    self.data_file = open(self.data_file_path, 'a')
    # Send next trial info to Bonsai
    bonsai.send_current_trial_info(self)
    # Update opto variables
    self.opto_trial = optobpod.opto_switch(self.opto_on, self.opto_freq)
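# This variant ends by setting self.opto_trial via
# optobpod.opto_switch(opto_on, opto_freq). The optobpod module is not shown
# here; a plausible minimal sketch, assuming opto_on is a boolean flag and
# opto_freq is the fraction of trials that receive stimulation (hypothetical
# helper, not the repository's implementation):
import random

def _opto_switch_sketch(opto_on, opto_freq):
    # Opto disabled for this session -> never an opto trial
    if not opto_on:
        return False
    # Otherwise stimulate on roughly opto_freq of trials
    return random.random() < opto_freq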
def next_trial(self):
    # First trial exception
    if self.trial_num == 0:
        self.trial_num += 1
        self.block_num += 1
        self.block_trial_num += 1
        # Send next trial info to Bonsai
        bonsai.send_current_trial_info(self)
        return

    self.data_file = str(self.data_file)
    # Increment trial number
    self.trial_num += 1
    # Update quiescent period
    self.quiescent_period = self.quiescent_period_base + misc.texp()
    # Update stimulus phase
    self.stim_phase = random.uniform(0, math.pi)
    # Update block
    self = blocks.update_block_params(self)
    # Update stim probability left + buffer
    self.stim_probability_left = blocks.update_probability_left(self)
    self.stim_probability_left_buffer.append(self.stim_probability_left)
    # Update position + buffer
    self.position = blocks.draw_position(self.position_set,
                                         self.stim_probability_left)
    self.position_buffer.append(self.position)
    # Update contrast + buffer
    self.contrast = misc.draw_contrast(
        self.contrast_set, prob_type=self.contrast_set_probability_type)
    self.contrast_buffer.append(self.contrast)
    # Update signed_contrast + buffer (AFTER position update)
    self.signed_contrast = self.contrast * np.sign(self.position)
    self.signed_contrast_buffer.append(self.signed_contrast)
    # Update state machine events
    self.event_error = self.threshold_events_dict[self.position]
    self.event_reward = self.threshold_events_dict[-self.position]
    # Reset outcome variables for next trial
    self.trial_correct = None
    # Open the data file to append the next trial
    self.data_file = open(self.data_file_path, 'a')
    # Send next trial info to Bonsai
    bonsai.send_current_trial_info(self)
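# Both next_trial() variants rely on two stochastic helpers whose bodies do
# not appear in this section. Minimal sketches consistent with the call
# sites (position_set like [-35, 35] with the left/negative position drawn
# with probability stim_probability_left; 'uniform' contrast draws) --
# illustrative only, not the repository's misc/blocks code:
import numpy as np

def _draw_position_sketch(position_set, stim_probability_left):
    # Left (negative) position with probability stim_probability_left
    return int(np.random.choice(position_set,
                                p=[stim_probability_left,
                                   1 - stim_probability_left]))

def _draw_contrast_sketch(contrast_set, prob_type='uniform'):
    # 'uniform': every contrast equally likely; the real misc.draw_contrast
    # supports other prob_type values not reproduced here
    if prob_type == 'uniform':
        return float(np.random.choice(contrast_set))
    raise NotImplementedError(prob_type)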
def __init__(self, sph):
    # Constants from settings
    self.init_datetime = parser.parse(sph.PYBPOD_SESSION)
    self.task_protocol = sph.PYBPOD_PROTOCOL
    self.elapsed_time = 0
    self.data_file_path = sph.DATA_FILE_PATH
    self.data_file = open(self.data_file_path, 'a')
    self.position_set = sph.STIM_POSITIONS
    self.contrast_set = sph.CONTRAST_SET
    self.contrast_set_probability_type = sph.CONTRAST_SET_PROBABILITY_TYPE
    self.repeat_on_error = sph.REPEAT_ON_ERROR
    self.threshold_events_dict = sph.ROTARY_ENCODER.THRESHOLD_EVENTS
    self.quiescent_period_base = sph.QUIESCENT_PERIOD
    self.quiescent_period = self.quiescent_period_base + misc.texp()
    self.response_window = sph.RESPONSE_WINDOW
    self.interactive_delay = sph.INTERACTIVE_DELAY
    self.iti_error = sph.ITI_ERROR
    self.iti_correct_target = sph.ITI_CORRECT
    self.osc_client = sph.OSC_CLIENT
    self.stim_freq = sph.STIM_FREQ
    self.stim_angle = sph.STIM_ANGLE
    self.stim_gain = sph.STIM_GAIN
    self.stim_sigma = sph.STIM_SIGMA
    self.out_tone = sph.OUT_TONE
    self.out_noise = sph.OUT_NOISE
    self.poop_count = sph.POOP_COUNT
    #######################################################################
    ############################ Opto Settings ############################
    #######################################################################
    self.opto_on = sph.OPTO_ON
    self.opto_freq = sph.OPTO_FREQ
    self.opto_pulse_length = sph.OPTO_PULSE_LENGTH
    self.opto_pulse_freq = sph.OPTO_PULSE_FREQ
    # Determines whether this trial is an opto trial
    self.opto_trial = optobpod.opto_switch(self.opto_on, self.opto_freq)
    #######################################################################
    #######################################################################
    # Reward amount
    self.reward_amount = sph.REWARD_AMOUNT
    self.reward_valve_time = sph.REWARD_VALVE_TIME
    self.iti_correct = self.iti_correct_target - self.reward_valve_time
    # Initialize parameters that may change every trial
    self.trial_num = 0
    self.stim_phase = 0.
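# The quiescent period above is quiescent_period_base plus misc.texp(),
# i.e. truncated-exponential jitter so the animal cannot predict stimulus
# onset. A minimal sketch of such a draw (the parameter values are
# assumptions for illustration, not necessarily the repository's defaults):
import numpy as np

def _texp_sketch(factor=0.35, min_=0.2, max_=0.5):
    # Resample until the exponential draw lands inside [min_, max_]
    x = np.random.exponential(factor)
    while not (min_ <= x <= max_):
        x = np.random.exponential(factor)
    return x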
    self.block_num = 0
    self.block_trial_num = 0
    self.block_len_factor = sph.BLOCK_LEN_FACTOR
    self.block_len_min = sph.BLOCK_LEN_MIN
    self.block_len_max = sph.BLOCK_LEN_MAX
    self.block_probability_set = sph.BLOCK_PROBABILITY_SET
    self.block_len = blocks.init_block_len(self)
    ############################ Reward blocks ############################
    self.rew_set = [1, 0]  # @alejandro
    self.rew_prob_set = sph.BLOCK_REW_PROBABILITY_SET  # @alejandro
    self.block_rew_num = 0
    self.block_rew_len_factor = sph.BLOCK_REW_LEN_FACTOR
    self.block_rew_len_min = sph.BLOCK_REW_LEN_MIN
    self.block_rew_len_max = sph.BLOCK_REW_LEN_MAX
    self.block_rew_probability_set = sph.BLOCK_REW_PROBABILITY_SET
    self.block_rew_len = blocks_rew.init_block_len(self)
    #######################################################################
    # Position
    self.stim_probability_left = blocks.init_probability_left(self)
    self.position = blocks.draw_position(self.position_set,
                                         self.stim_probability_left)
    # Contrast
    self.contrast = misc.draw_contrast(self.contrast_set)
    self.signed_contrast = self.contrast * np.sign(self.position)
    # RE event names
    self.event_error = self.threshold_events_dict[self.position]
    self.event_correct = self.threshold_events_dict[-self.position]  # need to change this
    self.movement_left = (
        self.threshold_events_dict[sph.QUIESCENCE_THRESHOLDS[0]])
    self.movement_right = (
        self.threshold_events_dict[sph.QUIESCENCE_THRESHOLDS[1]])
    self.response_time_buffer = []
    # Rewarded
    self.rew_probability_left = blocks_rew.init_rew_probability_left(self)  # @alejandro
    # @alejandro: this determines whether an event is rewarded
    self.rewarded = blocks_rew.draw_reward(self.position, self.rew_set,
                                           self.rew_probability_left)
    # Outcome related parameters
    self.trial_correct_rewarded = None  # @alejandro
    self.trial_correct_unrewarded = None  # @alejandro
    self.ntrials_correct_unrewarded = 0  # @alejandro
    self.ntrials_correct_rewarded = 0  # @alejandro
    self.water_delivered = 0
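# blocks_rew.draw_reward(position, rew_set, rew_probability_left) decides
# whether a correct response on this trial is actually rewarded, with
# rew_set = [1, 0] (rewarded / unrewarded). Its body is not shown; one
# plausible minimal sketch, assuming rew_probability_left applies to the
# left (negative) position and its complement to the right -- hypothetical,
# not the repository's code:
import numpy as np

def _draw_reward_sketch(position, rew_set, rew_probability_left):
    p_rew = rew_probability_left if position < 0 else 1 - rew_probability_left
    return int(np.random.choice(rew_set, p=[p_rew, 1 - p_rew]))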
def __init__(self, sph):
    # Constants from settings
    self.init_datetime = parser.parse(sph.PYBPOD_SESSION)
    self.task_protocol = sph.PYBPOD_PROTOCOL
    self.data_file_path = sph.DATA_FILE_PATH
    self.data_file = open(self.data_file_path, 'a')
    self.position_set = sph.STIM_POSITIONS
    self.contrast_set = sph.CONTRAST_SET
    self.contrast_set_probability_type = sph.CONTRAST_SET_PROBABILITY_TYPE
    self.repeat_on_error = sph.REPEAT_ON_ERROR
    self.threshold_events_dict = sph.ROTARY_ENCODER.THRESHOLD_EVENTS
    self.quiescent_period_base = sph.QUIESCENT_PERIOD
    self.quiescent_period = self.quiescent_period_base + misc.texp()
    self.response_window = sph.RESPONSE_WINDOW
    self.interactive_delay = sph.INTERACTIVE_DELAY
    self.iti_error = sph.ITI_ERROR
    self.iti_correct_target = sph.ITI_CORRECT
    self.osc_client = sph.OSC_CLIENT
    self.stim_freq = sph.STIM_FREQ
    self.stim_angle = sph.STIM_ANGLE
    self.stim_gain = sph.STIM_GAIN
    self.stim_sigma = sph.STIM_SIGMA
    self.out_tone = sph.OUT_TONE
    self.out_noise = sph.OUT_NOISE
    self.poop_count = sph.POOP_COUNT
    self.save_ambient_data = sph.RECORD_AMBIENT_SENSOR_DATA
    self.as_data = {
        'Temperature_C': 0,
        'AirPressure_mb': 0,
        'RelativeHumidity': 0
    }
    # Reward amount
    self.reward_amount = sph.REWARD_AMOUNT
    self.reward_valve_time = sph.REWARD_VALVE_TIME
    self.iti_correct = self.iti_correct_target - self.reward_valve_time
    # Initialize parameters that may change every trial
    self.trial_num = 0
    self.stim_phase = 0.
    self.block_num = 0
    self.block_trial_num = 0
    self.block_len_factor = sph.BLOCK_LEN_FACTOR
    self.block_len_min = sph.BLOCK_LEN_MIN
    self.block_len_max = sph.BLOCK_LEN_MAX
    self.block_probability_set = sph.BLOCK_PROBABILITY_SET
    self.block_init_5050 = sph.BLOCK_INIT_5050
    self.block_len = blocks.init_block_len(self)
    # Position
    self.stim_probability_left = blocks.init_probability_left(self)
    self.stim_probability_left_buffer = [self.stim_probability_left]
    self.position = blocks.draw_position(self.position_set,
                                         self.stim_probability_left)
    self.position_buffer = [self.position]
    # Contrast
    self.contrast = misc.draw_contrast(self.contrast_set)
    self.contrast_buffer = [self.contrast]
    self.signed_contrast = self.contrast * np.sign(self.position)
    self.signed_contrast_buffer = [self.signed_contrast]
    # RE event names
    self.event_error = self.threshold_events_dict[self.position]
    self.event_reward = self.threshold_events_dict[-self.position]
    self.movement_left = (
        self.threshold_events_dict[sph.QUIESCENCE_THRESHOLDS[0]])
    self.movement_right = (
        self.threshold_events_dict[sph.QUIESCENCE_THRESHOLDS[1]])
    # Trial Completed params
    self.elapsed_time = 0
    self.behavior_data = []
    self.response_time = None
    self.response_time_buffer = []
    self.response_side_buffer = []
    self.trial_correct = None
    self.trial_correct_buffer = []
    self.ntrials_correct = 0
    self.water_delivered = 0
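# The *_buffer lists initialized above make cheap online summaries possible
# during a session. For example, the running fraction of left-side stimuli
# (a sketch over the attributes defined above; `tph` stands for an instance
# of this handler, and position < 0 is assumed to encode "left"):
import numpy as np

def _left_fraction_sketch(tph):
    positions = np.array(tph.position_buffer)
    return float(np.mean(positions < 0))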