Example #1
    def next_trial(self):
        # First trial exception
        if self.trial_num == 0:
            self.trial_num += 1
            # Send next trial info to Bonsai
            bonsai.send_current_trial_info(self)
            return
        # Replace the file handle with its string repr; reopened below
        self.data_file = str(self.data_file)
        # Update buffers/counters and get the next contrast. This has to
        # happen before self.contrast points to the next trial's value.
        self.contrast.next_trial(self.position)  # still prev_position

        # Increment trial number
        self.trial_num += 1
        # Update quiescent period
        self.quiescent_period = self.quiescent_period_base + misc.texp()
        # Update stimulus phase
        self.stim_phase = random.uniform(0, math.pi)
        # Update contrast
        self._next_contrast()
        # Update position
        self.position, self.stim_probability_left = self._next_position()
        # Update signed_contrast and buffer (AFTER position update)
        self.signed_contrast = self.contrast.value * np.sign(self.position)
        self.signed_contrast_buffer.append(self.signed_contrast)
        # Update state machine events
        self.event_error = self.threshold_events_dict[self.position]
        self.event_reward = self.threshold_events_dict[-self.position]
        # Reset outcome variables for next trial
        self.trial_correct = None
        # Open the data file to append the next trial
        self.data_file = open(self.data_file_path, 'a')
        # Send next trial info to Bonsai
        bonsai.send_current_trial_info(self)
Example #2
import math
import random

import numpy as np

import misc  # project-local helper providing texp() (import path assumed)


def make_pcqs(pc):
    qperiod_base = 0.2  # base; + x where x ~ Exp(0.35), truncated to 0.2 <= x <= 0.5
    sphase = []
    qperiod = []
    for i in pc:
        sphase.append(random.uniform(0, math.pi))
        qperiod.append(qperiod_base +
                       misc.texp(factor=0.35, min_=0.2, max_=0.5))
    qs = np.array([qperiod, sphase]).T
    pcqs = np.append(pc, qs, axis=1)
    return pcqs
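
A quick usage sketch for make_pcqs, assuming pc is an (N, 2) array of
position/contrast pairs, which is what the column-wise append implies; the
values below are illustrative only:

import numpy as np

pc = np.array([[-35.0, 1.0],
               [35.0, 0.5]])  # hypothetical position/contrast pairs
pcqs = make_pcqs(pc)
print(pcqs.shape)  # (2, 4): position, contrast, quiescent period, stim phase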
Example #3
    def next_trial(self):
        # First trial exception
        if self.trial_num == 0:
            self.trial_num += 1
            self.block_num += 1
            self.block_rew_num += 1
            self.block_trial_num += 1
            # Send next trial info to Bonsai
            bonsai.send_current_trial_info(self)
            return
        # Replace the file handle with its string repr; reopened below
        self.data_file = str(self.data_file)
        # Increment trial number
        self.trial_num += 1
        # Update quiescent period
        self.quiescent_period = self.quiescent_period_base + misc.texp()
        # Update stimulus phase
        self.stim_phase = random.uniform(0, math.pi)
        # Update block
        self = blocks.update_block_params(self)
        # Update stim probability left
        self.stim_probability_left = blocks.update_probability_left(self)
        # Update position
        self.position = blocks.draw_position(self.position_set,
                                             self.stim_probability_left)
        # Update contrast
        self.contrast = misc.draw_contrast(
            self.contrast_set, prob_type=self.contrast_set_probability_type)
        self.signed_contrast = self.contrast * np.sign(self.position)
        # Update state machine events
        self.event_error = self.threshold_events_dict[self.position]
        self.event_correct = self.threshold_events_dict[
            -self.position]  # need to check this
        # @alejandro: this determines whether an event is rewarded
        self.rewarded = blocks_rew.draw_reward(self.position, self.rew_set,
                                               self.rew_probability_left)
        # Reset outcome variables for next trial
        self.trial_correct_rewarded = None
        self.trial_correct_unrewarded = None
        # Open the data file to append the next trial
        self.data_file = open(self.data_file_path, 'a')
        # Send next trial info to Bonsai
        bonsai.send_current_trial_info(self)
        # Update opto variables
        self.opto_trial = optobpod.opto_switch(self.opto_on, self.opto_freq)
Example #4
    def next_trial(self):
        # First trial exception
        if self.trial_num == 0:
            self.trial_num += 1
            self.block_num += 1
            self.block_trial_num += 1
            # Send next trial info to Bonsai
            bonsai.send_current_trial_info(self)
            return
        # Replace the file handle with its string repr; reopened below
        self.data_file = str(self.data_file)
        # Increment trial number
        self.trial_num += 1
        # Update quiescent period
        self.quiescent_period = self.quiescent_period_base + misc.texp()
        # Update stimulus phase
        self.stim_phase = random.uniform(0, math.pi)
        # Update block
        self = blocks.update_block_params(self)
        # Update stim probability left + buffer
        self.stim_probability_left = blocks.update_probability_left(self)
        self.stim_probability_left_buffer.append(self.stim_probability_left)
        # Update position + buffer
        self.position = blocks.draw_position(self.position_set,
                                             self.stim_probability_left)
        self.position_buffer.append(self.position)
        # Update contrast + buffer
        self.contrast = misc.draw_contrast(
            self.contrast_set, prob_type=self.contrast_set_probability_type)
        self.contrast_buffer.append(self.contrast)
        # Update signed_contrast + buffer (AFTER position update)
        self.signed_contrast = self.contrast * np.sign(self.position)
        self.signed_contrast_buffer.append(self.signed_contrast)
        # Update state machine events
        self.event_error = self.threshold_events_dict[self.position]
        self.event_reward = self.threshold_events_dict[-self.position]
        # Reset outcome variables for next trial
        self.trial_correct = None
        # Open the data file to append the next trial
        self.data_file = open(self.data_file_path, 'a')
        # Send next trial info to Bonsai
        bonsai.send_current_trial_info(self)
Example #5
    def __init__(self, sph):
        # Constants from settings
        self.init_datetime = parser.parse(sph.PYBPOD_SESSION)
        self.task_protocol = sph.PYBPOD_PROTOCOL
        self.elapsed_time = 0
        self.data_file_path = sph.DATA_FILE_PATH
        self.data_file = open(self.data_file_path, 'a')
        self.position_set = sph.STIM_POSITIONS
        self.contrast_set = sph.CONTRAST_SET
        self.contrast_set_probability_type = sph.CONTRAST_SET_PROBABILITY_TYPE
        self.repeat_on_error = sph.REPEAT_ON_ERROR
        self.threshold_events_dict = sph.ROTARY_ENCODER.THRESHOLD_EVENTS
        self.quiescent_period_base = sph.QUIESCENT_PERIOD
        self.quiescent_period = self.quiescent_period_base + misc.texp()
        self.response_window = sph.RESPONSE_WINDOW
        self.interactive_delay = sph.INTERACTIVE_DELAY
        self.iti_error = sph.ITI_ERROR
        self.iti_correct_target = sph.ITI_CORRECT
        self.osc_client = sph.OSC_CLIENT
        self.stim_freq = sph.STIM_FREQ
        self.stim_angle = sph.STIM_ANGLE
        self.stim_gain = sph.STIM_GAIN
        self.stim_sigma = sph.STIM_SIGMA
        self.out_tone = sph.OUT_TONE
        self.out_noise = sph.OUT_NOISE
        self.poop_count = sph.POOP_COUNT
        #######################################################################
        # Opto settings
        #######################################################################
        self.opto_on = sph.OPTO_ON
        self.opto_freq = sph.OPTO_FREQ
        self.opto_pulse_length = sph.OPTO_PULSE_LENGTH
        self.opto_pulse_freq = sph.OPTO_PULSE_FREQ
        # Determines whether this trial is an opto trial
        self.opto_trial = optobpod.opto_switch(self.opto_on, self.opto_freq)
        #######################################################################
        # Reward amount
        self.reward_amount = sph.REWARD_AMOUNT
        self.reward_valve_time = sph.REWARD_VALVE_TIME
        self.iti_correct = self.iti_correct_target - self.reward_valve_time
        # Initialize parameters that may change every trial
        self.trial_num = 0
        self.stim_phase = 0.
        self.block_num = 0
        self.block_trial_num = 0
        self.block_len_factor = sph.BLOCK_LEN_FACTOR
        self.block_len_min = sph.BLOCK_LEN_MIN
        self.block_len_max = sph.BLOCK_LEN_MAX
        self.block_probability_set = sph.BLOCK_PROBABILITY_SET
        self.block_len = blocks.init_block_len(self)
        ########################## Reward blocks ##############################
        self.rew_set = [1, 0]  # @alejandro
        self.rew_prob_set = sph.BLOCK_REW_PROBABILITY_SET  # @alejandro
        self.block_rew_num = 0
        self.block_rew_len_factor = sph.BLOCK_REW_LEN_FACTOR
        self.block_rew_len_min = sph.BLOCK_REW_LEN_MIN
        self.block_rew_len_max = sph.BLOCK_REW_LEN_MAX
        self.block_rew_probability_set = sph.BLOCK_REW_PROBABILITY_SET
        # NOTE: this overwrites the stimulus block length assigned above
        self.block_len = blocks_rew.init_block_len(self)
        #######################################################################

        # Position
        self.stim_probability_left = blocks.init_probability_left(self)
        self.position = blocks.draw_position(self.position_set,
                                             self.stim_probability_left)
        # Contrast
        self.contrast = misc.draw_contrast(self.contrast_set)
        self.signed_contrast = self.contrast * np.sign(self.position)
        # RE event names
        self.event_error = self.threshold_events_dict[self.position]
        self.event_correct = self.threshold_events_dict[
            -self.position]  # need to change this
        self.movement_left = (
            self.threshold_events_dict[sph.QUIESCENCE_THRESHOLDS[0]])
        self.movement_right = (
            self.threshold_events_dict[sph.QUIESCENCE_THRESHOLDS[1]])
        self.response_time_buffer = []
        # Rewarded
        self.rew_probability_left = blocks_rew.init_rew_probability_left(
            self)  # @alejandro
        # @alejandro: this determines whether an event is rewarded
        self.rewarded = blocks_rew.draw_reward(self.position, self.rew_set,
                                               self.rew_probability_left)
        # Outcome related parameters
        self.trial_correct_rewarded = None  # @alejandro
        self.trial_correct_unrewarded = None  # @alejandro
        self.ntrials_correct_unrewarded = 0  # @alejandro
        self.ntrials_correct_rewarded = 0  # @alejandro
        self.water_delivered = 0
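
Examples #3 and #5 decide opto trials via optobpod.opto_switch, whose
implementation is not shown. A minimal sketch under the assumption that
opto_freq is the per-trial probability of stimulation and that opto_on
globally enables the switch; the behaviour here is inferred, not the
project's confirmed code:

import numpy as np

def opto_switch(opto_on, opto_freq):
    # Return 1 if this trial should deliver opto stimulation, else 0.
    # Assumes opto_freq is a per-trial probability in [0, 1].
    if not opto_on:
        return 0
    return int(np.random.random() < opto_freq)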
Example #6
import misc  # project-local helper providing texp() (import path assumed)

def get_block_len(factor, min_, max_):
    return int(misc.texp(factor=factor, min_=min_, max_=max_))
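
Nearly every snippet above draws timing and block-length values through
misc.texp. A minimal sketch of such a truncated-exponential helper, assuming
the rejection-sampling behaviour implied by the comment in Example #2
(x ~ Exp(0.35), truncated to 0.2 <= x <= 0.5); the defaults mirror that
comment and are assumptions, not the project's confirmed values:

import numpy as np

def texp(factor=0.35, min_=0.2, max_=0.5):
    # Rejection sampling: redraw from Exp(scale=factor) until the
    # sample falls inside [min_, max_].
    x = np.random.exponential(factor)
    while not (min_ <= x <= max_):
        x = np.random.exponential(factor)
    return x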
Example #7
    def __init__(self, sph):
        # Constants from settings
        self.init_datetime = parser.parse(sph.PYBPOD_SESSION)
        self.task_protocol = sph.PYBPOD_PROTOCOL
        self.data_file_path = sph.DATA_FILE_PATH
        self.data_file = open(self.data_file_path, 'a')
        self.position_set = sph.STIM_POSITIONS
        self.contrast_set = sph.CONTRAST_SET
        self.contrast_set_probability_type = sph.CONTRAST_SET_PROBABILITY_TYPE
        self.repeat_on_error = sph.REPEAT_ON_ERROR
        self.threshold_events_dict = sph.ROTARY_ENCODER.THRESHOLD_EVENTS
        self.quiescent_period_base = sph.QUIESCENT_PERIOD
        self.quiescent_period = self.quiescent_period_base + misc.texp()
        self.response_window = sph.RESPONSE_WINDOW
        self.interactive_delay = sph.INTERACTIVE_DELAY
        self.iti_error = sph.ITI_ERROR
        self.iti_correct_target = sph.ITI_CORRECT
        self.osc_client = sph.OSC_CLIENT
        self.stim_freq = sph.STIM_FREQ
        self.stim_angle = sph.STIM_ANGLE
        self.stim_gain = sph.STIM_GAIN
        self.stim_sigma = sph.STIM_SIGMA
        self.out_tone = sph.OUT_TONE
        self.out_noise = sph.OUT_NOISE
        self.poop_count = sph.POOP_COUNT
        self.save_ambient_data = sph.RECORD_AMBIENT_SENSOR_DATA
        self.as_data = {
            'Temperature_C': 0,
            'AirPressure_mb': 0,
            'RelativeHumidity': 0
        }
        # Reward amount
        self.reward_amount = sph.REWARD_AMOUNT
        self.reward_valve_time = sph.REWARD_VALVE_TIME
        self.iti_correct = self.iti_correct_target - self.reward_valve_time
        # Initialize parameters that may change every trial
        self.trial_num = 0
        self.stim_phase = 0.

        self.block_num = 0
        self.block_trial_num = 0
        self.block_len_factor = sph.BLOCK_LEN_FACTOR
        self.block_len_min = sph.BLOCK_LEN_MIN
        self.block_len_max = sph.BLOCK_LEN_MAX
        self.block_probability_set = sph.BLOCK_PROBABILITY_SET
        self.block_init_5050 = sph.BLOCK_INIT_5050
        self.block_len = blocks.init_block_len(self)
        # Position
        self.stim_probability_left = blocks.init_probability_left(self)
        self.stim_probability_left_buffer = [self.stim_probability_left]
        self.position = blocks.draw_position(self.position_set,
                                             self.stim_probability_left)
        self.position_buffer = [self.position]
        # Contrast
        self.contrast = misc.draw_contrast(self.contrast_set)
        self.contrast_buffer = [self.contrast]
        self.signed_contrast = self.contrast * np.sign(self.position)
        self.signed_contrast_buffer = [self.signed_contrast]
        # RE event names
        self.event_error = self.threshold_events_dict[self.position]
        self.event_reward = self.threshold_events_dict[-self.position]
        self.movement_left = (
            self.threshold_events_dict[sph.QUIESCENCE_THRESHOLDS[0]])
        self.movement_right = (
            self.threshold_events_dict[sph.QUIESCENCE_THRESHOLDS[1]])
        # Trial Completed params
        self.elapsed_time = 0
        self.behavior_data = []
        self.response_time = None
        self.response_time_buffer = []
        self.response_side_buffer = []
        self.trial_correct = None
        self.trial_correct_buffer = []
        self.ntrials_correct = 0
        self.water_delivered = 0
Example #8
    def __init__(self, sph):
        # Constants from settings
        self.init_datetime = parser.parse(sph.PYBPOD_SESSION)
        self.task_protocol = sph.PYBPOD_PROTOCOL
        self.data_file_path = sph.DATA_FILE_PATH
        self.data_file = open(self.data_file_path, 'a')
        self.position_set = sph.STIM_POSITIONS
        self.repeat_on_error = sph.REPEAT_ON_ERROR
        self.repeat_contrasts = sph.REPEAT_CONTRASTS
        self.threshold_events_dict = sph.ROTARY_ENCODER.THRESHOLD_EVENTS
        self.quiescent_period_base = sph.QUIESCENT_PERIOD
        self.quiescent_period = self.quiescent_period_base + misc.texp()
        self.response_window = sph.RESPONSE_WINDOW
        self.interactive_delay = sph.INTERACTIVE_DELAY
        self.iti_error = sph.ITI_ERROR
        self.iti_correct_target = sph.ITI_CORRECT
        self.osc_client = sph.OSC_CLIENT
        self.stim_freq = sph.STIM_FREQ
        self.stim_angle = sph.STIM_ANGLE
        self.stim_gain = sph.STIM_GAIN
        self.stim_sigma = sph.STIM_SIGMA
        self.out_tone = sph.OUT_TONE
        self.out_noise = sph.OUT_NOISE
        self.poop_count = sph.POOP_COUNT
        self.save_ambient_data = sph.RECORD_AMBIENT_SENSOR_DATA
        self.as_data = {
            'Temperature_C': 0,
            'AirPressure_mb': 0,
            'RelativeHumidity': 0
        }
        # Reward amount
        self.reward_amount = sph.REWARD_AMOUNT
        self.reward_valve_time = sph.REWARD_VALVE_TIME
        self.iti_correct = self.iti_correct_target - self.reward_valve_time
        # Init trial type objects
        self.ac = AdaptiveContrast(sph)
        self.rc = RepeatContrast()
        # Initialize parameters that may change every trial
        self.contrast_set = sph.CONTRAST_SET
        self.trial_num = 0
        self.position = random.choice(sph.STIM_POSITIONS)
        self.stim_probability_left = sph.STIM_PROBABILITY_LEFT
        self.stim_phase = 0.
        self.event_error = self.threshold_events_dict[self.position]
        self.event_reward = self.threshold_events_dict[-self.position]
        self.movement_left = (
            self.threshold_events_dict[sph.QUIESCENCE_THRESHOLDS[0]])
        self.movement_right = (
            self.threshold_events_dict[sph.QUIESCENCE_THRESHOLDS[1]])
        # Outcome related parameters
        self.contrast = self.ac  # contrast handler starts as the adaptive object
        self.current_contrast = self.contrast.value
        self.signed_contrast = self.contrast.value * np.sign(self.position)
        self.signed_contrast_buffer = [self.signed_contrast]
        # Trial Completed params
        self.elapsed_time = 0
        self.behavior_data = []
        self.response_time = None
        self.response_time_buffer = []
        self.response_buffer = [0] * sph.RESPONSE_BUFFER_LENGTH
        self.response_side_buffer = []
        self.trial_correct = None
        self.ntrials_correct = 0
        self.water_delivered = 0
        self.non_rc_ntrials = self.trial_num - self.rc.ntrials