def next_trial(self):
        # First trial exception
        if self.trial_num == 0:
            self.trial_num += 1
            # Send next trial info to Bonsai
            bonsai.send_current_trial_info(self)
            return
        # Replace the open file handle with its string repr; the file is
        # reopened for appending at the end of this method
        self.data_file = str(self.data_file)
        # Update the contrast object's buffers/counters and draw the next
        # contrast; this must run while self.position still holds the
        # previous trial's position
        self.contrast.next_trial(self.position)  # self.position is still the previous position

        # Increment trial number
        self.trial_num += 1
        # Update quiescent period
        self.quiescent_period = self.quiescent_period_base + misc.texp()
        # Update stimulus phase
        self.stim_phase = random.uniform(0, math.pi)
        # Update contrast
        self._next_contrast()
        # Update position
        self.position, self.stim_probability_left = self._next_position()
        # Update signed_contrast and buffer (AFTER position update)
        self.signed_contrast = self.contrast.value * np.sign(self.position)
        self.signed_contrast_buffer.append(self.signed_contrast)
        # Update state machine events
        self.event_error = self.threshold_events_dict[self.position]
        self.event_reward = self.threshold_events_dict[-self.position]
        # Reset outcome variables for next trial
        self.trial_correct = None
        # Open the data file to append the next trial
        self.data_file = open(self.data_file_path, 'a')
        # Send next trial info to Bonsai
        bonsai.send_current_trial_info(self)
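A note on the event lookups above: event_error / event_reward work because the stimulus positions are signed values that double as keys into the rotary-encoder threshold dict, so negating the position selects the mirrored threshold on the opposite side. A minimal sketch of that convention (the keys and event names here are illustrative, not taken from the task settings):

# Hypothetical mapping from signed encoder thresholds to event names;
# the real one comes from sph.ROTARY_ENCODER.THRESHOLD_EVENTS.
threshold_events_dict = {-35: "RotaryEncoder1_1", 35: "RotaryEncoder1_2"}

position = -35                                   # signed stimulus position
event_error = threshold_events_dict[position]    # threshold on the stimulus side
event_reward = threshold_events_dict[-position]  # mirrored threshold, opposite side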
Example No. 2
def make_ephysCW_pcqs(pc):
    qperiod_base = 0.2  # quiescent period = 0.2 + x, where x ~ exp(0.35) truncated to 0.2 <= x <= 0.5
    sphase = []
    qperiod = []
    for _ in pc:
        sphase.append(np.random.uniform(0, math.pi))
        qperiod.append(qperiod_base +
                       misc.texp(factor=0.35, min_=0.2, max_=0.5))
    qs = np.array([qperiod, sphase]).T
    pcqs = np.append(pc, qs, axis=1)
    return pcqs
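misc.texp is not shown on this page; from the comment above, it draws from an exponential distribution truncated to [min_, max_]. A self-contained sketch of one way such a helper could look (the rejection-sampling strategy is an assumption, not necessarily the library's actual implementation):

import numpy as np

def texp(factor=0.35, min_=0.2, max_=0.5):
    # Draw from an exponential with scale `factor` and resample until the
    # value falls inside [min_, max_] (truncation by rejection).
    x = np.random.exponential(factor)
    while not (min_ <= x <= max_):
        x = np.random.exponential(factor)
    return x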
Example No. 3
def make_ephysCW_pcqs(pc):
    qperiod_base = 0.2  # quiescent period = 0.2 + x, where x ~ exp(0.35) truncated to 0.2 <= x <= 0.5
    sphase = []
    qperiod = []
    for _ in pc:
        sphase.append(np.random.uniform(0, 2 * math.pi))
        qperiod.append(qperiod_base +
                       misc.texp(factor=0.35, min_=0.2, max_=0.5))
    qs = np.array([qperiod, sphase]).T
    pcqs = np.append(pc, qs, axis=1)
    # Reorder the columns with the inverse of perm (column i moves to slot perm[i])
    perm = [0, 1, 3, 4, 2]
    idx = np.empty_like(perm)
    idx[perm] = np.arange(len(perm))
    pcqs[:] = pcqs[:, idx]

    return pcqs
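For reference, the two-line idiom above builds the inverse of perm, so fancy-indexing the columns with idx sends source column i to destination slot perm[i]. On a toy array:

import numpy as np

perm = [0, 1, 3, 4, 2]            # perm[i] = destination slot of source column i
idx = np.empty_like(perm)
idx[perm] = np.arange(len(perm))  # idx = inverse permutation of perm

a = np.arange(10).reshape(2, 5)   # row 0 is [0 1 2 3 4], labeling the columns
print(a[:, idx][0])               # [0 1 4 2 3]: column 2 landed in slot 3, etc.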
Example No. 4
 def next_trial(self):
     # First trial exception
     if self.trial_num == 0:
         self.trial_num += 1
         self.block_num += 1
         self.block_trial_num += 1
         # Send next trial info to Bonsai
         bonsai.send_current_trial_info(self)
         return
     self.data_file = str(self.data_file)
     # Increment trial number
     self.trial_num += 1
     # Update quiescent period
     self.quiescent_period = self.quiescent_period_base + misc.texp()
     # Update stimulus phase
     self.stim_phase = random.uniform(0, 2 * math.pi)
     # Update block counters/length; update_block_params returns the
     # (mutated) trial-params object, hence the rebinding of self
     self = update_block_params(self)
     # Update stim probability left + buffer
     self.stim_probability_left = update_probability_left(self)
     self.stim_probability_left_buffer.append(self.stim_probability_left)
     # Update position + buffer
     self.position = draw_position(self.position_set,
                                   self.stim_probability_left)
     self.position_buffer.append(self.position)
     # Update reward
     self.left_reward, self.right_reward = draw_reward(
         self.stim_probability_left)
     # Update contrast + buffer
     self.contrast = misc.draw_contrast(
         self.contrast_set, prob_type=self.contrast_set_probability_type)
     self.contrast_buffer.append(self.contrast)
     # Update signed_contrast + buffer (AFTER position update)
     self.signed_contrast = self.contrast * np.sign(self.position)
     self.signed_contrast_buffer.append(self.signed_contrast)
     # Update state machine events
     self.event_error = self.threshold_events_dict[self.position]
     self.event_reward = self.threshold_events_dict[-self.position]
     # Reset outcome variables for next trial
     self.trial_correct = None
     # Open the data file to append the next trial
     self.data_file = open(self.data_file_path, "a")
     # Send next trial info to Bonsai
     bonsai.send_current_trial_info(self)
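draw_position is defined elsewhere in the repo; given its arguments (the position set and the left-stimulus probability), one plausible sketch is a weighted two-way choice. This assumes position_set has exactly two entries ordered [left, right] and is illustrative, not the task's actual implementation:

import numpy as np

def draw_position(position_set, stim_probability_left):
    # Weighted draw: the first (left) position with probability
    # stim_probability_left, the second (right) otherwise.
    return int(np.random.choice(
        position_set, p=[stim_probability_left, 1 - stim_probability_left]))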
Example No. 5
def get_block_len(factor, min_, max_):
    return int(misc.texp(factor=factor, min_=min_, max_=max_))
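A hypothetical usage, with illustrative parameters (the real factor/min/max come from the session settings): each call returns an integer block length drawn from the truncated exponential.

lengths = [get_block_len(factor=60, min_=20, max_=100) for _ in range(5)]
print(lengths)  # e.g. [34, 21, 88, 47, 60] -- ints in [20, 100]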
Example No. 6
    def __init__(self, sph):
        # Constants from settings
        self.session_start_delay_sec = sph.SESSION_START_DELAY_SEC
        self.init_datetime = parser.parse(
            sph.PYBPOD_SESSION) + datetime.timedelta(
                0, self.session_start_delay_sec)
        self.task_protocol = sph.PYBPOD_PROTOCOL
        self.data_file_path = sph.DATA_FILE_PATH
        self.data_file = open(self.data_file_path, "a")
        self.position_set = sph.STIM_POSITIONS
        self.contrast_set = sph.CONTRAST_SET
        self.contrast_set_probability_type = sph.CONTRAST_SET_PROBABILITY_TYPE
        self.repeat_on_error = sph.REPEAT_ON_ERROR
        self.threshold_events_dict = sph.ROTARY_ENCODER.THRESHOLD_EVENTS
        self.quiescent_period_base = sph.QUIESCENT_PERIOD
        self.quiescent_period = self.quiescent_period_base + misc.texp()
        self.response_window = sph.RESPONSE_WINDOW
        self.interactive_delay = sph.INTERACTIVE_DELAY
        self.iti_error = sph.ITI_ERROR
        self.iti_correct_target = sph.ITI_CORRECT
        self.osc_client = sph.OSC_CLIENT
        self.stim_freq = sph.STIM_FREQ
        self.stim_angle = sph.STIM_ANGLE
        self.stim_gain = sph.STIM_GAIN
        self.stim_sigma = sph.STIM_SIGMA
        self.out_tone = sph.OUT_TONE
        self.out_noise = sph.OUT_NOISE
        self.out_stop_sound = sph.OUT_STOP_SOUND
        self.poop_count = sph.POOP_COUNT
        self.save_ambient_data = sph.RECORD_AMBIENT_SENSOR_DATA
        self.as_data = {
            "Temperature_C": -1,
            "AirPressure_mb": -1,
            "RelativeHumidity": -1,
        }
        # Reward amount
        self.reward_amount = sph.REWARD_AMOUNT
        self.reward_valve_time = sph.REWARD_VALVE_TIME
        self.iti_correct = self.iti_correct_target - self.reward_valve_time
        # Initialize parameters that may change every trial
        self.trial_num = 0
        self.stim_phase = 0.0
        self.opto_block_prob = sph.OPTO_BLOCK_PROBABILITY
        # Bernoulli draw: opto_block is 1 with probability opto_block_prob,
        # 0 otherwise
        self.opto_block = np.random.choice(
            2, p=[1 - self.opto_block_prob, self.opto_block_prob])

        self.block_num = 0

        self.block_trial_num = 0
        self.opto_block_trial_num = 0

        self.block_len_factor = sph.BLOCK_LEN_FACTOR
        self.block_len_min = sph.BLOCK_LEN_MIN
        self.block_len_max = sph.BLOCK_LEN_MAX
        self.block_probability_set = sph.BLOCK_PROBABILITY_SET
        self.block_init_5050 = sph.BLOCK_INIT_5050
        self.block_len = init_block_len(self)

        self.opto_block_len = get_block_len(factor=30, min_=1, max_=60)
        # Position
        self.stim_probability_left = init_probability_left(self)
        self.stim_probability_left_buffer = [self.stim_probability_left]
        self.position = draw_position(self.position_set,
                                      self.stim_probability_left)
        self.position_buffer = [self.position]
        # Reward
        self.left_reward, self.right_reward = draw_reward(
            self.stim_probability_left)

        self.penalties = 0
        # Contrast
        self.contrast = misc.draw_contrast(self.contrast_set)
        self.contrast_buffer = [self.contrast]
        self.signed_contrast = self.contrast * np.sign(self.position)
        self.signed_contrast_buffer = [self.signed_contrast]
        # RE event names
        self.event_error = self.threshold_events_dict[self.position]
        self.event_reward = self.threshold_events_dict[-self.position]
        self.movement_left = self.threshold_events_dict[
            sph.QUIESCENCE_THRESHOLDS[0]]
        self.movement_right = self.threshold_events_dict[
            sph.QUIESCENCE_THRESHOLDS[1]]
        self.choice_left = self.threshold_events_dict[sph.CHOICE_THRESHOLDS[1]]
        self.choice_right = self.threshold_events_dict[
            sph.CHOICE_THRESHOLDS[0]]

        # Trial Completed params
        self.elapsed_time = 0
        self.behavior_data = []
        self.response_time = None
        self.response_time_buffer = []
        self.response_side_buffer = []
        self.trial_correct = None
        self.trial_correct_buffer = []
        self.ntrials_correct = 0
        self.water_delivered = 0
Example No. 7
 def __init__(self, sph):
     # Constants from settings
     self.init_datetime = parser.parse(sph.PYBPOD_SESSION)
     self.task_protocol = sph.PYBPOD_PROTOCOL
     self.data_file_path = sph.DATA_FILE_PATH
     self.data_file = open(self.data_file_path, 'a')
     self.position_set = sph.STIM_POSITIONS
     self.repeat_on_error = sph.REPEAT_ON_ERROR
     self.repeat_contrasts = sph.REPEAT_CONTRASTS
     self.threshold_events_dict = sph.ROTARY_ENCODER.THRESHOLD_EVENTS
     self.quiescent_period_base = sph.QUIESCENT_PERIOD
     self.quiescent_period = self.quiescent_period_base + misc.texp()
     self.response_window = sph.RESPONSE_WINDOW
     self.interactive_delay = sph.INTERACTIVE_DELAY
     self.iti_error = sph.ITI_ERROR
     self.iti_correct_target = sph.ITI_CORRECT
     self.osc_client = sph.OSC_CLIENT
     self.stim_freq = sph.STIM_FREQ
     self.stim_angle = sph.STIM_ANGLE
     self.stim_gain = sph.STIM_GAIN
     self.stim_sigma = sph.STIM_SIGMA
     self.out_tone = sph.OUT_TONE
     self.out_noise = sph.OUT_NOISE
     self.out_stop_sound = sph.OUT_STOP_SOUND
     self.poop_count = sph.POOP_COUNT
     self.save_ambient_data = sph.RECORD_AMBIENT_SENSOR_DATA
     self.as_data = {
         'Temperature_C': -1,
         'AirPressure_mb': -1,
         'RelativeHumidity': -1
     }
     # Reward amount
     self.reward_amount = sph.REWARD_AMOUNT
     self.reward_valve_time = sph.REWARD_VALVE_TIME
     self.iti_correct = self.iti_correct_target - self.reward_valve_time
     # Init trial type objects
     self.ac = AdaptiveContrast(sph)
     self.rc = RepeatContrast()
     # Initialize parameters that may change every trial
     self.contrast_set = sph.CONTRAST_SET
     self.trial_num = 0
     self.position = random.choice(sph.STIM_POSITIONS)
     self.stim_probability_left = sph.STIM_PROBABILITY_LEFT
     self.stim_phase = 0.
     self.event_error = self.threshold_events_dict[self.position]
     self.event_reward = self.threshold_events_dict[-self.position]
     self.movement_left = (
         self.threshold_events_dict[sph.QUIESCENCE_THRESHOLDS[0]])
     self.movement_right = (
         self.threshold_events_dict[sph.QUIESCENCE_THRESHOLDS[1]])
      # Outcome-related parameters
     self.contrast = self.ac
     self.current_contrast = self.contrast.value
     self.signed_contrast = self.contrast.value * np.sign(self.position)
     self.signed_contrast_buffer = [self.signed_contrast]
     # Trial Completed params
     self.elapsed_time = 0
     self.behavior_data = []
     self.response_time = None
     self.response_time_buffer = []
     self.response_buffer = [0] * sph.RESPONSE_BUFFER_LENGTH
     self.response_side_buffer = []
     self.trial_correct = None
     self.ntrials_correct = 0
     self.water_delivered = 0
     self.non_rc_ntrials = self.trial_num - self.rc.ntrials
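Across these examples, AdaptiveContrast and RepeatContrast are used interchangeably through a small duck-typed surface: a .value attribute (self.contrast.value in Examples 1 and 7) and a .next_trial(position) method (Example 1); RepeatContrast additionally exposes .ntrials (Example 7). A minimal stub of that shared interface, with everything beyond those members assumed:

class ContrastStub:
    # Stand-in showing only the interface the trial code relies on.
    def __init__(self, value=0.5):
        self.value = value    # contrast to show on the current trial
        self.ntrials = 0      # trials handled by this contrast object

    def next_trial(self, prev_position):
        # Update buffers from the finished trial (prev_position is the
        # position before the update) and choose the next value; this
        # placeholder keeps the contrast fixed.
        self.ntrials += 1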