def assign_trials_pairs():
    """Draw the trial pairs for the whole task and store them per block.

    The flat list of pairs is regrouped into shape
    (num_of_blocks, num_of_block_trials, 2) before being stored.
    """
    drawn_pairs = TaskLogics.select_trial_pairs(TaskParams.n_trials)
    grouped = np.reshape(
        drawn_pairs,
        (TaskParams.num_of_blocks, TaskParams.num_of_block_trials, 2),
    )
    TrialsInfo.set_trials_pairs(grouped.tolist())
def save_agent_trials_results(selected_options, visited_objects, gained_rewards, predictions, reaction_times):
    """Persist every per-trial result produced by an agent run.

    Each argument is forwarded unchanged to its matching TrialsInfo setter.
    """
    writes = (
        (TrialsInfo.set_subjects_selection, selected_options),
        (TrialsInfo.set_visited_objects, visited_objects),
        (TrialsInfo.set_gained_rewards, gained_rewards),
        (TrialsInfo.set_subject_predictions, predictions),
        (TrialsInfo.set_selection_reaction_times, reaction_times),
    )
    for setter, payload in writes:
        setter(payload)
def set_available_objects_in_trials():
    """Compute the available objects for each block's trial pairs and store them."""
    per_block_objects = [
        TaskLogics.find_available_objects(TrialsInfo.trials_pairs[block_idx])
        for block_idx in range(TaskParams.num_of_blocks)
    ]
    TrialsInfo.set_trials_available_objects(per_block_objects)
def manage_warmup_trials():
    """Build the warm-up phase data: trial pairs, their available objects,
    and the reward probabilities attached to those objects; store all three."""
    warmup_pairs = TaskLogics.select_trial_pairs(TaskParams.n_warm_up_trials)
    warmup_objects = TaskLogics.find_available_objects(warmup_pairs)
    training_probs = TaskLogics.select_reward_prob_for_training(
        TaskParams.n_warm_up_trials)
    objects_with_probs = TaskLogics.set_block_available_objects_reward_probs(
        warmup_objects, training_probs)
    TrialsInfo.set_warmup_trials_data(
        warmup_pairs, warmup_objects, objects_with_probs)
def store_trials_available_objects_reward_prob():
    """Attach each block's reward probabilities to its available objects and store the result."""
    per_block_probs = [
        TaskLogics.set_block_available_objects_reward_probs(
            TrialsInfo.trials_availables_objects[block_idx],
            TrialsInfo.objects_reward_probs_during_trials[block_idx],
        )
        for block_idx in range(TaskParams.num_of_blocks)
    ]
    TrialsInfo.set_available_objects_reward_probs(per_block_probs)
def initialize_trials(cls, phase=2):
    """Reset and rebuild all trial data for a new run (default: phase 2).

    The call order below is significant: pairs must exist before their
    available objects, objects before reward probabilities, and
    probabilities before the actual rewards are sampled.
    """
    # Reset agent Q-values and wipe any previously stored trial data.
    cls.initialize_qvalues()
    TrialsInfo.reinitialize_trials_info()
    # Rebuild the trial structure from scratch for the given phase.
    TaskLogics.assign_trials_pairs()
    TaskLogics.set_available_objects_in_trials()
    TaskLogics.set_objects_reward_probs(phase)
    TaskLogics.store_trials_available_objects_reward_prob()
    TaskLogics.set_objects_actual_rewards()
def initialize(first_phase=True, phase=1):
    """Set up all trial data for a task phase.

    On the first phase this additionally pairs the options, assigns objects
    to them, and prepares the warm-up trials. The statement order is
    significant: pairing/assignment must precede trial-pair selection, and
    reward probabilities must be stored before actual rewards are sampled.
    """
    TrialsInfo.reinitialize_trials_info()
    if first_phase:
        # Option/object layout is created only once, at the very start.
        TaskLogics.pair_options()
        TaskLogics.assign_objects_to_options()
    TaskLogics.assign_trials_pairs()
    TaskLogics.set_available_objects_in_trials()
    TaskLogics.set_objects_reward_probs(phase)
    TaskLogics.store_trials_available_objects_reward_prob()
    TaskLogics.set_objects_actual_rewards()
    if first_phase:
        # Warm-up trials exist only in the first phase; they get their own
        # trial pairs and reward draws.
        TaskLogics.manage_warmup_trials()
        TaskLogics.set_rewards_for_warmup()
def save_warmup_results(selected_keys, reaction_times):
    """Score the warm-up key presses and store selections, visited objects,
    gained rewards, and reaction times."""
    chosen, visited, earned = TaskLogics.block_trials_results(
        selected_keys,
        TrialsInfo.warmup_trials_pairs,
        TrialsInfo.warmup_available_objects,
        TrialsInfo.warmup_objects_actual_rewards,
    )
    TrialsInfo.set_warmup_selections(chosen)
    TrialsInfo.set_warmup_visited_objects(visited)
    TrialsInfo.set_warmup_gained_rewards(earned)
    TrialsInfo.set_warmup_reaction_times(reaction_times)
def set_objects_actual_rewards():
    """Sample the actual Bernoulli rewards for every object in every trial.

    For each block and each trial, one uniform random number is drawn per
    object and compared against that object's reward probability; both the
    raw draws and the resulting boolean rewards are stored on TrialsInfo.

    Improvements over the original: the misspelled local ``temp_radoms``
    is gone, and the triple-nested accumulation is decomposed into a
    per-trial helper for readability.
    """
    generated_randoms, actual_rewards = [], []
    for block in range(TaskParams.num_of_blocks):
        block_randoms, block_rewards = [], []
        for trial_probs in TrialsInfo.trials_availables_objects_reward_probs[block]:
            trial_randoms, trial_rewards = _sample_trial_rewards(trial_probs)
            block_randoms.append(trial_randoms)
            block_rewards.append(trial_rewards)
        generated_randoms.append(block_randoms)
        actual_rewards.append(block_rewards)
    TrialsInfo.set_generated_randoms_for_rewards(generated_randoms)
    TrialsInfo.set_objects_actual_rewards(actual_rewards)


def _sample_trial_rewards(trial_probs):
    """Draw one uniform random per object in a trial and return
    (random draws, boolean rewards), each as a list of 2-tuples."""
    trial_randoms, trial_rewards = [], []
    for pair_probs in trial_probs:
        draws = (random.random(), random.random())
        trial_randoms.append(draws)
        # A reward is earned when the draw falls below the object's probability.
        trial_rewards.append((draws[0] < pair_probs[0], draws[1] < pair_probs[1]))
    return trial_randoms, trial_rewards
def save_trials_results(selected_keys, reaction_times, predictions=None):
    """Score each block's key presses and store all per-trial results.

    Bug fix: the ``predictions`` argument was accepted but silently dropped.
    It is now forwarded to ``TrialsInfo.set_subject_predictions`` when
    provided, matching how ``save_agent_trials_results`` persists
    predictions. Passing ``predictions=None`` (the default) behaves exactly
    as before.
    """
    selected_options, visited_objects, rewards = [], [], []
    for block in range(TaskParams.num_of_blocks):
        block_selected, block_visited, block_rewards = \
            TaskLogics.block_trials_results(
                selected_keys[block],
                TrialsInfo.trials_pairs[block],
                TrialsInfo.trials_availables_objects[block],
                TrialsInfo.available_objects_actual_rewards[block],
            )
        selected_options.append(block_selected)
        visited_objects.append(block_visited)
        rewards.append(block_rewards)
    TrialsInfo.set_subjects_selection(selected_options)
    TrialsInfo.set_visited_objects(visited_objects)
    TrialsInfo.set_gained_rewards(rewards)
    if predictions is not None:
        # Previously lost; stored in the same position as in save_agent_trials_results.
        TrialsInfo.set_subject_predictions(predictions)
    TrialsInfo.set_selection_reaction_times(reaction_times)
def set_rewards_for_warmup():
    """Sample the warm-up rewards from their probabilities and store them."""
    draws, rewards = TaskLogics.set_reward(TrialsInfo.warmup_reward_probs)
    TrialsInfo.set_rewards_for_warmup(draws, rewards)
def set_objects_reward_probs(phase):
    """Look up the per-object reward probabilities for *phase* and store them."""
    TrialsInfo.set_objects_reward_probs(TaskLogics.find_reward_probs(phase))