def test_WP_err(self):
    np.random.seed(100)
    d = discrete.weightProb(task_responses=[1, 2, 3])
    result = d([0.6, 0.3, 0.5], trial_responses=[0, 3])
    correct_result = (3, collections.OrderedDict([(1, 0), (2, 0), (3, 1)]))
    assert result == correct_result
def test_WP_string(self):
    np.random.seed(100)
    d = discrete.weightProb(["A", "B", "C"])
    result = d([0.2, 0.3, 0.5], trial_responses=["A", "B"])
    correct_result = ('B', collections.OrderedDict([('A', 0.4), ('B', 0.6), ('C', 0)]))
    assert result == correct_result
def test_WP_normal(self):
    np.random.seed(100)
    d = discrete.weightProb(task_responses=[1, 2, 3])
    result = d([0.8, 0.5, 0.7])
    correct_result = (2, collections.OrderedDict([(1, 0.4), (2, 0.25), (3, 0.35)]))
    assert result == correct_result
def test_WP_no_valid(self):
    np.random.seed(100)
    d = discrete.weightProb(task_responses=[1, 2, 3])
    result = d([0.2, 0.3, 0.5], trial_responses=[])
    correct_result = (None, collections.OrderedDict([(1, 0.2), (2, 0.3), (3, 0.5)]))
    assert result == correct_result
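# Illustrative addition, not part of the original suite: given the
# renormalisation over trial_responses exercised by the tests above, a single
# valid response should receive probability 1 and be chosen deterministically,
# so the asserted result does not depend on the seeded random draw.
def test_WP_single_valid(self):
    np.random.seed(100)
    d = discrete.weightProb(task_responses=[1, 2, 3])
    result = d([0.2, 0.3, 0.5], trial_responses=[2])
    correct_result = (2, collections.OrderedDict([(1, 0), (2, 1), (3, 0)]))
    assert result == correct_result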
def __init__(self,
             number_actions=2,
             number_cues=1,
             number_critics=None,
             action_codes=None,
             non_action='None',
             prior=None,
             stimulus_shaper=None,
             stimulus_shaper_name=None,
             stimulus_shaper_properties=None,
             reward_shaper=None,
             reward_shaper_name=None,
             reward_shaper_properties=None,
             decision_function=None,
             decision_function_name=None,
             decision_function_properties=None,
             **kwargs):
    """Set up the model: action and cue bookkeeping, the stimulus and
    reward shapers, the decision function, and the recording lists."""

    self.Name = self.get_name()

    # Keyword arguments matching known parameter patterns are stored as
    # attributes on the instance.
    self.pattern_parameters = self.kwarg_pattern_parameters(kwargs)
    for k, v in self.pattern_parameters.items():
        setattr(self, k, v)

    self.number_actions = number_actions
    self.number_cues = number_cues
    if number_critics is None:
        number_critics = self.number_actions * self.number_cues
    self.number_critics = number_critics

    if action_codes is None:
        action_codes = {k: k for k in range(self.number_actions)}
    self.actionCode = action_codes

    self.defaultNonAction = non_action

    if prior is None:
        prior = np.ones(self.number_actions) / self.number_actions
    self.prior = prior

    self.stimuli = np.ones(self.number_cues)
    self.stimuliFilter = np.ones(self.number_cues)

    self.currAction = None
    self.decision = None
    self.validActions = None
    self.lastObservation = None

    self.probabilities = np.array(self.prior)
    self.decProbabilities = np.array(self.prior)
    self.expectedRewards = np.ones(self.number_actions)
    self.expectedReward = np.array([1])

    # Stimulus shaper: an explicit Stimulus subclass, a class located by
    # name, or the default pass-through Stimulus.
    if stimulus_shaper is not None and issubclass(stimulus_shaper, Stimulus):
        if stimulus_shaper_properties is not None:
            stimulus_shaper_kwargs = {k: v for k, v in kwargs.items() if k in stimulus_shaper_properties}
        else:
            stimulus_shaper_kwargs = kwargs.copy()
        self.stimulus_shaper = stimulus_shaper(**stimulus_shaper_kwargs)
    elif isinstance(stimulus_shaper_name, str):
        stimulus_class = utils.find_class(stimulus_shaper_name,
                                          class_folder='tasks',
                                          inherited_class=Stimulus,
                                          excluded_files=['taskTemplate', '__init__', 'taskGenerator'])
        stimulus_shaper_kwargs = {k: v for k, v in kwargs.items() if k in utils.get_class_args(stimulus_class)}
        self.stimulus_shaper = stimulus_class(**stimulus_shaper_kwargs)
    else:
        self.stimulus_shaper = Stimulus()

    # Reward shaper: resolved in the same order as the stimulus shaper.
    if reward_shaper is not None and issubclass(reward_shaper, Rewards):
        if reward_shaper_properties is not None:
            reward_shaper_kwargs = {k: v for k, v in kwargs.items() if k in reward_shaper_properties}
        else:
            reward_shaper_kwargs = kwargs.copy()
        self.reward_shaper = reward_shaper(**reward_shaper_kwargs)
    elif isinstance(reward_shaper_name, str):
        reward_class = utils.find_class(reward_shaper_name,
                                        class_folder='tasks',
                                        inherited_class=Rewards,
                                        excluded_files=['taskTemplate', '__init__', 'taskGenerator'])
        reward_shaper_kwargs = {k: v for k, v in kwargs.items() if k in utils.get_class_args(reward_class)}
        # Instantiate the class; processFeedback is an instance method,
        # not a constructor.
        self.reward_shaper = reward_class(**reward_shaper_kwargs)
    else:
        self.reward_shaper = Rewards()

    # Decision function: a callable factory, a function located by name,
    # or the default weightProb over all actions.
    if callable(decision_function):
        if decision_function_properties is not None:
            decision_shaper_kwargs = {k: v for k, v in kwargs.items() if k in decision_function_properties}
        else:
            decision_shaper_kwargs = kwargs.copy()
        self.decision_function = decision_function(**decision_shaper_kwargs)
    elif isinstance(decision_function_name, str):
        decision_function = utils.find_function(decision_function_name, 'model/decision')
        decision_function_kwargs = {k: v for k, v in kwargs.items() if k in utils.get_function_args(decision_function)}
        self.decision_function = decision_function(**decision_function_kwargs)
    else:
        self.decision_function = weightProb(list(range(self.number_actions)))

    self.parameters = {"Name": self.Name,
                       "number_actions": self.number_actions,
                       "number_cues": self.number_cues,
                       "number_critics": self.number_critics,
                       "prior": copy.copy(self.prior),
                       "non_action": self.defaultNonAction,
                       "actionCode": copy.copy(self.actionCode),
                       "stimulus_shaper": self.stimulus_shaper.details(),
                       "reward_shaper": self.reward_shaper.details(),
                       "decision_function": utils.callableDetailsString(self.decision_function)}
    self.parameters.update(self.pattern_parameters)

    # Recorded information
    self.recAction = []
    self.recActionSymbol = []
    self.recStimuli = []
    self.recReward = []
    self.recExpectations = []
    self.recExpectedReward = []
    self.recExpectedRewards = []
    self.recValidActions = []
    self.recDecision = []
    self.recProbabilities = []
    self.recActionProbs = []
    self.recActionProb = []
    self.simID = None
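# Usage sketch (illustrative, not from the original file): assuming this
# __init__ belongs to the ``Model`` class of this module, the default path
# resolves to plain ``Stimulus()``/``Rewards()`` shapers and a ``weightProb``
# decision function over all action codes, e.g.::
#
#     model = Model(number_actions=3)
#     model.parameters['decision_function']  # details of the default weightProb
#     model.prior                            # uniform: array([1/3, 1/3, 1/3])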