Example No. 1: get_result
    def get_result(self, problem, pr_number, skillName, use_hints=True):
        # `np` is numpy; `du` is a utility module providing clamp, diceRoll and MAX.
        effects = self.get_prereq_effects(skillName)

        # Invert the problem score: after this, larger values mean a harder problem
        # (lower chance of a correct answer, more time spent).
        problem = 1 - problem

        # Apply prerequisite effects to the student's base parameters.
        knowledge_p = self.knowledge + effects[0]
        speed_p = self.speed + effects[1]
        hint_p = self.hint + effects[2]

        # Sample the quality of this attempt from the student's knowledge.
        answer = du.clamp(np.random.normal(knowledge_p, self.knowledge_std), 0,
                          1)

        # Probability of requesting a hint on this attempt.
        pr_hint = hint_p / pr_number
        hint = int(du.diceRoll(1000) < (pr_hint * 1000))

        if answer > problem:
            # Knowledge beats difficulty: the answer is correct with high probability.
            answer = du.clamp(np.random.normal(0.9, self.knowledge_std), 0, 1)
        else:
            # Otherwise scale the success chance by (0.6 - difficulty) / 0.3,
            # which shrinks linearly as difficulty rises past 0.3.
            answer *= (0.3 - (problem - 0.3)) / 0.3

        # An attempt that used a hint counts as incorrect when hints are enabled.
        cor = int(
            du.diceRoll(1000) < answer * 1000) * (1 - (hint * int(use_hints)))

        # Time spent scales with difficulty, plus an extra offset when a hint is used.
        time = du.clamp(np.random.normal(speed_p, self.speed_std), 0,
                        10000) * problem
        time += du.MAX(
            0,
            np.random.normal(Student.hint_time_offset,
                             Student.hint_time_offset_std)) * hint

        return [cor, time, hint]
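`get_result` returns a `[cor, time, hint]` triple per attempt. The following is a minimal usage sketch, assuming an already-constructed `Student` whose skill prerequisites are configured; the problem value, the skill name "fractions", and the ten-attempt loop are illustrative only, not part of the original code.

    # Hypothetical driver: simulate ten attempts and tally correct answers and hints.
    results = [student.get_result(problem=0.7, pr_number=i + 1,
                                   skillName="fractions", use_hints=True)
               for i in range(10)]
    n_correct = sum(cor for cor, _, _ in results)
    n_hints = sum(hint for _, _, hint in results)
    print(n_correct, n_hints)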
Example No. 2: get_speed_effect
    def get_speed_effect(self):
        # Draw a noisy speed effect; du.MAX floors it at zero.
        return du.MAX(
            np.random.normal(self.speed_effect, self.speed_effect_std), 0)
Example No. 3: get_hint_effect
    def get_hint_effect(self):
        # Draw a noisy hint effect; du.MAX floors it at zero.
        return du.MAX(
            np.random.normal(self.hint_effect, self.hint_effect_std), 0)
Example No. 4: get_knowledge_effect
    def get_knowledge_effect(self):
        # Draw a noisy knowledge effect; du.MAX floors it at zero.
        return du.MAX(
            np.random.normal(self.knowledge_effect, self.knowledge_effect_std),
            0)
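Examples No. 2 through No. 4 each draw one noisy, non-negative effect. How they feed the `effects` list consumed by `get_result` is not shown in these snippets; the sketch below is only an assumption about a possible aggregation, keeping the knowledge/speed/hint ordering that `get_result` reads as `effects[0]`, `effects[1]`, `effects[2]`. It is not the project's actual `get_prereq_effects`.

    # Hedged sketch: `sum_prereq_effects` and `prereqs` are hypothetical names.
    # Assumes each prerequisite object exposes the three getters shown above.
    def sum_prereq_effects(prereqs):
        knowledge = sum(p.get_knowledge_effect() for p in prereqs)
        speed = sum(p.get_speed_effect() for p in prereqs)
        hint = sum(p.get_hint_effect() for p in prereqs)
        return [knowledge, speed, hint]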