def test_get_time_in_millisecs_with_complicated_time(self) -> None:
    """Checks that a datetime carrying a microsecond component survives a
    round trip through get_time_in_millisecs and fromtimestamp.
    """
    original_datetime = datetime.datetime(
        2020, 6, 15, 5, 18, 23, microsecond=123456)
    millisecs = utils.get_time_in_millisecs(original_datetime)
    recovered_datetime = datetime.datetime.fromtimestamp(
        python_utils.divide(millisecs, 1000.0))  # type: ignore[no-untyped-call]
    self.assertEqual(original_datetime, recovered_datetime)
def get(self):
    """Handles GET requests.

    For every requested topic, computes the user's mastery of each of its
    subtopics (the average of the user's masteries of the skills in that
    subtopic, skipping skills with no mastery value) and renders the
    result as JSON.
    """
    comma_separated_topic_ids = (
        self.request.get('comma_separated_topic_ids'))
    topic_ids = comma_separated_topic_ids.split(',')
    topics = topic_fetchers.get_topics_by_ids(topic_ids)

    # Validate each topic while gathering the skill ids it references.
    all_skill_ids = []
    for index, topic in enumerate(topics):
        if not topic:
            raise self.InvalidInputException(
                'Invalid topic ID %s' % topic_ids[index])
        all_skill_ids.extend(topic.get_all_skill_ids())

    all_skills_mastery_dict = skill_services.get_multi_user_skill_mastery(
        self.user_id, list(set(all_skill_ids)))

    subtopic_mastery_dict = {}
    for topic in topics:
        topic_masteries = {}
        for subtopic in topic.subtopics:
            relevant_masteries = [
                mastery
                for skill_id, mastery in all_skills_mastery_dict.items()
                if mastery is not None and skill_id in subtopic.skill_ids
            ]
            if relevant_masteries:
                # Subtopic mastery is average of skill masteries.
                topic_masteries[subtopic.id] = python_utils.divide(
                    sum(relevant_masteries), len(relevant_masteries))
        subtopic_mastery_dict[topic.id] = topic_masteries

    self.values.update({'subtopic_mastery_dict': subtopic_mastery_dict})
    self.render_json(self.values)
def test_get_time_in_millisecs(self) -> None:
    """Checks that a whole-second datetime survives a round trip through
    get_time_in_millisecs and fromtimestamp.
    """
    original_datetime = datetime.datetime(2020, 6, 15)
    millisecs = utils.get_time_in_millisecs(original_datetime)
    recovered_datetime = datetime.datetime.fromtimestamp(
        python_utils.divide(millisecs, 1000.0))  # type: ignore[no-untyped-call]
    self.assertEqual(original_datetime, recovered_datetime)
def _refresh_average_ratings_transactional(user_id, new_rating, old_rating):
    """Refreshes the average rating for a user.

    Args:
        user_id: str. The id of the user.
        new_rating: int. The new rating of the exploration.
        old_rating: int|None. The old rating of the exploration before
            refreshing, or None if the exploration hasn't been rated by the
            user yet.
    """
    user_stats_model = user_models.UserStatsModel.get(user_id, strict=False)

    # No stats yet: seed a fresh model with this single rating.
    if user_stats_model is None:
        user_models.UserStatsModel(
            id=user_id, average_ratings=new_rating, num_ratings=1).put()
        return

    rating_count = user_stats_model.num_ratings
    mean_rating = user_stats_model.average_ratings
    if mean_rating is None:
        # The model exists but has never recorded an average before.
        mean_rating = new_rating
        rating_count += 1
    else:
        # Reconstruct the running total, then fold in the new rating.
        rating_total = (mean_rating * rating_count) + new_rating
        if old_rating is None:
            rating_count += 1
        else:
            # A re-rating replaces the old value rather than adding one.
            rating_total -= old_rating
        mean_rating = python_utils.divide(
            rating_total, float(rating_count))

    user_stats_model.average_ratings = mean_rating
    user_stats_model.num_ratings = rating_count
    user_stats_model.update_timestamps()
    user_stats_model.put()
def test_swap_to_always_raise_with_error(self):
    """Checks that swap_to_always_raise substitutes the supplied error for
    the function's own exception.
    """
    mock_object = mock.Mock()
    mock_object.func = lambda: python_utils.divide(1, 0)

    # Sanity check: without the swap, the division error propagates.
    with self.assertRaisesRegex(
        ZeroDivisionError, 'integer division or modulo by zero'
    ):
        mock_object.func()

    replacement_error = ValueError('abc')
    with self.swap_to_always_raise(
        mock_object, 'func', error=replacement_error
    ):
        with self.assertRaisesRegex(ValueError, 'abc'):
            mock_object.func()
def get_time_in_millisecs(datetime_obj: datetime.datetime) -> float:
    """Returns time in milliseconds since the Epoch.

    Args:
        datetime_obj: datetime. An object of type datetime.datetime.

    Returns:
        float. The time in milliseconds since the Epoch.
    """
    # time.mktime only has second resolution, so the microsecond component
    # is added back separately. Plain '/' replaces the Python 2 compat
    # shim python_utils.divide; with a float divisor both perform true
    # division, so the result is unchanged — and with the untyped call
    # gone, the 'type: ignore' comments are no longer needed.
    msecs = time.mktime(datetime_obj.timetuple()) * 1000.0
    return msecs + datetime_obj.microsecond / 1000.0
def get_human_readable_time_string(time_msec: float) -> str:
    """Given a time in milliseconds since the epoch, get a human-readable
    time string for the admin dashboard.

    Args:
        time_msec: float. Time in milliseconds since the Epoch.

    Returns:
        str. A string representing the time, in UTC (time.gmtime).
    """
    # Plain '/' replaces the Python 2 compat shim python_utils.divide;
    # with a float divisor both perform true division. With the untyped
    # call removed, the previous 'type: ignore' comments are dropped.
    return time.strftime(
        '%B %d %H:%M:%S', time.gmtime(time_msec / 1000.0))
def test_get_with_valid_topic_ids(self): topic_id_1 = topic_fetchers.get_new_topic_id() topic_id_2 = topic_fetchers.get_new_topic_id() self.save_new_topic(topic_id_1, self.user_id, name='Name 1', abbreviated_name='topic-one', url_fragment='topic-one', description='Description 1', canonical_story_ids=[], additional_story_ids=[], uncategorized_skill_ids=[ self.skill_id_1, self.skill_id_2, self.skill_id_3 ], subtopics=[], next_subtopic_id=1) self.save_new_topic(topic_id_2, self.user_id, name='Name 2', abbreviated_name='topic-two', url_fragment='topic-two', description='Description 2', canonical_story_ids=[], additional_story_ids=[], uncategorized_skill_ids=[ self.skill_id_3, self.skill_id_4, self.skill_id_5 ], subtopics=[], next_subtopic_id=1) # Update Topic 1. changelist = [ topic_domain.TopicChange({ 'cmd': topic_domain.CMD_ADD_SUBTOPIC, 'title': 'Title 1', 'subtopic_id': 1 }), topic_domain.TopicChange({ 'cmd': topic_domain.CMD_UPDATE_SUBTOPIC_PROPERTY, 'property_name': (topic_domain.SUBTOPIC_PROPERTY_URL_FRAGMENT), 'old_value': '', 'new_value': 'subtopic-one-one', 'subtopic_id': 1 }), topic_domain.TopicChange({ 'cmd': topic_domain.CMD_MOVE_SKILL_ID_TO_SUBTOPIC, 'old_subtopic_id': None, 'new_subtopic_id': 1, 'skill_id': self.skill_id_1 }), topic_domain.TopicChange({ 'cmd': topic_domain.CMD_MOVE_SKILL_ID_TO_SUBTOPIC, 'old_subtopic_id': None, 'new_subtopic_id': 1, 'skill_id': self.skill_id_2 }), topic_domain.TopicChange({ 'cmd': topic_domain.CMD_ADD_SUBTOPIC, 'title': 'Title 2', 'subtopic_id': 2 }), topic_domain.TopicChange({ 'cmd': topic_domain.CMD_UPDATE_SUBTOPIC_PROPERTY, 'property_name': (topic_domain.SUBTOPIC_PROPERTY_URL_FRAGMENT), 'old_value': '', 'new_value': 'subtopic-one-two', 'subtopic_id': 2 }), topic_domain.TopicChange({ 'cmd': topic_domain.CMD_MOVE_SKILL_ID_TO_SUBTOPIC, 'old_subtopic_id': None, 'new_subtopic_id': 2, 'skill_id': self.skill_id_3 }) ] topic_services.update_topic_and_subtopic_pages(self.user_id, topic_id_1, changelist, 'Added subtopics.') # 
Update Topic 2. changelist = [ topic_domain.TopicChange({ 'cmd': topic_domain.CMD_ADD_SUBTOPIC, 'title': 'Title 1', 'subtopic_id': 1 }), topic_domain.TopicChange({ 'cmd': topic_domain.CMD_UPDATE_SUBTOPIC_PROPERTY, 'property_name': (topic_domain.SUBTOPIC_PROPERTY_URL_FRAGMENT), 'old_value': '', 'new_value': 'subtopic-two-one', 'subtopic_id': 1 }), topic_domain.TopicChange({ 'cmd': topic_domain.CMD_MOVE_SKILL_ID_TO_SUBTOPIC, 'old_subtopic_id': None, 'new_subtopic_id': 1, 'skill_id': self.skill_id_3 }), topic_domain.TopicChange({ 'cmd': topic_domain.CMD_MOVE_SKILL_ID_TO_SUBTOPIC, 'old_subtopic_id': None, 'new_subtopic_id': 1, 'skill_id': self.skill_id_4 }), topic_domain.TopicChange({ 'cmd': topic_domain.CMD_ADD_SUBTOPIC, 'title': 'Title 2', 'subtopic_id': 2 }), topic_domain.TopicChange({ 'cmd': topic_domain.CMD_UPDATE_SUBTOPIC_PROPERTY, 'property_name': (topic_domain.SUBTOPIC_PROPERTY_URL_FRAGMENT), 'old_value': '', 'new_value': 'subtopic-two-two', 'subtopic_id': 2 }), topic_domain.TopicChange({ 'cmd': topic_domain.CMD_MOVE_SKILL_ID_TO_SUBTOPIC, 'old_subtopic_id': None, 'new_subtopic_id': 2, 'skill_id': self.skill_id_5 }) ] topic_services.update_topic_and_subtopic_pages(self.user_id, topic_id_2, changelist, 'Added subtopics.') skill_services.create_user_skill_mastery(self.user_id, self.skill_id_1, self.degree_of_mastery_1) skill_services.create_user_skill_mastery(self.user_id, self.skill_id_2, self.degree_of_mastery_2) skill_services.create_user_skill_mastery(self.user_id, self.skill_id_5, self.degree_of_mastery_5) skill_services.create_user_skill_mastery(self.user_id, self.skill_id_6, self.degree_of_mastery_6) self.login(self.NEW_USER_EMAIL) # First case: One subtopic mastery doesn't exist. 
response_json = self.get_json('%s' % feconf.SUBTOPIC_MASTERY_DATA_URL, params={ 'comma_separated_topic_ids': ','.join([topic_id_1, topic_id_2]) }) degrees_of_mastery_1 = { u'1': python_utils.divide( self.degree_of_mastery_1 + self.degree_of_mastery_2, 2) } degrees_of_mastery_2 = {u'2': self.degree_of_mastery_5} self.assertEqual(response_json['subtopic_mastery_dict'], { topic_id_1: degrees_of_mastery_1, topic_id_2: degrees_of_mastery_2 }) # Second case: One skill mastery doesn't exist. skill_services.create_user_skill_mastery(self.user_id, self.skill_id_3, self.degree_of_mastery_3) response_json = self.get_json('%s' % feconf.SUBTOPIC_MASTERY_DATA_URL, params={ 'comma_separated_topic_ids': ','.join([topic_id_1, topic_id_2]) }) degrees_of_mastery_1 = { u'1': python_utils.divide( self.degree_of_mastery_1 + self.degree_of_mastery_2, 2), u'2': self.degree_of_mastery_3 } degrees_of_mastery_2 = { u'1': self.degree_of_mastery_3, u'2': self.degree_of_mastery_5 } self.assertEqual(response_json['subtopic_mastery_dict'], { topic_id_1: degrees_of_mastery_1, topic_id_2: degrees_of_mastery_2 }) # Third case: All masteries exist. skill_services.create_user_skill_mastery(self.user_id, self.skill_id_4, self.degree_of_mastery_4) response_json = self.get_json('%s' % feconf.SUBTOPIC_MASTERY_DATA_URL, params={ 'comma_separated_topic_ids': ','.join([topic_id_1, topic_id_2]) }) degrees_of_mastery_1 = { u'1': python_utils.divide( self.degree_of_mastery_1 + self.degree_of_mastery_2, 2), u'2': self.degree_of_mastery_3 } degrees_of_mastery_2 = { u'1': python_utils.divide( self.degree_of_mastery_3 + self.degree_of_mastery_4, 2), u'2': self.degree_of_mastery_5 } self.assertEqual(response_json['subtopic_mastery_dict'], { topic_id_1: degrees_of_mastery_1, topic_id_2: degrees_of_mastery_2 }) self.logout()
def test_divide(self):
    """Checks divide on ints: exact quotients are returned as-is and any
    remainder is discarded (5 / 2 yields 2).
    """
    exact_quotient = python_utils.divide(4, 2)
    truncated_quotient = python_utils.divide(5, 2)
    self.assertEqual(exact_quotient, 2)
    self.assertEqual(truncated_quotient, 2)
def get_question_skill_links_equidistributed_by_skill(
    cls, total_question_count: int, skill_ids: List[str]
) -> List[QuestionSkillLinkModel]:
    """Fetches the list of constant number of random
    QuestionSkillLinkModels linked to the skills.

    Args:
        total_question_count: int. The number of questions expected.
        skill_ids: list(str). The ids of skills for which the linked
            question ids are to be retrieved.

    Returns:
        list(QuestionSkillLinkModel). A list of random
        QuestionSkillLinkModels corresponding to given skill_ids, with
        total_question_count/len(skill_ids) number of questions for
        each skill. If not evenly divisible, it will be rounded up.
        If not enough questions for a skill, just return all questions
        it links to.

    Raises:
        Exception. The number of skill ids exceeds
            feconf.MAX_NUMBER_OF_SKILL_IDS.
    """
    if len(skill_ids) > feconf.MAX_NUMBER_OF_SKILL_IDS:
        raise Exception('Please keep the number of skill IDs below 20.')

    if not skill_ids:
        return []

    # Each skill contributes an equal share, rounded up.
    question_count_per_skill = int(
        math.ceil(
            python_utils.divide(  # type: ignore[no-untyped-call]
                float(total_question_count), float(len(skill_ids)))))

    question_skill_link_models = []
    # Set (rather than list) so the dedup membership test below is O(1).
    existing_question_ids = set()

    def get_offset(query: datastore_services.Query) -> int:
        """Helper function to get the offset."""
        question_count = query.count()
        if question_count > 2 * question_count_per_skill:
            return utils.get_random_int(
                question_count - (question_count_per_skill * 2))
        return 0

    for skill_id in skill_ids:
        query = cls.query(cls.skill_id == skill_id)
        # We fetch more questions here in order to try and ensure that
        # the eventual number of returned questions is sufficient to
        # meet the number requested, even after deduplication.
        fetched_models: List[QuestionSkillLinkModel] = list(
            query.fetch(
                limit=question_count_per_skill * 2,
                offset=get_offset(query)
            )
        )
        # Deduplicate if the same question is linked to multiple skills.
        # Bug fix: the previous code called list.remove() while
        # iterating over the same list, which skips the element after
        # each removal and could let duplicate questions through. A
        # filtered list avoids mutating during iteration.
        new_question_skill_link_models = [
            model for model in fetched_models
            if model.question_id not in existing_question_ids
        ]

        if len(new_question_skill_link_models) > question_count_per_skill:
            sampled_question_skill_link_models = random.sample(
                new_question_skill_link_models,
                question_count_per_skill
            )
        else:
            sampled_question_skill_link_models = (
                new_question_skill_link_models)

        question_skill_link_models.extend(
            sampled_question_skill_link_models)
        existing_question_ids.update(
            model.question_id for model in (
                sampled_question_skill_link_models)
        )

    return question_skill_link_models
def get_question_skill_links_based_on_difficulty_equidistributed_by_skill(
    cls, total_question_count: int, skill_ids: List[str],
    difficulty_requested: float
) -> List[QuestionSkillLinkModel]:
    """Fetches the list of constant number of random
    QuestionSkillLinkModels linked to the skills, sorted by the absolute
    value of the difference between skill difficulty and the requested
    difficulty.

    Args:
        total_question_count: int. The number of questions expected.
        skill_ids: list(str). The ids of skills for which the linked
            question ids are to be retrieved.
        difficulty_requested: float. The skill difficulty of the
            questions requested to be fetched.

    Returns:
        list(QuestionSkillLinkModel). A list of random
        QuestionSkillLinkModels corresponding to given skill_ids, with
        total_question_count/len(skill_ids) number of questions for
        each skill. If not evenly divisible, it will be rounded up.
        If not enough questions for a skill, just return all questions
        it links to.

    Raises:
        Exception. The number of skill ids exceeds
            feconf.MAX_NUMBER_OF_SKILL_IDS.
    """
    if len(skill_ids) > feconf.MAX_NUMBER_OF_SKILL_IDS:
        raise Exception('Please keep the number of skill IDs below 20.')

    if (not skill_ids) or (total_question_count == 0):
        return []

    # Each skill contributes an equal share, rounded up.
    question_count_per_skill = int(
        math.ceil(python_utils.divide(  # type: ignore[no-untyped-call]
            float(total_question_count), float(len(skill_ids)))))

    question_skill_link_mapping = {}

    # For fetching the questions randomly we have used a random offset.
    # But this is a temporary solution since this method scales linearly.
    # Other alternative methods were:
    # 1) Using a random id in question id filter
    # 2) Adding an additional column that can be filtered upon.
    # But these methods are not viable because google datastore limits
    # each query to have at most one inequality filter. So we can't filter
    # on both question_id and difficulty. Please see
    # https://github.com/oppia/oppia/pull/9061#issuecomment-629765809
    # for more details.
    def get_offset(query: datastore_services.Query) -> int:
        """Helper function to get the offset."""
        question_count = query.count()
        if question_count > 2 * question_count_per_skill:
            return utils.get_random_int(
                question_count - (question_count_per_skill * 2))
        return 0

    def _without_known_questions(models):
        """Returns the models whose questions are not already picked.

        Bug fix: the previous code called list.remove() while iterating
        over the same list (in three places), which skips the element
        after each removal and could let duplicate questions through.
        Building a filtered list avoids mutating during iteration.
        """
        return [
            model for model in models
            if model.question_id not in question_skill_link_mapping
        ]

    for skill_id in skill_ids:
        query = cls.query(cls.skill_id == skill_id)

        equal_questions_query = query.filter(
            cls.skill_difficulty == difficulty_requested)

        # We fetch more questions here in order to try and ensure that
        # the eventual number of returned questions is sufficient to
        # meet the number requested, even after deduplication.
        new_question_skill_link_models: List[QuestionSkillLinkModel] = (
            _without_known_questions(list(
                equal_questions_query.fetch(
                    limit=question_count_per_skill * 2,
                    offset=get_offset(equal_questions_query)))))

        if len(new_question_skill_link_models) >= question_count_per_skill:
            new_question_skill_link_models = random.sample(
                new_question_skill_link_models, question_count_per_skill)
        else:
            # Not enough questions at the requested difficulty: fetch
            # QuestionSkillLinkModels with difficulty smaller than
            # requested difficulty.
            easier_questions_query = query.filter(
                cls.skill_difficulty < difficulty_requested)
            easier_question_skill_link_models: List[
                QuestionSkillLinkModel
            ] = _without_known_questions(list(
                easier_questions_query.fetch(
                    limit=question_count_per_skill * 2,
                    offset=get_offset(easier_questions_query))))

            question_extra_count = (
                len(new_question_skill_link_models) +
                len(easier_question_skill_link_models) -
                question_count_per_skill)
            if question_extra_count >= 0:
                # More easier questions than needed: sample just enough
                # to fill the per-skill quota.
                easier_question_skill_link_models = random.sample(
                    easier_question_skill_link_models,
                    question_count_per_skill -
                    len(new_question_skill_link_models)
                )
                new_question_skill_link_models.extend(
                    easier_question_skill_link_models)
            else:
                # Still short: take all easier questions and fetch
                # QuestionSkillLinkModels with difficulty larger than
                # requested difficulty.
                new_question_skill_link_models.extend(
                    easier_question_skill_link_models)

                harder_questions_query = query.filter(
                    cls.skill_difficulty > difficulty_requested)
                harder_question_skill_link_models: List[
                    QuestionSkillLinkModel
                ] = _without_known_questions(list(
                    harder_questions_query.fetch(
                        limit=question_count_per_skill * 2,
                        offset=get_offset(harder_questions_query))))

                question_extra_count = (
                    len(new_question_skill_link_models) +
                    len(harder_question_skill_link_models) -
                    question_count_per_skill)
                if question_extra_count >= 0:
                    harder_question_skill_link_models = (
                        random.sample(
                            harder_question_skill_link_models,
                            question_count_per_skill -
                            len(new_question_skill_link_models)
                        ))
                new_question_skill_link_models.extend(
                    harder_question_skill_link_models)

        # Trim any overshoot so each skill contributes at most its quota.
        new_question_skill_link_models = (
            new_question_skill_link_models[:question_count_per_skill])

        for model in new_question_skill_link_models:
            if model.question_id not in question_skill_link_mapping:
                question_skill_link_mapping[model.question_id] = model

    return list(question_skill_link_mapping.values())