Example #1
    def reduce(key, stringified_values):
        """Implements the reduce function.

        This function creates or updates the UserStatsModel instance for the
        given user. It updates the impact score, total plays of all
        explorations, number of ratings across all explorations and average
        rating.

        Args:
            key: str. The unique id of the user.
            stringified_values: list(str). A list of information regarding all
                the explorations that this user contributes to or owns. Each
                entry is a stringified dict having the following keys:
                    exploration_impact_score: float. The impact score of all the
                        explorations contributed to by the user.
                    total_plays_for_owned_exp: int. Total plays of all
                        explorations owned by the user.
                    average_rating_for_owned_exp: float. Average of average
                        ratings of all explorations owned by the user.
                    num_ratings_for_owned_exp: int. Total number of ratings of
                        all explorations owned by the user.
        """
        values = [ast.literal_eval(v) for v in stringified_values]
        exponent = python_utils.divide(2.0, 3)

        # Find the final score and round to a whole number.
        user_impact_score = int(
            python_utils.ROUND(
                sum(value['exploration_impact_score'] for value in values
                    if value.get('exploration_impact_score'))**exponent))

        # Sum up the total plays for all explorations.
        total_plays = sum(value['total_plays_for_owned_exp']
                          for value in values
                          if value.get('total_plays_for_owned_exp'))

        # Sum of ratings across all explorations.
        sum_of_ratings = 0
        # Number of ratings across all explorations.
        num_ratings = 0

        for value in values:
            if value.get('num_ratings_for_owned_exp'):
                num_ratings += value['num_ratings_for_owned_exp']
                sum_of_ratings += (value['average_rating_for_owned_exp'] *
                                   value['num_ratings_for_owned_exp'])

        mr_model = user_models.UserStatsModel.get_or_create(key)
        mr_model.impact_score = user_impact_score
        mr_model.total_plays = total_plays
        mr_model.num_ratings = num_ratings
        if sum_of_ratings != 0:
            average_ratings = python_utils.divide(sum_of_ratings,
                                                  float(num_ratings))
            mr_model.average_ratings = average_ratings
        mr_model.update_timestamps()
        mr_model.put()
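
The reduce step's arithmetic can be sanity-checked in isolation. Below is a minimal standalone sketch of the same aggregation, assuming python_utils.divide is ordinary true division and python_utils.ROUND behaves like the built-in round; the input dicts are hypothetical stand-ins for the parsed mapper output.

# Standalone sketch of the reduce arithmetic. Assumes python_utils.divide
# is true division and python_utils.ROUND is the built-in round(); the
# values below are hypothetical mapper output for two owned explorations.
values = [
    {'exploration_impact_score': 10.0, 'total_plays_for_owned_exp': 30,
     'average_rating_for_owned_exp': 4.0, 'num_ratings_for_owned_exp': 5},
    {'exploration_impact_score': 6.0, 'total_plays_for_owned_exp': 12,
     'average_rating_for_owned_exp': 3.5, 'num_ratings_for_owned_exp': 2},
]
exponent = 2.0 / 3

# (10.0 + 6.0) ** (2/3) ~= 6.35, which rounds to 6.
impact_score = int(round(
    sum(v['exploration_impact_score'] for v in values) ** exponent))

# 30 + 12 = 42.
total_plays = sum(v['total_plays_for_owned_exp'] for v in values)

# Ratings are combined as a weighted average:
# (4.0 * 5 + 3.5 * 2) / 7 = 27.0 / 7 ~= 3.857.
num_ratings = sum(v['num_ratings_for_owned_exp'] for v in values)
sum_of_ratings = sum(
    v['average_rating_for_owned_exp'] * v['num_ratings_for_owned_exp']
    for v in values)
average_ratings = sum_of_ratings / float(num_ratings)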
Example #2
        def _round_average_ratings(rating):
            """Returns the rounded average rating to display on the creator
            dashboard.

            Args:
                rating: float. The rating of the lesson.

            Returns:
                float. The rounded average value of rating.
            """
            return python_utils.ROUND(
                rating, feconf.AVERAGE_RATINGS_DASHBOARD_PRECISION)
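
A quick usage sketch follows. Both the value of the precision constant and ROUND delegating to the built-in round are assumptions made for illustration; neither is confirmed by the snippet itself.

# Hedged usage sketch: the precision value of 2 and the round() semantics
# are assumptions, not taken from the snippet above.
AVERAGE_RATINGS_DASHBOARD_PRECISION = 2

def round_average_ratings(rating):
    """Rounds a rating to the dashboard's display precision."""
    return round(rating, AVERAGE_RATINGS_DASHBOARD_PRECISION)

print(round_average_ratings(3.8571428))  # -> 3.86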
Example #3
    def test_standard_user_stats_calculation_one_exploration(self):
        exploration = self._create_exploration(self.EXP_ID_1, self.user_a_id)
        # Give this exploration an average rating of 4.
        avg_rating = 4
        self._rate_exploration(exploration.id, 5, avg_rating)

        # The expected answer count is the sum of the first hit counts in the
        # statistics defined in the _get_mock_statistics() method above.
        expected_answer_count = 15
        reach = expected_answer_count**self.EXPONENT
        expected_user_impact_score = python_utils.ROUND(
            ((avg_rating - 2) * reach)**self.EXPONENT)

        # Verify that the impact score matches the expected value.
        self._run_computation()
        user_stats_model = user_models.UserStatsModel.get(self.user_a_id)
        self.assertEqual(user_stats_model.impact_score,
                         expected_user_impact_score)
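
For concreteness, the expected value can be worked by hand, assuming self.EXPONENT is the same 2.0 / 3 used by the reduce function in Example #1:

# Worked calculation of the expected impact score, assuming
# EXPONENT = 2.0 / 3 (matching the reduce function in Example #1)
# and that python_utils.ROUND behaves like the built-in round().
EXPONENT = 2.0 / 3
expected_answer_count = 15
reach = expected_answer_count ** EXPONENT        # 15 ** (2/3) ~= 6.082
raw = ((4 - 2) * reach) ** EXPONENT              # 12.164 ** (2/3) ~= 5.29
expected_user_impact_score = round(raw)          # -> 5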
Example #4
    def test_standard_user_stats_calculation_multiple_explorations(self):
        exploration_1 = self._create_exploration(self.EXP_ID_1, self.user_a_id)
        exploration_2 = self._create_exploration(self.EXP_ID_2, self.user_a_id)
        avg_rating = 4
        self._rate_exploration(exploration_1.id, 2, avg_rating)
        self._rate_exploration(exploration_2.id, 2, avg_rating)

        # The expected answer count is the sum of the first hit counts in the
        # statistics defined in the _get_mock_statistics() method above.
        expected_answer_count = 15
        reach = expected_answer_count**self.EXPONENT
        # Each exploration's impact is scaled by a contribution factor of 1,
        # since user_a is the sole contributor.
        impact_per_exp = (avg_rating - 2) * reach
        expected_user_impact_score = python_utils.ROUND(
            (impact_per_exp * 2)**self.EXPONENT)

        # Verify that the impact score matches the expected value.
        self._run_computation()
        user_stats_model = user_models.UserStatsModel.get(self.user_a_id)
        self.assertEqual(user_stats_model.impact_score,
                         expected_user_impact_score)
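
The same hand calculation for two explorations shows why the score does not simply double: the per-exploration impacts are summed before the final 2/3 power is applied. Again, EXPONENT = 2.0 / 3 is an assumption carried over from Example #1.

# Two explorations: impacts are summed before the final exponent, so the
# score grows sublinearly (5 for one exploration vs. 8 for two).
# Assumes EXPONENT = 2.0 / 3, as in the single-exploration case above.
EXPONENT = 2.0 / 3
reach = 15 ** EXPONENT                           # ~= 6.082
impact_per_exp = (4 - 2) * reach                 # ~= 12.164
total = (impact_per_exp * 2) ** EXPONENT         # 24.329 ** (2/3) ~= 8.396
expected_user_impact_score = round(total)        # -> 8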