Example #1
0
    def test_update_widget_handlers(self):
        """Test updating of widget_handlers."""

        # Create a second state so that a rule destination can point to it.
        exploration = exp_services.get_exploration_by_id(self.EXP_ID)
        exploration.add_states(['State 2'])
        exp_services._save_exploration(self.OWNER_ID, exploration, '', [])

        exploration = exp_services.get_exploration_by_id(self.EXP_ID)
        self.widget_handlers['submit'][1]['dest'] = 'State 2'
        change_list = (
            _get_change_list(
                self.init_state_name, 'widget_id', 'MultipleChoiceInput') +
            _get_change_list(
                self.init_state_name, 'widget_handlers',
                self.widget_handlers))
        exp_services.update_exploration(
            self.OWNER_ID, self.EXP_ID, change_list, '')

        exploration = exp_services.get_exploration_by_id(self.EXP_ID)
        rule_specs = exploration.init_state.widget.handlers[0].rule_specs
        expected_definition = {
            'rule_type': 'atomic',
            'name': 'Equals',
            'inputs': {'x': 0},
            'subject': 'answer'
        }
        self.assertEqual(rule_specs[0].definition, expected_definition)
        self.assertEqual(rule_specs[0].feedback, ['Try again'])
        self.assertEqual(rule_specs[0].dest, self.init_state_name)
        self.assertEqual(rule_specs[1].dest, 'State 2')
Example #2
0
    def test_migration_job_skips_deleted_explorations(self):
        """Tests that the exploration migration job skips deleted explorations
        and does not attempt to migrate.
        """
        self.save_new_exp_with_states_schema_v0(
            self.NEW_EXP_ID, self.albert_id, self.EXP_TITLE)

        # A summary is required before an exploration can be deleted; the
        # summary being based on the upgraded model is fine for this test.
        exp_services.create_exploration_summary(self.NEW_EXP_ID, None)

        # Delete the exploration before the migration job runs.
        exp_services.delete_exploration(self.albert_id, self.NEW_EXP_ID)

        # Sanity-check that the exploration really is gone.
        with self.assertRaisesRegexp(Exception, 'Entity .* not found'):
            exp_services.get_exploration_by_id(self.NEW_EXP_ID)

        # Kick off the migration job on the (now deleted) exploration.
        migration_job_id = (
            exp_jobs_one_off.ExplorationMigrationJobManager.create_new())
        exp_jobs_one_off.ExplorationMigrationJobManager.enqueue(
            migration_job_id)

        # If the job did not skip the deleted exploration, the
        # exp_services.get_exploration_by_id call inside it would raise and
        # the following line would fail.
        self.process_and_flush_pending_tasks()

        # The exploration must still be deleted afterwards.
        with self.assertRaisesRegexp(Exception, 'Entity .* not found'):
            exp_services.get_exploration_by_id(self.NEW_EXP_ID)
Example #3
0
    def get(self, exploration_id, exploration_version):
        """Handles GET requests: serves exploration statistics as JSON.

        Raises:
            PageNotFoundException: The exploration with the given id does
                not exist (or cannot be fetched).
        """
        try:
            exp_services.get_exploration_by_id(exploration_id)
        except Exception:
            # A bare 'except:' would also swallow SystemExit and
            # KeyboardInterrupt; only convert genuine lookup failures
            # into a 404.
            raise self.PageNotFoundException

        self.render_json(stats_services.get_exploration_stats(
            exploration_id, exploration_version))
    def test_two_state_default_hit(self):
        """Default-rule hit counts determine the improvable-state ranking."""
        exp = exp_services.get_exploration_by_id(exp_services.create_new(
            '*****@*****.**', 'exploration', 'category', 'eid'))
        SECOND_STATE = 'State 2'
        exp_services.add_state('*****@*****.**', exp.id, SECOND_STATE)
        exp = exp_services.get_exploration_by_id('eid')
        state1_id = exp.init_state_id
        second_state = exp_services.get_state_by_name('eid', SECOND_STATE)
        state2_id = second_state.id

        # Hit the default rule of state 1 once, and the default rule of state 2
        # twice.
        stats_services.EventHandler.record_state_hit('eid', state1_id, True)
        stats_services.EventHandler.record_answer_submitted(
            'eid', state1_id, self.SUBMIT_HANDLER,
            self.DEFAULT_RULESPEC_STR, '1')

        for i in range(2):
            stats_services.EventHandler.record_state_hit(
                'eid', state2_id, True)
            stats_services.EventHandler.record_answer_submitted(
                'eid', state2_id, self.SUBMIT_HANDLER,
                self.DEFAULT_RULESPEC_STR, '1')

        # State 2 (rank 2) should now be listed before state 1 (rank 1).
        states = stats_services.get_top_improvable_states(['eid'], 5)
        self.assertEquals(len(states), 2)
        self.assertEquals(states[0]['rank'], 2)
        self.assertEquals(states[0]['type'], 'default')
        self.assertEquals(states[0]['state_id'], state2_id)
        self.assertEquals(states[1]['rank'], 1)
        self.assertEquals(states[1]['type'], 'default')
        self.assertEquals(states[1]['state_id'], state1_id)

        # Hit the default rule of state 1 two more times.

        for i in range(2):
            stats_services.EventHandler.record_state_hit(
                'eid', state1_id, True)
            stats_services.EventHandler.record_answer_submitted(
                'eid', state1_id, self.SUBMIT_HANDLER,
                self.DEFAULT_RULESPEC_STR, '1')

        # State 1 now has 3 hits and overtakes state 2 (still 2 hits).
        states = stats_services.get_top_improvable_states(['eid'], 5)
        self.assertEquals(len(states), 2)
        self.assertEquals(states[0]['rank'], 3)
        self.assertEquals(states[0]['type'], 'default')
        self.assertEquals(states[0]['state_id'], state1_id)
        self.assertEquals(states[1]['rank'], 2)
        self.assertEquals(states[1]['type'], 'default')
        self.assertEquals(states[1]['state_id'], state2_id)

        # Try getting just the top improvable state.
        states = stats_services.get_top_improvable_states(['eid'], 1)
        self.assertEquals(len(states), 1)
        self.assertEquals(states[0]['rank'], 3)
        self.assertEquals(states[0]['type'], 'default')
        self.assertEquals(states[0]['state_id'], state1_id)
Example #5
0
def assign_rating_to_exploration(user_id, exploration_id, new_rating):
    """Records the rating awarded by the user to the exploration in both the
    user-specific data and exploration summary.

    This function validates the exploration id but not the user id.

    Args:
        user_id: str. The id of the user assigning the rating.
        exploration_id: str. The id of the exploration that is
            assigned a rating.
        new_rating: int. Value of assigned rating, should be between
            1 and 5 inclusive.

    Raises:
        ValueError: The rating is not an integer, or is not an allowed
            rating value (1-5).
        Exception: No exploration with the given id exists.
    """

    if not isinstance(new_rating, int):
        raise ValueError(
            'Expected the rating to be an integer, received %s' % new_rating)

    if new_rating not in ALLOWED_RATINGS:
        raise ValueError('Expected a rating 1-5, received %s.' % new_rating)

    try:
        exp_services.get_exploration_by_id(exploration_id)
    except Exception:
        # A bare 'except:' here would also have swallowed SystemExit and
        # KeyboardInterrupt; only lookup failures should be converted into
        # this error.
        raise Exception('Invalid exploration id %s' % exploration_id)

    def _update_user_rating():
        """Creates or updates the user's rating record transactionally.

        Returns the user's previous rating, or None if the user had not
        rated this exploration before.
        """
        exp_user_data_model = user_models.ExplorationUserDataModel.get(
            user_id, exploration_id)
        if exp_user_data_model:
            old_rating = exp_user_data_model.rating
        else:
            old_rating = None
            exp_user_data_model = user_models.ExplorationUserDataModel.create(
                user_id, exploration_id)
        exp_user_data_model.rating = new_rating
        exp_user_data_model.rated_on = datetime.datetime.utcnow()
        exp_user_data_model.put()
        return old_rating
    old_rating = transaction_services.run_in_transaction(_update_user_rating)

    exploration_summary = exp_services.get_exploration_summary_by_id(
        exploration_id)
    if not exploration_summary.ratings:
        exploration_summary.ratings = feconf.get_empty_ratings()
    exploration_summary.ratings[str(new_rating)] += 1
    # On a re-rating, retract the user's previous rating from the aggregate
    # so that each user contributes at most one rating.
    if old_rating:
        exploration_summary.ratings[str(old_rating)] -= 1

    event_services.RateExplorationEventHandler.record(
        exploration_id, user_id, new_rating, old_rating)

    exploration_summary.scaled_average_rating = (
        exp_services.get_scaled_average_rating(
            exploration_summary.ratings))

    exp_services.save_exploration_summary(exploration_summary)
    def test_version_number_changes_only_after_exp_publication(self):
        """Versions/snapshots are only created once the exploration is public.
        """
        USER_ID = 'user_id'
        EXP_ID = 'exp_id'

        def _get_snapshot_model(version):
            """Fetches the snapshot model for a version (None if absent)."""
            snapshot_id = self.get_snapshot_id(EXP_ID, version)
            return exp_models.ExplorationSnapshotModel.get(
                snapshot_id, strict=False)

        exp_services.create_new(USER_ID, 'A title', 'A category', EXP_ID)

        exploration_model = exp_models.ExplorationModel.get(EXP_ID)
        self.assertEqual(exploration_model.version, 0)
        self.assertEqual(exploration_model.title, 'A title')

        self.assertIsNone(_get_snapshot_model(0))

        # The exploration is not public, so new versions are not created.
        exploration = exp_services.get_exploration_by_id(EXP_ID)
        exploration.title = 'New title'
        exp_services.save_exploration(USER_ID, exploration)

        exploration_model = exp_models.ExplorationModel.get(EXP_ID)
        self.assertEqual(exploration_model.version, 0)
        self.assertEqual(exploration_model.title, 'New title')

        self.assertIsNone(_get_snapshot_model(0))
        self.assertIsNone(_get_snapshot_model(1))

        # The exploration is made public, so a new version is created.
        exploration = exp_services.get_exploration_by_id(EXP_ID)
        exploration.title = 'Newer title'
        exploration.is_public = True
        exp_services.save_exploration(USER_ID, exploration)

        exploration_model = exp_models.ExplorationModel.get(EXP_ID)
        self.assertEqual(exploration_model.version, 1)
        self.assertEqual(exploration_model.title, 'Newer title')

        self.assertIsNone(_get_snapshot_model(0))

        snapshot_id = self.get_snapshot_id(EXP_ID, 1)
        snapshot_content_model = exp_models.ExplorationSnapshotContentModel.get(
            snapshot_id, strict=False)
        self.assertIsNotNone(snapshot_content_model)
        self.assertIsNotNone(snapshot_content_model.content)
        self.assertEqual(snapshot_content_model.format, 'full')
Example #7
0
    def test_migration_job_skips_deleted_explorations(self):
        """Tests that the exploration migration job skips deleted explorations
        and does not attempt to migrate.
        """
        # Save new default exploration with a default version 0 states
        # dictionary. The model is constructed directly (bypassing
        # exp_services) so that it is stored with the old v0 states schema.
        exp_model = exp_models.ExplorationModel(
            id=self.NEW_EXP_ID,
            category='category',
            title='title',
            objective='',
            language_code='en',
            tags=[],
            blurb='',
            author_notes='',
            default_skin='conversation_v1',
            skin_customizations={'panels_contents': {}},
            states_schema_version=0,
            init_state_name=feconf.DEFAULT_INIT_STATE_NAME,
            states=self.VERSION_0_STATES_DICT,
            param_specs={},
            param_changes=[]
        )
        # Rights must exist before the model can be committed.
        rights_manager.create_new_exploration_rights(
            self.NEW_EXP_ID, self.ALBERT_ID)
        exp_model.commit(self.ALBERT_ID, 'old commit', [{
            'cmd': 'create_new',
            'title': 'title',
            'category': 'category',
        }])

        # Note: This creates a summary based on the upgraded model (which is
        # fine). A summary is needed to delete the exploration.
        exp_services.create_exploration_summary(self.NEW_EXP_ID)

        # Delete the exploration before migration occurs.
        exp_services.delete_exploration(self.ALBERT_ID, self.NEW_EXP_ID)

        # Ensure the exploration is deleted.
        with self.assertRaisesRegexp(Exception, 'Entity .* not found'):
            exp_services.get_exploration_by_id(self.NEW_EXP_ID)

        # Start migration job on sample exploration.
        job_id = exp_jobs.ExplorationMigrationJobManager.create_new()
        exp_jobs.ExplorationMigrationJobManager.enqueue(job_id)

        # This running without errors indicates the deleted exploration is being
        # ignored, since otherwise exp_services.get_exploration_by_id (used
        # within the job) will raise an error.
        self.process_and_flush_pending_tasks()

        # Ensure the exploration is still deleted.
        with self.assertRaisesRegexp(Exception, 'Entity .* not found'):
            exp_services.get_exploration_by_id(self.NEW_EXP_ID)
Example #8
0
    def test_retrieval_of_explorations(self):
        """Test the get_exploration_by_id() method."""
        # Fetching a nonexistent exploration should raise.
        with self.assertRaisesRegexp(Exception, 'Entity .* not found'):
            exp_services.get_exploration_by_id('fake_eid')

        exploration = self.save_new_default_exploration(self.EXP_ID)
        retrieved_exploration = exp_services.get_exploration_by_id(self.EXP_ID)
        self.assertEqual(exploration.id, retrieved_exploration.id)
        self.assertEqual(exploration.title, retrieved_exploration.title)

        # A different bogus id should also fail to resolve.
        with self.assertRaises(Exception):
            exp_services.get_exploration_by_id('fake_exploration')
Example #9
0
    def test_update_state_name(self):
        """Test updating of state name."""
        exploration = exp_services.get_exploration_by_id(self.EXP_ID)
        rename_cmd = {
            'cmd': 'rename_state',
            'old_state_name': '(untitled state)',
            'new_state_name': 'new name',
        }
        exp_services.update_exploration(
            self.OWNER_ID, self.EXP_ID, [rename_cmd], 'Change state name')

        # The state should now only be reachable under its new name.
        exploration = exp_services.get_exploration_by_id(self.EXP_ID)
        self.assertIn('new name', exploration.states)
        self.assertNotIn('(untitled state)', exploration.states)
Example #10
0
    def put(self, exploration_id):
        """Updates properties of the given exploration.

        Payload keys handled: 'version' (required; optimistic concurrency
        check), 'is_public', 'category', 'title', 'image_id', 'editors',
        'param_specs', 'param_changes'.

        Raises:
            Exception: The payload version does not match the stored
                exploration version (stale client).
            UnauthorizedUserException: A non-owner attempts to rewrite the
                editor list.
        """

        exploration = exp_services.get_exploration_by_id(exploration_id)
        version = self.payload['version']
        if version != exploration.version:
            raise Exception(
                'Trying to update version %s of exploration from version %s, '
                'which is too old. Please reload the page and try again.'
                % (exploration.version, version))

        is_public = self.payload.get('is_public')
        category = self.payload.get('category')
        title = self.payload.get('title')
        image_id = self.payload.get('image_id')
        editors = self.payload.get('editors')
        param_specs = self.payload.get('param_specs')
        param_changes = self.payload.get('param_changes')

        # Note: publishing is one-way here; this handler offers no way to
        # make the exploration private again.
        if is_public:
            exploration.is_public = True
        if category:
            exploration.category = category
        if title:
            exploration.title = title
        # 'image_id' is checked for presence (not truthiness) so the client
        # can explicitly clear the image by sending the string 'null'.
        if 'image_id' in self.payload:
            exploration.image_id = None if image_id == 'null' else image_id
        if editors:
            # Only the first listed editor (the owner) may rewrite the
            # editor list.
            if (exploration.editor_ids and
                    self.user_id == exploration.editor_ids[0]):
                exploration.editor_ids = []
                for email in editors:
                    exploration.add_editor(email)
            else:
                raise self.UnauthorizedUserException(
                    'Only the exploration owner can add new collaborators.')
        if param_specs is not None:
            exploration.param_specs = {
                ps_name: param_domain.ParamSpec.from_dict(ps_val)
                for (ps_name, ps_val) in param_specs.iteritems()
            }
        if param_changes is not None:
            exploration.param_changes = [
                param_domain.ParamChange.from_dict(param_change)
                for param_change in param_changes
            ]

        exp_services.save_exploration(self.user_id, exploration)

        # Re-fetch so the response reflects the newly saved version number.
        exploration = exp_services.get_exploration_by_id(exploration_id)
        self.render_json({
            'version': exploration.version
        })
Example #11
0
    def test_update_state_name_with_unicode(self):
        """Test updating of state name to one that uses unicode characters."""
        unicode_name = u'¡Hola! αβγ'
        exploration = exp_services.get_exploration_by_id(self.EXP_ID)
        rename_cmd = {
            'cmd': 'rename_state',
            'old_state_name': '(untitled state)',
            'new_state_name': unicode_name,
        }
        exp_services.update_exploration(
            self.OWNER_ID, self.EXP_ID, [rename_cmd], 'Change state name')

        # The state should now only be reachable under its unicode name.
        exploration = exp_services.get_exploration_by_id(self.EXP_ID)
        self.assertIn(unicode_name, exploration.states)
        self.assertNotIn('(untitled state)', exploration.states)
Example #12
0
    def test_versioning_with_reverting(self):
        """Reverting produces a new version that restores the old content."""
        exploration = self.save_new_valid_exploration(
            self.EXP_ID, self.OWNER_ID)

        # In version 1, the title was 'A title'.
        # In version 2, the title becomes 'V2 title'.
        exploration.title = 'V2 title'
        exp_services._save_exploration(
            self.OWNER_ID, exploration, 'Changed title.', [])

        # In version 3, a new state is added.
        exploration = exp_services.get_exploration_by_id(self.EXP_ID)
        exploration.add_states(['New state'])
        exp_services._save_exploration(
            'committer_id_v3', exploration, 'Added new state', [])

        # It is not possible to revert from anything other than the most
        # current version.
        with self.assertRaisesRegexp(Exception, 'too old'):
            exp_services.revert_exploration(
                'committer_id_v4', self.EXP_ID, 2, 1)

        # Version 4 is a reversion to version 1.
        exp_services.revert_exploration('committer_id_v4', self.EXP_ID, 3, 1)
        exploration = exp_services.get_exploration_by_id(self.EXP_ID)
        self.assertEqual(exploration.title, 'A title')
        self.assertEqual(len(exploration.states), 1)
        self.assertEqual(exploration.version, 4)

        # Snapshots are returned newest-first; check the two most recent
        # commits (the reversion and the state addition).
        snapshots_metadata = exp_services.get_exploration_snapshots_metadata(
            self.EXP_ID, 5)

        commit_dict_4 = {
            'committer_id': 'committer_id_v4',
            'commit_message': 'Reverted exploration to version 1',
            'version_number': 4,
        }
        commit_dict_3 = {
            'committer_id': 'committer_id_v3',
            'commit_message': 'Added new state',
            'version_number': 3,
        }
        self.assertEqual(len(snapshots_metadata), 4)
        self.assertDictContainsSubset(
            commit_dict_4, snapshots_metadata[0])
        self.assertDictContainsSubset(commit_dict_3, snapshots_metadata[1])
        self.assertGreaterEqual(
            snapshots_metadata[0]['created_on'],
            snapshots_metadata[1]['created_on'])
Example #13
0
    def test_update_widget_sticky(self):
        """Test updating of widget_sticky."""
        # Toggle the flag to False and then back to True, checking after
        # each update that the stored value matches.
        for sticky_value in (False, True):
            exp_services.update_exploration(
                self.OWNER_ID, self.EXP_ID, _get_change_list(
                    self.init_state_name, 'widget_sticky', sticky_value), '')

            exploration = exp_services.get_exploration_by_id(self.EXP_ID)
            self.assertEqual(
                exploration.init_state.widget.sticky, sticky_value)
Example #14
0
    def get(self, exploration_id):
        """Gets the data for the exploration overview page.

        Renders a JSON payload combining the exploration's editable content
        (states, params, metadata), recent snapshot history, and aggregate
        statistics for the statistics page.
        """
        exploration = exp_services.get_exploration_by_id(exploration_id)

        # Verbose dict for each state, keyed by state id.
        state_list = {}
        for state_id in exploration.state_ids:
            state_list[state_id] = exp_services.export_state_to_verbose_dict(
                exploration_id, state_id)

        self.values.update({
            'exploration_id': exploration_id,
            'init_state_id': exploration.init_state_id,
            'is_public': exploration.is_public,
            'image_id': exploration.image_id,
            'category': exploration.category,
            'title': exploration.title,
            'editors': exploration.editor_ids,
            'states': state_list,
            'param_changes': exploration.param_change_dicts,
            'param_specs': exploration.param_specs_dict,
            'version': exploration.version,
            # Add information about the most recent versions.
            'snapshots': exp_services.get_exploration_snapshots_metadata(
                exploration_id, DEFAULT_NUM_SNAPSHOTS),
            # Add information for the exploration statistics page.
            'num_visits': stats_services.get_exploration_visit_count(
                exploration_id),
            'num_completions': stats_services.get_exploration_completed_count(
                exploration_id),
            'state_stats': stats_services.get_state_stats_for_exploration(
                exploration_id),
            'imp': stats_services.get_top_improvable_states(
                [exploration_id], 10),
        })
        self.render_json(self.values)
Example #15
0
def get_state_improvements(exploration_id, exploration_version):
    """Returns a list of dicts, each representing a suggestion for improvement
    to a particular state.

    Args:
        exploration_id: str. The id of the exploration to inspect.
        exploration_version: the exploration version whose aggregated
            statistics should be consulted.

    Returns:
        A list of dicts with keys 'rank' (the count backing the suggestion),
        'state_name' and 'type' (an IMPROVE_TYPE_* constant), sorted in
        decreasing order of rank.
    """
    ranked_states = []

    exploration = exp_services.get_exploration_by_id(exploration_id)
    state_names = exploration.states.keys()

    # Default-rule answer logs for every state, in the same order as
    # state_names (indexed by position below).
    default_rule_answer_logs = stats_domain.StateRuleAnswerLog.get_multi(
        exploration_id, [{
            'state_name': state_name,
            'rule_str': exp_domain.DEFAULT_RULESPEC_STR
        } for state_name in state_names])

    statistics = stats_jobs.StatisticsAggregator.get_statistics(
        exploration_id, exploration_version)
    state_hit_counts = statistics['state_hit_counts']

    for ind, state_name in enumerate(state_names):
        total_entry_count = 0
        no_answer_submitted_count = 0
        if state_name in state_hit_counts:
            total_entry_count = (
                state_hit_counts[state_name]['total_entry_count'])
            no_answer_submitted_count = state_hit_counts[state_name].get(
                'no_answer_count', 0)

        # States that were never entered cannot be ranked.
        if total_entry_count == 0:
            continue

        # A state is flagged when more than 20% of its entries exhibit the
        # condition tested below.
        threshold = 0.2 * total_entry_count
        default_rule_answer_log = default_rule_answer_logs[ind]
        default_count = default_rule_answer_log.total_answer_count

        eligible_flags = []
        state = exploration.states[state_name]
        # Flag states that frequently trigger the default outcome and route
        # the learner back to the same state (i.e. the learner gets stuck).
        if (default_count > threshold and
                state.interaction.default_outcome is not None and
                state.interaction.default_outcome.dest == state_name):
            eligible_flags.append({
                'rank': default_count,
                'improve_type': IMPROVE_TYPE_DEFAULT})
        # Flag states that learners frequently leave without answering.
        if no_answer_submitted_count > threshold:
            eligible_flags.append({
                'rank': no_answer_submitted_count,
                'improve_type': IMPROVE_TYPE_INCOMPLETE})

        if eligible_flags:
            # Only the highest-ranked flag is reported for each state.
            eligible_flags = sorted(
                eligible_flags, key=lambda flag: flag['rank'], reverse=True)
            ranked_states.append({
                'rank': eligible_flags[0]['rank'],
                'state_name': state_name,
                'type': eligible_flags[0]['improve_type'],
            })

    return sorted(
        [state for state in ranked_states if state['rank'] != 0],
        key=lambda x: -x['rank'])
Example #16
0
    def post(self, exploration_id):
        """Handles POST requests.

        Checks whether a draft change list can still be applied to the
        current exploration version; if so, responds with a summary of the
        changes and any validation warnings, otherwise flags the draft
        version as invalid.
        """
        change_list = self.payload.get('change_list')
        version = self.payload.get('version')
        current_exploration = exp_services.get_exploration_by_id(
            exploration_id)

        if version != current_exploration.version:
            # TODO(sll): Improve the handling of merge conflicts.
            self.render_json({
                'is_version_of_draft_valid': False
            })
        else:
            # Strip client-side bookkeeping keys ('$$hashKey' is added by
            # AngularJS) that may have leaked into the change list.
            utils.recursively_remove_key(change_list, '$$hashKey')

            summary = exp_services.get_summary_of_change_list(
                current_exploration, change_list)
            updated_exploration = exp_services.apply_change_list(
                exploration_id, change_list)
            warning_message = ''
            try:
                updated_exploration.validate(strict=True)
            except utils.ValidationError as e:
                # Validation problems are reported back to the client as a
                # warning rather than treated as errors, since the draft has
                # not been committed.
                warning_message = unicode(e)

            self.render_json({
                'summary': summary,
                'warning_message': warning_message
            })
Example #17
0
    def post(self, exploration_id):
        """Handles POST requests: logs a submitted answer for statistics.

        The answer is normalized by the interaction it was submitted to and
        recorded via the answer-submission event handler; no state
        transition is computed here and the response body is empty.
        """
        old_state_name = self.payload.get('old_state_name')
        # The reader's answer.
        answer = self.payload.get('answer')
        # Parameters associated with the learner.
        old_params = self.payload.get('params', {})
        old_params['answer'] = answer
        # The version of the exploration.
        version = self.payload.get('version')
        rule_spec_string = self.payload.get('rule_spec_string')

        exploration = exp_services.get_exploration_by_id(
            exploration_id, version=version)
        # NOTE(review): exp_param_specs is assigned but never used below.
        exp_param_specs = exploration.param_specs
        old_interaction = exploration.states[old_state_name].interaction

        old_interaction_instance = (
            interaction_registry.Registry.get_interaction_by_id(
                old_interaction.id))
        # Convert the raw client answer into the interaction's canonical
        # form before logging it.
        normalized_answer = old_interaction_instance.normalize_answer(answer)
        # TODO(sll): Should this also depend on `params`?
        event_services.AnswerSubmissionEventHandler.record(
            exploration_id, version, old_state_name, rule_spec_string,
            old_interaction_instance.get_stats_log_html(
                old_interaction.customization_args, normalized_answer))
        self.render_json({})
    def test_resolving_answers(self):
        """Resolved answers should disappear from the default-rule log."""
        exp = exp_services.get_exploration_by_id(exp_services.create_new(
            'user_id', 'exploration', 'category', 'eid'))
        state_id = exp.init_state_id

        def _get_default_rule_answer_log():
            """Fetches the default-rule answer log for the initial state."""
            return stats_domain.StateRuleAnswerLog.get(
                'eid', state_id, self.SUBMIT_HANDLER,
                self.DEFAULT_RULESPEC_STR)

        def _submit_answer(answer):
            """Records a single answer submission against the default rule."""
            stats_services.EventHandler.record_answer_submitted(
                'eid', state_id, self.SUBMIT_HANDLER,
                self.DEFAULT_RULESPEC_STR, answer)

        self.assertEquals(_get_default_rule_answer_log().answers, {})

        _submit_answer('answer1')
        _submit_answer('answer1')
        _submit_answer('answer2')

        answer_log = _get_default_rule_answer_log()
        self.assertEquals(answer_log.answers, {'answer1': 2, 'answer2': 1})
        self.assertEquals(answer_log.total_answer_count, 3)

        stats_services.EventHandler.resolve_answers_for_default_rule(
            'eid', state_id, self.SUBMIT_HANDLER, ['answer1'])

        answer_log = _get_default_rule_answer_log()
        self.assertEquals(answer_log.answers, {'answer2': 1})
        self.assertEquals(answer_log.total_answer_count, 1)
Example #19
0
    def get(self, exploration_id):
        """Handles GET requests.

        Serves a downloadable copy of the exploration, either as a zip file
        or as YAML rendered in JSON, depending on the 'output_format' query
        parameter.

        Raises:
            PageNotFoundException: The exploration does not exist, or the
                user is not allowed to view it.
            InvalidInputException: The requested output format is not
                recognized.
        """
        try:
            exploration = exp_services.get_exploration_by_id(exploration_id)
        except Exception:
            # A bare 'except:' would also swallow SystemExit and
            # KeyboardInterrupt; only convert lookup failures into a 404.
            raise self.PageNotFoundException

        if not rights_manager.Actor(self.user_id).can_view(
                rights_manager.ACTIVITY_TYPE_EXPLORATION, exploration_id):
            raise self.PageNotFoundException

        version = self.request.get('v', default_value=exploration.version)
        output_format = self.request.get('output_format', default_value='zip')
        width = int(self.request.get('width', default_value=80))

        # If the title of the exploration has changed, we use the new title.
        filename = 'oppia-%s-v%s' % (
            utils.to_ascii(exploration.title.replace(' ', '')), version)

        if output_format == feconf.OUTPUT_FORMAT_ZIP:
            self.response.headers['Content-Type'] = 'text/plain'
            self.response.headers['Content-Disposition'] = (
                'attachment; filename=%s.zip' % str(filename))
            self.response.write(
                exp_services.export_to_zip_file(exploration_id, version))
        elif output_format == feconf.OUTPUT_FORMAT_JSON:
            self.render_json(exp_services.export_states_to_yaml(
                exploration_id, version=version, width=width))
        else:
            raise self.InvalidInputException(
                'Unrecognized output format %s' % output_format)
Example #20
0
def get_state_rules_stats(exploration_id, state_name):
    """Gets statistics for the handlers and rules of this state.

    Returns:
        A dict, keyed by the string '{HANDLER_NAME}.{RULE_STR}', whose
        values are the corresponding stats_domain.StateRuleAnswerLog
        instances.
    """
    exploration = exp_services.get_exploration_by_id(exploration_id)
    state = exploration.states[state_name]

    # One (handler_name, rule_str) pair per rule, across all handlers.
    rule_keys = [
        (handler.name, str(rule))
        for handler in state.widget.handlers
        for rule in handler.rule_specs]

    answer_logs = stats_domain.StateRuleAnswerLog.get_multi(
        exploration_id, [{
            'state_name': state_name,
            'handler_name': handler_name,
            'rule_str': rule_str
        } for (handler_name, rule_str) in rule_keys])

    # get_multi() preserves order, so the logs line up with rule_keys.
    results = {}
    for rule_key, answer_log in zip(rule_keys, answer_logs):
        results['.'.join(rule_key)] = {
            'answers': answer_log.get_top_answers(5),
            'rule_hits': answer_log.total_answer_count
        }

    return results
Example #21
0
    def get(self, exploration_id):
        """Populates the data on the individual exploration page."""
        version_str = self.request.get('v')
        version = int(version_str) if version_str else None

        try:
            exploration = exp_services.get_exploration_by_id(
                exploration_id, version=version)
        except Exception as e:
            raise self.PageNotFoundException(e)

        # Fall back to the default color for categories without an explicit
        # color mapping.
        info_card_color = feconf.CATEGORIES_TO_COLORS.get(
            exploration.category, feconf.DEFAULT_COLOR)

        self.values.update({
            'can_edit': (
                self.user_id and
                rights_manager.Actor(self.user_id).can_edit(
                    rights_manager.ACTIVITY_TYPE_EXPLORATION, exploration_id)),
            'exploration': exploration.to_player_dict(),
            'info_card_image_url': (
                '/images/gallery/exploration_background_%s_large.png' %
                info_card_color),
            'is_logged_in': bool(self.user_id),
            'session_id': utils.generate_random_string(24),
            'version': exploration.version,
        })
        self.render_json(self.values)
Example #22
0
    def test_update_param_changes(self):
        """Test updating of param_changes."""
        # The parameter must be declared in param_specs before a change
        # referring to it can be applied.
        exploration = exp_services.get_exploration_by_id(self.EXP_ID)
        exploration.param_specs = {'myParam': param_domain.ParamSpec('Int')}
        exp_services._save_exploration(self.OWNER_ID, exploration, '', [])

        exp_services.update_exploration(
            self.OWNER_ID, self.EXP_ID, _get_change_list(
                self.init_state_name, 'param_changes', self.param_changes), '')

        exploration = exp_services.get_exploration_by_id(self.EXP_ID)
        first_param_change = exploration.init_state.param_changes[0]
        self.assertEqual(first_param_change._name, 'myParam')
        self.assertEqual(first_param_change._generator_id, 'RandomSelector')
        self.assertEqual(
            first_param_change._customization_args,
            {'list_of_values': ['1', '2'], 'parse_with_jinja': False})
Example #23
0
def get_exploration_stats(exploration_id, exploration_version):
    """Returns a dict with state statistics for the given exploration id.

    Note that exploration_version should be a string.
    """
    exploration = exp_services.get_exploration_by_id(exploration_id)
    exp_stats = stats_jobs_continuous.StatisticsAggregator.get_statistics(
        exploration_id, exploration_version)

    state_hit_counts = exp_stats['state_hit_counts']

    # Build per-state entry counts; states with no recorded hits get 0.
    state_stats = {}
    for state_name in exploration.states:
        if state_name in state_hit_counts:
            hits = state_hit_counts[state_name]
            first_entry_count = hits['first_entry_count']
            total_entry_count = hits['total_entry_count']
        else:
            first_entry_count = 0
            total_entry_count = 0
        state_stats[state_name] = {
            'name': state_name,
            'firstEntryCount': first_entry_count,
            'totalEntryCount': total_entry_count,
        }

    return {
        'improvements': get_state_improvements(
            exploration_id, exploration_version),
        'last_updated': exp_stats['last_updated'],
        'num_completions': exp_stats['complete_exploration_count'],
        'num_starts': exp_stats['start_exploration_count'],
        'state_stats': state_stats,
    }
Example #24
0
    def test_basic_computation(self):
        """Check that a created exploration yields one recent-updates entry."""
        with self.swap(
                jobs_registry, 'ALL_CONTINUOUS_COMPUTATION_MANAGERS',
                self.ALL_CONTINUOUS_COMPUTATION_MANAGERS_FOR_TESTS):
            exp_id = 'eid'
            exp_title = 'Title'
            user_id = 'user_id'

            self.save_new_valid_exploration(
                exp_id, user_id, title=exp_title, category='Category')
            last_updated_ms = utils.get_time_in_millisecs(
                exp_services.get_exploration_by_id(exp_id).last_updated)

            # Starting the computation enqueues exactly one task.
            ModifiedRecentUpdatesAggregator.start_computation()
            self.assertEqual(
                self.count_jobs_in_taskqueue(
                    queue_name=taskqueue_services.QUEUE_NAME_DEFAULT),
                1)
            self.process_and_flush_pending_tasks()

            # The user's notifications should contain the creation event.
            self.assertEqual(
                ModifiedRecentUpdatesAggregator.get_recent_notifications(
                    user_id)[1],
                [self._get_expected_exploration_created_dict(
                    user_id, exp_id, exp_title, last_updated_ms)])
Example #25
0
    def map(item):
        """Migrate one exploration model to the latest states schema.

        Deleted models and models that fail non-strict validation are
        skipped; up-to-date models are left untouched.
        """
        if item.deleted:
            return

        # Do not upgrade explorations that fail non-strict validation.
        old_exploration = exp_services.get_exploration_by_id(item.id)
        try:
            old_exploration.validate()
        except Exception as e:
            logging.error(
                'Exploration %s failed non-strict validation: %s' %
                (item.id, e))
            return

        # Nothing to do if the stored model already uses the latest
        # states schema version.
        if (item.states_schema_version ==
                feconf.CURRENT_EXPLORATION_STATES_SCHEMA_VERSION):
            return

        # Note: update_exploration does not need to apply a change list in
        # order to perform a migration. See the related comment in
        # exp_services.apply_change_list for more information.
        commit_cmds = [{
            'cmd': exp_domain.CMD_MIGRATE_STATES_SCHEMA_TO_LATEST_VERSION,
            'from_version': str(item.states_schema_version),
            'to_version': str(
                feconf.CURRENT_EXPLORATION_STATES_SCHEMA_VERSION)
        }]
        exp_services.update_exploration(
            feconf.MIGRATION_BOT_USERNAME, item.id, commit_cmds,
            'Update exploration states from schema version %d to %d.' % (
                item.states_schema_version,
                feconf.CURRENT_EXPLORATION_STATES_SCHEMA_VERSION))
Example #26
0
    def test_state_download_handler_for_default_exploration(self):
        """Download a single state of an exploration as a YAML string."""
        self.login(self.EDITOR_EMAIL)
        self.OWNER_ID = self.get_user_id_from_email(self.EDITOR_EMAIL)

        # Create a simple exploration
        exp_id = 'eid'
        self.save_new_valid_exploration(
            exp_id, self.OWNER_ID,
            title='The title for states download handler test!',
            category='This is just a test category')

        # Mutate the exploration: add states, rename one, delete another.
        exploration = exp_services.get_exploration_by_id(exp_id)
        exploration.add_states(['State A', 'State 2', 'State 3'])
        for name in ('State A', 'State 2', 'State 3'):
            exploration.states[name].update_interaction_id('TextInput')
        exploration.rename_state('State 2', 'State B')
        exploration.delete_state('State 3')
        exp_services._save_exploration(self.OWNER_ID, exploration, '', [])
        response = self.testapp.get('/create/%s' % exp_id)

        # Check download state as YAML string
        self.maxDiff = None
        state_name = 'State%20A'
        download_url = (
            '/createhandler/download_state/%s?state=%s&width=50' %
            (exp_id, state_name))
        response = self.testapp.get(download_url)
        self.assertEqual(self.SAMPLE_STATE_STRING, response.body)

        self.logout()
Example #27
0
def get_state_rules_stats(exploration_id, state_name):
    """Gets statistics for the answer groups and rules of this state.

    Returns:
        A dict, keyed by the string '{HANDLER_NAME}.{RULE_STR}', whose
        values are the corresponding stats_domain.StateRuleAnswerLog
        instances.
    """
    exploration = exp_services.get_exploration_by_id(exploration_id)
    state = exploration.states[state_name]

    # Collect a (handler name, rule string) pair for every explicit rule.
    rule_keys = [
        (_OLD_SUBMIT_HANDLER_NAME, rule.stringify_classified_rule())
        for group in state.interaction.answer_groups
        for rule in group.rule_specs
    ]

    # The default outcome is tracked under the default rule-spec string.
    if state.interaction.default_outcome:
        rule_keys.append(
            (_OLD_SUBMIT_HANDLER_NAME, exp_domain.DEFAULT_RULESPEC_STR))

    answer_logs = stats_domain.StateRuleAnswerLog.get_multi(
        exploration_id, [{
            'state_name': state_name,
            'rule_str': key[1]
        } for key in rule_keys])

    # Pair each answer log back up with its rule key, positionally.
    results = {}
    for idx, answer_log in enumerate(answer_logs):
        results['.'.join(rule_keys[idx])] = {
            'answers': answer_log.get_top_answers(5),
            'rule_hits': answer_log.total_answer_count
        }

    return results
Example #28
0
    def test_loading_and_validation_and_deletion_of_demo_explorations(self):
        """Test loading, validation and deletion of the demo explorations."""
        # Sanity check: the datastore starts with no explorations.
        self.assertEqual(exp_services.count_explorations(), 0)

        self.assertGreaterEqual(
            len(feconf.DEMO_EXPLORATIONS), 1,
            msg='There must be at least one demo exploration.')

        # Load each demo exploration (ids are stringified indices) and
        # validate it strictly; any validation warnings fail the test.
        for ind in range(len(feconf.DEMO_EXPLORATIONS)):
            start_time = datetime.datetime.utcnow()

            exp_id = str(ind)
            exp_services.load_demo(exp_id)
            exploration = exp_services.get_exploration_by_id(exp_id)
            warnings = exploration.validate(strict=True)
            if warnings:
                raise Exception(warnings)

            # Report per-exploration load+validate timing in seconds.
            duration = datetime.datetime.utcnow() - start_time
            processing_time = duration.seconds + duration.microseconds / 1E6
            print 'Loaded and validated exploration %s (%.2f seconds)' % (
                exploration.title.encode('utf-8'), processing_time)

        self.assertEqual(
            exp_services.count_explorations(), len(feconf.DEMO_EXPLORATIONS))

        # Deleting every demo should leave the datastore empty again.
        for ind in range(len(feconf.DEMO_EXPLORATIONS)):
            exp_services.delete_demo(str(ind))
        self.assertEqual(exp_services.count_explorations(), 0)
Example #29
0
    def test_state_download_handler_for_default_exploration(self):
        """Download a single state of an exploration as a YAML string."""
        self.login(self.EDITOR_EMAIL)
        owner_id = self.get_user_id_from_email(self.EDITOR_EMAIL)

        # Create a simple exploration
        exp_id = 'eid'
        self.save_new_valid_exploration(
            exp_id, owner_id,
            title='The title for states download handler test!',
            category='This is just a test category')

        # Mutate the exploration: add states, rename one, delete another.
        exploration = exp_services.get_exploration_by_id(exp_id)
        exploration.add_states(['State A', 'State 2', 'State 3'])
        for state_key in ('State A', 'State 2', 'State 3'):
            exploration.states[state_key].update_interaction_id('TextInput')
        exploration.rename_state('State 2', 'State B')
        exploration.delete_state('State 3')
        exp_services._save_exploration(  # pylint: disable=protected-access
            owner_id, exploration, '', [])
        response = self.testapp.get('/create/%s' % exp_id)

        # Check download state as YAML string
        self.maxDiff = None
        encoded_state_name = 'State%20A'
        download_url = (
            '/createhandler/download_state/%s?state=%s&width=50' %
            (exp_id, encoded_state_name))
        response = self.testapp.get(download_url)
        self.assertEqual(self.SAMPLE_STATE_STRING, response.body)

        self.logout()
Example #30
0
    def test_migration_job_does_not_convert_up_to_date_exp(self):
        """Tests that the exploration migration job does not convert an
        exploration that is already the latest states schema version.
        """
        # Create a new, default exploration that should not be affected by the
        # job.
        exploration = exp_domain.Exploration.create_default_exploration(
            self.VALID_EXP_ID, 'title', 'category')
        init_state = exploration.states[exploration.init_state_name]
        init_state.update_interaction_id('EndExploration')
        init_state.interaction.default_outcome = None
        exp_services.save_new_exploration(self.albert_id, exploration)
        self.assertEqual(
            exploration.states_schema_version,
            feconf.CURRENT_EXPLORATION_STATES_SCHEMA_VERSION)
        original_yaml = exploration.to_yaml()

        # Start migration job on sample exploration.
        migration_job_id = (
            exp_jobs_one_off.ExplorationMigrationJobManager.create_new())
        exp_jobs_one_off.ExplorationMigrationJobManager.enqueue(
            migration_job_id)
        self.process_and_flush_pending_tasks()

        # Verify the exploration is exactly the same after migration.
        migrated_exp = exp_services.get_exploration_by_id(self.VALID_EXP_ID)
        self.assertEqual(
            migrated_exp.states_schema_version,
            feconf.CURRENT_EXPLORATION_STATES_SCHEMA_VERSION)
        self.assertEqual(migrated_exp.to_yaml(), original_yaml)
Example #31
0
    def test_creation_of_jobs_and_mappings(self):
        """Test the handle_trainable_states method and
        handle_non_retrainable_states method by triggering
        update_exploration() method.

        The test asserts exact counts of ClassifierTrainingJobModel and
        TrainingJobExplorationMappingModel entities after each update, so
        the update steps below must run in this order.
        """
        exploration = exp_services.get_exploration_by_id(self.exp_id)
        state = exploration.states['Home']

        # There is one job and one mapping in the data store now as a result of
        # creating the exploration.
        all_jobs = classifier_models.ClassifierTrainingJobModel.get_all()
        self.assertEqual(all_jobs.count(), 1)
        all_mappings = (
            classifier_models.TrainingJobExplorationMappingModel.get_all())
        self.assertEqual(all_mappings.count(), 1)

        # Modify such that job creation is triggered.
        # Duplicating an answer group (with a fresh feedback content id and
        # voiceover entry) changes the trainable data of the state.
        new_answer_group = copy.deepcopy(state.interaction.answer_groups[1])
        new_answer_group.outcome.feedback.content_id = 'new_feedback'
        state.recorded_voiceovers.voiceovers_mapping['new_feedback'] = {}
        state.interaction.answer_groups.insert(3, new_answer_group)
        answer_groups = []
        for answer_group in state.interaction.answer_groups:
            answer_groups.append(answer_group.to_dict())
        change_list = [
            exp_domain.ExplorationChange({
                'cmd': 'edit_state_property',
                'state_name': 'Home',
                'property_name': 'answer_groups',
                'new_value': answer_groups
            }),
            exp_domain.ExplorationChange({
                'cmd':
                'edit_state_property',
                'state_name':
                'Home',
                'property_name':
                'recorded_voiceovers',
                'new_value':
                state.recorded_voiceovers.to_dict()
            })
        ]
        # Classifier jobs are only created when ML classifiers are enabled.
        with self.swap(feconf, 'ENABLE_ML_CLASSIFIERS', True):
            exp_services.update_exploration(feconf.SYSTEM_COMMITTER_ID,
                                            self.exp_id, change_list, '')

        # There should be two jobs and two mappings in the data store now.
        all_jobs = classifier_models.ClassifierTrainingJobModel.get_all()
        self.assertEqual(all_jobs.count(), 2)
        all_mappings = (
            classifier_models.TrainingJobExplorationMappingModel.get_all())
        self.assertEqual(all_mappings.count(), 2)

        # Make a change to the exploration without changing the answer groups
        # to trigger mapping update.
        change_list = [
            exp_domain.ExplorationChange({
                'cmd': 'edit_exploration_property',
                'property_name': 'title',
                'new_value': 'New title'
            })
        ]
        with self.swap(feconf, 'ENABLE_ML_CLASSIFIERS', True):
            exp_services.update_exploration(feconf.SYSTEM_COMMITTER_ID,
                                            self.exp_id, change_list, '')

        # There should be two jobs and three mappings in the data store now.
        # (A new mapping is added for the new exploration version, but no new
        # training job is needed.)
        all_jobs = classifier_models.ClassifierTrainingJobModel.get_all()
        self.assertEqual(all_jobs.count(), 2)
        all_mappings = (
            classifier_models.TrainingJobExplorationMappingModel.get_all())
        self.assertEqual(all_mappings.count(), 3)

        # Check that renaming a state does not create an extra job.
        change_list = [
            exp_domain.ExplorationChange({
                'cmd': 'rename_state',
                'old_state_name': 'Home',
                'new_state_name': 'Home2'
            }),
            exp_domain.ExplorationChange({
                'cmd': 'rename_state',
                'old_state_name': 'Home2',
                'new_state_name': 'Home3'
            })
        ]
        with self.swap(feconf, 'ENABLE_ML_CLASSIFIERS', True):
            exp_services.update_exploration(feconf.SYSTEM_COMMITTER_ID,
                                            self.exp_id, change_list, '')

        # There should still be only two jobs and four mappings in the data
        # store now.
        all_jobs = classifier_models.ClassifierTrainingJobModel.get_all()
        self.assertEqual(all_jobs.count(), 2)
        all_mappings = (
            classifier_models.TrainingJobExplorationMappingModel.get_all())
        self.assertEqual(all_mappings.count(), 4)
Example #32
0
    def test_exploration_rights_handler(self):
        """Test exploration rights handler.

        Walks through the rights workflow: the owner grants viewer and
        editor roles, then each grantee's editing and rights-management
        abilities are checked (editors may edit but may not grant roles).
        """

        # Create several users
        self.signup(
            self.COLLABORATOR_EMAIL, username=self.COLLABORATOR_USERNAME)
        self.signup(
            self.COLLABORATOR2_EMAIL, username=self.COLLABORATOR2_USERNAME)
        self.signup(
            self.COLLABORATOR3_EMAIL, username=self.COLLABORATOR3_USERNAME)

        # Owner creates exploration
        self.login(self.OWNER_EMAIL)
        exp_id = 'eid'
        self.save_new_valid_exploration(
            exp_id, self.owner_id, title='Title for rights handler test!',
            category='My category')

        exploration = exp_services.get_exploration_by_id(exp_id)
        exploration.add_states(['State A', 'State 2', 'State 3'])
        exploration.states['State A'].update_interaction_id('TextInput')
        exploration.states['State 2'].update_interaction_id('TextInput')
        exploration.states['State 3'].update_interaction_id('TextInput')

        response = self.testapp.get(
            '%s/%s' % (feconf.EDITOR_URL_PREFIX, exp_id))
        csrf_token = self.get_csrf_token_from_response(response)

        # Owner adds rights for other users
        rights_url = '%s/%s' % (feconf.EXPLORATION_RIGHTS_PREFIX, exp_id)
        self.put_json(
            rights_url, {
                'version': exploration.version,
                'new_member_username': self.VIEWER_USERNAME,
                'new_member_role': rights_manager.ROLE_VIEWER
            }, csrf_token)
        self.put_json(
            rights_url, {
                'version': exploration.version,
                'new_member_username': self.COLLABORATOR_USERNAME,
                'new_member_role': rights_manager.ROLE_EDITOR
            }, csrf_token)
        self.put_json(
            rights_url, {
                'version': exploration.version,
                'new_member_username': self.COLLABORATOR2_USERNAME,
                'new_member_role': rights_manager.ROLE_EDITOR
            }, csrf_token)

        self.logout()

        # Check that viewer can access editor page but cannot edit.
        self.login(self.VIEWER_EMAIL)
        response = self.testapp.get('/create/%s' % exp_id, expect_errors=True)
        self.assertEqual(response.status_int, 200)
        self.assert_cannot_edit(response.body)
        self.logout()

        # Check that collaborator can access editor page and can edit.
        self.login(self.COLLABORATOR_EMAIL)
        response = self.testapp.get('/create/%s' % exp_id)
        self.assertEqual(response.status_int, 200)
        self.assert_can_edit(response.body)
        csrf_token = self.get_csrf_token_from_response(response)

        # Check that collaborator can add a new state called 'State 4'
        add_url = '%s/%s' % (feconf.EXPLORATION_DATA_PREFIX, exp_id)
        response_dict = self.put_json(
            add_url,
            {
                'version': exploration.version,
                'commit_message': 'Added State 4',
                'change_list': [{
                    'cmd': 'add_state',
                    'state_name': 'State 4'
                }, {
                    'cmd': 'edit_state_property',
                    'state_name': 'State 4',
                    'property_name': 'widget_id',
                    'new_value': 'TextInput',
                }]
            },
            csrf_token=csrf_token,
            expected_status_int=200
        )
        self.assertIn('State 4', response_dict['states'])

        # Check that collaborator cannot add new members
        # (the rights endpoint should reject the request with a 401).
        exploration = exp_services.get_exploration_by_id(exp_id)
        rights_url = '%s/%s' % (feconf.EXPLORATION_RIGHTS_PREFIX, exp_id)
        response_dict = self.put_json(
            rights_url, {
                'version': exploration.version,
                'new_member_username': self.COLLABORATOR3_USERNAME,
                'new_member_role': rights_manager.ROLE_EDITOR,
            }, csrf_token, expect_errors=True, expected_status_int=401)
        self.assertEqual(response_dict['code'], 401)

        self.logout()

        # Check that collaborator2 can access editor page and can edit.
        self.login(self.COLLABORATOR2_EMAIL)
        response = self.testapp.get('/create/%s' % exp_id)
        self.assertEqual(response.status_int, 200)
        self.assert_can_edit(response.body)
        csrf_token = self.get_csrf_token_from_response(response)

        # Check that collaborator2 can add a new state called 'State 5'
        add_url = '%s/%s' % (feconf.EXPLORATION_DATA_PREFIX, exp_id)
        response_dict = self.put_json(
            add_url,
            {
                'version': exploration.version,
                'commit_message': 'Added State 5',
                'change_list': [{
                    'cmd': 'add_state',
                    'state_name': 'State 5'
                }, {
                    'cmd': 'edit_state_property',
                    'state_name': 'State 5',
                    'property_name': 'widget_id',
                    'new_value': 'TextInput',
                }]
            },
            csrf_token=csrf_token,
            expected_status_int=200
        )
        self.assertIn('State 5', response_dict['states'])

        # Check that collaborator2 cannot add new members.
        exploration = exp_services.get_exploration_by_id(exp_id)
        rights_url = '%s/%s' % (feconf.EXPLORATION_RIGHTS_PREFIX, exp_id)
        response_dict = self.put_json(
            rights_url, {
                'version': exploration.version,
                'new_member_username': self.COLLABORATOR3_USERNAME,
                'new_member_role': rights_manager.ROLE_EDITOR,
                }, csrf_token, expect_errors=True, expected_status_int=401)
        self.assertEqual(response_dict['code'], 401)

        self.logout()
Example #33
0
    def test_answers_across_multiple_exploration_versions(self):
        """Check AnswerFrequencies aggregation across exploration versions.

        Submits one answer against version 2 and another against version 3,
        then verifies that per-version calculation outputs stay separate
        while the all-versions output combines both answers.
        """
        with self.swap(jobs_registry, 'ALL_CONTINUOUS_COMPUTATION_MANAGERS',
                       self.ALL_CC_MANAGERS_FOR_TESTS):

            # Setup example exploration.
            exp_id = 'eid'
            exp = self.save_new_valid_exploration(exp_id, '*****@*****.**')
            first_state_name = exp.init_state_name
            second_state_name = 'State 2'
            exp_services.update_exploration('*****@*****.**', exp_id, [
                exp_domain.ExplorationChange(
                    {
                        'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
                        'state_name': first_state_name,
                        'property_name':
                        exp_domain.STATE_PROPERTY_INTERACTION_ID,
                        'new_value': 'MultipleChoiceInput',
                    }),
                exp_domain.ExplorationChange({
                    'cmd': exp_domain.CMD_ADD_STATE,
                    'state_name': second_state_name,
                }),
                exp_domain.ExplorationChange(
                    {
                        'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
                        'state_name': second_state_name,
                        'property_name':
                        exp_domain.STATE_PROPERTY_INTERACTION_ID,
                        'new_value': 'MultipleChoiceInput',
                    })
            ], 'Add new state')
            exp = exp_services.get_exploration_by_id(exp_id)
            exp_version = exp.version

            time_spent = 5.0
            params = {}

            # Add an answer.
            event_services.AnswerSubmissionEventHandler.record(
                exp_id, exp_version, first_state_name, 'MultipleChoiceInput',
                0, 0, exp_domain.EXPLICIT_CLASSIFICATION, 'session1',
                time_spent, params, 'answer1')

            # Run the answers aggregation job.
            MockInteractionAnswerSummariesAggregator.start_computation()
            self.assertEqual(
                self.count_jobs_in_taskqueue(
                    taskqueue_services.QUEUE_NAME_CONTINUOUS_JOBS), 1)
            self.process_and_flush_pending_tasks()
            self.assertEqual(
                self.count_jobs_in_taskqueue(
                    taskqueue_services.QUEUE_NAME_CONTINUOUS_JOBS), 0)

            calc_id = 'AnswerFrequencies'

            # Check the output of the job.
            calc_output_first_model = self._get_calc_output_model(
                exp_id, first_state_name, calc_id, exploration_version='2')
            calc_output_all_model = self._get_calc_output_model(
                exp_id, first_state_name, calc_id)

            self.assertEqual('AnswerFrequencies',
                             calc_output_first_model.calculation_id)
            self.assertEqual('AnswerFrequencies',
                             calc_output_all_model.calculation_id)

            calculation_output_first = (
                calc_output_first_model.calculation_output)
            calculation_output_all = calc_output_all_model.calculation_output

            # With a single submitted answer, both the version-specific and
            # the all-versions outputs contain that one answer.
            expected_calculation_output_first_answer = [{
                'answer': 'answer1',
                'frequency': 1
            }]

            self.assertEqual(calculation_output_first,
                             expected_calculation_output_first_answer)
            self.assertEqual(calculation_output_all,
                             expected_calculation_output_first_answer)

            # Try modifying the exploration and adding another answer.
            exp_services.update_exploration('*****@*****.**', exp_id, [
                exp_domain.ExplorationChange({
                    'cmd': exp_domain.CMD_ADD_STATE,
                    'state_name': 'third state',
                })
            ], 'Adding yet another state')
            exp = exp_services.get_exploration_by_id(exp_id)
            self.assertNotEqual(exp.version, exp_version)

            # Submit another answer.
            exp_version = exp.version
            event_services.AnswerSubmissionEventHandler.record(
                exp_id, exp_version, first_state_name, 'MultipleChoiceInput',
                0, 0, exp_domain.EXPLICIT_CLASSIFICATION, 'session2',
                time_spent, params, 'answer1')

            # Run the aggregator again.
            MockInteractionAnswerSummariesAggregator.stop_computation('a')
            MockInteractionAnswerSummariesAggregator.start_computation()
            self.assertEqual(
                self.count_jobs_in_taskqueue(
                    taskqueue_services.QUEUE_NAME_CONTINUOUS_JOBS), 1)
            self.process_and_flush_pending_tasks()
            self.assertEqual(
                self.count_jobs_in_taskqueue(
                    taskqueue_services.QUEUE_NAME_CONTINUOUS_JOBS), 0)

            # Extract the output from the job.
            calc_output_first_model = self._get_calc_output_model(
                exp_id, first_state_name, calc_id, exploration_version='2')
            calc_output_second_model = self._get_calc_output_model(
                exp_id, first_state_name, calc_id, exploration_version='3')
            calc_output_all_model = self._get_calc_output_model(
                exp_id, first_state_name, calc_id)

            self.assertEqual('AnswerFrequencies',
                             calc_output_first_model.calculation_id)
            self.assertEqual('AnswerFrequencies',
                             calc_output_second_model.calculation_id)
            self.assertEqual('AnswerFrequencies',
                             calc_output_all_model.calculation_id)

            calculation_output_first = (
                calc_output_first_model.calculation_output)
            calculation_output_second = (
                calc_output_second_model.calculation_output)
            calculation_output_all = (calc_output_all_model.calculation_output)

            # The output for version 2 of the exploration should be the same,
            # but the total combined output should include both answers. Also,
            # the output for version 3 should only include the second answer.
            expected_calculation_output_second_answer = [{
                'answer': 'answer1',
                'frequency': 1
            }]
            expected_calculation_output_all_answers = [{
                'answer': 'answer1',
                'frequency': 2
            }]

            self.assertEqual(calculation_output_first,
                             expected_calculation_output_first_answer)
            self.assertEqual(calculation_output_second,
                             expected_calculation_output_second_answer)
            self.assertEqual(calculation_output_all,
                             expected_calculation_output_all_answers)
Example #34
0
    def test_record_answer(self):
        self.save_new_default_exploration('eid', '*****@*****.**')
        exp = exp_services.get_exploration_by_id('eid')

        first_state_name = exp.init_state_name
        second_state_name = 'State 2'
        third_state_name = 'State 3'
        exp_services.update_exploration(
            '*****@*****.**', 'eid', [{
                'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
                'state_name': first_state_name,
                'property_name': exp_domain.STATE_PROPERTY_INTERACTION_ID,
                'new_value': 'TextInput',
            }, {
                'cmd': exp_domain.CMD_ADD_STATE,
                'state_name': second_state_name,
            }, {
                'cmd': exp_domain.CMD_ADD_STATE,
                'state_name': third_state_name,
            }, {
                'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
                'state_name': second_state_name,
                'property_name': exp_domain.STATE_PROPERTY_INTERACTION_ID,
                'new_value': 'TextInput',
            }, {
                'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
                'state_name': third_state_name,
                'property_name': exp_domain.STATE_PROPERTY_INTERACTION_ID,
                'new_value': 'Continue',
            }], 'Add new state')
        exp = exp_services.get_exploration_by_id('eid')

        exp_version = exp.version

        for state_name in [first_state_name, second_state_name]:
            state_answers = stats_services.get_state_answers(
                'eid', exp_version, state_name)
            self.assertEqual(state_answers, None)

        # answer is a string
        event_services.AnswerSubmissionEventHandler.record(
            'eid', exp_version, first_state_name, 'TextInput', 0, 0,
            exp_domain.EXPLICIT_CLASSIFICATION, 'sid1', self.TIME_SPENT,
            self.PARAMS, 'answer1')
        event_services.AnswerSubmissionEventHandler.record(
            'eid', exp_version, first_state_name, 'TextInput', 0, 1,
            exp_domain.EXPLICIT_CLASSIFICATION, 'sid2', self.TIME_SPENT,
            self.PARAMS, 'answer1')
        # answer is a dict
        event_services.AnswerSubmissionEventHandler.record(
            'eid', exp_version, first_state_name, 'TextInput', 1, 0,
            exp_domain.EXPLICIT_CLASSIFICATION, 'sid1', self.TIME_SPENT,
            self.PARAMS, {
                'x': 1.0,
                'y': 5.0
            })
        # answer is a number
        event_services.AnswerSubmissionEventHandler.record(
            'eid', exp_version, first_state_name, 'TextInput', 2, 0,
            exp_domain.EXPLICIT_CLASSIFICATION, 'sid1', self.TIME_SPENT,
            self.PARAMS, 10)
        # answer is a list of dicts
        event_services.AnswerSubmissionEventHandler.record(
            'eid', exp_version, first_state_name, 'TextInput', 3, 0,
            exp_domain.EXPLICIT_CLASSIFICATION, 'sid1', self.TIME_SPENT,
            self.PARAMS, [{
                'a': 'some',
                'b': 'text'
            }, {
                'a': 1.0,
                'c': 2.0
            }])
        # answer is a list
        event_services.AnswerSubmissionEventHandler.record(
            'eid', exp_version, second_state_name, 'TextInput', 2, 0,
            exp_domain.EXPLICIT_CLASSIFICATION, 'sid3', self.TIME_SPENT,
            self.PARAMS, [2, 4, 8])
        # answer is a unicode string
        event_services.AnswerSubmissionEventHandler.record(
            'eid', exp_version, second_state_name, 'TextInput', 1, 1,
            exp_domain.EXPLICIT_CLASSIFICATION, 'sid4', self.TIME_SPENT,
            self.PARAMS, self.UNICODE_TEST_STRING)
        # answer is None (such as for Continue)
        event_services.AnswerSubmissionEventHandler.record(
            'eid', exp_version, third_state_name, 'Continue', 1, 1,
            exp_domain.EXPLICIT_CLASSIFICATION, 'sid5', self.TIME_SPENT,
            self.PARAMS, None)

        expected_submitted_answer_list1 = [{
            'answer': 'answer1',
            'time_spent_in_sec': 5.0,
            'answer_group_index': 0,
            'rule_spec_index': 0,
            'classification_categorization': 'explicit',
            'session_id': 'sid1',
            'interaction_id': 'TextInput',
            'params': {}
        }, {
            'answer': 'answer1',
            'time_spent_in_sec': 5.0,
            'answer_group_index': 0,
            'rule_spec_index': 1,
            'classification_categorization': 'explicit',
            'session_id': 'sid2',
            'interaction_id': 'TextInput',
            'params': {}
        }, {
            'answer': {
                'x': 1.0,
                'y': 5.0
            },
            'time_spent_in_sec': 5.0,
            'answer_group_index': 1,
            'rule_spec_index': 0,
            'classification_categorization': 'explicit',
            'session_id': 'sid1',
            'interaction_id': 'TextInput',
            'params': {}
        }, {
            'answer': 10,
            'time_spent_in_sec': 5.0,
            'answer_group_index': 2,
            'rule_spec_index': 0,
            'classification_categorization': 'explicit',
            'session_id': 'sid1',
            'interaction_id': 'TextInput',
            'params': {}
        }, {
            'answer': [{
                'a': 'some',
                'b': 'text'
            }, {
                'a': 1.0,
                'c': 2.0
            }],
            'time_spent_in_sec':
            5.0,
            'answer_group_index':
            3,
            'rule_spec_index':
            0,
            'classification_categorization':
            'explicit',
            'session_id':
            'sid1',
            'interaction_id':
            'TextInput',
            'params': {}
        }]
        expected_submitted_answer_list2 = [{
            'answer': [2, 4, 8],
            'time_spent_in_sec': 5.0,
            'answer_group_index': 2,
            'rule_spec_index': 0,
            'classification_categorization': 'explicit',
            'session_id': 'sid3',
            'interaction_id': 'TextInput',
            'params': {}
        }, {
            'answer': self.UNICODE_TEST_STRING,
            'time_spent_in_sec': 5.0,
            'answer_group_index': 1,
            'rule_spec_index': 1,
            'classification_categorization': 'explicit',
            'session_id': 'sid4',
            'interaction_id': 'TextInput',
            'params': {}
        }]
        expected_submitted_answer_list3 = [{
            'answer': None,
            'time_spent_in_sec': 5.0,
            'answer_group_index': 1,
            'rule_spec_index': 1,
            'classification_categorization': 'explicit',
            'session_id': 'sid5',
            'interaction_id': 'Continue',
            'params': {}
        }]

        state_answers = stats_services.get_state_answers(
            'eid', exp_version, first_state_name)
        self.assertEqual(state_answers.get_submitted_answer_dict_list(),
                         expected_submitted_answer_list1)

        state_answers = stats_services.get_state_answers(
            'eid', exp_version, second_state_name)
        self.assertEqual(state_answers.get_submitted_answer_dict_list(),
                         expected_submitted_answer_list2)

        state_answers = stats_services.get_state_answers(
            'eid', exp_version, third_state_name)
        self.assertEqual(state_answers.get_submitted_answer_dict_list(),
                         expected_submitted_answer_list3)
    def reduce(key, stringified_values):
        """Aggregates the answers submitted for one (exploration, version,
        state) triple and saves the output of every registered answer
        calculation for that state's interaction.

        Args:
            key: str. The unique key of the form:
                <exploration_id>:<exploration_version>:<state_name>
                where <exploration_version> may also be VERSION_ALL, in
                which case answers are aggregated across all versions.
            stringified_values: list(str). A list of stringified_values of the
                submitted answers.

        Yields:
            str. One of the following strings:
                - Expected a single version when aggregating answers for:
                    Occurs when the versions list contains multiple versions
                    instead of a specific version.
                - Expected exactly one interaction ID for exploration:
                    Occurs when there is not exactly one interaction ID
                    for each exploration and version.
                - Expected at least one item ID for exploration:
                    Occurs when there is not at least one Item ID for
                    each exploration and version.
                - Ignoring answers submitted to version:
                    Occurs when version mismatches and the new
                    version has a different interaction ID.
        """
        # NOTE(review): this 3-way unpack assumes state names never contain
        # ':' -- confirm against how the map key is constructed.
        exploration_id, exploration_version, state_name = key.split(':')

        # Each mapped value is a repr'd dict; rebuild the dicts safely
        # (literal_eval only evaluates Python literals, never code).
        value_dicts = [
            ast.literal_eval(stringified_value)
            for stringified_value in stringified_values
        ]

        # Extract versions in descending order since answers are prioritized
        # based on recency.
        versions = list(
            set([
                int(value_dict['exploration_version'])
                for value_dict in value_dicts
            ]))
        versions.sort(reverse=True)

        # For answers mapped to specific versions, the versions list should only
        # contain the version they correspond to. Otherwise, if they map to
        # VERSION_ALL, then multiple versions may be included.
        # Note: yielding an error does not abort the reduce; processing
        # continues below.
        if exploration_version != VERSION_ALL and (
                len(versions) != 1 or versions[0] != int(exploration_version)):
            yield ('ERROR: Expected a single version when aggregating answers '
                   'for exploration %s (v=%s), but found: %s' %
                   (exploration_id, exploration_version, versions))

        # Map interaction IDs and StateAnswersModel IDs to exploration versions.
        # NOTE(review): the keys of these dicts are the int()-coerced versions
        # from above, but the raw value_dict['exploration_version'] is used for
        # lookup here -- this assumes the stored value is already an int;
        # confirm against the mapper's output.
        versioned_interaction_ids = {version: set() for version in versions}
        versioned_item_ids = {version: set() for version in versions}
        for value_dict in value_dicts:
            version = value_dict['exploration_version']
            versioned_interaction_ids[version].add(
                value_dict['interaction_id'])
            versioned_item_ids[version].add(
                value_dict['state_answers_model_id'])

        # Convert the interaction IDs to a list so they may be easily indexed.
        versioned_interaction_ids = {
            v: list(interaction_ids)
            for v, interaction_ids in versioned_interaction_ids.iteritems()
        }

        # Verify all interaction ID and item ID containers are well-structured.
        for version, interaction_ids in versioned_interaction_ids.iteritems():
            if len(interaction_ids) != 1:
                yield ('ERROR: Expected exactly one interaction ID for '
                       'exploration %s and version %s, found: %s' %
                       (exploration_id, version, len(interaction_ids)))
        for version, item_ids in versioned_item_ids.iteritems():
            if not item_ids:
                yield (
                    'ERROR: Expected at least one item ID for exploration %s '
                    'and version %s, found: %s' %
                    (exploration_id, version, len(item_ids)))

        # Filter out any item IDs which happen at and before a version with a
        # changed interaction ID. Start with the most recent version since it
        # will refer to the most relevant answers.
        latest_version = versions[0]
        latest_interaction_id = versioned_interaction_ids[latest_version][0]

        # Ensure the exploration corresponding to these answers exists.
        # A deleted exploration is skipped silently.
        exp = exp_services.get_exploration_by_id(exploration_id, strict=False)
        if exp is None:
            return

        if exploration_version == VERSION_ALL:
            # If aggregating across all versions, verify that the latest answer
            # version is equal to the latest version of the exploration,
            # otherwise ignore all answers since none of them can be applied to
            # the latest version.
            if state_name in exp.states:
                loaded_interaction_id = exp.states[state_name].interaction.id
                # Only check if the version mismatches if the new version has a
                # different interaction ID.
                if latest_interaction_id != loaded_interaction_id and (
                        latest_version != exp.version):
                    yield (
                        'INFO: Ignoring answers submitted to version %s and '
                        'below since the latest exploration version is %s' %
                        (latest_version, exp.version))
                    # Clearing versions causes the remainder of the function
                    # to aggregate zero answers (the calculations still run).
                    versions = []

        # In the VERSION_ALL case, we only take into account the most recent
        # consecutive block of versions with the same interaction ID as the
        # current version, and ignore all versions prior to this block. This
        # logic isn't needed for individually-mapped versions and, in that case,
        # we skip all this code in favor of performance.
        if len(versions) > 1:
            # Indexes (in the descending versions list) whose interaction ID
            # differs from the latest one.
            invalid_version_indexes = [
                index for index, version in enumerate(versions)
                if versioned_interaction_ids[version][0] != (
                    latest_interaction_id)
            ]
            # The last index before the first mismatch, or the oldest version
            # if no mismatch exists.
            earliest_acceptable_version_index = (invalid_version_indexes[0] -
                                                 1 if invalid_version_indexes
                                                 else len(versions) - 1)
            earliest_acceptable_version = versions[
                earliest_acceptable_version_index]
            # Trim away anything related to the versions which correspond to
            # different or since changed interaction IDs.
            ignored_versions = [
                version for version in versions
                if version < earliest_acceptable_version
            ]
            for ignored_version in ignored_versions:
                del versioned_interaction_ids[ignored_version]
                del versioned_item_ids[ignored_version]
            versions = versions[:earliest_acceptable_version_index + 1]

        # Retrieve all StateAnswerModel entities associated with the remaining
        # item IDs which correspond to a single interaction ID shared among all
        # the versions between start_version and latest_version, inclusive.
        item_ids = set()
        for version in versions:
            item_ids.update(versioned_item_ids[version])

        # Collapse the list of answers into a single answer dict. This
        # aggregates across multiple answers if the key ends with VERSION_ALL.
        # TODO(bhenning): Find a way to iterate across all answers more
        # efficiently and by not loading all answers for a particular
        # exploration into memory.
        submitted_answer_list = []
        combined_state_answers = {
            'exploration_id': exploration_id,
            'exploration_version': exploration_version,
            'state_name': state_name,
            'interaction_id': latest_interaction_id,
            'submitted_answer_list': submitted_answer_list
        }

        # NOTE: The answers stored in submitted_answers_list must be sorted
        # according to the chronological order of their submission otherwise
        # TopNUnresolvedAnswersByFrequency calculation will output invalid
        # results.
        state_answers_models = stats_models.StateAnswersModel.get_multi(
            item_ids)
        for state_answers_model in state_answers_models:
            if state_answers_model:
                submitted_answer_list += (
                    state_answers_model.submitted_answer_list)

        # Get all desired calculations for the current interaction id.
        calc_ids = interaction_registry.Registry.get_interaction_by_id(
            latest_interaction_id).answer_calculation_ids
        calculations = [
            calculation_registry.Registry.get_calculation_by_id(calc_id)
            for calc_id in calc_ids
        ]

        # Perform each calculation, and store the output.
        for calc in calculations:
            calc_output = calc.calculate_from_state_answers_dict(
                combined_state_answers)
            calc_output.save()
Example #36
0
    def test_actions_related_to_suggestions(self):
        """End-to-end check of suggestion actions: accept, reject, invalid
        and double-handled actions, permission enforcement, and the
        open/closed suggestion thread listings.
        """
        self.login(self.EDITOR_EMAIL)
        csrf_token = self.get_csrf_token_from_response(
            self.testapp.get('/create/%s' % self.EXP_ID))

        suggestion_threads = self.get_json(
            '%s/%s?list_type=%s&has_suggestion=%s' %
            (feconf.SUGGESTION_LIST_URL_PREFIX, self.EXP_ID, 'all',
             'true'))['threads']
        thread_id_to_accept = suggestion_threads[5]['thread_id']
        thread_id_to_reject = suggestion_threads[4]['thread_id']
        thread_id_without_permission = suggestion_threads[3]['thread_id']

        self.assertEqual(
            suggestion_threads[5]['subject'], 'Suggestion for state A.')
        self.assertEqual(suggestion_threads[4]['subject'], 'A new value.')
        self.assertEqual(suggestion_threads[3]['subject'], 'Empty suggestion')

        # Accept one suggestion and verify the exploration content updated.
        self._accept_suggestion(thread_id_to_accept, False, csrf_token)
        updated_exploration = exp_services.get_exploration_by_id(self.EXP_ID)
        self.assertEqual(
            updated_exploration.states['State A'].content.html,
            u'new accepted suggestion for state A')

        # Reject a different suggestion.
        self._reject_suggestion(thread_id_to_reject, csrf_token)

        # Both handled threads should now be listed as closed.
        closed_threads = self.get_json(
            '%s/%s?list_type=%s&has_suggestion=%s' %
            (feconf.SUGGESTION_LIST_URL_PREFIX, self.EXP_ID, 'closed',
             'true'))['threads']
        self.assertEqual(len(closed_threads), 2)

        # An unknown action string yields a 400.
        response_dict = self.put_json(
            '%s/%s/%s' % (
                feconf.SUGGESTION_ACTION_URL_PREFIX, self.EXP_ID,
                thread_id_to_reject),
            {'action': u'invalid'},
            csrf_token=csrf_token,
            expect_errors=True,
            expected_status_int=400)
        self.assertIn('Invalid action.', response_dict['error'])

        # The editor cannot accept or reject an already-handled suggestion.
        exception_msg = 'Suggestion has already been accepted/rejected.'
        response_dict = self._accept_suggestion(
            thread_id_to_reject, False, csrf_token, expect_errors=True,
            expected_status_int=500)
        self.assertIn(exception_msg, response_dict['error'])
        response_dict = self._reject_suggestion(
            thread_id_to_reject, csrf_token, expect_errors=True,
            expected_status_int=500)
        self.assertIn(exception_msg, response_dict['error'])
        self.logout()

        # Nor can a different editor (the owner).
        self.login(self.OWNER_EMAIL)
        csrf_token = self.get_csrf_token_from_response(
            self.testapp.get('/create/%s' % self.EXP_ID))
        response_dict = self._accept_suggestion(
            thread_id_to_reject, False, csrf_token, expect_errors=True,
            expected_status_int=500)
        self.assertIn(exception_msg, response_dict['error'])
        response_dict = self._reject_suggestion(
            thread_id_to_reject, csrf_token, expect_errors=True,
            expected_status_int=500)
        self.assertIn(exception_msg, response_dict['error'])
        self.logout()

        # A non-editor has no credentials to accept any suggestion.
        self.login(self.VIEWER_EMAIL)
        csrf_token = self.get_csrf_token_from_response(
            self.testapp.get('/create/%s' % self.EXP_ID))
        response_dict = self._accept_suggestion(
            thread_id_without_permission, False, csrf_token,
            expect_errors=True, expected_status_int=401)
        self.assertIn('You do not have credentials', response_dict['error'])
        self.logout()

        # Final listing checks: two closed and four open suggestion threads.
        self.login(self.EDITOR_EMAIL)
        closed_threads = self.get_json(
            '%s/%s?list_type=%s&has_suggestion=%s' %
            (feconf.SUGGESTION_LIST_URL_PREFIX, self.EXP_ID, 'closed',
             'true'))['threads']
        self.assertEqual(len(closed_threads), 2)

        open_threads = self.get_json(
            '%s/%s?list_type=%s&has_suggestion=%s' %
            (feconf.SUGGESTION_LIST_URL_PREFIX, self.EXP_ID, 'open',
             'true'))['threads']
        self.assertEqual(len(open_threads), 4)
Example #37
0
    def test_accept_suggestion(self):
        """Checks who may accept a generalized suggestion: editors and
        admins can; unrelated users and the suggestion's own author
        cannot; users whose score passes the threshold can.
        """
        with self.swap(constants, 'ENABLE_GENERALIZED_FEEDBACK_THREADS', True):
            exploration = exp_services.get_exploration_by_id(self.EXP_ID)

            def _fetch_suggestion(author_id, index=0):
                # Returns one of the given author's suggestions from the
                # suggestion list endpoint.
                return self.get_json(
                    '%s?author_id=%s' % (
                        feconf.GENERAL_SUGGESTION_LIST_URL_PREFIX,
                        author_id))['suggestions'][index]

            def _put_accept_action(suggestion, csrf_token, **kwargs):
                # Issues the 'accept' action against the given suggestion.
                return self.put_json(
                    '%s/exploration/%s/%s' % (
                        feconf.GENERAL_SUGGESTION_ACTION_URL_PREFIX,
                        suggestion['target_id'],
                        suggestion['suggestion_id']),
                    {
                        'action': u'accept',
                        'commit_message': u'commit message',
                        'review_message': u'Accepted'
                    },
                    csrf_token=csrf_token, **kwargs)

            # An editor can accept successfully.
            self.login(self.EDITOR_EMAIL)
            response = self.testapp.get('/explore/%s' % self.EXP_ID)
            csrf_token = self.get_csrf_token_from_response(response)

            suggestion_to_accept = _fetch_suggestion(self.author_id)

            response = self.testapp.get('/explore/%s' % self.EXP_ID)
            csrf_token = self.get_csrf_token_from_response(response)
            _put_accept_action(suggestion_to_accept, csrf_token)
            suggestion_post_accept = _fetch_suggestion(self.author_id)
            self.assertEqual(
                suggestion_post_accept['status'],
                suggestion_models.STATUS_ACCEPTED)
            exploration = exp_services.get_exploration_by_id(self.EXP_ID)
            self.assertEqual(
                exploration.states[
                    suggestion_to_accept['change']['state_name']].content.html,
                suggestion_to_accept['change']['new_value']['html'])
            self.logout()

            # A user without permissions cannot accept.
            self.login(self.NORMAL_USER_EMAIL)
            suggestion_to_accept = _fetch_suggestion(self.author_id_2)

            response = self.testapp.get('/explore/%s' % self.EXP_ID)
            csrf_token = self.get_csrf_token_from_response(response)
            _put_accept_action(
                suggestion_to_accept, csrf_token, expect_errors=True,
                expected_status_int=401)
            self.logout()

            # An author cannot accept their own suggestion.
            self.login(self.AUTHOR_EMAIL_2)
            suggestion_to_accept = _fetch_suggestion(self.author_id_2)

            response = self.testapp.get('/explore/%s' % self.EXP_ID)
            csrf_token = self.get_csrf_token_from_response(response)
            _put_accept_action(
                suggestion_to_accept, csrf_token, expect_errors=True,
                expected_status_int=401)

            # A user whose score is above the threshold can accept.
            self.login(self.AUTHOR_EMAIL)
            suggestion_services.increment_score_for_user(
                self.author_id, 'content.Algebra', 15)

            response = self.testapp.get('/explore/%s' % self.EXP_ID)
            csrf_token = self.get_csrf_token_from_response(response)
            _put_accept_action(suggestion_to_accept, csrf_token)

            suggestion_post_accept = _fetch_suggestion(self.author_id_2)
            self.assertEqual(
                suggestion_post_accept['status'],
                suggestion_models.STATUS_ACCEPTED)
            self.logout()

            # Admins can accept suggestions.
            self.login(self.ADMIN_EMAIL)
            response = self.testapp.get('/explore/%s' % self.EXP_ID)
            csrf_token = self.get_csrf_token_from_response(response)
            suggestion_to_accept = _fetch_suggestion(self.author_id_2, 1)
            _put_accept_action(suggestion_to_accept, csrf_token)
            suggestion_post_accept = _fetch_suggestion(self.author_id_2, 1)
            self.assertEqual(
                suggestion_post_accept['status'],
                suggestion_models.STATUS_ACCEPTED)
            self.logout()
Example #38
0
    def test_one_answer(self):
        """Runs the answer-summaries aggregator over a two-state
        exploration and verifies the per-state answer frequency output.
        """
        with self.swap(jobs_registry, 'ALL_CONTINUOUS_COMPUTATION_MANAGERS',
                       self.ALL_CC_MANAGERS_FOR_TESTS):

            # Set up an exploration with two MultipleChoiceInput states.
            exp_id = 'eid'
            exp = self.save_new_valid_exploration(exp_id, '*****@*****.**')
            first_state_name = exp.init_state_name
            second_state_name = 'State 2'
            change_list = [
                exp_domain.ExplorationChange({
                    'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
                    'state_name': first_state_name,
                    'property_name': (
                        exp_domain.STATE_PROPERTY_INTERACTION_ID),
                    'new_value': 'MultipleChoiceInput',
                }),
                exp_domain.ExplorationChange({
                    'cmd': exp_domain.CMD_ADD_STATE,
                    'state_name': second_state_name,
                }),
                exp_domain.ExplorationChange({
                    'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
                    'state_name': second_state_name,
                    'property_name': (
                        exp_domain.STATE_PROPERTY_INTERACTION_ID),
                    'new_value': 'MultipleChoiceInput',
                }),
            ]
            exp_services.update_exploration(
                '*****@*****.**', exp_id, change_list, 'Add new state')
            exp_version = exp_services.get_exploration_by_id(exp_id).version

            time_spent = 5.0
            params = {}

            # Two learners start on the first state.
            self._record_start(
                exp_id, exp_version, first_state_name, 'session1')
            self._record_start(
                exp_id, exp_version, first_state_name, 'session2')
            self.process_and_flush_pending_tasks()

            # Submit answers in order: 'answer1' twice plus 'answer2' once on
            # the first state, and 'answer3' once on the second state.
            submissions = [
                (first_state_name, 'session1', 'answer1'),
                (first_state_name, 'session2', 'answer1'),
                (first_state_name, 'session1', 'answer2'),
                (second_state_name, 'session2', 'answer3'),
            ]
            for state_name, session_id, answer in submissions:
                event_services.AnswerSubmissionEventHandler.record(
                    exp_id, exp_version, state_name, 'MultipleChoiceInput',
                    0, 0, exp_domain.EXPLICIT_CLASSIFICATION, session_id,
                    time_spent, params, answer)

            # Run the aggregation job and wait for the task queue to drain.
            MockInteractionAnswerSummariesAggregator.start_computation()
            self.assertEqual(
                self.count_jobs_in_taskqueue(
                    taskqueue_services.QUEUE_NAME_CONTINUOUS_JOBS), 1)
            self.process_and_flush_pending_tasks()
            self.assertEqual(
                self.count_jobs_in_taskqueue(
                    taskqueue_services.QUEUE_NAME_CONTINUOUS_JOBS), 0)

            calc_id = 'AnswerFrequencies'

            # The first state counts 'answer1' twice and 'answer2' once.
            calc_output_model = self._get_calc_output_model(
                exp_id,
                first_state_name,
                calc_id,
                exploration_version=exp_version)
            self.assertEqual(
                'AnswerFrequencies', calc_output_model.calculation_id)
            self.assertEqual(
                calc_output_model.calculation_output,
                [{
                    'answer': 'answer1',
                    'frequency': 2
                }, {
                    'answer': 'answer2',
                    'frequency': 1
                }])

            # The second state counts 'answer3' once.
            calc_output_model = self._get_calc_output_model(
                exp_id,
                second_state_name,
                calc_id,
                exploration_version=exp_version)
            self.assertEqual(
                'AnswerFrequencies', calc_output_model.calculation_id)
            self.assertEqual(
                calc_output_model.calculation_output,
                [{
                    'answer': 'answer3',
                    'frequency': 1
                }])
Example #39
0
    def reduce(exp_id, values):
        values = map(ast.literal_eval, values)
        sorted_events_dicts = sorted(values, key=lambda x: x['version'])

        # Find the latest version number
        exploration = exp_services.get_exploration_by_id(exp_id)
        latest_exp_version = exploration.version
        versions = range(1, latest_exp_version + 1)

        # Get a copy of the corrupted statistics models to copy uncorrupted
        # v1 fields
        old_stats = stats_services.get_multiple_exploration_stats_by_version(
            exp_id, versions)
        # Get list of snapshot models for each version of the exploration
        snapshots_by_version = (
            exp_models.ExplorationModel.get_snapshots_metadata(
                exp_id, versions))

        exp_stats_dicts = []
        event_dict_idx = 0
        event_dict = sorted_events_dicts[event_dict_idx]
        for version in versions:
            datastore_stats_for_version = old_stats[version - 1]
            if version == 1:
                # Reset the possibly corrupted stats
                datastore_stats_for_version.num_starts_v2 = 0
                datastore_stats_for_version.num_completions_v2 = 0
                datastore_stats_for_version.num_actual_starts_v2 = 0
                for state_stats in (datastore_stats_for_version.
                                    state_stats_mapping.values()):
                    state_stats.total_answers_count_v2 = 0
                    state_stats.useful_feedback_count_v2 = 0
                    state_stats.total_hit_count_v2 = 0
                    state_stats.first_hit_count_v2 = 0
                    state_stats.num_times_solution_viewed_v2 = 0
                    state_stats.num_completions_v2 = 0
                exp_stats_dict = datastore_stats_for_version.to_dict()
            else:
                change_list = snapshots_by_version[version - 1]['commit_cmds']
                # Copy recomputed v2 events from previous version
                prev_stats_dict = copy.deepcopy(exp_stats_dicts[-1])
                prev_stats_dict = (
                    RecomputeStatisticsOneOffJob._apply_state_name_changes(
                        prev_stats_dict, change_list))
                # Copy uncorrupt v1 stats
                prev_stats_dict['num_starts_v1'] = (
                    datastore_stats_for_version.num_starts_v1)
                prev_stats_dict['num_completions_v1'] = (
                    datastore_stats_for_version.num_completions_v1)
                prev_stats_dict['num_actual_starts_v1'] = (
                    datastore_stats_for_version.num_actual_starts_v1)
                state_stats_mapping = prev_stats_dict['state_stats_mapping']
                for state in state_stats_mapping:
                    state_stats_mapping[state]['total_answers_count_v1'] = (
                        datastore_stats_for_version.state_stats_mapping[state].
                        total_answers_count_v1)
                    state_stats_mapping[state]['useful_feedback_count_v1'] = (
                        datastore_stats_for_version.state_stats_mapping[state].
                        useful_feedback_count_v1)
                    state_stats_mapping[state]['total_hit_count_v1'] = (
                        datastore_stats_for_version.state_stats_mapping[state].
                        total_hit_count_v1)
                    state_stats_mapping[state]['first_hit_count_v1'] = (
                        datastore_stats_for_version.state_stats_mapping[state].
                        first_hit_count_v1)
                    state_stats_mapping[state]['num_completions_v1'] = (
                        datastore_stats_for_version.state_stats_mapping[state].
                        num_completions_v1)
                exp_stats_dict = copy.deepcopy(prev_stats_dict)

            # Compute the statistics for events corresponding to this version
            state_hit_session_ids, solution_hit_session_ids = set(), set()
            while event_dict['version'] == version:
                state_stats = (exp_stats_dict['state_stats_mapping'][
                    event_dict['state_name']])
                if event_dict['event_type'] == (
                        feconf.EVENT_TYPE_ACTUAL_START_EXPLORATION):
                    exp_stats_dict['num_actual_starts_v2'] += 1
                elif event_dict['event_type'] == (
                        feconf.EVENT_TYPE_COMPLETE_EXPLORATION):
                    exp_stats_dict['num_completions_v2'] += 1
                elif event_dict['event_type'] == (
                        feconf.EVENT_TYPE_ANSWER_SUBMITTED):
                    state_stats['total_answers_count_v2'] += 1
                    if event_dict['is_feedback_useful']:
                        state_stats['useful_feedback_count_v2'] += 1
                elif event_dict['event_type'] == feconf.EVENT_TYPE_STATE_HIT:
                    state_stats['total_hit_count_v2'] += 1
                    state_hit_key = (event_dict['session_id'] +
                                     event_dict['state_name'])
                    if state_hit_key not in state_hit_session_ids:
                        state_stats['first_hit_count_v2'] += 1
                        state_hit_session_ids.add(state_hit_key)
                elif event_dict[
                        'event_type'] == feconf.EVENT_TYPE_SOLUTION_HIT:
                    solution_hit_key = (event_dict['session_id'] +
                                        event_dict['state_name'])
                    if solution_hit_key not in solution_hit_session_ids:
                        state_stats['num_times_solution_viewed_v2'] += 1
                        solution_hit_session_ids.add(solution_hit_key)
                elif event_dict['event_type'] == (
                        feconf.EVENT_TYPE_STATE_COMPLETED):
                    state_stats['num_completions_v2'] += 1
                event_dict_idx += 1
                if event_dict_idx < len(sorted_events_dicts):
                    event_dict = sorted_events_dicts[event_dict_idx]
                else:
                    break

            exp_stats_dicts.append(copy.deepcopy(exp_stats_dict))
        stats_models.ExplorationStatsModel.save_multi(exp_stats_dicts)
Example #40
0
    def test_answers_across_multiple_exp_versions_different_interactions(self):
        """Checks that the aggregation job only counts answers submitted
        since the most recent interaction change, even when the interaction
        is later reverted to an earlier type that had also received answers.
        """
        with self.swap(jobs_registry, 'ALL_CONTINUOUS_COMPUTATION_MANAGERS',
                       self.ALL_CC_MANAGERS_FOR_TESTS):

            # Set up the example exploration.
            exploration_id = 'eid'
            exploration = self.save_new_valid_exploration(
                exploration_id, '*****@*****.**')
            first_state_name = exploration.init_state_name

            seconds_spent = 5.0
            param_dict = {}

            def _submit_answer(version, interaction_id, answer):
                # Records one answer event against the given version.
                event_services.AnswerSubmissionEventHandler.record(
                    exploration_id, version, first_state_name, interaction_id,
                    0, 0, exp_domain.EXPLICIT_CLASSIFICATION, 'session1',
                    seconds_spent, param_dict, answer)

            def _set_interaction_id(new_interaction_id, commit_message):
                # Switches the initial state's interaction type, creating a
                # new exploration version.
                exp_services.update_exploration(
                    '*****@*****.**', exploration_id, [
                        exp_domain.ExplorationChange({
                            'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
                            'state_name': first_state_name,
                            'property_name': (
                                exp_domain.STATE_PROPERTY_INTERACTION_ID),
                            'new_value': new_interaction_id,
                        })
                    ], commit_message)

            # A few different answers against version 1 (TextInput).
            _submit_answer(1, 'TextInput', 'verb')
            _submit_answer(1, 'TextInput', '2')
            _submit_answer(1, 'TextInput', 'verb')

            _set_interaction_id('NumericInput', 'Change to NumericInput')

            # One answer against version 2 (NumericInput).
            _submit_answer(2, 'NumericInput', 2)

            _set_interaction_id('TextInput', 'Change to TextInput')

            # Another number-like answer, now against version 3 (TextInput).
            _submit_answer(3, 'TextInput', '2')

            # Create a 4th exploration version by changing the state's content.
            exp_services.update_exploration(
                '*****@*****.**', exploration_id, [
                    exp_domain.ExplorationChange({
                        'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
                        'state_name': first_state_name,
                        'property_name': exp_domain.STATE_PROPERTY_CONTENT,
                        'new_value': {
                            'content_id': 'content',
                            'html': 'New content description'
                        }
                    })
                ], 'Change content description')

            # Some more answers against the latest version.
            _submit_answer(4, 'TextInput', 'noun')
            _submit_answer(4, 'TextInput', 'verb')
            _submit_answer(4, 'TextInput', 'noun')

            exploration = exp_services.get_exploration_by_id(exploration_id)
            self.assertEqual(exploration.version, 4)

            # Run the answers aggregation job to completion.
            MockInteractionAnswerSummariesAggregator.start_computation()
            self.assertEqual(
                self.count_jobs_in_taskqueue(
                    taskqueue_services.QUEUE_NAME_CONTINUOUS_JOBS), 1)
            self.process_and_flush_pending_tasks()
            self.assertEqual(
                self.count_jobs_in_taskqueue(
                    taskqueue_services.QUEUE_NAME_CONTINUOUS_JOBS), 0)

            calculation_id = 'Top10AnswerFrequencies'

            # Check the output of the job.
            latest_version_model = self._get_calc_output_model(
                exploration_id, first_state_name, calculation_id,
                exploration_version='4')
            all_versions_model = self._get_calc_output_model(
                exploration_id, first_state_name, calculation_id)

            self.assertEqual(
                'Top10AnswerFrequencies',
                latest_version_model.calculation_id)
            self.assertEqual(
                'Top10AnswerFrequencies', all_versions_model.calculation_id)

            expected_latest_output = [
                {'answer': 'noun', 'frequency': 2},
                {'answer': 'verb', 'frequency': 1},
            ]

            # Only includes versions 3-4 since version 2 has a different
            # interaction ID. Note that the output is dependent on the order
            # of submission (verb submitted before 2 -> verb ranked higher).
            expected_all_output = [
                {'answer': 'noun', 'frequency': 2},
                {'answer': 'verb', 'frequency': 1},
                {'answer': '2', 'frequency': 1},
            ]

            self.assertEqual(
                latest_version_model.calculation_output,
                expected_latest_output)
            self.assertEqual(
                all_versions_model.calculation_output, expected_all_output)
Example #41
0
    def test_uses_old_answers_if_updated_exploration_has_same_interaction(
            self):
        """Verifies that answers from an older exploration version are still
        aggregated when a new version is created without changing the
        interaction ID and no new answers are submitted to the new version.
        """
        with self.swap(jobs_registry, 'ALL_CONTINUOUS_COMPUTATION_MANAGERS',
                       self.ALL_CC_MANAGERS_FOR_TESTS):

            # Set up the example exploration.
            exploration_id = 'eid'
            exploration = self.save_new_valid_exploration(
                exploration_id, '*****@*****.**')
            first_state_name = exploration.init_state_name

            seconds_spent = 5.0
            param_dict = {}

            # A few different answers against version 1.
            for answer_value in ('verb', '2', 'verb'):
                event_services.AnswerSubmissionEventHandler.record(
                    exploration_id, 1, first_state_name, 'TextInput', 0, 0,
                    exp_domain.EXPLICIT_CLASSIFICATION, 'session1',
                    seconds_spent, param_dict, answer_value)

            # Update the state content, leaving the interaction ID unchanged.
            exp_services.update_exploration(
                '*****@*****.**', exploration_id, [
                    exp_domain.ExplorationChange({
                        'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
                        'state_name': first_state_name,
                        'property_name': exp_domain.STATE_PROPERTY_CONTENT,
                        'new_value': {
                            'content_id': 'content',
                            'html': 'New content'
                        },
                    })
                ], 'Change state content')

            exploration = exp_services.get_exploration_by_id(exploration_id)
            self.assertEqual(exploration.version, 2)

            # Run the answers aggregation job to completion.
            MockInteractionAnswerSummariesAggregator.start_computation()
            self.assertEqual(
                self.count_jobs_in_taskqueue(
                    taskqueue_services.QUEUE_NAME_CONTINUOUS_JOBS), 1)
            self.process_and_flush_pending_tasks()
            self.assertEqual(
                self.count_jobs_in_taskqueue(
                    taskqueue_services.QUEUE_NAME_CONTINUOUS_JOBS), 0)

            calculation_id = 'Top10AnswerFrequencies'

            # Extract the output from the job.
            latest_version_model = self._get_calc_output_model(
                exploration_id, first_state_name, calculation_id,
                exploration_version='2')
            all_versions_model = self._get_calc_output_model(
                exploration_id, first_state_name, calculation_id)

            # Since no answers were submitted to the latest version of the
            # exploration, there should be no calculated output for it.
            self.assertIsNone(latest_version_model)

            self.assertEqual(
                'Top10AnswerFrequencies', all_versions_model.calculation_id)
            expected_all_output = [
                {'answer': 'verb', 'frequency': 2},
                {'answer': '2', 'frequency': 1},
            ]
            self.assertEqual(
                all_versions_model.calculation_output, expected_all_output)
Example #42
0
    def test_ignores_old_answers_if_new_interaction_has_no_new_answers(self):
        """Checks that when the latest exploration version switches to a new
        interaction ID and receives no answers, the aggregate output across
        all versions excludes answers recorded under the old interaction,
        while the per-version output for the old version is still available.
        """
        with self.swap(jobs_registry, 'ALL_CONTINUOUS_COMPUTATION_MANAGERS',
                       self.ALL_CC_MANAGERS_FOR_TESTS):

            # Set up the example exploration.
            exploration_id = 'eid'
            exploration = self.save_new_valid_exploration(
                exploration_id, '*****@*****.**')
            first_state_name = exploration.init_state_name

            seconds_spent = 5.0
            param_dict = {}

            # A few different answers against version 1 (TextInput).
            for answer_value in ('verb', '2', 'verb'):
                event_services.AnswerSubmissionEventHandler.record(
                    exploration_id, 1, first_state_name, 'TextInput', 0, 0,
                    exp_domain.EXPLICIT_CLASSIFICATION, 'session1',
                    seconds_spent, param_dict, answer_value)

            # Switch the interaction ID, creating version 2.
            exp_services.update_exploration(
                '*****@*****.**', exploration_id, [
                    exp_domain.ExplorationChange({
                        'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
                        'state_name': first_state_name,
                        'property_name': (
                            exp_domain.STATE_PROPERTY_INTERACTION_ID),
                        'new_value': 'NumericInput',
                    })
                ], 'Change to NumericInput')

            exploration = exp_services.get_exploration_by_id(exploration_id)
            self.assertEqual(exploration.version, 2)

            # Run the answers aggregation job to completion.
            MockInteractionAnswerSummariesAggregator.start_computation()
            self.assertEqual(
                self.count_jobs_in_taskqueue(
                    taskqueue_services.QUEUE_NAME_CONTINUOUS_JOBS), 1)
            self.process_and_flush_pending_tasks()
            self.assertEqual(
                self.count_jobs_in_taskqueue(
                    taskqueue_services.QUEUE_NAME_CONTINUOUS_JOBS), 0)

            calculation_id = 'Top10AnswerFrequencies'

            # Check the output of the job.
            latest_version_model = self._get_calc_output_model(
                exploration_id, first_state_name, calculation_id,
                exploration_version='2')
            first_version_model = self._get_calc_output_model(
                exploration_id, first_state_name, calculation_id,
                exploration_version='1')
            all_versions_model = self._get_calc_output_model(
                exploration_id, first_state_name, calculation_id)

            # Since no answers were submitted to the latest version of the
            # exploration, there should be no calculated output for it.
            self.assertIsNone(latest_version_model)

            # Top answers will still be computed for the first version.
            self.assertEqual(
                'Top10AnswerFrequencies', first_version_model.calculation_id)
            expected_first_version_output = [
                {'answer': 'verb', 'frequency': 2},
                {'answer': '2', 'frequency': 1},
            ]
            self.assertEqual(
                first_version_model.calculation_output,
                expected_first_version_output)

            self.assertEqual(
                'Top10AnswerFrequencies', all_versions_model.calculation_id)

            # No answers should be aggregated since all past answers do not
            # match the newly submitted interaction ID.
            self.assertEqual(all_versions_model.calculation_output, [])
Example #43
0
    def setUp(self):
        """Builds an exploration owned by the editor and posts a batch of
        suggestions against it for the integration tests to consume.
        """
        super(SuggestionsIntegrationTests, self).setUp()

        # Register users.
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
        self.signup(self.VIEWER_EMAIL, self.VIEWER_USERNAME)
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        self.editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL)
        self.viewer_id = self.get_user_id_from_email(self.VIEWER_EMAIL)

        self.editor = user_services.UserActionsInfo(self.editor_id)

        # Log in as the editor, who creates the exploration and owns it.
        self.login(self.EDITOR_EMAIL)

        # Create the exploration the suggestions will target.
        self.save_new_valid_exploration(
            self.EXP_ID,
            self.editor_id,
            title='Exploration for suggestions',
            category='This is just a test category',
            objective='Test a suggestion.')

        exploration = exp_services.get_exploration_by_id(self.EXP_ID)
        initial_state = exploration.states[exploration.init_state_name]
        initial_state.interaction.default_outcome.dest = (
            exploration.init_state_name)
        exploration.add_states(['State A', 'State 2', 'State 3'])
        state_a = exploration.states['State A']
        state_a.update_interaction_id('TextInput')
        # Give State A content with a single audio subtitle.
        content_id = state_a.content.content_id
        state_a.update_content(
            exp_domain.SubtitledHtml(content_id, 'old content').to_dict())
        state_a.update_content_ids_to_audio_translations({
            content_id: {
                self.TRANSLATION_LANGUAGE_CODE: exp_domain.AudioTranslation(
                    'filename.mp3', 20, False).to_dict()
            },
            'default_outcome': {}
        })
        exploration.states['State 2'].update_interaction_id('TextInput')
        exploration.states['State 3'].update_interaction_id('TextInput')
        exp_services._save_exploration(self.editor_id, exploration, '', [])  # pylint: disable=protected-access
        rights_manager.publish_exploration(self.editor, self.EXP_ID)
        rights_manager.assign_role_for_exploration(
            self.editor, self.EXP_ID, self.owner_id,
            rights_manager.ROLE_EDITOR)

        response = self.testapp.get('/explore/%s' % self.EXP_ID)
        csrf_token = self.get_csrf_token_from_response(response)

        # Create suggestions, posting each payload in the original order.
        suggestion_payloads = [
            (3, u'State A', u'Suggestion for state A.',
             'new accepted suggestion for state A'),
            (1, u'State 2', u'A new value.', 'some new value'),
            (2, u'State 3', u'Empty suggestion', ''),
            (2, u'State A', u'Just a space.', ' '),
            (1, u'State 2', u'Random characters.', '#!$%'),
            (2, u'State 3', u'Very bizarre characters.', u'Ֆݓॵক'),
        ]
        for version, state_name, description, html in suggestion_payloads:
            self.post_json(
                '%s/%s' % (feconf.SUGGESTION_URL_PREFIX, self.EXP_ID), {
                    'exploration_version': version,
                    'state_name': state_name,
                    'description': description,
                    'suggestion_html': html,
                },
                csrf_token=csrf_token)
        self.logout()
Example #44
0
    def _get_exploration_data(
            self, exploration_id, apply_draft=False, version=None):
        """Returns a dict describing the given exploration for the editor.

        Args:
            exploration_id: str. The ID of the exploration.
            apply_draft: bool. Whether to return the exploration with the
                current user's draft changes applied.
            version: int or None. The version of the exploration to fetch,
                or None for the latest version. Ignored when apply_draft
                is True.

        Returns:
            dict. Editor-facing data for the exploration: its states,
            rights, draft information and the user's email preferences.

        Raises:
            PageNotFoundException. The exploration (or the requested
            version of it) could not be retrieved.
        """
        try:
            if apply_draft:
                exploration = exp_services.get_exp_with_draft_applied(
                    exploration_id, self.user_id)
            else:
                exploration = exp_services.get_exploration_by_id(
                    exploration_id, version=version)
        # Narrowed from a bare `except:` so that system-exiting exceptions
        # (KeyboardInterrupt, SystemExit) are not swallowed; any lookup
        # failure is surfaced as a 404.
        except Exception:
            raise self.PageNotFoundException

        states = {
            state_name: state.to_dict()
            for state_name, state in exploration.states.items()
        }
        exp_user_data = user_models.ExplorationUserDataModel.get(
            self.user_id, exploration_id)
        # Draft-related fields are all None/0 when the user has no draft.
        draft_changes = (
            exp_user_data.draft_change_list
            if exp_user_data and exp_user_data.draft_change_list else None)
        is_version_of_draft_valid = (
            exp_services.is_version_of_draft_valid(
                exploration_id, exp_user_data.draft_change_list_exp_version)
            if exp_user_data and exp_user_data.draft_change_list_exp_version
            else None)
        draft_change_list_id = (
            exp_user_data.draft_change_list_id if exp_user_data else 0)
        exploration_email_preferences = (
            user_services.get_email_preferences_for_exploration(
                self.user_id, exploration_id))
        editor_dict = {
            'auto_tts_enabled': exploration.auto_tts_enabled,
            'category': exploration.category,
            'correctness_feedback_enabled': (
                exploration.correctness_feedback_enabled),
            'draft_change_list_id': draft_change_list_id,
            'exploration_id': exploration_id,
            'init_state_name': exploration.init_state_name,
            'language_code': exploration.language_code,
            'objective': exploration.objective,
            'param_changes': exploration.param_change_dicts,
            'param_specs': exploration.param_specs_dict,
            'rights': rights_manager.get_exploration_rights(
                exploration_id).to_dict(),
            'show_state_editor_tutorial_on_load': (
                self.user_id and not self.has_seen_editor_tutorial),
            'states': states,
            'tags': exploration.tags,
            'title': exploration.title,
            'version': exploration.version,
            'is_version_of_draft_valid': is_version_of_draft_valid,
            'draft_changes': draft_changes,
            'email_preferences': exploration_email_preferences.to_dict()
        }

        return editor_dict
Example #45
0
    def get(self, exploration_id):
        """Handles GET requests.

        Renders the exploration player page for the given exploration,
        optionally at a specific version (via the 'v' query parameter) and
        optionally iframed (via the 'iframed' query parameter).
        """
        version = self.request.get('v')
        if not version:
            # The default value for a missing parameter seems to be ''.
            version = None
        else:
            version = int(version)

        try:
            exploration = exp_services.get_exploration_by_id(
                exploration_id, version=version)
        except Exception as e:
            # Any lookup failure (missing ID or bad version) becomes a 404.
            raise self.PageNotFoundException(e)

        # Use the resolved version so the version=None case reports the
        # actual latest version number.
        version = exploration.version

        if not rights_manager.Actor(self.user_id).can_view(exploration_id):
            raise self.PageNotFoundException

        is_iframed = (self.request.get('iframed') == 'true')

        # TODO(sll): Cache these computations.
        gadget_ids = exploration.get_gadget_ids()
        interaction_ids = exploration.get_interaction_ids()
        dependency_ids = (
            interaction_registry.Registry.get_deduplicated_dependency_ids(
                interaction_ids))
        dependencies_html, additional_angular_modules = (
            dependency_registry.Registry.get_deps_html_and_angular_modules(
                dependency_ids))

        gadget_templates = (
            gadget_registry.Registry.get_gadget_html(gadget_ids))

        interaction_templates = (
            rte_component_registry.Registry.get_html_for_all_components() +
            interaction_registry.Registry.get_interaction_html(
                interaction_ids))

        # Values consumed by the player template. Markup() wrappers mark the
        # registry-produced HTML as safe so Jinja does not escape it.
        self.values.update({
            'GADGET_SPECS': gadget_registry.Registry.get_all_specs(),
            'INTERACTION_SPECS': interaction_registry.Registry.get_all_specs(),
            'SHARING_OPTIONS': SHARING_OPTIONS.value,
            'SHARING_OPTIONS_TWITTER_TEXT': SHARING_OPTIONS_TWITTER_TEXT.value,
            'additional_angular_modules': additional_angular_modules,
            'can_edit': (
                bool(self.username) and
                self.username not in config_domain.BANNED_USERNAMES.value and
                rights_manager.Actor(self.user_id).can_edit(exploration_id)
            ),
            'dependencies_html': jinja2.utils.Markup(
                dependencies_html),
            'exploration_title': exploration.title,
            'exploration_version': version,
            'gadget_templates': jinja2.utils.Markup(gadget_templates),
            'iframed': is_iframed,
            'interaction_templates': jinja2.utils.Markup(
                interaction_templates),
            'is_private': rights_manager.is_exploration_private(
                exploration_id),
            # Note that this overwrites the value in base.py.
            'meta_name': exploration.title,
            # Note that this overwrites the value in base.py.
            'meta_description': self._make_first_letter_uppercase(
                exploration.objective),
            'nav_mode': feconf.NAV_MODE_EXPLORE,
            'skin_templates': jinja2.utils.Markup(
                skins_services.Registry.get_skin_templates(
                    [exploration.default_skin])),
            'skin_js_url': skins_services.Registry.get_skin_js_url(
                exploration.default_skin),
            'skin_tag': jinja2.utils.Markup(
                skins_services.Registry.get_skin_tag(exploration.default_skin)
            ),
        })

        # Iframed rendering disables the frame-busting restriction so the
        # page can be embedded elsewhere.
        if is_iframed:
            self.render_template(
                'player/exploration_player.html', iframe_restriction=None)
        else:
            self.render_template('player/exploration_player.html')
Example #46
0
    def get(self, exploration_id, escaped_state_name):
        """Handles GET requests.

        Renders a JSON dict with an 'unhandled_answers' list: submitted
        answers for the given state that are not yet covered by classifier
        training data or confirmed-unclassified answers.

        Raises:
            PageNotFoundException. The exploration does not exist.
        """
        try:
            exploration = exp_services.get_exploration_by_id(exploration_id)
        # Narrowed from a bare `except:` so that system-exiting exceptions
        # (KeyboardInterrupt, SystemExit) are not swallowed.
        except Exception:
            raise self.PageNotFoundException

        state_name = utils.unescape_encoded_uri_component(escaped_state_name)
        if state_name not in exploration.states:
            # If trying to access a non-existing state, there is no training
            # data associated with it.
            self.render_json({'unhandled_answers': []})
            return

        state = exploration.states[state_name]

        # TODO(bhenning): Answers should be bound to a particular exploration
        # version or interaction ID.

        # TODO(bhenning): If the top 100 answers have already been classified,
        # then this handler will always return an empty list.

        # TODO(bhenning): This entire function will not work as expected until
        # the answers storage backend stores answers in a non-lossy way.
        # Currently, answers are stored as HTML strings and they are not able
        # to be converted back to the original objects they started as, so the
        # normalization calls in this function will not work correctly on those
        # strings. Once this happens, this handler should also be tested.

        # The total number of possible answers is 100 because it requests the
        # top 50 answers matched to the default rule and the top 50 answers
        # matched to the classifier individually.

        # TODO(sll): Functionality for retrieving untrained answers was removed
        # in PR 3489 due to infeasibility of the calculation approach. It needs
        # to be reinstated in the future so that the training interface can
        # function.
        submitted_answers = []

        interaction = state.interaction
        unhandled_answers = []
        if feconf.SHOW_TRAINABLE_UNRESOLVED_ANSWERS and interaction.id:
            interaction_instance = (
                interaction_registry.Registry.get_interaction_by_id(
                    interaction.id))

            try:
                # Normalize the answers.
                for answer in submitted_answers:
                    answer['answer'] = interaction_instance.normalize_answer(
                        answer['answer'])

                # Collect every answer already used as classifier training
                # data.
                trained_answers = set()
                for answer_group in interaction.answer_groups:
                    for rule_spec in answer_group.rule_specs:
                        if (rule_spec.rule_type ==
                                exp_domain.RULE_TYPE_CLASSIFIER):
                            trained_answers.update(
                                interaction_instance.normalize_answer(trained)
                                for trained in
                                rule_spec.inputs['training_data'])

                # Include all the answers which have been confirmed to be
                # associated with the default outcome. (The redundant set()
                # wrapper around the generator was dropped: set.update
                # accepts any iterable.)
                trained_answers.update(
                    interaction_instance.normalize_answer(confirmed)
                    for confirmed in
                    interaction.confirmed_unclassified_answers)

                unhandled_answers = [
                    answer for answer in submitted_answers
                    if answer['answer'] not in trained_answers
                ]
            except Exception as e:
                # Best-effort: normalization failures should not break the
                # page, so log and fall through with an empty list.
                logging.warning(
                    'Error loading untrained answers for interaction %s: %s.' %
                    (interaction.id, e))

        self.render_json({'unhandled_answers': unhandled_answers})
Example #47
0
def _get_exploration_player_data(exploration_id, version, collection_id,
                                 can_edit):
    """Returns a dict of exploration player data.

    Args:
        exploration_id: str. The ID of the exploration.
        version: int or None. The version of the exploration.
        collection_id: str. ID of the collection.
        can_edit: bool. Whether the given user can edit this activity.

    Returns:
        dict. A dict of exploration player data.
        The keys and values of the dict are as follows:
        - 'INTERACTION_SPECS': dict. A dict containing the full specs of each
            interaction. Contains interaction ID and a list of instances of
            all interactions.
        - 'DEFAULT_TWITTER_SHARE_MESSAGE_PLAYER': str. Text for the Twitter
            share message.
        - 'additional_angular_modules': list. A de-duplicated list of strings,
            each representing an additional angular module that should be
            loaded.
        - 'can_edit': bool. Whether the given user can edit this activity.
        - 'dependencies_html': str. The additional HTML to insert on the page.
        - 'exploration_title': str. Title of exploration.
        - 'exploration_version': int. The version of the exploration.
        - 'collection_id': str. ID of the collection.
        - 'collection_title': str. Title of collection.
        - 'interaction_templates': str. The HTML bodies of the interactions
            required by the given exploration ID.
        - 'is_private': bool. Whether the exploration is private or not.
        - 'meta_name': str. Title of exploration.
        - 'meta_description': str. Objective of exploration.

    Raises:
        Exception: The exploration or the collection could not be retrieved.
    """
    try:
        exploration = exp_services.get_exploration_by_id(exploration_id,
                                                         version=version)
    except Exception as e:
        # Re-raise with the original details; a bare 'raise Exception'
        # would discard the reason the exploration could not be loaded.
        raise Exception(e)

    collection_title = None
    if collection_id:
        try:
            collection = collection_services.get_collection_by_id(
                collection_id)
            collection_title = collection.title
        except Exception as e:
            # Preserve the underlying failure details for debuggability.
            raise Exception(e)

    version = exploration.version

    # TODO(sll): Cache these computations.
    interaction_ids = exploration.get_interaction_ids()
    for interaction_id in feconf.ALLOWED_QUESTION_INTERACTION_IDS:
        if interaction_id not in interaction_ids:
            interaction_ids.append(interaction_id)

    dependency_ids = (interaction_registry.Registry.
                      get_deduplicated_dependency_ids(interaction_ids))
    dependencies_html, additional_angular_modules = (
        dependency_registry.Registry.get_deps_html_and_angular_modules(
            dependency_ids))

    interaction_templates = (
        interaction_registry.Registry.get_interaction_html(interaction_ids))

    return {
        'INTERACTION_SPECS':
        interaction_registry.Registry.get_all_specs(),
        'DEFAULT_TWITTER_SHARE_MESSAGE_PLAYER':
        (DEFAULT_TWITTER_SHARE_MESSAGE_PLAYER.value),
        'additional_angular_modules':
        additional_angular_modules,
        'can_edit':
        can_edit,
        'dependencies_html':
        jinja2.utils.Markup(dependencies_html),
        'exploration_title':
        exploration.title,
        'exploration_version':
        version,
        'collection_id':
        collection_id,
        'collection_title':
        collection_title,
        'interaction_templates':
        jinja2.utils.Markup(interaction_templates),
        'is_private':
        rights_manager.is_exploration_private(exploration_id),
        # Note that this overwrites the value in base.py.
        'meta_name':
        exploration.title,
        # Note that this overwrites the value in base.py.
        'meta_description':
        utils.capitalize_string(exploration.objective),
    }
Example #48
0
    def get(self, exploration_id, escaped_state_name):
        """Handles GET requests.

        Args:
            exploration_id: str. The ID of the exploration.
            escaped_state_name: str. The URL-escaped name of the state whose
                unhandled answers are requested.
        """
        try:
            exploration = exp_services.get_exploration_by_id(exploration_id)
        # Catch Exception rather than using a bare 'except:', which would
        # also swallow KeyboardInterrupt and SystemExit.
        except Exception:
            raise self.PageNotFoundException

        state_name = self.unescape_state_name(escaped_state_name)
        if state_name not in exploration.states:
            # If trying to access a non-existing state, there is no training
            # data associated with it.
            self.render_json({'unhandled_answers': []})
            return

        state = exploration.states[state_name]

        # TODO(bhenning): Answers should be bound to a particular exploration
        # version or interaction ID.

        # TODO(bhenning): If the top 100 answers have already been classified,
        # then this handler will always return an empty list.

        # TODO(bhenning): This entire function will not work as expected until
        # the answers storage backend stores answers in a non-lossy way.
        # Currently, answers are stored as HTML strings and they are not able
        # to be converted back to the original objects they started as, so the
        # normalization calls in this function will not work correctly on those
        # strings. Once this happens, this handler should also be tested.

        NUMBER_OF_TOP_ANSWERS_PER_RULE = 50

        # The total number of possible answers is 100 because it requests the
        # top 50 answers matched to the default rule and the top 50 answers
        # matched to a fuzzy rule individually.
        answers = stats_services.get_top_state_rule_answers(
            exploration_id, state_name,
            [exp_domain.DEFAULT_RULESPEC_STR, rule_domain.FUZZY_RULE_TYPE],
            NUMBER_OF_TOP_ANSWERS_PER_RULE)

        interaction = state.interaction
        unhandled_answers = []
        if feconf.SHOW_TRAINABLE_UNRESOLVED_ANSWERS and interaction.id:
            interaction_instance = (
                interaction_registry.Registry.get_interaction_by_id(
                    interaction.id))

            try:
                # Normalize the answers.
                for answer in answers:
                    answer['value'] = interaction_instance.normalize_answer(
                        answer['value'])

                trained_answers = set()
                for answer_group in interaction.answer_groups:
                    for rule_spec in answer_group.rule_specs:
                        if rule_spec.rule_type == rule_domain.FUZZY_RULE_TYPE:
                            trained_answers.update(
                                interaction_instance.normalize_answer(trained)
                                for trained in
                                rule_spec.inputs['training_data'])

                # Include all the answers which have been confirmed to be
                # associated with the default outcome.
                trained_answers.update(
                    set(
                        interaction_instance.normalize_answer(confirmed)
                        for confirmed in
                        interaction.confirmed_unclassified_answers))

                unhandled_answers = [
                    answer for answer in answers
                    if answer['value'] not in trained_answers
                ]
            except Exception as e:
                logging.warning(
                    'Error loading untrained answers for interaction %s: %s.' %
                    (interaction.id, e))

        self.render_json({'unhandled_answers': unhandled_answers})
Example #49
0
    def get(self, exploration_id):
        """Populates the data on the individual exploration page.

        Args:
            exploration_id: str. The ID of the exploration.
        """
        version_str = self.request.get('v')
        version = int(version_str) if version_str else None

        try:
            exploration = exp_services.get_exploration_by_id(
                exploration_id, version=version)
        except Exception as e:
            raise self.PageNotFoundException(e)

        exploration_rights = rights_manager.get_exploration_rights(
            exploration_id, strict=False)
        user_settings = user_services.get_user_settings(self.user_id)

        preferred_audio_language_code = (
            user_settings.preferred_audio_language_code
            if user_settings is not None else None)

        # Retrieve all classifiers for the exploration; jobs are returned in
        # the same order as the state names that were requested.
        state_names = exploration.states.keys()
        classifier_training_jobs = (
            classifier_services.get_classifier_training_jobs(
                exploration_id, exploration.version, state_names))
        state_classifier_mapping = {}
        for state_name, job in zip(state_names, classifier_training_jobs):
            if job is None:
                continue
            state_classifier_mapping[state_name] = {
                'algorithm_id': job.algorithm_id,
                'classifier_data': job.classifier_data,
                'data_schema_version': job.data_schema_version,
            }

        self.values.update({
            'can_edit': rights_manager.check_can_edit_activity(
                self.user, exploration_rights),
            'exploration': exploration.to_player_dict(),
            'exploration_id': exploration_id,
            'is_logged_in': bool(self.user_id),
            'session_id': utils.generate_new_session_id(),
            'version': exploration.version,
            'preferred_audio_language_code': preferred_audio_language_code,
            'state_classifier_mapping': state_classifier_mapping,
            'auto_tts_enabled': exploration.auto_tts_enabled,
            'correctness_feedback_enabled': (
                exploration.correctness_feedback_enabled),
            'record_playthrough_probability': (
                config_domain.RECORD_PLAYTHROUGH_PROBABILITY.value),
        })
        self.render_json(self.values)
Example #50
0
    def get(self, exploration_id):
        """Handles GET requests for the exploration editor page.

        Args:
            exploration_id: str. The ID of the exploration to edit.
        """
        if exploration_id in feconf.DISABLED_EXPLORATION_IDS:
            self.render_template('error/disabled_exploration.html',
                                 iframe_restriction=None)
            return

        # Construct the Actor once instead of once per permission check
        # below; the original body built nine identical instances.
        actor = rights_manager.Actor(self.user_id)

        exploration = exp_services.get_exploration_by_id(exploration_id,
                                                         strict=False)
        if (exploration is None
                or not actor.can_view(
                    feconf.ACTIVITY_TYPE_EXPLORATION, exploration_id)):
            self.redirect('/')
            return

        can_edit = (bool(self.user_id) and self.username
                    not in config_domain.BANNED_USERNAMES.value
                    and actor.can_edit(
                        feconf.ACTIVITY_TYPE_EXPLORATION, exploration_id))

        interaction_ids = (
            interaction_registry.Registry.get_all_interaction_ids())

        interaction_dependency_ids = (
            interaction_registry.Registry.get_deduplicated_dependency_ids(
                interaction_ids))
        dependencies_html, additional_angular_modules = (
            dependency_registry.Registry.get_deps_html_and_angular_modules(
                interaction_dependency_ids + self.EDITOR_PAGE_DEPENDENCY_IDS))

        interaction_templates = (
            rte_component_registry.Registry.get_html_for_all_components() +
            interaction_registry.Registry.get_interaction_html(interaction_ids)
        )

        gadget_types = gadget_registry.Registry.get_all_gadget_types()
        gadget_templates = (
            gadget_registry.Registry.get_gadget_html(gadget_types))

        self.values.update({
            'GADGET_SPECS':
            gadget_registry.Registry.get_all_specs(),
            'INTERACTION_SPECS':
            interaction_registry.Registry.get_all_specs(),
            'PANEL_SPECS':
            feconf.PANELS_PROPERTIES,
            'DEFAULT_OBJECT_VALUES':
            obj_services.get_default_object_values(),
            'DEFAULT_TWITTER_SHARE_MESSAGE_EDITOR':
            (DEFAULT_TWITTER_SHARE_MESSAGE_EDITOR.value),
            'additional_angular_modules':
            additional_angular_modules,
            'can_delete':
            actor.can_delete(
                feconf.ACTIVITY_TYPE_EXPLORATION, exploration_id),
            'can_edit':
            can_edit,
            'can_modify_roles':
            actor.can_modify_roles(
                feconf.ACTIVITY_TYPE_EXPLORATION, exploration_id),
            'can_publicize':
            actor.can_publicize(
                feconf.ACTIVITY_TYPE_EXPLORATION, exploration_id),
            'can_publish':
            actor.can_publish(
                feconf.ACTIVITY_TYPE_EXPLORATION, exploration_id),
            'can_release_ownership':
            actor.can_release_ownership(
                feconf.ACTIVITY_TYPE_EXPLORATION, exploration_id),
            'can_unpublicize':
            actor.can_unpublicize(
                feconf.ACTIVITY_TYPE_EXPLORATION, exploration_id),
            'can_unpublish':
            actor.can_unpublish(
                feconf.ACTIVITY_TYPE_EXPLORATION, exploration_id),
            'dependencies_html':
            jinja2.utils.Markup(dependencies_html),
            'gadget_templates':
            jinja2.utils.Markup(gadget_templates),
            'interaction_templates':
            jinja2.utils.Markup(interaction_templates),
            'meta_description':
            feconf.CREATE_PAGE_DESCRIPTION,
            'nav_mode':
            feconf.NAV_MODE_CREATE,
            'value_generators_js':
            jinja2.utils.Markup(get_value_generators_js()),
            'title':
            exploration.title,
            'ALL_LANGUAGE_CODES':
            feconf.ALL_LANGUAGE_CODES,
            'ALLOWED_GADGETS':
            feconf.ALLOWED_GADGETS,
            'ALLOWED_INTERACTION_CATEGORIES':
            (feconf.ALLOWED_INTERACTION_CATEGORIES),
            'INVALID_PARAMETER_NAMES':
            feconf.INVALID_PARAMETER_NAMES,
            'NEW_STATE_TEMPLATE':
            NEW_STATE_TEMPLATE,
            'SHOW_TRAINABLE_UNRESOLVED_ANSWERS':
            (feconf.SHOW_TRAINABLE_UNRESOLVED_ANSWERS),
            'TAG_REGEX':
            feconf.TAG_REGEX,
        })

        self.render_template('exploration_editor/exploration_editor.html')
Example #51
0
    def test_basic_computation_ignores_automated_exploration_commits(self):
        """Checks that a commit produced by the automated migration job does
        not surface as a new dashboard notification, while a subsequent
        human-authored commit does.
        """
        with self._get_test_context():
            self.save_new_exp_with_states_schema_v0(EXP_ID, USER_ID, EXP_TITLE)

            # Confirm that the exploration is at version 1.
            exploration = exp_services.get_exploration_by_id(EXP_ID)
            self.assertEqual(exploration.version, 1)

            v1_last_updated_ms = (
                self._get_most_recent_exp_snapshot_created_on_ms(EXP_ID))

            # Start migration job on all explorations, including this one.
            job_id = (
                exp_jobs_one_off.ExplorationMigrationJobManager.create_new())
            exp_jobs_one_off.ExplorationMigrationJobManager.enqueue(job_id)
            self.process_and_flush_pending_tasks()

            # Confirm that the exploration is at version 2.
            exploration = exp_services.get_exploration_by_id(EXP_ID)
            self.assertEqual(exploration.version, 2)

            v2_last_updated_ms = (
                self._get_most_recent_exp_snapshot_created_on_ms(EXP_ID))

            # Run the aggregator.
            ModifiedRecentUpdatesAggregator.start_computation()
            self.assertEqual(
                self.count_jobs_in_taskqueue(
                    taskqueue_services.QUEUE_NAME_CONTINUOUS_JOBS), 1)
            self.process_and_flush_pending_tasks()
            ModifiedRecentUpdatesAggregator.stop_computation(USER_ID)

            # Only the original creation event should be reported; the
            # migration commit (which produced v2) must be ignored, so the
            # notification timestamp predates the v2 snapshot.
            recent_notifications = (ModifiedRecentUpdatesAggregator.
                                    get_recent_notifications(USER_ID)[1])
            self.assertEqual(len(recent_notifications), 1)
            self.assertEqual(
                recent_notifications[0],
                self._get_expected_activity_created_dict(
                    USER_ID, EXP_ID, EXP_TITLE, 'exploration',
                    feconf.UPDATE_TYPE_EXPLORATION_COMMIT, v1_last_updated_ms))
            self.assertLess(recent_notifications[0]['last_updated_ms'],
                            v2_last_updated_ms)

            # Another user makes a commit; this one should now show up in the
            # original user's dashboard.
            exp_services.update_exploration(ANOTHER_USER_ID, EXP_ID, [],
                                            'Update exploration')
            v3_last_updated_ms = (
                self._get_most_recent_exp_snapshot_created_on_ms(EXP_ID))

            ModifiedRecentUpdatesAggregator.start_computation()
            self.assertEqual(
                self.count_jobs_in_taskqueue(
                    taskqueue_services.QUEUE_NAME_CONTINUOUS_JOBS), 1)
            self.process_and_flush_pending_tasks()

            # The human-authored commit replaces the creation event as the
            # single notification shown to the original user.
            recent_notifications = (ModifiedRecentUpdatesAggregator.
                                    get_recent_notifications(USER_ID)[1])
            self.assertEqual([{
                'type': feconf.UPDATE_TYPE_EXPLORATION_COMMIT,
                'last_updated_ms': v3_last_updated_ms,
                'activity_id': EXP_ID,
                'activity_title': EXP_TITLE,
                'author_id': ANOTHER_USER_ID,
                'subject': 'Update exploration',
            }], recent_notifications)
Example #52
0
    def setUp(self):
        """Creates three users, a multi-state exploration owned by the
        editor, and six suggestions against various exploration versions and
        states (including empty, whitespace-only and non-ASCII suggestion
        values) for the suggestion integration tests to act on.
        """
        super(SuggestionsIntegrationTests, self).setUp()

        # Register users.
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
        self.signup(self.VIEWER_EMAIL, self.VIEWER_USERNAME)
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        self.editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL)
        self.viewer_id = self.get_user_id_from_email(self.VIEWER_EMAIL)

        # Load exploration 0.
        exp_services.delete_demo(self.EXP_ID)
        exp_services.load_demo(self.EXP_ID)

        # Login and create exploration and suggestions.
        self.login(self.EDITOR_EMAIL)

        # Create exploration.
        self.save_new_valid_exploration(
            self.EXP_ID,
            self.editor_id,
            title='Exploration for suggestions',
            category='This is just a test category',
            objective='Test a suggestion.')

        # Add three extra states with TextInput interactions and make the
        # initial state loop back to itself, then publish and share editing
        # rights with the owner.
        exploration = exp_services.get_exploration_by_id(self.EXP_ID)
        init_state = exploration.states[exploration.init_state_name]
        init_interaction = init_state.interaction
        init_interaction.default_outcome.dest = exploration.init_state_name
        exploration.add_states(['State A', 'State 2', 'State 3'])
        exploration.states['State A'].update_interaction_id('TextInput')
        exploration.states['State 2'].update_interaction_id('TextInput')
        exploration.states['State 3'].update_interaction_id('TextInput')
        exp_services._save_exploration(self.editor_id, exploration, '', [])  # pylint: disable=protected-access
        rights_manager.publish_exploration(self.editor_id, self.EXP_ID)
        rights_manager.assign_role_for_exploration(self.editor_id, self.EXP_ID,
                                                   self.owner_id,
                                                   rights_manager.ROLE_EDITOR)

        # A CSRF token from the exploration page is needed to POST
        # suggestions below.
        response = self.testapp.get('/explore/%s' % self.EXP_ID)
        csrf_token = self.get_csrf_token_from_response(response)

        # Create suggestions.
        self.post_json(
            '%s/%s' % (feconf.SUGGESTION_URL_PREFIX, self.EXP_ID), {
                'exploration_version': 3,
                'state_name': u'State A',
                'description': u'Suggestion for state A.',
                'suggestion_content': {
                    'type': 'text',
                    'value': u'new accepted suggestion for state A'
                },
            }, csrf_token)
        self.post_json(
            '%s/%s' % (feconf.SUGGESTION_URL_PREFIX, self.EXP_ID), {
                'exploration_version': 1,
                'state_name': u'State 2',
                'description': u'A new value.',
                'suggestion_content': {
                    'type': 'text',
                    'value': 'some new value'
                },
            }, csrf_token)
        self.post_json(
            '%s/%s' % (feconf.SUGGESTION_URL_PREFIX, self.EXP_ID), {
                'exploration_version': 2,
                'state_name': u'State 3',
                'description': u'Empty suggestion',
                'suggestion_content': {
                    'type': 'text',
                    'value': ''
                },
            }, csrf_token)
        self.post_json(
            '%s/%s' % (feconf.SUGGESTION_URL_PREFIX, self.EXP_ID), {
                'exploration_version': 2,
                'state_name': u'State A',
                'description': u'Just a space.',
                'suggestion_content': {
                    'type': 'text',
                    'value': ' '
                },
            }, csrf_token)
        self.post_json(
            '%s/%s' % (feconf.SUGGESTION_URL_PREFIX, self.EXP_ID), {
                'exploration_version': 1,
                'state_name': u'State 2',
                'description': u'Random characters.',
                'suggestion_content': {
                    'type': 'text',
                    'value': '#!$%'
                },
            }, csrf_token)
        self.post_json(
            '%s/%s' % (feconf.SUGGESTION_URL_PREFIX, self.EXP_ID), {
                'exploration_version': 2,
                'state_name': u'State 3',
                'description': u'Very bizarre characters.',
                'suggestion_content': {
                    'type': 'text',
                    'value': u'Ֆݓॵক'
                },
            }, csrf_token)
        self.logout()
Example #53
0
    def put(self, exploration_id):
        """Updates the editing rights for the given exploration.

        Exactly one of the payload fields (new_member_username, is_public,
        is_publicized, is_community_owned, viewable_if_private) is acted
        upon per request.

        Args:
            exploration_id: str. The ID of the exploration.

        Raises:
            UnauthorizedUserException: The user may not modify roles.
            InvalidInputException: The exploration fails strict validation,
                or no recognized change was requested.
        """
        # Fetch the exploration once; the branches below reuse this instance
        # instead of re-fetching it (nothing modifies it in the interim).
        exploration = exp_services.get_exploration_by_id(exploration_id)
        version = self.payload.get('version')
        _require_valid_version(version, exploration.version)

        is_public = self.payload.get('is_public')
        is_publicized = self.payload.get('is_publicized')
        is_community_owned = self.payload.get('is_community_owned')
        new_member_username = self.payload.get('new_member_username')
        new_member_role = self.payload.get('new_member_role')
        viewable_if_private = self.payload.get('viewable_if_private')

        if new_member_username:
            if not rights_manager.Actor(self.user_id).can_modify_roles(
                    feconf.ACTIVITY_TYPE_EXPLORATION, exploration_id):
                raise self.UnauthorizedUserException(
                    'Only an owner of this exploration can add or change '
                    'roles.')

            new_member_id = user_services.get_user_id_from_username(
                new_member_username)
            if new_member_id is None:
                raise Exception('Sorry, we could not find the specified user.')

            rights_manager.assign_role_for_exploration(self.user_id,
                                                       exploration_id,
                                                       new_member_id,
                                                       new_member_role)
            email_manager.send_role_notification_email(self.user_id,
                                                       new_member_id,
                                                       new_member_role,
                                                       exploration_id,
                                                       exploration.title)

        elif is_public is not None:
            if is_public:
                # Publishing requires the exploration to pass strict
                # validation.
                try:
                    exploration.validate(strict=True)
                except utils.ValidationError as e:
                    raise self.InvalidInputException(e)

                exp_services.publish_exploration_and_update_user_profiles(
                    self.user_id, exploration_id)
                exp_services.index_explorations_given_ids([exploration_id])
            else:
                rights_manager.unpublish_exploration(self.user_id,
                                                     exploration_id)
                exp_services.delete_documents_from_search_index(
                    [exploration_id])

        elif is_publicized is not None:
            if is_publicized:
                try:
                    exploration.validate(strict=True)
                except utils.ValidationError as e:
                    raise self.InvalidInputException(e)

                rights_manager.publicize_exploration(self.user_id,
                                                     exploration_id)
            else:
                rights_manager.unpublicize_exploration(self.user_id,
                                                       exploration_id)

        elif is_community_owned:
            try:
                exploration.validate(strict=True)
            except utils.ValidationError as e:
                raise self.InvalidInputException(e)

            rights_manager.release_ownership_of_exploration(
                self.user_id, exploration_id)

        elif viewable_if_private is not None:
            rights_manager.set_private_viewability_of_exploration(
                self.user_id, exploration_id, viewable_if_private)

        else:
            raise self.InvalidInputException(
                'No change was made to this exploration.')

        self.render_json({
            'rights':
            rights_manager.get_exploration_rights(exploration_id).to_dict()
        })
Example #54
0
    def setUp(self):
        """Creates an exploration from a YAML fixture with ML classifiers
        enabled, marks its single training job as pending, and prepares the
        signed VM payload used to submit trained classifier data.
        """
        super(TrainedClassifierHandlerTest, self).setUp()

        self.exp_id = 'exp_id1'
        self.title = 'Testing Classifier storing'
        self.category = 'Test'
        yaml_path = os.path.join(feconf.TESTS_DATA_DIR,
                                 'string_classifier_test.yaml')
        with open(yaml_path, 'r') as yaml_file:
            self.yaml_content = yaml_file.read()

        # Saving the exploration with ML classifiers enabled is what creates
        # the classifier training job fetched below.
        assets_list = []
        with self.swap(feconf, 'ENABLE_ML_CLASSIFIERS', True):
            exp_services.save_new_exploration_from_yaml_and_assets(
                feconf.SYSTEM_COMMITTER_ID, self.yaml_content, self.exp_id,
                assets_list)
        self.exploration = exp_services.get_exploration_by_id(self.exp_id)

        # Dummy trained-model payload mirroring the string classifier's
        # internal parameters.
        self.classifier_data = {
            '_alpha': 0.1,
            '_beta': 0.001,
            '_prediction_threshold': 0.5,
            '_training_iterations': 25,
            '_prediction_iterations': 5,
            '_num_labels': 10,
            '_num_docs': 12,
            '_num_words': 20,
            '_label_to_id': {
                'text': 1
            },
            '_word_to_id': {
                'hello': 2
            },
            '_w_dp': [],
            '_b_dl': [],
            '_l_dp': [],
            '_c_dl': [],
            '_c_lw': [],
            '_c_l': []
        }
        classifier_training_jobs = (
            classifier_services.get_classifier_training_jobs(
                self.exp_id, self.exploration.version, ['Home']))
        self.assertEqual(len(classifier_training_jobs), 1)
        classifier_training_job = classifier_training_jobs[0]
        self.job_id = classifier_training_job.job_id

        # TODO(pranavsid98): Replace the three commands below with
        # mark_training_job_pending after Giritheja's PR gets merged.
        classifier_training_job_model = (
            classifier_models.ClassifierTrainingJobModel.get(self.job_id,
                                                             strict=False))
        classifier_training_job_model.status = (
            feconf.TRAINING_JOB_STATUS_PENDING)
        classifier_training_job_model.put()

        self.job_result_dict = {
            'job_id': self.job_id,
            'classifier_data': self.classifier_data,
        }

        # The VM authenticates its result submission with an HMAC-style
        # signature over the message, using the shared secret.
        self.payload = {}
        self.payload['vm_id'] = feconf.DEFAULT_VM_ID
        self.payload['message'] = self.job_result_dict
        secret = feconf.DEFAULT_VM_SHARED_SECRET
        self.payload['signature'] = classifier.generate_signature(
            secret, self.payload['message'])
Example #55
0
    def _run_batch_job_once_and_verify_output(
            self, exp_specs,
            default_title='A title',
            default_category='A category',
            default_status=rights_manager.ACTIVITY_STATUS_PUBLIC):
        """Run batch job for creating exploration summaries once and verify its
        output. exp_specs is a list of dicts with exploration specifications.
        Allowed keys are category, status, title. If a key is not specified,
        the default value is used.
        """
        with self.swap(
            jobs_registry, 'ONE_OFF_JOB_MANAGERS',
            self.ONE_OFF_JOB_MANAGERS_FOR_TESTS
            ):

            default_spec = {
                'title': default_title,
                'category': default_category,
                'status': default_status
            }

            self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
            self.login(self.ADMIN_EMAIL)
            admin_id = self.get_user_id_from_email(self.ADMIN_EMAIL)
            self.set_admins([self.ADMIN_USERNAME])
            admin = user_services.UserActionsInfo(admin_id)

            # Create and delete an exploration (to make sure job handles
            # deleted explorations correctly).
            exp_id = '100'
            self.save_new_valid_exploration(
                exp_id,
                admin_id,
                title=default_spec['title'],
                category=default_spec['category'])
            exploration = exp_services.get_exploration_by_id(exp_id)
            exp_services.delete_exploration(admin_id, exp_id)

            # Get dummy explorations.
            num_exps = len(exp_specs)
            expected_job_output = {}

            for ind in range(num_exps):
                exp_id = str(ind)
                spec = default_spec
                spec.update(exp_specs[ind])
                self.save_new_valid_exploration(
                    exp_id,
                    admin_id,
                    title=spec['title'],
                    category=spec['category'])
                exploration = exp_services.get_exploration_by_id(exp_id)

                # Publish exploration.
                if spec['status'] == rights_manager.ACTIVITY_STATUS_PUBLIC:
                    rights_manager.publish_exploration(admin, exp_id)

                # Do not include user_id here, so all explorations are not
                # editable for now (will be updated depending on user_id
                # in galleries)
                exp_rights_model = exp_models.ExplorationRightsModel.get(
                    exp_id)

                exploration = exp_services.get_exploration_by_id(exp_id)
                exploration_model_last_updated = exploration.last_updated
                exploration_model_created_on = exploration.created_on
                first_published_msec = (
                    exp_rights_model.first_published_msec)

                # Manually create the expected summary specifying title,
                # category, etc.
                expected_job_output[exp_id] = exp_domain.ExplorationSummary(
                    exp_id,
                    spec['title'],
                    spec['category'],
                    exploration.objective,
                    exploration.language_code,
                    exploration.tags,
                    feconf.get_empty_ratings(),
                    feconf.EMPTY_SCALED_AVERAGE_RATING,
                    spec['status'],
                    exp_rights_model.community_owned,
                    exp_rights_model.owner_ids,
                    exp_rights_model.editor_ids,
                    exp_rights_model.viewer_ids,
                    [admin_id],
                    {admin_id: 1},
                    exploration.version,
                    exploration_model_created_on,
                    exploration_model_last_updated,
                    first_published_msec)

                # Note: Calling constructor for fields that are not required
                # and have no default value does not work, because
                # unspecified fields will be empty list in
                # expected_job_output but will be unspecified in
                # actual_job_output.
                if exploration.tags:
                    expected_job_output[exp_id].tags = exploration.tags
                if exp_rights_model.owner_ids:
                    expected_job_output[exp_id].owner_ids = (
                        exp_rights_model.owner_ids)
                if exp_rights_model.editor_ids:
                    expected_job_output[exp_id].editor_ids = (
                        exp_rights_model.editor_ids)
                if exp_rights_model.viewer_ids:
                    expected_job_output[exp_id].viewer_ids = (
                        exp_rights_model.viewer_ids)
                if exploration.version:
                    expected_job_output[exp_id].version = (
                        exploration.version)

            # Run batch job.
            job_id = (
                exp_jobs_one_off.ExpSummariesCreationOneOffJob.create_new())
            exp_jobs_one_off.ExpSummariesCreationOneOffJob.enqueue(job_id)
            self.process_and_flush_pending_tasks()

            # Get and check job output.
            actual_job_output = exp_services.get_all_exploration_summaries()
            self.assertEqual(
                actual_job_output.keys(), expected_job_output.keys())

            # Note: 'exploration_model_last_updated' is not expected to be the
            # same, because it is now read from the version model representing
            # the exploration's history snapshot, and not the ExplorationModel.
            simple_props = ['id', 'title', 'category', 'objective',
                            'language_code', 'tags', 'ratings', 'status',
                            'community_owned', 'owner_ids',
                            'editor_ids', 'viewer_ids',
                            'contributor_ids', 'contributors_summary',
                            'version', 'exploration_model_created_on']
            for exp_id in actual_job_output:
                for prop in simple_props:
                    self.assertEqual(
                        getattr(actual_job_output[exp_id], prop),
                        getattr(expected_job_output[exp_id], prop))
Example #56
0
    def _get_exploration_data(
            self, exploration_id, apply_draft=False, version=None):
        """Returns a dict describing the given exploration for the editor.

        Args:
            exploration_id: str. The id of the exploration to fetch.
            apply_draft: bool. Whether to apply the user's pending draft
                changes on top of the saved exploration.
            version: int or None. The version of the exploration to fetch.
                Ignored when apply_draft is True.

        Raises:
            PageNotFoundException: The exploration (or the requested
                version of it) could not be retrieved.
        """
        try:
            if apply_draft:
                exploration = exp_services.get_exp_with_draft_applied(
                    exploration_id, self.user_id)
            else:
                exploration = exp_services.get_exploration_by_id(
                    exploration_id, version=version)
        # Use 'except Exception' rather than a bare 'except:' so that
        # system-exiting exceptions (KeyboardInterrupt, SystemExit) are
        # not swallowed and turned into a 404.
        except Exception:
            raise self.PageNotFoundException

        states = {}
        for state_name in exploration.states:
            state_dict = exploration.states[state_name].to_dict()
            # Attach unresolved answers so the editor can surface them
            # alongside each state.
            state_dict['unresolved_answers'] = (
                stats_services.get_top_unresolved_answers_for_default_rule(
                    exploration_id, state_name))
            states[state_name] = state_dict

        exp_user_data = user_models.ExplorationUserDataModel.get(
            self.user_id, exploration_id)
        draft_changes = (
            exp_user_data.draft_change_list
            if exp_user_data and exp_user_data.draft_change_list else None)
        is_version_of_draft_valid = (
            exp_services.is_version_of_draft_valid(
                exploration_id, exp_user_data.draft_change_list_exp_version)
            if exp_user_data and exp_user_data.draft_change_list_exp_version
            else None)

        editor_dict = {
            'category': exploration.category,
            'exploration_id': exploration_id,
            'init_state_name': exploration.init_state_name,
            'language_code': exploration.language_code,
            'objective': exploration.objective,
            'param_changes': exploration.param_change_dicts,
            'param_specs': exploration.param_specs_dict,
            'rights': rights_manager.get_exploration_rights(
                exploration_id).to_dict(),
            'show_state_editor_tutorial_on_load': (
                self.user_id and not self.has_seen_editor_tutorial),
            'skin_customizations': exploration.skin_instance.to_dict()[
                'skin_customizations'],
            'states': states,
            'tags': exploration.tags,
            'title': exploration.title,
            'version': exploration.version,
            'is_version_of_draft_valid': is_version_of_draft_valid,
            'draft_changes': draft_changes,
        }

        return editor_dict
Example #57
0
    def test_two_state_default_hit(self):
        """Checks state-improvement ranking when default rules are hit in
        two states: the state with more default-rule hits ranks higher.
        """
        self.save_new_default_exploration('eid', '*****@*****.**')
        exp = exp_services.get_exploration_by_id('eid')

        FIRST_STATE_NAME = exp.init_state_name
        SECOND_STATE_NAME = 'State 2'
        exp_services.update_exploration('*****@*****.**', 'eid', [{
            'cmd': 'edit_state_property',
            'state_name': FIRST_STATE_NAME,
            'property_name': 'widget_id',
            'new_value': 'TextInput',
        }, {
            'cmd': 'add_state',
            'state_name': SECOND_STATE_NAME,
        }, {
            'cmd': 'edit_state_property',
            'state_name': SECOND_STATE_NAME,
            'property_name': 'widget_id',
            'new_value': 'TextInput',
        }], 'Add new state')

        # Hit the default rule of state 1 once, and the default rule of state 2
        # twice. Note that both rules are self-loops.
        event_services.StartExplorationEventHandler.record(
            'eid', 1, FIRST_STATE_NAME, 'session_id', {},
            feconf.PLAY_TYPE_NORMAL)
        event_services.StateHitEventHandler.record(
            'eid', 1, FIRST_STATE_NAME, 'session_id', {},
            feconf.PLAY_TYPE_NORMAL)
        event_services.AnswerSubmissionEventHandler.record(
            'eid', 1, FIRST_STATE_NAME, self.DEFAULT_RULESPEC_STR, '1')

        for _ in range(2):
            event_services.StateHitEventHandler.record(
                'eid', 1, SECOND_STATE_NAME, 'session_id', {},
                feconf.PLAY_TYPE_NORMAL)
            event_services.AnswerSubmissionEventHandler.record(
                'eid', 1, SECOND_STATE_NAME, self.DEFAULT_RULESPEC_STR, '1')
        ModifiedStatisticsAggregator.start_computation()
        self.process_and_flush_pending_tasks()
        with self.swap(
                stats_jobs_continuous.StatisticsAggregator, 'get_statistics',
                ModifiedStatisticsAggregator.get_statistics):
            states = stats_services.get_state_improvements('eid', 1)
        # assertEqual, not the deprecated assertEquals alias.
        self.assertEqual(states, [{
            'rank': 2,
            'type': 'default',
            'state_name': SECOND_STATE_NAME
        }, {
            'rank': 1,
            'type': 'default',
            'state_name': FIRST_STATE_NAME
        }])

        # Hit the default rule of state 1 two more times.
        for _ in range(2):
            event_services.StateHitEventHandler.record(
                'eid', 1, FIRST_STATE_NAME, 'session_id', {},
                feconf.PLAY_TYPE_NORMAL)
            event_services.AnswerSubmissionEventHandler.record(
                'eid', 1, FIRST_STATE_NAME, self.DEFAULT_RULESPEC_STR, '1')

        with self.swap(
                stats_jobs_continuous.StatisticsAggregator, 'get_statistics',
                ModifiedStatisticsAggregator.get_statistics):
            states = stats_services.get_state_improvements('eid', 1)
        self.assertEqual(states, [{
            'rank': 3,
            'type': 'default',
            'state_name': FIRST_STATE_NAME
        }, {
            'rank': 2,
            'type': 'default',
            'state_name': SECOND_STATE_NAME
        }])
Example #58
0
    def test_exploration_download_handler_for_default_exploration(self):
        """Exercises ZIP and JSON downloads of an exploration through the
        download handler, comparing against golden data.
        """
        self.login(self.EDITOR_EMAIL)
        owner_id = self.get_user_id_from_email(self.EDITOR_EMAIL)

        # Build a small exploration with a few state edits so that the
        # downloaded content is non-trivial.
        exp_id = 'eid'
        self.save_new_valid_exploration(
            exp_id, owner_id,
            title='The title for ZIP download handler test!',
            category='This is just a test category',
            objective='')

        exploration = exp_services.get_exploration_by_id(exp_id)
        init_state = exploration.states[exploration.init_state_name]
        init_state.interaction.default_outcome.dest = (
            exploration.init_state_name)
        exploration.add_states(['State A', 'State 2', 'State 3'])
        for state_name in ('State A', 'State 2', 'State 3'):
            exploration.states[state_name].update_interaction_id('TextInput')
        exploration.rename_state('State 2', 'State B')
        exploration.delete_state('State 3')
        exp_services._save_exploration(  # pylint: disable=protected-access
            owner_id, exploration, '', [])
        self.testapp.get('/create/%s' % exp_id)

        # Download as a zip file using the download handler.
        response = self.testapp.get('/createhandler/download/%s' % exp_id)

        # Verify the downloaded zip file.
        self.assertEqual(response.headers['Content-Type'], 'text/plain')
        filename = 'oppia-ThetitleforZIPdownloadhandlertest!-v2.zip'
        self.assertEqual(
            response.headers['Content-Disposition'],
            'attachment; filename=%s' % str(filename))
        zf_saved = zipfile.ZipFile(StringIO.StringIO(response.body))
        self.assertEqual(
            zf_saved.namelist(),
            ['The title for ZIP download handler test!.yaml'])

        # Load the golden zip file for comparison.
        golden_zip_path = os.path.join(
            feconf.TESTS_DATA_DIR,
            'oppia-ThetitleforZIPdownloadhandlertest!-v2-gold.zip')
        with open(golden_zip_path, 'rb') as f:
            zf_gold = zipfile.ZipFile(StringIO.StringIO(f.read()))

        # The saved yaml must match the golden copy byte-for-byte.
        yaml_name = 'The title for ZIP download handler test!.yaml'
        self.assertEqual(
            zf_saved.open(yaml_name).read(),
            zf_gold.open(yaml_name).read())

        # Now check download as JSON.
        exploration.update_objective('Test JSON download')
        exp_services._save_exploration(  # pylint: disable=protected-access
            owner_id, exploration, '', [])

        # Download as a JSON string using the download handler.
        self.maxDiff = None
        json_download_url = (
            '/createhandler/download/%s?output_format=%s&width=50' %
            (exp_id, feconf.OUTPUT_FORMAT_JSON))
        response = self.get_json(json_download_url)

        # Verify the downloaded dict.
        self.assertEqual(self.SAMPLE_JSON_CONTENT, response)

        self.logout()
Example #59
0
    def test_multiple_computations_in_one_job(self):
        """Verifies that one aggregator run produces output for every
        calculation registered on the same interaction.
        """
        with self.swap(
                jobs_registry, 'ALL_CONTINUOUS_COMPUTATION_MANAGERS',
                self.ALL_CC_MANAGERS_FOR_TESTS):

            # Set up an example exploration with two SetInput states.
            exp_id = 'eid'
            exploration = self.save_new_valid_exploration(
                exp_id, '*****@*****.**')
            init_state_name = exploration.init_state_name
            new_state_name = 'State 2'
            change_dicts = [{
                'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
                'state_name': init_state_name,
                'property_name': exp_domain.STATE_PROPERTY_INTERACTION_ID,
                'new_value': 'SetInput',
            }, {
                'cmd': exp_domain.CMD_ADD_STATE,
                'state_name': new_state_name,
            }, {
                'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
                'state_name': new_state_name,
                'property_name': exp_domain.STATE_PROPERTY_INTERACTION_ID,
                'new_value': 'SetInput',
            }]
            exp_services.update_exploration(
                '*****@*****.**', exp_id,
                [exp_domain.ExplorationChange(change_dict)
                 for change_dict in change_dicts],
                'Add new state')
            exploration = exp_services.get_exploration_by_id(exp_id)
            exp_version = exploration.version

            # Record a single answer against the initial state.
            event_services.AnswerSubmissionEventHandler.record(
                exp_id, exp_version, init_state_name, 'SetInput', 0, 0,
                exp_domain.EXPLICIT_CLASSIFICATION, 'session1', 5.0,
                {}, ['answer1', 'answer2'])

            # Run the aggregator job and let its queued task drain.
            MockInteractionAnswerSummariesAggregator.start_computation()
            self.assertEqual(
                self.count_jobs_in_taskqueue(
                    taskqueue_services.QUEUE_NAME_CONTINUOUS_JOBS), 1)
            self.process_and_flush_pending_tasks()
            self.assertEqual(
                self.count_jobs_in_taskqueue(
                    taskqueue_services.QUEUE_NAME_CONTINUOUS_JOBS), 0)

            # Fetch the outputs of both computations registered on this
            # interaction.
            answer_frequencies_model = self._get_calc_output_model(
                exp_id, init_state_name, 'Top10AnswerFrequencies')
            self.assertEqual(
                'Top10AnswerFrequencies',
                answer_frequencies_model.calculation_id)

            common_elements_model = self._get_calc_output_model(
                exp_id, init_state_name, 'FrequencyCommonlySubmittedElements')
            self.assertEqual(
                'FrequencyCommonlySubmittedElements',
                common_elements_model.calculation_id)

            # Each calculation must have produced its own output.
            self.assertEqual(
                answer_frequencies_model.calculation_output,
                [{
                    'answer': ['answer1', 'answer2'],
                    'frequency': 1
                }])
            self.assertEqual(
                common_elements_model.calculation_output,
                [{
                    'answer': 'answer1',
                    'frequency': 1
                }, {
                    'answer': 'answer2',
                    'frequency': 1
                }])
Example #60
0
    def reduce(key, stringified_values):
        """Aggregates statistics for one exploration (or one version of it)
        and persists them as an ExplorationAnnotationsModel.

        Args:
            key: str. '<exploration_id>:<version>', where version is either
                a version string or one of the special markers
                _NO_SPECIFIED_VERSION_STRING / _ALL_VERSIONS_STRING.
            stringified_values: list(str). Stringified dicts, each holding
                either an old-style state-counter record or a single event
                (start / complete / maybe-leave / state-hit).
        """
        # Imported inside the function, presumably to avoid a circular
        # import at module load time — confirm before moving to file level.
        from core.domain import exp_services
        exploration = None
        (exp_id, version) = key.split(':')
        try:
            if version not in [
                    _NO_SPECIFIED_VERSION_STRING, _ALL_VERSIONS_STRING
            ]:
                exploration = exp_services.get_exploration_by_id(
                    exp_id, version=version)
            else:
                exploration = exp_services.get_exploration_by_id(exp_id)
        except base_models.BaseModel.EntityNotFoundError:
            # Skip explorations that have been deleted.
            return

        # Number of times exploration was started
        new_models_start_count = 0
        # Number of times exploration was completed
        new_models_complete_count = 0
        # Session ids that have completed this state
        new_models_end_sessions = set()
        # {session_id: (created-on timestamp of last known maybe leave event,
        # state_name)}
        session_id_to_latest_leave_event = collections.defaultdict(lambda:
                                                                   (0, ''))
        old_models_start_count = 0
        old_models_complete_count = 0

        # {state_name: {'total_entry_count': ...,
        #               'first_entry_count': ...,
        #               'no_answer_count': ...}}
        state_hit_counts = collections.defaultdict(
            lambda: collections.defaultdict(int))
        for state_name in exploration.states:
            state_hit_counts[state_name] = {
                'total_entry_count': 0,
                'first_entry_count': 0,
                'no_answer_count': 0,
            }

        # {state_name: set(session ids that have reached this state)}
        state_session_ids = collections.defaultdict(set)
        for state_name in exploration.states:
            state_session_ids[state_name] = set([])

        # Iterate over and process each event for this exploration.
        for value_str in stringified_values:
            value = ast.literal_eval(value_str)

            state_name = value['state_name']

            # Convert the state name to unicode, if necessary.
            # Note: sometimes, item.state_name is None for
            # StateHitEventLogEntryModel.
            # TODO(sll): Track down the reason for this, and fix it.
            if (state_name is not None
                    and not isinstance(state_name, unicode)):
                state_name = state_name.decode('utf-8')

            if (value['type'] ==
                    StatisticsMRJobManager._TYPE_STATE_COUNTER_STRING):
                # NOTE(review): this first 'if' is not chained with the
                # following 'if/else', so a counter row for the init state
                # (when it is not OLD_END_DEST) also falls into the 'else'
                # branch below — confirm this is intended.
                if state_name == exploration.init_state_name:
                    old_models_start_count = value['first_entry_count']
                if state_name == OLD_END_DEST:
                    old_models_complete_count = value['first_entry_count']
                else:
                    state_hit_counts[state_name]['no_answer_count'] += (
                        value['first_entry_count'] +
                        value['subsequent_entries_count'] -
                        value['resolved_answer_count'] -
                        value['active_answer_count'])
                    state_hit_counts[state_name]['first_entry_count'] += (
                        value['first_entry_count'])
                    state_hit_counts[state_name]['total_entry_count'] += (
                        value['first_entry_count'] +
                        value['subsequent_entries_count'])
                continue

            event_type = value['event_type']
            created_on = value['created_on']
            session_id = value['session_id']

            # If this is a start event, increment start count.
            if event_type == feconf.EVENT_TYPE_START_EXPLORATION:
                new_models_start_count += 1
            elif event_type == feconf.EVENT_TYPE_COMPLETE_EXPLORATION:
                new_models_complete_count += 1
                # Track that we have seen a 'real' end for this session id
                new_models_end_sessions.add(session_id)
            elif event_type == feconf.EVENT_TYPE_MAYBE_LEAVE_EXPLORATION:
                # Identify the last learner event for this session.
                latest_timestamp_so_far, _ = (
                    session_id_to_latest_leave_event[session_id])
                if latest_timestamp_so_far < created_on:
                    # NOTE(review): this assignment to the local is never
                    # read again; only the dict update below has effect.
                    latest_timestamp_so_far = created_on
                    session_id_to_latest_leave_event[session_id] = (created_on,
                                                                    state_name)
            # If this is a state hit, increment the total count and record that
            # we have seen this session id.
            elif event_type == feconf.EVENT_TYPE_STATE_HIT:
                state_hit_counts[state_name]['total_entry_count'] += 1
                state_session_ids[state_name].add(session_id)

        # After iterating through all events, take the size of the set of
        # session ids as the first entry count.
        for state_name in state_session_ids:
            state_hit_counts[state_name]['first_entry_count'] += len(
                state_session_ids[state_name])

        # Get the set of session ids that left without completing. This is
        # determined as the set of session ids with maybe-leave events at
        # intermediate states, minus the ones that have a maybe-leave event
        # at the END state.
        leave_states = set(session_id_to_latest_leave_event.keys()).difference(
            new_models_end_sessions)
        for session_id in leave_states:
            # Grab the state name of the state they left on and count that as a
            # 'no answer' for that state.
            (_, state_name) = session_id_to_latest_leave_event[session_id]
            state_hit_counts[state_name]['no_answer_count'] += 1

        num_starts = (old_models_start_count + new_models_start_count)
        num_completions = (old_models_complete_count +
                           new_models_complete_count)

        # Persist the aggregated annotations for this exploration/version.
        stats_models.ExplorationAnnotationsModel.create(
            exp_id, str(version), num_starts, num_completions,
            state_hit_counts)