def transition(self, answer, params, interactive_widget_properties):
    """Handle feedback interactions with readers.

    Args:
        answer: the reader's raw answer to the current widget.
        params: dict of parameters forwarded to the classifier rules.
        interactive_widget_properties: dict with (at least) a 'classifier'
            key naming the classifier module to import, or a falsy value
            when no classification is required.

    Returns:
        A 4-tuple (dest_id, feedback, rule, recorded_answer) where `rule`
        is the rule that ended the loop and `recorded_answer` is the answer
        as it should be logged (for multiple-choice widgets, the text of
        the chosen option rather than its index).

    Raises:
        Exception: if the answer cannot be normalized by the classifier's
            default normalizer.
    """
    dest_id = None
    feedback = None
    recorded_answer = answer

    if interactive_widget_properties['classifier']:
        # Import the relevant classifier module. The module lives at
        # <SAMPLE_CLASSIFIERS_DIR>/<name>/<name>, hence the repeated
        # final path segment.
        classifier_module = '.'.join([
            feconf.SAMPLE_CLASSIFIERS_DIR.replace('/', '.'),
            interactive_widget_properties['classifier'],
            interactive_widget_properties['classifier']])
        Classifier = importlib.import_module(classifier_module)
        logging.info(Classifier.__name__)

        norm_answer = Classifier.DEFAULT_NORMALIZER().normalize(answer)
        if norm_answer is None:
            raise Exception(
                'Invalid input: could not normalize the answer.')

    # Find the handler that responds to answer submission.
    # NOTE(review): if no handler is named 'submit', answer_handler stays
    # None and the loop below raises AttributeError — confirm every widget
    # defines a 'submit' handler.
    answer_handler = None
    for handler in self.widget.handlers:
        if handler.name == 'submit':
            answer_handler = handler

    for ind, rule in enumerate(answer_handler.rules):
        if ind == len(answer_handler.rules) - 1:
            # TODO(sll): This is a special case for multiple-choice input
            # which should really be handled generically. However, it's
            # not very interesting anyway because the reader's answer
            # in this case is already known (it's just the last of the
            # multiple-choice options given).
            if self.widget.widget_id == 'MultipleChoiceInput':
                recorded_answer = (
                    self.widget.params['choices'][int(answer)])
        if rule.name == 'Default':
            # The Default rule always matches.
            dest_id = rule.dest
            feedback = (utils.get_random_choice(rule.feedback)
                        if rule.feedback else '')
            break
        # NOTE(review): norm_answer is only bound when the classifier
        # branch above ran; reaching here without a classifier would raise
        # NameError — presumably non-Default rules imply a classifier;
        # verify. Also note `handler.name` leaks from the handler-search
        # loop above and is the *last* handler's name, not necessarily
        # 'submit'; answer_handler.name looks like what was intended —
        # TODO confirm.
        func_name, param_list = self.get_classifier_info(
            self.widget.widget_id, handler.name, rule, params)
        param_list = [norm_answer] + param_list
        classifier_output = getattr(Classifier, func_name)(*param_list)
        return_value, unused_return_data = (
            utils.normalize_classifier_return(classifier_output))
        if return_value:
            dest_id = rule.dest
            feedback = (utils.get_random_choice(rule.feedback)
                        if rule.feedback else '')
            break

    # `rule` is the loop variable: the rule that caused the break (or the
    # final rule if the loop ran to completion).
    return dest_id, feedback, rule, recorded_answer
def transition(self, answer, params, handler_name):
    """Handle feedback interactions with readers.

    Returns a 4-tuple (dest, feedback, selected_rule, recorded_answer).
    Raises an Exception if no handler matches handler_name, or if the
    answer cannot be normalized.
    """
    # TODO(sll): This is a special case for multiple-choice input
    # which should really be handled generically.
    if self.widget.widget_id == 'interactive-MultipleChoiceInput':
        recorded_answer = self.widget.params['choices'][int(answer)]
    else:
        recorded_answer = answer

    matching_handler = next(
        (h for h in self.widget.handlers if h.name == handler_name), None)
    if matching_handler is None:
        raise Exception('No handlers found for %s' % handler_name)

    if matching_handler.classifier is None:
        # Without a classifier there is nothing to match against, so the
        # first rule is taken unconditionally.
        selected_rule = matching_handler.rules[0]
    else:
        # Import the relevant classifier module. It lives at
        # <SAMPLE_CLASSIFIERS_DIR>/<name>/<name>, hence the repeated
        # final segment.
        classifier_path = '%s.%s.%s' % (
            feconf.SAMPLE_CLASSIFIERS_DIR.replace('/', '.'),
            matching_handler.classifier, matching_handler.classifier)
        Classifier = importlib.import_module(classifier_path)

        norm_answer = Classifier.DEFAULT_NORMALIZER().normalize(answer)
        if norm_answer is None:
            raise Exception('Could not normalize %s.' % answer)

        selected_rule = self.find_first_match(
            matching_handler, Classifier, norm_answer, params)

    if selected_rule.feedback:
        feedback = utils.get_random_choice(selected_rule.feedback)
    else:
        feedback = ''
    return selected_rule.dest, feedback, selected_rule, recorded_answer
def post(self, widget_id):
    """Handles POST requests, for parameterized widgets."""
    params = self.payload.get('params', {})
    if isinstance(params, list):
        # Convert the list-of-dicts form into a name -> default_value map.
        params = {item['name']: item['default_value'] for item in params}

    state_params_given = self.payload.get('state_params')
    state_params_dict = {}
    if state_params_given:
        # Pick a random parameter for each key.
        state_params_dict = {
            param['name']: utils.get_random_choice(param['values'])
            for param in state_params_given
        }

    # NOTE: state_params_dict is currently computed but not applied to
    # params here; see the TODO below.
    # TODO(sll): In order to unify this with InteractiveWidgetHandler,
    # we need a convention for which params must be JSONified and which
    # should not. Fix this.
    response = NonInteractiveWidget.get_with_params(widget_id, params)
    self.render_json({
        'widget': response,
        'parent_index': self.request.get('parent_index'),
    })
def post(self, widget_id):
    """Handles POST requests, for parameterized widgets."""
    payload = json.loads(self.request.get('payload'))
    logging.info(payload)

    params = payload.get('params', {})
    if isinstance(params, list):
        # Convert the list-of-dicts form into a name -> default_value map.
        params = {item['name']: item['default_value'] for item in params}

    state_params_given = payload.get('state_params')
    state_params_dict = {}
    if state_params_given:
        # Pick a random parameter for each key.
        state_params_dict = {
            param['name']: utils.get_random_choice(param['values'])
            for param in state_params_given
        }

    rendered_widget = InteractiveWidget.get_with_params(
        widget_id,
        params=utils.parse_dict_with_params(params, state_params_dict))
    self.response.write(json.dumps({'widget': rendered_widget}))
def submit_answer(self, exploration_id, state_name, answer, params=None,
                  unused_exploration_version=None):
    """Submits an answer as an exploration player and returns the
    corresponding dict.

    This function has strong parallels to code in PlayerServices.js which
    has the non-test code to perform the same functionality. This is
    replicated here so backend tests may utilize the functionality of
    PlayerServices.js without being able to access it.

    TODO(bhenning): Replicate this in an end-to-end Protractor test to
    protect against code skew here.
    """
    if params is None:
        params = {}

    exploration = exp_services.get_exploration_by_id(exploration_id)
    old_state = exploration.states[state_name]

    # Step 1: the answer must be classified.
    classify_result = self.post_json(
        '/explorehandler/classify/%s' % exploration_id, {
            'old_state': old_state.to_dict(),
            'params': params,
            'answer': answer
        })

    # Step 2: ensure the submission is recorded.
    submission_payload = {
        'answer': answer,
        'params': params,
        'version': exploration.version,
        'old_state_name': state_name,
        'answer_group_index': classify_result['answer_group_index'],
        'rule_spec_index': classify_result['rule_spec_index']
    }
    self.post_json(
        '/explorehandler/answer_submitted_event/%s' % exploration_id,
        submission_payload)

    # Step 3: the next state's data must be calculated.
    outcome = classify_result['outcome']
    new_state = exploration.states[outcome['dest']]
    params['answer'] = answer
    new_params = self.get_updated_param_dict(
        params, new_state.param_changes, exploration.param_specs)

    feedback_list = outcome['feedback']
    raw_feedback = (
        utils.get_random_choice(feedback_list) if feedback_list else '')
    return {
        'feedback_html': jinja_utils.parse_string(raw_feedback, params),
        'question_html': new_state.content[0].to_html(new_params),
        'state_name': outcome['dest']
    }
def get(self):
    """Handles GET requests."""
    # Skip the first exploration; users will have seen that already
    # on the main page.
    candidates = exp_services.get_public_explorations()[1:]
    chosen = utils.get_random_choice(candidates)
    self.redirect('/learn/%s' % chosen.id)
def get(self):
    """Handles GET requests."""
    # Note: `== True` is the required NDB query-filter syntax here.
    public_explorations = Exploration.query().filter(
        Exploration.is_public == True).fetch(100)
    # Skip the first exploration; users will have seen that already
    # on the main page.
    chosen = utils.get_random_choice(public_explorations[1:])
    self.redirect('/learn/%s' % chosen.id)
def submit_answer(
        self, exploration_id, state_name, answer, params=None,
        unused_exploration_version=None):
    """Submits an answer as an exploration player and returns the
    corresponding dict.

    This function has strong parallels to code in PlayerServices.js which
    has the non-test code to perform the same functionality. This is
    replicated here so backend tests may utilize the functionality of
    PlayerServices.js without being able to access it.

    TODO(bhenning): Replicate this in an end-to-end Protractor test to
    protect against code skew here.
    """
    params = {} if params is None else params
    exploration = exp_services.get_exploration_by_id(exploration_id)

    # First, the answer must be classified.
    classification = self.post_json(
        '/explorehandler/classify/%s' % exploration_id,
        {
            'old_state': exploration.states[state_name].to_dict(),
            'params': params,
            'answer': answer
        }
    )

    # Next, ensure the submission is recorded.
    self.post_json(
        '/explorehandler/answer_submitted_event/%s' % exploration_id,
        {
            'answer': answer,
            'params': params,
            'version': exploration.version,
            'old_state_name': state_name,
            'answer_group_index': classification['answer_group_index'],
            'rule_spec_index': classification['rule_spec_index']
        }
    )

    # Now the next state's data must be calculated.
    outcome = classification['outcome']
    dest_name = outcome['dest']
    dest_state = exploration.states[dest_name]
    params['answer'] = answer
    updated_params = self.get_updated_param_dict(
        params, dest_state.param_changes, exploration.param_specs)

    if outcome['feedback']:
        feedback_text = utils.get_random_choice(outcome['feedback'])
    else:
        feedback_text = ''

    return {
        'feedback_html': jinja_utils.parse_string(feedback_text, params),
        'question_html': dest_state.content[0].to_html(updated_params),
        'state_name': dest_name
    }
def get_params(self, state, existing_params=None):
    """Updates existing parameters based on changes in the given state.

    Mutates and returns existing_params (a fresh dict if None was given).
    """
    params = {} if existing_params is None else existing_params

    # Modify params using param_changes.
    # TODO(sll): Define this behavior. Currently a new parameter is set
    # only if it doesn't exist, but it might be the case that the parameter
    # should be reset each time the state is entered.
    for change in state.param_changes:
        if change.name in params:
            continue
        # Pick a random parameter for this key.
        params[change.name] = utils.get_random_choice(change.values)
    return params
def transition(self, answer, params, handler):
    """Handle feedback interactions with readers.

    Args:
        answer: the reader's raw answer to the current widget.
        params: dict of parameters forwarded to the classifier rules.
        handler: name of the widget handler to match against.

    Returns:
        A 4-tuple (dest, feedback, rule, recorded_answer).

    Raises:
        Exception: if the answer cannot be normalized.
    """
    recorded_answer = answer

    # Locate the handler whose name matches the requested one.
    # NOTE(review): if no handler matches, answer_handler remains None and
    # the attribute accesses below raise AttributeError — confirm callers
    # always pass a valid handler name.
    answer_handler = None
    for wi_handler in self.widget.handlers:
        if wi_handler.name == handler:
            answer_handler = wi_handler

    if answer_handler.classifier:
        # Import the relevant classifier module. The module lives at
        # <SAMPLE_CLASSIFIERS_DIR>/<name>/<name>, hence the repeated
        # final path segment.
        classifier_module = '.'.join([
            feconf.SAMPLE_CLASSIFIERS_DIR.replace('/', '.'),
            answer_handler.classifier,
            answer_handler.classifier])
        Classifier = importlib.import_module(classifier_module)
        logging.info(Classifier.__name__)

        norm_answer = Classifier.DEFAULT_NORMALIZER().normalize(answer)
        if norm_answer is None:
            raise Exception('Could not normalize %s.' % answer)

    # TODO(sll): This is a special case for multiple-choice input
    # which should really be handled generically.
    if self.widget.widget_id == 'MultipleChoiceInput':
        # Record the text of the chosen option, not its index.
        recorded_answer = self.widget.params['choices'][int(answer)]

    selected_rule = None
    for ind, rule in enumerate(answer_handler.rules):
        if rule.name == 'Default':
            # The Default rule always matches.
            selected_rule = rule
            break
        # NOTE(review): norm_answer and Classifier are only bound when the
        # classifier branch above ran; reaching here without a classifier
        # would raise NameError — presumably non-Default rules imply a
        # classifier; verify.
        func_name, param_list = self.get_classifier_info(
            self.widget.widget_id, answer_handler.name, rule, params)
        param_list = [norm_answer] + param_list
        classifier_output = getattr(Classifier, func_name)(*param_list)
        match, _ = utils.normalize_classifier_return(classifier_output)
        if match:
            selected_rule = rule
            break

    # NOTE(review): if no rule matched and there was no Default rule,
    # selected_rule is None and the lines below raise AttributeError.
    # The return includes `rule` (the loop variable) rather than
    # selected_rule; they are the same object whenever a break occurred,
    # but selected_rule was probably intended — TODO confirm.
    feedback = (utils.get_random_choice(selected_rule.feedback)
                if selected_rule.feedback else '')
    return selected_rule.dest, feedback, rule, recorded_answer
def generate_value(self, context_params, list_of_values):
    """Return a deep copy of one element chosen at random from
    list_of_values.

    context_params is accepted for interface compatibility but is not
    used here.
    """
    chosen = utils.get_random_choice(list_of_values)
    return copy.deepcopy(chosen)
def value(self):
    """Return a random element of self.values, or None when there are no
    values to choose from."""
    candidates = self.values
    if candidates:
        return utils.get_random_choice(candidates)
    return None
def generate_value(self, context_params, list_of_values):
    # Deep-copy the randomly-chosen item so callers can mutate the result
    # without affecting the shared list of candidate values.
    # context_params is unused here.
    random_item = utils.get_random_choice(list_of_values)
    return copy.deepcopy(random_item)
def get_feedback_string(self):
    """Returns a (possibly empty) string with feedback for this rule."""
    if not self.feedback:
        return ''
    return utils.get_random_choice(self.feedback)
def get(self):
    """Handles GET requests."""
    public_explorations = exp_services.get_public_explorations()
    chosen = utils.get_random_choice(public_explorations)
    self.redirect('/learn/%s' % chosen.id)