Example #1
def save_survey(request):
    print(request.POST['survey_idx'])
    print(request.POST['num'])
    row = Answer(survey_idx=request.POST['survey_idx'],
                 num=request.POST['num'])
    row.save()
    return render(request, "survey/success.html")
Example #2
def save_survey(request):
    # Store the question number and answer number in an Answer object
    vo = Answer(survey_idx=request.POST["survey_idx"], num=request.POST["num"])
    # The insert query is executed
    vo.save()
    # Go to success.html
    return render_to_response("success.html")
Example #3
def survey(request):
  user = Resident.objects.get(sunetid=request.user.username)
  if request.method == "POST":
    question = Question.objects.get(id=request.POST['qid'])
    resident = Resident.objects.get(name=request.POST['resident'])
    if question.istwoans:
      resident2 = Resident.objects.get(name=request.POST['resident2'])
    try:
      answer = Answer.objects.get(
          question = question,
          answerer = user)
    except Answer.DoesNotExist:
      answer = Answer(
          question = question,
          answerer = user)
    answer.resident = resident
    if question.istwoans:
      answer.resident2 = resident2
    answer.save()
    return json_response({ 'success': True })
  else:
    questions = Question.objects.all()
    answered_set = user.answered_set.all()
    restored_qs = []
    for question in questions:
      restored_qs.append(RestoredQuestion(question.id, question.qtext,
        question.istwoans))
      matching_ans_set = answered_set.filter(question__exact=question)
      if matching_ans_set:
        restored_qs[-1].prevans = matching_ans_set.get().resident
        if restored_qs[-1].istwoans:
          restored_qs[-1].prevans2 = matching_ans_set.get().resident2
    return render_to_response('survey.html', {'questions': restored_qs, 'feuddatetime': settings.FEUD_DATE_TIME},
        context_instance=RequestContext(request))
Example #4
def get_response_validation_form(question=None):

    if question:
        validators = Answer.get_class(question.answer_type).validators()
    else:
        validators = Answer.validators()
    validation_tests = [(validator.__name__, validator.__name__) for validator in validators]

    class ValidationForm(forms.ModelForm, FormOrderMixin):
        min = forms.IntegerField(required=False)
        max = forms.IntegerField(required=False)
        value = forms.CharField(required=False)
        options = forms.ChoiceField(choices=[], required=False)
        CHOICES = [('', '----------Create Operator----------')]
        CHOICES.extend(validation_tests)
        validation_test = forms.ChoiceField(choices=CHOICES, required=False, label='Operator')

        def __init__(self, *args, **kwargs):
            super(ValidationForm, self).__init__(*args, **kwargs)
            field_order = []
            if question is None:
                field_order.append('test_question')
            field_order.extend(['validation_test', 'options', 'value', 'min', 'max'])
            self.order_fields(field_order)

        def clean(self):
            validation_test = self.cleaned_data.get('validation_test', None)
            test_question = self.cleaned_data.get('test_question', None)
            if validation_test is None or test_question is None:
                return self.cleaned_data
            answer_class = Answer.get_class(test_question.answer_type)
            method = getattr(answer_class, validation_test, None)
            if method is None:
                raise forms.ValidationError(
                    'unsupported validator defined on test question')
            if validation_test == 'between':
                if self.cleaned_data.get(
                        'min', False) is False or self.cleaned_data.get(
                        'max', False) is False:
                    raise forms.ValidationError(
                        'min and max values required for between condition')
            elif self.cleaned_data.get('value', False) is False:
                raise forms.ValidationError(
                    'Value is required for %s' %
                    validation_test)
            if test_question.answer_type in [
                    MultiChoiceAnswer.choice_name(),
                    MultiSelectAnswer.choice_name()]:
                if self.cleaned_data.get('options', False) is False:
                    raise forms.ValidationError(
                        'No option selected for %s' %
                        test_question.identifier)
                self.cleaned_data['value'] = self.cleaned_data['options']
            return self.cleaned_data

        def save(self, *args, **kwargs):
            pass

    return ValidationForm
Example #5
def save_survey(request):
    survey_idx = request.POST["survey_idx"]
    survey_dto = Answer(survey_idx=int(request.POST["survey_idx"]),
                        num=request.POST["num"])
    print("ans:", request.POST["ans"])
    ans = request.POST["ans"]
    survey_dto.save()
    return render(request, "survey/success.html", {"ans": ans})
 def test_contains(self):
     # test on base class. Just test with text
     answer = 'Say Hey'
     self.assertTrue(Answer.contains(answer, 'hey'))
     self.assertFalse(Answer.contains(answer, 'somethign else'))
     # Test Text
     answer = 'Hey'
     self.assertTrue(TextAnswer.contains(answer, 'hey'))
     self.assertFalse(TextAnswer.contains(answer, 'somethign else'))
Example #8
def save_survey(request):
    # Store the question number and answer number in an Answer object.
    survey_idx = request.POST["survey_idx"]
    # survey_survey primary key -> answer value for the question (1:n with survey)
    # ans: content of the selected survey answer
    dto = Answer(survey_idx=int(request.POST["survey_idx"]), ans=request.POST["num"])
    # The insert query is executed
    dto.save()
    return render(request, "survey/success.html")
Example #9
 def clean_min_value(self):
     if (self.cleaned_data['condition'] == 'between') and len(
             self.cleaned_data['min_value'].strip()) == 0:
         raise ValidationError("Field is required.")
     value = self.cleaned_data.get('min_value', '')
     if value:
         Answer.get_class(
             self.question.answer_type).validate_test_value(value)
     return self.cleaned_data.get('min_value', '')
Example #10
 def clean_max_value(self):
     if (self.cleaned_data['condition'] == 'between') and len(
             self.cleaned_data['max_value'].strip()) == 0:
         raise ValidationError("Field is required.")
     value = self.cleaned_data.get('max_value')
     if value:
         Answer.get_class(
             self.question.answer_type).validate_test_value(value)
     return self.cleaned_data.get('max_value', '')
Example #11
def save_survey(request):
    # Store the question number and answer number in an Answer object.
    survey_idx = request.POST["survey_idx"]
    print("타입: ", type(survey_idx))
    dto = Answer(survey_idx=int(request.POST["survey_idx"]),
                 num=request.POST["num"])
    # The insert query is executed (saved to the DB)
    dto.save()
    # Go to success.html
    return render(request, "survey/success.html")
Example #12
 def clean_max_value(self):
     if (self.cleaned_data['condition'] == 'between') and len(self.cleaned_data['max_value'].strip()) == 0:
         raise ValidationError("Field is required.")
     value = self.cleaned_data.get('max_value')
     if value:
         try:
             Answer.get_class(self.question.answer_type)(self.question, value)
         except:
             raise ValidationError("Invalid value.")
     return self.cleaned_data.get('max_value', '')
 def test_prep_values(self):
     # test base answer. For base answer just return value and text values
     answer = 'Test1'
     self.assertEquals(Answer.prep_value(answer).lower(), answer.lower())
     self.assertEquals(Answer.prep_text(answer).lower(), answer.lower())
     # test numerical answer. Confirm zfill normalization
     answer = 1
     self.assertEquals(NumericalAnswer.prep_text(answer), str(answer).zfill(NumericalAnswer.STRING_FILL_LENGTH))
     self.assertEquals(NumericalAnswer.prep_value(answer), str(answer).zfill(NumericalAnswer.STRING_FILL_LENGTH))
     # Confirm the auto response verbose_name (sorry I'm just throwing it in here)
     self.assertEquals(AutoResponse.choice_name(), 'Auto Generated')
Example #14
 def clean_min_value(self):
     if (self.cleaned_data['condition'] == 'between') and len(
             self.cleaned_data['min_value'].strip()) == 0:
         raise ValidationError("Field is required.")
     value = self.cleaned_data.get('min_value', '')
     if value:
         try:
             Answer.get_class(self.question.answer_type)(self.question,
                                                         value)
         except:
             raise ValidationError("Invalid value.")
     return self.cleaned_data.get('min_value', '')
Example #15
    def clean_value(self):

        if self.question.answer_type not in [
                MultiSelectAnswer.choice_name(),
                MultiChoiceAnswer.choice_name()] and self.cleaned_data['condition'] != 'between' and len(
                self.cleaned_data['value'].strip()) == 0:
            raise ValidationError("Field is required.")
        value = self.cleaned_data.get('value', '')
        if value:
            Answer.get_class(
                self.question.answer_type).validate_test_value(value)
        return self.cleaned_data.get('value', '')
Example #16
    def clean_value(self):

        if self.question.answer_type not in [
                MultiSelectAnswer.choice_name(),
                MultiChoiceAnswer.choice_name()
        ] and self.cleaned_data['condition'] != 'between' and len(
                self.cleaned_data['value'].strip()) == 0:
            raise ValidationError("Field is required.")
        value = self.cleaned_data.get('value', '')
        if value:
            Answer.get_class(
                self.question.answer_type).validate_test_value(value)
        return self.cleaned_data.get('value', '')
Example #17
    def clean_value(self):

        if self.question.answer_type not in  [MultiSelectAnswer.choice_name(), MultiChoiceAnswer.choice_name()] \
                            and self.cleaned_data['condition'] != 'between' and len(self.cleaned_data['value'].strip()) == 0:
            raise ValidationError("Field is required.")
        value = self.cleaned_data.get('value', '')
        if value:
            #now check if value is valid answer type
            try:
                Answer.get_class(self.question.answer_type)(self.question, value)
            except:
                raise ValidationError("Invalid value.")
        return self.cleaned_data.get('value', '')
Example #18
    def clean_value(self):

        if self.question.answer_type not in  [MultiSelectAnswer.choice_name(), MultiChoiceAnswer.choice_name()] \
                            and self.cleaned_data['condition'] != 'between' and len(self.cleaned_data['value'].strip()) == 0:
            raise ValidationError("Field is required.")
        value = self.cleaned_data.get('value', '')
        if value:
            #now check if value is valid answer type
            try:
                Answer.get_class(self.question.answer_type)(self.question,
                                                            value)
            except:
                raise ValidationError("Invalid value.")
        return self.cleaned_data.get('value', '')
Example #19
def submit(request):
    post = request.POST
    qu_ids = []
    answer_set = AnswerSet()
    answer_set.save()
    for key in post.keys():
        try:
            qu_id = int(key)
            if post[key] != '':
                question = Question.objects.get(qu_id=qu_id)
                ans = Answer(qu_id=question, value=post[key], answer_set=answer_set)
                ans.save()
        except ValueError:
            continue
    return HttpResponseRedirect(reverse('survey', args=()))
 def test_prep_values(self):
     # test base answer. For base answer just return value and text values
     answer = 'Test1'
     self.assertEquals(Answer.prep_value(answer).lower(), answer.lower())
     self.assertEquals(Answer.prep_text(answer).lower(), answer.lower())
     # test numerical answer. Confirm zfill normalization
     answer = 1
     self.assertEquals(
         NumericalAnswer.prep_text(answer),
         str(answer).zfill(NumericalAnswer.STRING_FILL_LENGTH))
     self.assertEquals(
         NumericalAnswer.prep_value(answer),
         str(answer).zfill(NumericalAnswer.STRING_FILL_LENGTH))
     # Confirm the auto response verbose_name (sorry I'm just throwing it in here)
     self.assertEquals(AutoResponse.choice_name(), 'Auto Generated')
 def clean(self):
     validation_test = self.cleaned_data.get('validation_test', None)
     test_question = self.cleaned_data.get('test_question', None)
     if validation_test is None or test_question is None:
         return self.cleaned_data
     answer_class = Answer.get_class(test_question.answer_type)
     method = getattr(answer_class, validation_test, None)
     if method is None:
         raise forms.ValidationError(
             'unsupported validator defined on test question')
     if validation_test == 'between':
         if self.cleaned_data.get(
                 'min', False) is False or self.cleaned_data.get(
                 'max', False) is False:
             raise forms.ValidationError(
                 'min and max values required for between condition')
     elif self.cleaned_data.get('value', False) is False:
         raise forms.ValidationError(
             'Value is required for %s' %
             validation_test)
     if test_question.answer_type in [
             MultiChoiceAnswer.choice_name(),
              MultiSelectAnswer.choice_name()]:
         if self.cleaned_data.get('options', False) is False:
             raise forms.ValidationError(
                 'No option selected for %s' %
                 test_question.identifier)
         self.cleaned_data['value'] = self.cleaned_data['options']
     return self.cleaned_data
Example #22
def save_answer(user, question, answer):
	chunk_dict = json.loads(answer)
	utils.renumber_chunks(chunk_dict)
	answer = utils.dump_data(chunk_dict)
#	if not compare.compare(utils.sent_breakdown(question.question), utils.sent_breakdown(answer)):
#		return False
	if answer == question.question:
		return False
	try:
		final = Answer(question = question,
				answer = answer,
				user = user)
		final.save()
	except:
		return False
	return True
Example #23
    def clean(self):
        super(SamplingCriterionForm, self).clean()
        validation_test = self.cleaned_data.get('validation_test', None)
        if validation_test is None:
            raise ValidationError('This field is Required')
            return self.cleaned_data['validation_test']
        listing_question = self.cleaned_data.get('listing_question', None)
        if listing_question:
            answer_class = Answer.get_class(listing_question.answer_type)
            method = getattr(answer_class, validation_test, None)
            if method is None:
                raise ValidationError(
                    'unsupported validator defined on listing question')

        if validation_test == 'between':
            if self.cleaned_data.get(
                    'min', False) is False or self.cleaned_data.get(
                    'max', False) is False:
                raise ValidationError(
                    'min and max values required for between condition')
                return self.cleaned_data

        if validation_test == 'equals':
            if not self.cleaned_data['value']:
                raise ValidationError('Value Required')

        elif self.cleaned_data.get('value', False) is False:
            raise ValidationError('Value is required for %s' % validation_test)
        if listing_question and listing_question.answer_type == MultiChoiceAnswer.choice_name():
            if self.cleaned_data.get('options', False) is False:
                raise ValidationError(
                    'No option selected for %s' %
                    listing_question.identifier)
            self.cleaned_data['value'] = self.cleaned_data['options']
        return self.cleaned_data
 def setUp(self):
     BaseTest.setUp(self)
     self.survey = Survey.objects.create(
         name="Internal Test Survey", status='Active',
         need_logged_user=False, display_by_question=False
     )
     self.response = Response.objects.create(survey=self.survey)
     self.questions = []
     self.answers = []
     self.data = [
         [Question.PARA, "Mytext", None],
         [Question.SHORT, "Mytext", None],
         [Question.RADIO, "Yes", "Yes, No, Maybe"],
         [Question.CHECKBOX, "Yes", "Yes, No, Maybe"],
         [Question.INTEGER, 42, None],
     ]
     for i, data in enumerate(self.data):
         qtype, answer_body, answer_choices = data
         question = Question.objects.create(
             text="{} question ?".format(qtype), choices=answer_choices,
             order=i + 1, required=True, survey=self.survey,
             type=qtype,
         )
         self.questions.append(question)
         answer = Answer(
             response=self.response, question=question, body=answer_body
         )
         self.answers.append(answer)
 def setUp(self):
     BaseTest.setUp(self)
     self.survey = Survey.objects.create(name="Internal Test Survey",
                                         is_published=True,
                                         need_logged_user=False,
                                         display_by_question=False)
     self.response = Response.objects.create(survey=self.survey)
     self.questions = []
     self.answers = []
     self.data = [
         [Question.TEXT, "Mytext", None],
         [Question.SHORT_TEXT, "Mytext", None],
         [Question.RADIO, "Yes", "Yes, No, Maybe"],
         [Question.SELECT, "No", "Yes, No, Maybe"],
         # [Question.SELECT_IMAGE,Answer, "TODO" ,None],
         [Question.SELECT_MULTIPLE, "Yes", "Yes, No, Maybe"],
         [Question.INTEGER, 42, None],
         [Question.SELECT_MULTIPLE, "[u'2', u'4']", "2, 4, 6"],
     ]
     for i, data in enumerate(self.data):
         qtype, answer_body, answer_choices = data
         question = Question.objects.create(
             text="{} question ?".format(qtype),
             choices=answer_choices,
             order=i + 1,
             required=True,
             survey=self.survey,
             type=qtype,
         )
         self.questions.append(question)
         answer = Answer(response=self.response,
                         question=question,
                         body=answer_body)
         self.answers.append(answer)
Example #26
 def clean(self):
     validation_test = self.cleaned_data.get('validation_test', None)
     test_question = self.cleaned_data.get('test_question', None)
     if validation_test is None or test_question is None:
         return self.cleaned_data
     answer_class = Answer.get_class(test_question.answer_type)
     method = getattr(answer_class, validation_test, None)
     if method is None:
         raise forms.ValidationError(
             'unsupported validator defined on test question')
     if validation_test == 'between':
         if self.cleaned_data.get('min',
                                  None) is None or self.cleaned_data.get(
                                      'max', None) is None:
             raise forms.ValidationError(
                 'min and max values required for between condition')
     elif test_question.answer_type in [
              MultiChoiceAnswer.choice_name(), MultiSelectAnswer.choice_name()
     ]:
         if not self.cleaned_data.get('options', False):
             raise forms.ValidationError('No option selected for %s' %
                                         test_question.identifier)
         self.cleaned_data['value'] = self.cleaned_data['options']
     elif self.cleaned_data.get('value', False) is False:
         raise forms.ValidationError('Value is required for %s' %
                                     validation_test)
     return self.cleaned_data
Example #27
 def _get_group_next_question(question, proposed_next):
     next_question = proposed_next
     present_question_group = question.group if hasattr(question, 'group') else None
     if next_question and AnswerAccessDefinition.is_valid(access.choice_name(),
                                                          next_question.answer_type) is False:
         next_question = _get_group_next_question(question, next_question.next_question(reply))
     # I hope the next line is not so confusing!
     # Basically it means treat only if the next question belongs to a different group from the present.
     # That's if present has a group
     if hasattr(next_question, 'group') and present_question_group != next_question.group:
         question_group = next_question.group
         if question_group:
             qset = QuestionSet.get(pk=next_question.qset.pk)
             valid_group = True
             for condition in question_group.group_conditions.all():
                 # we are interested in the qset param list with same identifier name as condition.test_question
                 test_question = qset.parameter_list.questions.get(identifier=condition.test_question.identifier)
                 param_value = ''            # use answer.as value
                 if session_data[ANSWERS][-1].get(test_question.identifier, None):    # last answer entry
                     param_value = session_data[ANSWERS][-1][test_question.identifier]
                 answer_class = Answer.get_class(condition.test_question.answer_type)
                 validator = getattr(answer_class, condition.validation_test, None)
                 if validator is None:
                     raise ValueError('unsupported validator defined on listing question')
                 try:
                     slogger.debug('parm val: %s, params: %s' % (param_value, condition.test_params))
                     is_valid = validator(param_value, *condition.test_params)
                 except:
                     is_valid = True
                 if is_valid is False:
                     valid_group = False
                     break   # fail if any condition fails
             if valid_group is False:
                 next_question = _get_group_next_question(question, next_question.next_question(reply))
     return next_question
Example #28
def is_relevant_odk(context, question, interviewer):
    batch = question.qset
    if question.pk == batch.start_question.pk:
        default_relevance = 'true()'
    else:
        default_relevance = 'false()'
    relevance_context = ' (%s)' % (
        ' or '.join(context.get(question.pk, [default_relevance, ])),
    )
    if hasattr(question, 'group') and question.group:
        relevance_context = '%s %s' % (relevance_context, is_relevant_by_group(context, question))

    # do not include back to flows to this
    flows = question.flows.exclude(desc=LogicForm.BACK_TO_ACTION)
    node_path = get_node_path(question)
    flow_conditions = []
    if flows:
        for flow in flows:
            if flow.validation_test:
                text_params = [t.param for t in flow.text_arguments]
                answer_class = Answer.get_class(question.answer_type)
                flow_condition = answer_class.print_odk_validation(     # get appropriate flow condition
                    node_path, flow.validation_test, *text_params)
                flow_conditions.append(flow_condition)
                if flow.next_question:
                    next_question = flow.next_question
                    next_q_context = context.get(
                        next_question.pk, ['false()', ])
                    next_q_context.append(flow_condition)
                    context[next_question.pk] = next_q_context
        null_flows = flows.filter(validation__isnull=True, next_question__isnull=False)
        if null_flows:
            null_flow = null_flows[0]
            # check if next question if we are moving to a less looped question
            # essentially same as checking if next question is outside current questions loop
            loop_story = question.qset.get_loop_story()
            # fix for side by side loops. check
            # basically check if next question is not on same loop
            if len(loop_story.get(question.pk, [])) > len(loop_story.get(null_flow.next_question.pk, [])):
                null_condition = ["count(%s) > 0" % node_path, ]
            else:
                null_condition = ["string-length(%s) > 0" % node_path, ]
            # ['true()', "string-length(%s) > 0" % node_path]
            # null_condition = ['true()', ]
            if len(flow_conditions) > 0 and hasattr(question, 'loop_ended') is False:
                null_condition.append('not(%s)' %
                                      ' or '.join(flow_conditions))
            next_question = null_flow.next_question
            next_q_context = context.get(next_question.pk, ['false()', ])
            next_q_context.append('(%s)' % ' and '.join(null_condition))
            if hasattr(question, 'group') and (hasattr(next_question, 'group') is False or
                                                       question.group != next_question.group):
                next_q_context.append('true()')
            # if get_loop_aware_path(question) != get_loop_aware_path(next_question):
            #     next_q_context.append('true()')
            # if hasattr(next_question, 'loop_ended'):
            #     next_q_context.append('true()')
            context[next_question.pk] = next_q_context
    return mark_safe(relevance_context)
 def test_supported_answers_does_not_contain_abstract_model_nor_non_response(
         self):
     supported_answers = Answer.supported_answers()
     self.assertFalse(NonResponseAnswer in supported_answers)
     for answer in supported_answers:
         self.assertFalse(answer._meta.abstract)
     self.assertIn(NumericalAnswer, supported_answers)
     self.assertIn(AutoResponse, supported_answers)
 def test_answer_types_available(self):
     known_answer_types = [
         NumericalAnswer, AutoResponse, MultiSelectAnswer, TextAnswer,
         MultiChoiceAnswer, AudioAnswer, VideoAnswer, ImageAnswer,
         GeopointAnswer
     ]
     for answer_type in known_answer_types:
         self.assertIn(answer_type.choice_name(), Answer.answer_types())
Example #31
def quest_validation_opts(batch):
    opts_dict = {}
    for cls in Answer.__subclasses__():
        opts = []
        for validator in cls.validators():
            opts.append({'display': validator.__name__, 'value': validator.__name__.upper() })
        opts_dict[cls.choice_name()] = opts
    return mark_safe(json.dumps(opts_dict))
Example #32
def validation_args(batch):
    args_map = {}
    for validator in Answer.validators():
        args_map.update({
            validator.__name__.upper():
            len(inspect.getargspec(validator).args) - 2
        })  # validator is a classmethod, plus the answer is an extra param
    return mark_safe(json.dumps(args_map))
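The "- 2" above follows from that comment: each validator is assumed to be a classmethod whose first real argument is the answer, so two of the introspected argument names are never user-supplied. A small illustration of the arithmetic with a hypothetical validator (inspect.getfullargspec is used here because getargspec has been removed from recent Python releases):

import inspect

class ExampleAnswer:
    @classmethod
    def between(cls, answer, lower, upper):
        return lower <= answer <= upper

# The argspec args are ['cls', 'answer', 'lower', 'upper']; dropping 'cls' and
# 'answer' leaves the two parameters a user has to supply for this validator.
print(len(inspect.getfullargspec(ExampleAnswer.between).args) - 2)  # prints 2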
 def test_logic_form_does_not_have_options_for_non_multi_type_questions(self):
     answer_types = Answer.answer_types()
     for answer_type in answer_types:
         if answer_type not in [MultiSelectAnswer.choice_name(), MultiChoiceAnswer.choice_name()]:                
             q = Question.objects.create(identifier=answer_type, text="text", answer_type=answer_type,
                                             qset_id=self.qset.id, response_validation_id=1)
             l = LogicForm(q)
             self.assertFalse(l.fields.get('option'))
Example #34
def quest_validation_opts(batch):
    opts_dict = {}
    for cls in Answer.__subclasses__():
        opts = []
        for validator in cls.validators():
            opts.append({'display': validator.__name__,
                         'value': validator.__name__.upper()})
        opts_dict[cls.choice_name()] = opts
    return mark_safe(json.dumps(opts_dict))
Example #35
def get_answer_validations(request):
    """This function is meant to create json posted response validation
    :param request:
    :return:
    """
    answer_type = request.GET.get('answer_type') if request.method == 'GET' else request.POST.get('answer_type')
    answer_class = Answer.get_class(answer_type)
    validator_names = [validator.__name__ for validator in answer_class.validators()]
    return JsonResponse(validator_names, safe=False)
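A minimal wiring sketch for a JSON endpoint like this one, assuming the view lives in the survey app's views module (the URL path and name below are made up for illustration):

from django.urls import path

from . import views  # hypothetical module containing get_answer_validations

urlpatterns = [
    # GET /answer-validations/?answer_type=... returns the validator names as JSON
    path('answer-validations/', views.get_answer_validations, name='answer_validations'),
]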
Example #36
 def save(self, commit=True):
     """ Save the response object """
     # Recover an existing response from the database if any
     #  There is only one response by logged user.
     response = self._get_preexisting_response()
     if response is None:
         response = super(ResponseForm, self).save(commit=False)
     response.survey = self.survey
     response.interview_uuid = self.uuid
     if self.user.is_authenticated():
         response.user = self.user
     response.save()
     # response "raw" data as dict (for signal)
     data = {
         'survey_id': response.survey.id,
         'interview_uuid': response.interview_uuid,
         'responses': []
     }
     # create an answer object for each question and associate it with this
     # response.
     for field_name, field_value in self.cleaned_data.items():
         if field_name.startswith("question_"):
             # warning: this way of extracting the id is very fragile and
             # entirely dependent on the way the question_id is encoded in
             # the field name in the __init__ method of this form class.
             q_id = int(field_name.split("_")[1])
             question = Question.objects.get(pk=q_id)
             answer = self._get_preexisting_answer(question)
             if answer is None:
                 answer = Answer(question=question)
             if question.type == Question.SELECT_IMAGE:
                 value, img_src = field_value.split(":", 1)
                 # TODO
             answer.body = field_value
             data['responses'].append((answer.question.id, answer.body))
             LOGGER.debug(
                 "Creating answer for question %d of type %s : %s", q_id,
                 answer.question.type, field_value
             )
             answer.response = response
             answer.save()
     survey_completed.send(sender=Response, instance=response, data=data)
     return response
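The "fragile" warning in the loop above only makes sense together with the field naming it refers to. A hypothetical sketch of how such fields might be registered in the form's __init__, so that int(field_name.split("_")[1]) recovers the question id (the real ResponseForm __init__ is not part of this snippet, and the questions relation and required flag are assumptions):

from django import forms

def add_question_fields(form, survey):
    # Register one field per question, keyed "question_<pk>" -- the encoding
    # that save() above depends on when it splits the field name on "_".
    for question in survey.questions.all():
        form.fields["question_%d" % question.pk] = forms.CharField(
            required=question.required)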
Example #37
def get_response_validations(request):
    """This function is meant to create json posted response validation
    :param request:
    :return:
    """
    answer_type = request.GET.get('answer_type') if request.method == 'GET' else request.POST.get('answer_type')
    answer_class = Answer.get_class(answer_type)
    validator_names = [validator.__name__ for validator in answer_class.validators()]
    validations = ResponseValidation.objects.filter(validation_test__in=validator_names).values_list('id', flat=True)
    return JsonResponse(list(validations), safe=False)
Example #38
    def __init__(self, question, initial=None, *args, **kwargs):
        super(LogicForm, self).__init__(initial=initial, *args, **kwargs)
        data = kwargs.get('data', None)
        batch = question.qset
        self.question = question
        self.batch = batch
        self.fields['condition'] = forms.ChoiceField(
            label='Eligibility criteria',
            choices=[],
            widget=forms.Select,
            required=False)
        self.fields['attribute'] = forms.ChoiceField(
            label='Attribute', choices=[
                ('value', 'Value'), ], widget=forms.Select, required=False)
        self.fields['condition'].choices = [
            (validator.__name__,
             validator.__name__.upper()) for validator in Answer.get_class(
                question.answer_type).validators()]
        if question.answer_type in [
                MultiChoiceAnswer.choice_name(),
                MultiSelectAnswer.choice_name()]:
            self.fields['option'] = forms.ChoiceField(
                label='', choices=[], widget=forms.Select, required=True)
            self.fields['option'].choices = [
                (option.text, option.text) for option in question.options.all()]
        else:
            self.fields['value'] = forms.CharField(label='', required=False)
            self.fields['min_value'] = forms.CharField(
                label='', required=False, widget=forms.TextInput(
                    attrs={
                        'placeholder': 'Min Value'}))
            self.fields['max_value'] = forms.CharField(
                label='', required=False, widget=forms.TextInput(
                    attrs={
                        'placeholder': 'Max Value'}))
            if question.answer_type == DateAnswer.choice_name():
                self.fields['value'].widget.attrs['class'] = 'datepicker'
                self.fields['min_value'].widget.attrs['class'] = 'datepicker'
                self.fields['max_value'].widget.attrs['class'] = 'datepicker'
            # validate_with_question = forms.ChoiceField(label='', choices=[], widget=forms.Select, required=False)
        self.fields['action'] = forms.ChoiceField(
            label='Then', choices=[], widget=forms.Select, required=True)
        flows = self.question.flows.all()
        [f.next_question.pk for f in flows if f.next_question]
        next_q_choices = [(q.pk, q.text) for q in batch.questions_inline(
        ) if q.pk is not self.question.pk]
        # and q.pk not in existing_nexts]
        next_q_choices.extend([(q.pk, q.text)
                               for q in batch.zombie_questions()])
        self.fields['next_question'] = forms.ChoiceField(
            label='', choices=next_q_choices, widget=forms.Select, required=False)
        #self.fields['next_question'].widget.attrs['class'] = 'chzn-select'
        self.fields['action'].choices = self.ACTIONS.items()

        data.get('action', None) if data else None
Example #39
 def _get_answer():
     answer_class = Answer.get_class(question.answer_type)
     if answer_class in [VideoAnswer, AudioAnswer, ImageAnswer]:
         url_component = '%s %s' % (question.pk, interview.pk)
         return mark_safe('<a href="{% url download_qset_attachment ' +
                          url_component + ' %}">Download</a>')
     else:
         answer = answer_class.objects.filter(interview=interview,
                                              question=question).last()
         if answer:
             return answer.value
 def test_correct_validators_is_applied_as_per_question_answer_type(self):
     answer_types = Answer.supported_answers()  # different types of questions
     for answer_type in answer_types:
         q = Question.objects.create(qset_id=self.qset.id, response_validation_id=1,
                                     identifier=answer_type.choice_name(), text='test',
                                     answer_type=answer_type.choice_name())
         l = LogicForm(q)
         answer_choice_names = [(validator.__name__, validator.__name__.upper())
                                for validator in answer_type.validators()]
         self.assertEqual(
             set(l.fields['condition'].choices), set(answer_choice_names))
Example #41
def get_on_response_xform(interviewer, survey):
    registered_households = interviewer.generate_survey_households(survey)
    batches = interviewer.ea.open_batches(survey)
    return render_to_string("odk/survey_form-no-repeat.xml", {
        'interviewer': interviewer,
        'registered_households': registered_households, #interviewer.households.filter(survey=survey, ea=interviewer.ea).all(),
        'title' : '%s - %s' % (survey, ', '.join([batch.name for batch in batches])),
        'survey' : survey,
        'survey_batches' : batches,
        'messages' : MESSAGES,
        'answer_types' : dict([(cls.__name__.lower(), cls.choice_name()) for cls in Answer.supported_answers()])
        })
Example #42
 def _is_relevant_by_group(qset):
     qset = QuestionSet.get(pk=qset.pk)
     relevant_new = []
     for condition in question_group.group_conditions.all():
         test_question = qset.parameter_list.questions.get(identifier=condition.test_question.identifier)
         answer_class = Answer.get_class(condition.test_question.answer_type)
         relevant_new.append(answer_class.print_odk_validation(get_group_question_path(qset, test_question),
                                                               condition.validation_test,  *condition.test_params))
     relevance_builder = ['false()', ]
     if relevant_new:
         relevance_builder.append('(%s)' % ' and '.join(relevant_new))
     return ' and (%s)' % ' or '.join(relevance_builder)
Example #43
 def _get_answer():
     answer_class = Answer.get_class(question.answer_type)
     try:
         if answer_class in [VideoAnswer, AudioAnswer, ImageAnswer]:
             return mark_safe('<a href="{% url download_qset_attachment %s %s %}">Download</a>' % (question.pk,
                                                                                                   interview.pk))
         else:
             answer = answer_class.objects.filter(interview=interview, question=question).last()
             if answer:
                 return answer.value
     except answer_class.DoesNotExist:
         return ''
def submit(request):
    logger.info(request.POST)
    p = request.POST

    
    try: 
        user = User.objects.get(email=p["email"])
    except User.DoesNotExist:
        # Save the user info
        user = User(email=p["email"], name=p["name"], exp=p["exp"])        
        user.save()
        
    
    # Look up the maximum question number
    #max_seq_record = Question.objects.all().aggregate(Max('seq'))
    #max_seq = int(max_seq_record['seq__max']) + 1
    
    questions = Question.objects.all()
    
    # Compute the score and save the answers
    score = 0

    #for i in range(1, max_seq):
    for question in questions:        
        #question = Question.objects.get(seq=i)        
        value = '%s' % question.pk        
        score += int(p[value])
        logger.error(p[value])
        
        answer = Answer(user=user, question=question, answer=p[value])
        answer.save()
        
        
    d = dict(user=user, score=score)
    d.update(csrf(request))    
        
    #return render_to_response("submit.html", d)

    return HttpResponseRedirect(reverse("survey.views.user", args=[user.pk]))
Example #45
def get_qset_xform(interviewer, allocations, qset, ea_samples={}):
    return render_to_string("odk/question_set.xml",
                            {'interviewer': interviewer,
                             'qset': qset,
                             'stage': allocations[0].stage,
                                'assignments': allocations,
                                'non_response_reasons': settings.NON_RESPONSE_REASONS,
                                'answer_types': dict([(
                                    cls.__name__.lower(),
                                    cls.choice_name())
                                    for cls in Answer.supported_answers()]),
                                'ea_samples': ea_samples,
                             })
Example #46
def record_interview_answer(interview, question, answer):
    if not isinstance(answer, NonResponseAnswer):
        answer_class = Answer.get_class(question.answer_type)
        print "answer type ", answer_class.__name__
        print "question is ", question
        print "question pk is ", question.pk
        print "interview is ", interview
        print "answer text is ", answer
        return answer_class.create(interview, question, answer)
    else:
        answer.interview = interview
        answer.save()
        return answer
Example #47
def get_answer_validations(request):
    """This function is meant to create json posted response validation
    :param request:
    :return:
    """
    answer_type = request.GET.get(
        'answer_type') if request.method == 'GET' else request.POST.get(
            'answer_type')
    answer_class = Answer.get_class(answer_type)
    validator_names = [
        validator.__name__ for validator in answer_class.validators()
    ]
    return JsonResponse(validator_names, safe=False)
Example #48
def is_relevant_odk(context, question, interviewer, registered_households):
    batch = question.batch
    if question.pk == batch.start_question.pk:
        default_relevance = 'true()'
    else:
        default_relevance = 'false()'
    relevance_context = ' (%s) %s' % (
                                ' or '.join(context.get(question.pk, [default_relevance, ])),
                                is_relevant_by_group(context, question, registered_households)
                                )
    flows = question.flows.all()
    node_path = '/survey/b%s/q%s' % (batch.pk, question.pk)
    flow_conditions = []
    if flows:
        for flow in flows:
            if flow.validation_test:
                text_params = [t.param for t in flow.text_arguments]
                answer_class = Answer.get_class(question.answer_type)
                flow_condition = answer_class.print_odk_validation(node_path, flow.validation_test, *text_params)
                flow_conditions.append(flow_condition)
                if flow.next_question:
                    next_question = flow.next_question
                    next_q_context = context.get(next_question.pk, ['false()', ])
                    next_q_context.append(flow_condition)
                    context[next_question.pk] = next_q_context

        null_flows = flows.filter(validation_test__isnull=True, next_question__isnull=False)
        connecting_flows = question.connecting_flows.all()
        if null_flows:
            null_flow = null_flows[0]
            null_condition = ["string-length(%s) &gt; 0" % node_path, ]
            # null_condition = ['true()', ]
            if len(flow_conditions) > 0:
                null_condition.append('not (%s)' % ' or '.join(flow_conditions))
            next_question = null_flow.next_question
            next_q_context = context.get(next_question.pk, ['false()', ])
            next_q_context.append('(%s)' % ' and '.join(null_condition))
            if question.group != next_question.group:
                next_q_context.append('true()')
            context[next_question.pk] = next_q_context
            # if connecting_flows.count() == 0 or (next_question and
            #                                              question.group != next_question.group):
            #     prob_next = batch.next_inline(next_question,
            #                                   exclude_groups=[next_question.group, ])
            #     if prob_next:
            #         prob_next_context = context.get(prob_next.pk, [])
            #         prob_next_context.append('true()')
            #         else:
            #             prob_next_context.append("string-length(%s) &gt; 0" % node_path)
            #         context[prob_next.pk] = prob_next_context
    return mark_safe(relevance_context)
Example #49
 def _get_group_next_question(question, proposed_next):
     next_question = proposed_next
     present_question_group = question.group if hasattr(
         question, 'group') else None
     if next_question and AnswerAccessDefinition.is_valid(
             access.choice_name(), next_question.answer_type) is False:
         next_question = _get_group_next_question(
             question, next_question.next_question(reply))
     # I hope the next line is not so confusing!
     # Basically it means treat only if the next question belongs to a different group from the present.
     # That's if present has a group
     if hasattr(
             next_question,
             'group') and present_question_group != next_question.group:
         question_group = next_question.group
         if question_group:
             qset = QuestionSet.get(pk=next_question.qset.pk)
             valid_group = True
             for condition in question_group.group_conditions.all():
                 # we are interested in the qset param list with same identifier name as condition.test_question
                 test_question = qset.parameter_list.questions.get(
                     identifier=condition.test_question.identifier)
                 param_value = ''  # use answer.as value
                 if session_data[ANSWERS][-1].get(
                         test_question.identifier,
                         None):  # last answer entry
                     param_value = session_data[ANSWERS][-1][
                         test_question.identifier]
                 answer_class = Answer.get_class(
                     condition.test_question.answer_type)
                 validator = getattr(answer_class,
                                     condition.validation_test, None)
                 if validator is None:
                     raise ValueError(
                         'unsupported validator defined on listing question'
                     )
                 try:
                     slogger.debug('parm val: %s, params: %s' %
                                   (param_value, condition.test_params))
                     is_valid = validator(param_value,
                                          *condition.test_params)
                 except:
                     is_valid = True
                 if is_valid is False:
                     valid_group = False
                     break  # fail if any condition fails
             if valid_group is False:
                 next_question = _get_group_next_question(
                     question, next_question.next_question(reply))
     return next_question
Example #50
 def clean(self):
     answer_type = self.cleaned_data.get('answer_type', None)
     options = self.cleaned_data.get('options', None)
     response_validation = self.cleaned_data.get('response_validation', None)
     text = self.cleaned_data.get('text', None)
     self._check__multichoice_and_options_compatibility(
         answer_type, options)
     self._strip_special_characters_for_ussd(text)
     if answer_type:
         answer_class = Answer.get_class(answer_type)
         validator_names = [validator.__name__ for validator in answer_class.validators()]
         if response_validation and response_validation.validation_test not in validator_names:
             raise ValidationError('Selected Validation is not compatible with chosen answer type')
     return self.cleaned_data
 def test_reload_answer_access(self):
     AnswerAccessDefinition.objects.all().delete()
     self.assertEquals(AnswerAccessDefinition.objects.count(), 0)
     AnswerAccessDefinition.reload_answer_categories()
     self.assertTrue(AnswerAccessDefinition.objects.count() > 0)
     # check that each access type has an entry
     channels = [USSDAccess.choice_name(), ODKAccess.choice_name(), WebAccess.choice_name()]
     allowed_channels = AnswerAccessDefinition.objects.values_list('channel', flat=True)
     for channel in channels:
         self.assertIn(channel, allowed_channels)
         self.assertTrue(len(AnswerAccessDefinition.answer_types(channel)) > 0)
     answer_types = Answer.answer_types()
     for answer_type in [VideoAnswer, AudioAnswer, ImageAnswer]:
         self.assertNotIn(answer_type.choice_name(), AnswerAccessDefinition.answer_types(USSDAccess.choice_name()))
Example #52
    def post(self, request, **kwargs):
        created_at = request.data.get('created_at')
        updated_at = request.data.get('updated_at')
        survey_id = request.data.get('survey_id')
        # survey_count = request.data.get('survey_count')
        lat_lon = request.data.get('lat_lon')

        user_ans = Answer.objects.all().filter(user=request.user).annotate(counter=Max("count_id")).order_by("-counter").first()
        if user_ans is not None:
            survey_count = user_ans.count_id +1
        else:
            survey_count = 1


        data_list = []
        for val in request.data.get('ans'):

            data_dict = {
                "created_at": str(created_at),
                "updated_at": str(updated_at),
                "survey_id": survey_id,
                "survey_count": survey_count,
                "lat_lon": lat_lon,
                "question": Question.objects.get(id=val['question']),
                "q_ans": val['q_ans']
            }
            data_list.append(data_dict)

        for data in data_list:
            ans = Answer(created_at=data['created_at'], updated_at=data['updated_at'], survey_id=data['survey_id'], lat_lon=data['lat_lon'], count_id=data['survey_count'], question=data['question'], q_ans=data['q_ans'],user=request.user)
            ans.save()

        responses = {
            "Status": "success",
            "Error": '',
        }
        return Response(responses, status=200)
 def test_logic_form_does_not_have_options_for_non_multi_type_questions(
         self):
     answer_types = Answer.answer_types()
     for answer_type in answer_types:
         if answer_type not in [
                 MultiSelectAnswer.choice_name(),
                 MultiChoiceAnswer.choice_name()
         ]:
             q = Question.objects.create(identifier=answer_type,
                                         text="text",
                                         answer_type=answer_type,
                                         qset_id=self.qset.id,
                                         response_validation_id=1)
             l = LogicForm(q)
             self.assertFalse(l.fields.get('option'))
Example #54
 def clean(self):
     answer_type = self.cleaned_data.get('answer_type', None)
     options = self.cleaned_data.get('options', None)
     response_validation = self.cleaned_data.get('response_validation', None)
     text = self.cleaned_data.get('text', None)
     self._check__multichoice_and_options_compatibility(
         answer_type, options)
     self._strip_special_characters_for_ussd(text)
     self._prevent_duplicate_subquestions(text)
     if answer_type:
         answer_class = Answer.get_class(answer_type)
         validator_names = [validator.__name__ for validator in answer_class.validators()]
         if response_validation and response_validation.validation_test not in validator_names:
             raise ValidationError('Selected Validation is not compatible with chosen answer type')
     return self.cleaned_data
 def test_correct_validators_is_applied_as_per_question_answer_type(self):
     answer_types = Answer.supported_answers(
     )  # different types of questions
     for answer_type in answer_types:
         q = Question.objects.create(qset_id=self.qset.id,
                                     response_validation_id=1,
                                     identifier=answer_type.choice_name(),
                                     text='test',
                                     answer_type=answer_type.choice_name())
         l = LogicForm(q)
         answer_choice_names = [(validator.__name__,
                                 validator.__name__.upper())
                                for validator in answer_type.validators()]
         self.assertEqual(set(l.fields['condition'].choices),
                          set(answer_choice_names))
Example #56
def get_response_validations(request):
    """This function is meant to create json posted response validation
    :param request:
    :return:
    """
    answer_type = request.GET.get(
        'answer_type') if request.method == 'GET' else request.POST.get(
            'answer_type')
    answer_class = Answer.get_class(answer_type)
    validator_names = [
        validator.__name__ for validator in answer_class.validators()
    ]
    validations = ResponseValidation.objects.filter(
        validation_test__in=validator_names).values_list('id', flat=True)
    return JsonResponse(list(validations), safe=False)