def passes_test(self, value):
     answer_class = Answer.get_class(self.listing_question.answer_type)
     method = getattr(answer_class, self.validation_test, None)
     if method not in answer_class.validators():
         raise ValueError(
             'unsupported validator defined on listing question')
     return method(value, *self.test_params)
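
The snippet above resolves the validator by name with getattr and rejects anything not published by the answer class's validators() list. A minimal standalone sketch of that dispatch pattern, with hypothetical class and validator names:

class FakeNumericAnswer(object):
    # stand-in for Answer.get_class(...); validators are plain callables

    @staticmethod
    def greater_than(value, limit):
        return float(value) > float(limit)

    @staticmethod
    def equals(value, expected):
        return str(value) == str(expected)

    @classmethod
    def validators(cls):
        return [cls.greater_than, cls.equals]


def passes_test(answer_class, test_name, value, *params):
    method = getattr(answer_class, test_name, None)
    if method not in answer_class.validators():
        raise ValueError('unsupported validator: %s' % test_name)
    return method(value, *params)


print(passes_test(FakeNumericAnswer, 'greater_than', '12', '5'))  # True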
Example 2
 def get_variable_aggregates(self, base_location, report_level=1):
     indicator = self.indicator
     ikwargs = {'question_set__pk': indicator.question_set.pk,
                'survey__pk': indicator.survey.pk}
     parent_loc = 'ea__locations'
     hierarchy_count = Location.country().type.get_descendant_count()
     for i in range(hierarchy_count - report_level):    # build the lookup path from ea__locations up to the report level
         parent_loc = '%s__parent' % parent_loc
     # exploiting MPTT attributes to speed up this query
     lowest_level = base_location.type.get_descendants(include_self=False).last().level
     left = base_location.lft
     right = base_location.rght
     if report_level <= base_location.level:     # exclude base_location itself (anything at or above this level would resolve to self)
         left += 1
         right -= 1
     ikwargs.update({'ea__locations__lft__gte': left, 'ea__locations__lft__lte': right,
                     'ea__locations__level': lowest_level})
     interviews = Interview.objects.filter(**ikwargs)
     for criterion in self.criteria.all():
         kwargs = dict()
         kwargs['answer__question__identifier__iexact'] = criterion.test_question.identifier
         # be careful here regarding multiple validation tests with the same name (e.g. a__gt=2, a__gt=10)
         kwargs.update(Answer.get_validation_queries(criterion.validation_test, 'as_value',
                                                     namespace='answer__', *criterion.prepped_args))
         interviews = interviews.filter(**kwargs)
     parent_loc = '%s__name' % parent_loc
     return dict(interviews.values_list(parent_loc).annotate(total=Count('id', distinct=True)))
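
get_variable_aggregates narrows interviews with raw lft/rght bounds instead of a join per hierarchy level. This works because MPTT's nested-set encoding puts every descendant strictly inside its ancestor's (lft, rght) interval; a small self-contained sketch with made-up rows:

# hypothetical rows shaped like (name, lft, rght, level)
rows = [
    ('Country', 1, 8, 0),
    ('Region A', 2, 5, 1),
    ('District A1', 3, 4, 2),
    ('Region B', 6, 7, 1),
]

def descendants(base, rows):
    _, lft, rght, _ = base
    # strict inequalities exclude the base node itself, which is also why
    # the method above shifts left/right by one to drop base_location
    return [r for r in rows if lft < r[1] and r[2] < rght]

print(descendants(rows[0], rows))  # Region A, District A1 and Region B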
Example 3
 def passes_test(self, value):
     answer_class = Answer.get_class(self.listing_question.answer_type)
     method = getattr(answer_class, self.validation_test, None)
     if method is None:
         raise ValueError(
             'unsupported validator defined on listing question')
     return method(value, *self.test_params)
Example 4
 def get_display_label(self):
     survey = self.survey
     naming_label = survey.random_sample_label
     interview = self.interview
     # get the exact answer type
     pattern = '{{ *([0-9a-zA-Z_]+) *}}'
     identifiers = re.findall(pattern, naming_label)
     listing_form = survey.preferred_listing.listing_form if survey.preferred_listing else survey.listing_form
     questions = listing_form.questions.filter(identifier__in=identifiers)
     context = {}
     for question in questions:
         answer_class = Answer.get_class(question.answer_type)
         try:
             answer = answer_class.objects.get(interview=interview, question=question)
             context[question.identifier] = answer.value
         except answer_class.DoesNotExist:
             pass
     question_context = template.Context(context)
     label = template.Template(html.escape(naming_label)).render(question_context)
     # now if label happens to be empty, just use the first response as label
     if not label:
         try:
             label = interview.answer.first().as_text
         except Exception:
             pass
     return label or interview.question_set.name
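
get_display_label fills a '{{ identifier }}' naming label from listing answers. A standalone sketch of the same extraction and substitution using only re, with hypothetical identifiers and values (the real code renders through django.template):

import re

pattern = r'{{ *([0-9a-zA-Z_]+) *}}'
naming_label = '{{ family_name }} household, head: {{ head_name }}'
context = {'family_name': 'Okello', 'head_name': 'James'}  # hypothetical answers

identifiers = re.findall(pattern, naming_label)  # ['family_name', 'head_name']
label = re.sub(pattern, lambda m: context.get(m.group(1), ''), naming_label)
print(label)  # Okello household, head: James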
Example 5
class GenericQuestion(BaseModel):
    ANSWER_TYPES = [(name, name) for name in Answer.answer_types()]
    identifier = models.CharField(max_length=100, verbose_name='Variable Name')
    text = models.CharField(max_length=250)
    answer_type = models.CharField(
        max_length=100, blank=False, null=False, choices=ANSWER_TYPES)
    response_validation = models.ForeignKey(ResponseValidation, related_name='%(class)s', null=True, blank=True,
                                            verbose_name='Validation Rule')

    @classmethod
    def type_name(cls):
        return cls._meta.verbose_name.title()

    class Meta:
        abstract = True

    def validators(self):
        return Answer.get_class(self.answer_type).validators()

    def validator_names(self):
        return [v.__name__ for v in Answer.get_class(self.answer_type).validators()]

    def odk_constraint(self):
        if self.response_validation:
            return self.response_validation.get_odk_constraint(self)

    def odk_constraint_msg(self):
        if self.response_validation:
            return self.response_validation.dconstraint_message
Example 6
class RespondentGroupCondition(BaseModel):
    VALIDATION_TESTS = [(validator.__name__, validator.__name__)
                        for validator in Answer.validators()]
    respondent_group = models.ForeignKey(RespondentGroup,
                                         related_name='group_conditions')
    test_question = models.ForeignKey(ParameterTemplate,
                                      related_name='group_condition')
    validation_test = models.CharField(max_length=200,
                                       null=True,
                                       blank=True,
                                       choices=VALIDATION_TESTS)

    class Meta:
        app_label = 'survey'

    @property
    def test_params(self):
        return [t.param for t in self.test_arguments]

    def params_display(self):
        params = []
        for arg in self.test_arguments:  # note: this class defines test_arguments, not text_arguments
            if self.test_question.answer_type == MultiChoiceAnswer.choice_name():
                params.append(self.test_question.options.get(order=arg.param).text)
            else:
                params.append(arg.param)
        return params

    @property
    def test_arguments(self):
        return GroupTestArgument.objects.filter(
            group_condition=self).order_by('position')
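
The VALIDATION_TESTS pattern recurs in several models below: the choices tuple is derived from the validator functions themselves, so the same list drives both the form widget and the getattr dispatch. A self-contained sketch with hypothetical validators:

def starts_with(value, prefix):
    return str(value).startswith(prefix)

def contains(value, sub):
    return sub in str(value)

# same shape as [(v.__name__, v.__name__) for v in Answer.validators()]
VALIDATION_TESTS = [(v.__name__, v.__name__) for v in (starts_with, contains)]
print(VALIDATION_TESTS)  # [('starts_with', 'starts_with'), ('contains', 'contains')]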
Example 7
 def reply(self, question):
     if self.belongs_to(question.group):
         answer_class = Answer.get_class(question.answer_type)
         answers = answer_class.objects.filter(interview__householdmember=self, question=question)
         if answers.exists():
             reply = unicode(answers[0].to_text())
             return string.capwords(reply)
     return ''
Example 8
 def delete(self, using=None):
     '''
     Delete related answers before deleting this object
     :param using:
     :return:
     '''
     answer_class = Answer.get_class(self.answer_type)
     answer_class.objects.filter(question=self).delete()
     return super(Question, self).delete(using=using)
Example 9
class RandomizationCriterion(BaseModel):
    VALIDATION_TESTS = [(validator.__name__, validator.__name__)
                        for validator in Answer.validators()]
    survey = models.ForeignKey(Survey, related_name='randomization_criteria')
    listing_question = models.ForeignKey(Question, related_name='criteria')
    validation_test = models.CharField(
        max_length=200,
        choices=VALIDATION_TESTS,
    )

    class Meta:
        app_label = 'survey'

    @property
    def test_params(self):
        return [t.param for t in self.text_arguments]

    @property
    def text_arguments(self):
        return self.arguments.all()

    def params_display(self):
        params = []
        for arg in self.text_arguments:
            if self.listing_question.answer_type == MultiChoiceAnswer.choice_name():
                try:
                    params.append(
                        self.listing_question.options.get(order=arg.param).text)
                except ValueError:
                    params.append(arg.param)
            else:
                params.append(arg.param)
        return params

    def passes_test(self, value):
        answer_class = Answer.get_class(self.listing_question.answer_type)
        method = getattr(answer_class, self.validation_test, None)
        if method not in answer_class.validators():
            raise ValueError(
                'unsupported validator defined on listing question')
        return method(value, *self.test_params)

    def qs_passes_test(self, value_key, queryset):
        answer_class = Answer.get_class(self.listing_question.answer_type)
        method = getattr(answer_class, 'fetch_%s' % self.validation_test, None)
        return method(value_key, *self.test_params, qs=queryset)

    @property
    def test_arguments(self):
        return CriterionTestArgument.objects.filter(
            test_condition=self).values_list('param',
                                             flat=True).order_by('position')
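
qs_passes_test relies on a naming convention: each validator has a queryset-level twin prefixed with fetch_ that filters rows rather than returning a bool. A standalone sketch of that convention, with an in-memory list standing in for the queryset:

class FakeAnswer(object):

    @staticmethod
    def greater_than(value, limit):
        return float(value) > float(limit)

    @staticmethod
    def fetch_greater_than(value_key, limit, qs=None):
        # queryset-level twin of greater_than: filter instead of test
        return [row for row in qs if float(row[value_key]) > float(limit)]


qs = [{'value': '3'}, {'value': '12'}, {'value': '7'}]
method = getattr(FakeAnswer, 'fetch_%s' % 'greater_than', None)
print(method('value', '5', qs=qs))  # [{'value': '12'}, {'value': '7'}]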
Example 10
class QuestionFlow(BaseModel):
    VALIDATION_TESTS = [(validator.__name__, validator.__name__)
                        for validator in Answer.validators()]
    question = models.ForeignKey(Question, related_name='flows')
    validation_test = models.CharField(max_length=200,
                                       null=True,
                                       blank=True,
                                       choices=VALIDATION_TESTS)
    # if validation passes, classify this flow response as having this value
    name = models.CharField(max_length=200, null=True, blank=True)
    # this would provide a brief description of this flow
    desc = models.CharField(max_length=200, null=True, blank=True)
    next_question = models.ForeignKey(Question,
                                      related_name='connecting_flows',
                                      null=True,
                                      blank=True,
                                      on_delete=models.SET_NULL)

    class Meta:
        app_label = 'survey'
        # unique_together = [('question', 'next_question', 'desc', ),]

    @property
    def test_params(self):
        return [t.param for t in self.text_arguments]

    def params_display(self):
        params = []
        for arg in self.text_arguments:
            if self.question.answer_type == MultiChoiceAnswer.choice_name():
                params.append(self.question.options.get(order=arg.param).text)
            else:
                params.append(arg.param)

        return params

    @property
    def text_arguments(self):
        return TextArgument.objects.filter(flow=self).order_by('position')

    @property
    def test_arguments(self):
        return TestArgument.objects.filter(
            flow=self).select_subclasses().order_by('position')

    def save(self, *args, **kwargs):
        # if self.name is None:
        #     if self.next_question:
        #         identifier = self.next_question.identifier
        #     else: identifier = ''
        #     self.name = "%s %s %s" % (self.question.identifier, self.validation_test or "", identifier)
        return super(QuestionFlow, self).save(*args, **kwargs)
Example 11
 def next_question(self, reply):
     flows = self.flows.all()
     answer_class = Answer.get_class(self.answer_type)
     resulting_flow = None
     for flow in flows:
         if flow.validation_test:
             test_values = [arg.param for arg in flow.text_arguments]
             if getattr(answer_class, flow.validation_test)(reply, *test_values) is True:
                 resulting_flow = flow
                 break
         else:
             resulting_flow = flow
     if resulting_flow:
         return resulting_flow.next_question
Example 12
 def next_question(self, reply):
     flows = self.flows.all()
     answer_class = Answer.get_class(self.answer_type)
     resulting_flow = None
     for flow in flows:
         if flow.validation_test:
             test_values = [arg.param for arg in flow.text_arguments]
             if getattr(answer_class, flow.validation_test)(reply, *test_values) is True:
                 resulting_flow = flow
                 break
         else:
             resulting_flow = flow
     if resulting_flow and resulting_flow.next_question:
         # better for it to know who it is
         return Question.get(id=resulting_flow.next_question.id)
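
The next_question loop above implements first-match routing: the first conditional flow whose validator passes wins, while an unconditional flow is remembered as the fallback (it does not break, so a later matching conditional can still override it). A minimal sketch with hypothetical flows:

def greater_than(value, limit):
    return float(value) > float(limit)

# hypothetical flows: (validator or None, params, next question id)
flows = [
    (greater_than, ('10',), 'Q_FOLLOW_UP'),
    (None, (), 'Q_DEFAULT'),  # unconditional fallback
]

def next_question(reply, flows):
    resulting = None
    for test, params, nxt in flows:
        if test is not None:
            if test(reply, *params) is True:
                resulting = nxt
                break
        else:
            resulting = nxt
    return resulting

print(next_question('15', flows))  # Q_FOLLOW_UP
print(next_question('3', flows))   # Q_DEFAULT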
Example 13
    @classmethod
    def generate_random_samples(cls, from_survey, to_survey, ea):
        """ Used to generate random samples from a listing conducted by from_survey, to be used by to_survey.
        TODO: optimize this method's queries
        :param from_survey: Survey from which listing was done
        :param to_survey: Survey for whom the random sample is being generated.
        :param ea: the EA where the survey was conducted
        :return: None
        """
        if cls.samples(to_survey, ea).exists():
            raise cls.SamplesAlreadyGenerated('Samples already generated')

        if to_survey.has_sampling is False or from_survey.has_sampling is False:
            raise ValueError('Either source or destination survey does not support sampling')
        valid_interviews = from_survey.interviews.filter(ea=ea,     # the listed interviews in the ea
                                                         question_set=from_survey.listing_form).values_list('id',
                                                                                                            flat=True)
        #valid_interviews = set(valid_interviews)
        # now get the interviews that meet the randomization criteria
        for criterion in to_survey.randomization_criteria.all():  # need to optimize this
            answer_type = criterion.listing_question.answer_type
            if answer_type == MultiChoiceAnswer.choice_name():
                value_key = 'value__text'
            else:
                value_key = 'value'
            answer_class = Answer.get_class(answer_type)
            kwargs = {
                'question': criterion.listing_question,
                'interview__id__in': valid_interviews,
            }
            # if qs:
            # kwargs['interview__id__in'] = valid_interviews
            valid_interviews = criterion.qs_passes_test(value_key, answer_class.objects.filter(
                **kwargs).only('interview__id').values_list('interview__id', flat=True))
        valid_interviews = list(valid_interviews)
        random.shuffle(valid_interviews)
        random_samples = valid_interviews[:to_survey.sample_size]
        samples = []
        for interview_id in random_samples:
            samples.append(
                ListingSample(
                    survey=to_survey,
                    interview_id=interview_id))
        with transaction.atomic():
            ListingSample.objects.bulk_create(samples)
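
After filtering, the actual sampling in generate_random_samples is just a shuffle and a slice. A tiny self-contained sketch with made-up interview ids:

import random

valid_interviews = [101, 102, 103, 104, 105, 106]  # hypothetical eligible ids
sample_size = 3  # stands in for to_survey.sample_size
random.shuffle(valid_interviews)
print(valid_interviews[:sample_size])  # e.g. [104, 101, 106]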
Example 14
 def get_valid_qs(self, locations):
     """Return the queryset valid according to this indicator variable
     :param locations:
     :return:
     """
     indicator = self.indicator
     ikwargs = {'ea__locations__in': locations,
                'question_set__pk': indicator.question_set.pk,
                'survey__pk': indicator.survey.pk}
     interviews = Interview.objects.filter(**ikwargs)
     for criterion in self.criteria.all():
         kwargs = dict()
         kwargs['answer__question__identifier__iexact'] = criterion.test_question.identifier
         # be careful here regarding multiple validation tests with the same name (e.g. a__gt=2, a__gt=10)
         kwargs.update(Answer.get_validation_queries(criterion.validation_test, 'as_value',
                                                     namespace='answer__', *criterion.prepped_args))
         interviews = interviews.filter(**kwargs)
     return interviews.distinct('id')
Example 15
class IndicatorVariableCriteria(BaseModel):
    """A variable is essential a filtered set of answers. Hence they need the filter criteria to be defined.
    This is the purpose of this class
    """
    VALIDATION_TESTS = [(validator.__name__, validator.__name__)
                        for validator in Answer.validators()]
    variable = models.ForeignKey(IndicatorVariable, related_name='criteria')
    # batch & parameter_list questions
    test_question = models.ForeignKey(
        Question,
        related_name='indicator_criteria',
        verbose_name='Filter')
    validation_test = models.CharField(max_length=200, choices=VALIDATION_TESTS, verbose_name='Condition')

    class Meta:
        app_label = 'survey'

    @property
    def test_params(self):
        return [t.param for t in self.test_arguments]

    def params_display(self):
        params = []
        for arg in self.test_arguments:  # note: this class defines test_arguments, not text_arguments
            if self.test_question.answer_type == MultiChoiceAnswer.choice_name():
                params.append(self.test_question.options.get(order=arg.param).text)
            else:
                params.append(arg.param)
        return params

    @property
    def test_arguments(self):
        return IndicatorCriteriaTestArgument.objects.filter(
            criteria=self).order_by('position')

    @property
    def prepped_args(self):
        answer_class = Answer.get_class(self.test_question.answer_type)
        return [answer_class.prep_value(val) for val in self.test_params]

    def qs_passes_test(self, value_key, queryset):
        test_args = self.prepped_args
        method = getattr(Answer, 'fetch_%s' % self.validation_test, None)
        return method(value_key, *test_args, qs=queryset)
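
prepped_args normalises the stored string parameters through the answer class before they are compared, so e.g. '02' and 2 test equal for numeric answers. A standalone sketch in which prep_value is a hypothetical stand-in for the real per-class hook:

class FakeNumericAnswer(object):

    @staticmethod
    def prep_value(val):
        # hypothetical stand-in: numeric answers coerce params to float
        return float(val)


test_params = ['02', '10.5']
print([FakeNumericAnswer.prep_value(v) for v in test_params])  # [2.0, 10.5]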
Example 16
class ResponseValidation(CloneableMixin, BaseModel):
    VALIDATION_TESTS = [(validator.__name__, validator.__name__)
                        for validator in Answer.validators()]
    validation_test = models.CharField(max_length=200, choices=VALIDATION_TESTS)
    # if validation passes, classify this flow response as having this value
    constraint_message = models.TextField(default='', blank=True, null=True)

    @property
    def dconstraint_message(self):
        if self.constraint_message:
            return self.constraint_message
        else:
            return unicode(self)

    class Meta:
        app_label = 'survey'

    @property
    def test_params(self):
        return [t.param for t in self.text_arguments]

    @property
    def text_arguments(self):
        return TextArgument.objects.filter(validation=self).order_by('position')

    @property
    def test_arguments(self):
        return TextArgument.objects.filter(validation=self).order_by('position')

    def __unicode__(self):
        return '%s: %s' % (self.validation_test, ', '.join(self.text_arguments.values_list('param', flat=True)))

    def get_odk_constraint(self, test_question):
        answer_class = Answer.get_class(test_question.answer_type)
        return mark_safe(answer_class.print_odk_validation('.', self.validation_test, *self.test_params))

    def validate(self, value, test_question):
        answer_class = Answer.get_class(test_question.answer_type)
        method = getattr(answer_class, self.validation_test, None)
        return method(value, *self.test_params)
Example 17
        def _get_possible_samples():

            if to_survey.has_sampling is False or from_survey.has_sampling is False:
                raise ValueError(
                    'Either source or destination survey does not support sampling'
                )
            valid_interviews = from_survey.interviews.filter(
                ea=ea,  # the listed interviews in the ea
                question_set=from_survey.listing_form).values_list('id',
                                                                   flat=True)
            #valid_interviews = set(valid_interviews)
            # now get the interviews that meet the randomization criteria
            for criterion in to_survey.randomization_criteria.all():  # need to optimize this
                answer_type = criterion.listing_question.answer_type
                if answer_type == MultiChoiceAnswer.choice_name():
                    value_key = 'value__text'
                else:
                    value_key = 'value'
                answer_class = Answer.get_class(answer_type)
                kwargs = {
                    'question': criterion.listing_question,
                    'interview__id__in': valid_interviews,
                }
                # if qs:
                # kwargs['interview__id__in'] = valid_interviews
                valid_interviews = criterion.qs_passes_test(
                    value_key,
                    answer_class.objects.filter(
                        **kwargs).only('interview__id').values_list(
                            'interview__id', flat=True))
            valid_interviews = list(valid_interviews)
            random.shuffle(valid_interviews)
            random_samples = valid_interviews[:to_survey.sample_size]
            samples = []
            for interview_id in random_samples:
                samples.append(
                    ListingSample(survey=to_survey, interview_id=interview_id))
            return samples
Example 18
class QuestionTemplate(BaseModel):
    ANSWER_TYPES = [(name, name) for name in Answer.answer_types()]
    identifier = models.CharField(max_length=100,
                                  blank=False,
                                  null=True,
                                  unique=True)
    group = models.ForeignKey("HouseholdMemberGroup",
                              related_name="question_templates")
    text = models.CharField(
        max_length=150,
        blank=False,
        null=False,
        #help_text="To replace the household member's name \
        #in the question, please include the variable FAMILY_NAME in curly brackets, e.g. {{ FAMILY_NAME }}. "
    )
    answer_type = models.CharField(max_length=100,
                                   blank=False,
                                   null=False,
                                   choices=ANSWER_TYPES)
    module = models.ForeignKey("QuestionModule",
                               related_name="question_templates")

    class Meta:
        app_label = 'survey'

    def __unicode__(self):
        return "%s - %s: (%s)" % (self.identifier, self.text,
                                  self.answer_type.upper())

    def save(self, *args, **kwargs):
        if self.answer_type not in [
                MultiChoiceAnswer.choice_name(),
                MultiSelectAnswer.choice_name()
        ]:
            self.options.all().delete()
        return super(QuestionTemplate, self).save(*args, **kwargs)
Example 19
 def validators(self):
     return Answer.get_class(self.answer_type).validators()
Example 20
 def qs_passes_test(self, value_key, queryset):
     answer_class = Answer.get_class(self.listing_question.answer_type)
     method = getattr(answer_class, 'fetch_%s' % self.validation_test, None)
     return method(value_key, *self.test_params, qs=queryset)
Example 21
 def total_answers(self):
     # utility to get the number of times this question has been answered
     return Answer.get_class(
         self.answer_type).objects.filter(question=self).count()
Example 22
 def answers(self):
     return Answer.get_class(self.answer_type).objects.filter(question=self)
Example 23
class Question(BaseModel):
    ANSWER_TYPES = [(name, name) for name in Answer.answer_types()]
    identifier = models.CharField(max_length=100,
                                  blank=False,
                                  null=True,
                                  verbose_name='Variable Name')
    text = models.CharField(
        max_length=150,
        blank=False,
        null=False,
        #help_text="To replace the household member's name \
        #in the question, please include the variable FAMILY_NAME in curly brackets, e.g. {{ FAMILY_NAME }}. "
    )
    answer_type = models.CharField(max_length=100,
                                   blank=False,
                                   null=False,
                                   choices=ANSWER_TYPES)
    group = models.ForeignKey(HouseholdMemberGroup, related_name='questions')
    batch = models.ForeignKey('Batch', related_name='batch_questions')
    module = models.ForeignKey("QuestionModule",
                               related_name="questions",
                               default='')

    class Meta:
        app_label = 'survey'
        unique_together = [
            ('identifier', 'batch'),
        ]

    def answers(self):
        return Answer.get_class(self.answer_type).objects.filter(question=self)

    def total_answers(self):
        # utility to get the number of times this question has been answered
        return Answer.get_class(
            self.answer_type).objects.filter(question=self).count()

    def is_loop_start(self):
        from survey.forms.logic import LogicForm
        # actually the more correct way is to check if the next is previous
        return self.connecting_flows.filter(desc=LogicForm.BACK_TO_ACTION).exists()

    def is_loop_end(self):
        from survey.forms.logic import LogicForm
        # actually the more correct way is to check if the connecting question is asked after
        return self.flows.filter(desc=LogicForm.BACK_TO_ACTION).exists()

    @property
    def loop_ender(self):
        try:
            from survey.forms.logic import LogicForm
            return self.connecting_flows.get(
                desc=LogicForm.BACK_TO_ACTION).question
        except QuestionFlow.DoesNotExist:
            inlines = self.batch.questions_inline()
            # no explicit return here; falls through to None

    @property
    def looper_flow(self):
        #if self.is_loop_start() or self.is_loop_end():
        return self.batch.get_looper_flow(self)

    def loop_boundary(self):
        return self.batch.loop_back_boundaries().get(self.pk, None)

    # def loop_inlines(self):

    def delete(self, using=None):
        '''
        Delete related answers before deleting this object
        :param using:
        :return:
        '''
        answer_class = Answer.get_class(self.answer_type)
        answer_class.objects.filter(question=self).delete()
        return super(Question, self).delete(using=using)

    def display_text(self, channel=None):
        text = self.text
        if (channel == USSDAccess.choice_name()
                and self.answer_type == MultiChoiceAnswer.choice_name()):
            extras = []
            # append question options
            for option in self.options.all().order_by('order'):
                extras.append(option.to_text)
            text = '%s\n%s' % (text, '\n'.join(extras))
        return text

    def next_question(self, reply):
        flows = self.flows.all()
        answer_class = Answer.get_class(self.answer_type)
        resulting_flow = None
        for flow in flows:
            if flow.validation_test:
                test_values = [arg.param for arg in flow.text_arguments]
                if getattr(answer_class,
                           flow.validation_test)(reply, *test_values) is True:
                    resulting_flow = flow
                    break
            else:
                resulting_flow = flow
        if resulting_flow:
            return resulting_flow.next_question

    def previous_inlines(self):
        inlines = self.batch.questions_inline()
        if self not in inlines:
            raise ValidationError('%s not inline' % self.identifier)
        previous = []
        for q in inlines:
            if q.identifier == self.identifier:
                break
            else:
                previous.append(q)
        return set(previous)

    def direct_sub_questions(self):
        from survey.forms.logic import LogicForm
        sub_flows = self.flows.filter(desc=LogicForm.SUBQUESTION_ACTION,
                                      validation_test__isnull=False)
        return OrderedSet([flow.next_question for flow in sub_flows])

    def conditional_flows(self):
        return self.flows.filter(validation_test__isnull=False)

    def preceeding_conditional_flows(self):
        return self.connecting_flows.filter(validation_test__isnull=False)

    def __unicode__(self):
        return "%s - %s: (%s)" % (self.identifier, self.text,
                                  self.answer_type.upper())

    def save(self, *args, **kwargs):
        if self.answer_type not in [
                MultiChoiceAnswer.choice_name(),
                MultiSelectAnswer.choice_name()
        ]:
            self.options.all().delete()
        return super(Question, self).save(*args, **kwargs)

    @classmethod
    def zombies(cls, batch):
        # these are the batch questions that do not belong to any flow in any way
        survey_questions = batch.survey_questions
        return batch.batch_questions.exclude(
            pk__in=[q.pk for q in survey_questions])

    def hierarchical_result_for(self, location_parent, survey):
        locations = location_parent.get_children().order_by('name')[:10]
        answers = self.multichoiceanswer.all()
        return self._format_answer(locations, answers, survey)

    def _format_answer(self, locations, answers, survey):
        question_options = self.options.all()
        data = OrderedDict()
        for location in locations:
            households = Household.all_households_in(location, survey)
            data[location] = {
                option.text: answers.filter(
                    value=option,
                    interview__householdmember__household__in=households).
                count()
                for option in question_options
            }
        return data
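
display_text above appends the option list to multiple-choice questions on the USSD channel, one option per line. A standalone sketch of that rendering with hypothetical (order, text) options; option.to_text in the real code may format differently:

text = 'Do you own a radio?'
options = [(1, 'Yes'), (2, 'No')]  # hypothetical options
extras = ['%s: %s' % (order, label) for order, label in sorted(options)]
print('%s\n%s' % (text, '\n'.join(extras)))
# Do you own a radio?
# 1: Yes
# 2: No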
Example 24
 def total_answers(self):
     return Answer.get_class(
         self.answer_type).objects.filter(
         question=self).count()
Example 25
from django.db import models
from django.db.models import Max
from survey.models.householdgroups import HouseholdMemberGroup
from survey.models.locations import Location
from survey.models.surveys import Survey
from survey.models.base import BaseModel
from survey.utils.views_helper import get_descendants
from survey.models.questions import Question, QuestionFlow
from survey.models.access_channels import InterviewerAccess
# from survey.models.enumeration_area import EnumerationArea
from survey.models.interviews import AnswerAccessDefinition, Answer
from survey.models.access_channels import ODKAccess
from ordered_set import OrderedSet

ALL_GROUPS = HouseholdMemberGroup.objects.all()
ALL_ANSWERS = Answer.answer_types()


class Batch(BaseModel):
    order = models.PositiveIntegerField(null=True)  # max_length is not a valid option on an integer field
    name = models.CharField(max_length=100, blank=False, null=True)
    description = models.CharField(max_length=300, blank=True, null=True)
    survey = models.ForeignKey(Survey, null=True, related_name="batches")
#     eas = models.ManyToManyField(EnumerationArea, related_name='batches', null=True) #enumeration areas for which this Batch is open
#     group = models.ForeignKey("HouseholdMemberGroup", null=True, related_name="question_group")
    start_question = models.OneToOneField(Question, related_name='starter_batch', null=True, blank=True, on_delete=models.SET_NULL)
    BATCH_IS_OPEN_MESSAGE = "Batch cannot be deleted because it is open in %s."
    BATCH_HAS_ANSWERS_MESSAGE = "Batch cannot be deleted because it has responses."

    class Meta:
        app_label = 'survey'
        unique_together = [('survey', 'name',)]
Example 26
 def get_odk_constraint(self, test_question):
     answer_class = Answer.get_class(test_question.answer_type)
     return mark_safe(answer_class.print_odk_validation('.', self.validation_test, *self.test_params))
Example 27
 def validate(self, value, test_question):
     answer_class = Answer.get_class(test_question.answer_type)
     method = getattr(answer_class, self.validation_test, None)
     return method(value, *self.test_params)
Example 28
 def validator_names(self):
     return [v.__name__ for v in Answer.get_class(self.answer_type).validators()]
Example 29
from django.db import models
from survey.models.householdgroups import HouseholdMemberGroup
from survey.models.surveys import Survey
from survey.models.base import BaseModel
from survey.utils.views_helper import get_descendants
from survey.models.questions import Question, QuestionFlow
from survey.forms.logic import LogicForm
from survey.models.access_channels import InterviewerAccess
# from survey.models.enumeration_area import EnumerationArea
from survey.models.interviews import AnswerAccessDefinition, Answer
from survey.models.access_channels import ODKAccess
from django.core.exceptions import ValidationError
from ordered_set import OrderedSet
from collections import OrderedDict
from cacheops import cached_as
from django.conf import settings

ALL_GROUPS = HouseholdMemberGroup.objects.all()
ALL_ANSWERS = Answer.answer_types()


class Batch(BaseModel):
    order = models.PositiveIntegerField(null=True)
    name = models.CharField(max_length=100,
                            blank=False,
                            null=True,
                            db_index=True)
    description = models.CharField(max_length=300, blank=True, null=True)
    survey = models.ForeignKey(Survey, null=True, related_name="batches")
    #     eas = models.ManyToManyField(EnumerationArea, related_name='batches', null=True) #enumeration areas for which this Batch is open
    #     group = models.ForeignKey("HouseholdMemberGroup", null=True, related_name="question_group")
    start_question = models.OneToOneField(Question,
                                          related_name='starter_batch',
                                          null=True,
                                          blank=True,
                                          on_delete=models.SET_NULL)
Example 30
class QuestionFlow(CloneableMixin, BaseModel):
    VALIDATION_TESTS = [(validator.__name__, validator.__name__)
                        for validator in Answer.validators()]
    question = models.ForeignKey(Question, related_name='flows')
    question_type = models.CharField(max_length=100)
    validation = models.ForeignKey(ResponseValidation, null=True, blank=True, related_name='flows')
    # if validation passes, classify this flow response as having this value
    name = models.CharField(max_length=200, null=True, blank=True)
    # this would provide a brief description of this flow
    desc = models.CharField(max_length=200, null=True, blank=True)
    next_question = models.ForeignKey(
        Question,
        related_name='connecting_flows',
        null=True,
        blank=True,
        on_delete=models.SET_NULL)
    next_question_type = models.CharField(max_length=100)

    def params_display(self):
        return self.text_arguments.values_list('param', flat=True)

    @property
    def validation_test(self):
        if self.validation:
            return self.validation.validation_test

    @validation_test.setter
    def validation_test(self, test):
        if self.validation:
            self.validation.validation_test = test
            self.validation.save()
        else:
            self.validation = ResponseValidation.objects.create(validation_test=test)

    class Meta:
        app_label = 'survey'
        # unique_together = [('question', 'next_question', 'desc', ),]

    @property
    def test_params(self):
        if self.validation:
            return self.validation.test_params

    @property
    def text_arguments(self):
        if self.validation:
            return self.validation.text_arguments
        else:
            from survey.models import TextArgument
            return TextArgument.objects.none()

    @property
    def test_arguments(self):
        if self.validation:
            return self.validation.test_arguments
        else:
            from survey.models import TextArgument
            return TextArgument.objects.none()

    def save(self, *args, **kwargs):
        invalidate_obj(self.question)
        invalidate_obj(QuestionSet.get(pk=self.question.qset.pk))
        if self.next_question:
            invalidate_obj(self.next_question)
        return super(QuestionFlow, self).save(*args, **kwargs)
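
QuestionFlow's validation_test is a delegating property: reads and writes pass through the related validation object, and the setter creates one on first write. A self-contained sketch of that shape, with FakeValidation standing in for ResponseValidation:

class FakeValidation(object):
    def __init__(self, validation_test):
        self.validation_test = validation_test


class Flow(object):
    def __init__(self):
        self.validation = None

    @property
    def validation_test(self):
        if self.validation:
            return self.validation.validation_test

    @validation_test.setter
    def validation_test(self, test):
        if self.validation:
            self.validation.validation_test = test
        else:
            self.validation = FakeValidation(validation_test=test)


flow = Flow()
flow.validation_test = 'between'
print(flow.validation_test)  # between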
Example 31
 def prepped_args(self):
     answer_class = Answer.get_class(self.test_question.answer_type)
     return [answer_class.prep_value(val) for val in self.test_params]