def passes_test(self, value):
    answer_class = Answer.get_class(self.listing_question.answer_type)
    method = getattr(answer_class, self.validation_test, None)
    if method is None:
        raise ValueError(
            'unsupported validator defined on listing question')
    return method(value, *self.test_params)

def passes_test(self, value):
    answer_class = Answer.get_class(self.listing_question.answer_type)
    method = getattr(answer_class, self.validation_test, None)
    if method not in answer_class.validators():
        raise ValueError(
            'unsupported validator defined on listing question')
    return method(value, *self.test_params)

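# Minimal usage sketch (not part of the original source): apply every randomization
# criterion of a survey to a single raw listing value and keep the ones it satisfies.
# The helper name matching_criteria is hypothetical; survey.randomization_criteria and
# passes_test() are taken from the snippets in this section.
def matching_criteria(survey, raw_value):
    matched = []
    for criterion in survey.randomization_criteria.all():
        try:
            if criterion.passes_test(raw_value):
                matched.append(criterion)
        except ValueError:
            # passes_test() raises ValueError for validators the answer class
            # does not support; such criteria are simply skipped here
            pass
    return matched
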
def get_display_label(self):
    survey = self.survey
    naming_label = survey.random_sample_label
    interview = self.interview
    # pull out the question identifiers referenced in the naming label template
    pattern = '{{ *([0-9a-zA-Z_]+) *}}'
    identifiers = re.findall(pattern, naming_label)
    listing_form = survey.preferred_listing.listing_form if survey.preferred_listing else survey.listing_form
    questions = listing_form.questions.filter(identifier__in=identifiers)
    context = {}
    for question in questions:
        answer_class = Answer.get_class(question.answer_type)
        try:
            answer = answer_class.objects.get(interview=interview, question=question)
            context[question.identifier] = answer.value
        except answer_class.DoesNotExist:
            pass
    question_context = template.Context(context)
    label = template.Template(html.escape(naming_label)).render(question_context)
    # if the rendered label happens to be empty, fall back to the first response
    if not label:
        try:
            label = interview.answer.first().as_text
        except Exception:
            pass
    return label or interview.question_set.name

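# Usage sketch under the assumption that get_display_label() lives on the ListingSample
# model (suggested by self.survey / self.interview above) and that
# ListingSample.samples(survey, ea) is the classmethod referenced further down.
# The helper name sample_labels is hypothetical.
def sample_labels(survey, ea):
    return [sample.get_display_label() for sample in ListingSample.samples(survey, ea)]
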
def reply(self, question):
    if self.belongs_to(question.group):
        answer_class = Answer.get_class(question.answer_type)
        answers = answer_class.objects.filter(
            interview__householdmember=self, question=question)
        if answers.exists():
            reply = unicode(answers[0].to_text())
            return string.capwords(reply)
    return ''

def delete(self, using=None):
    '''
    Delete related answers before deleting this object
    :param using:
    :return:
    '''
    answer_class = Answer.get_class(self.answer_type)
    answer_class.objects.filter(question=self).delete()
    return super(Question, self).delete(using=using)

def next_question(self, reply):
    flows = self.flows.all()
    answer_class = Answer.get_class(self.answer_type)
    resulting_flow = None
    for flow in flows:
        if flow.validation_test:
            test_values = [arg.param for arg in flow.text_arguments]
            if getattr(answer_class, flow.validation_test)(reply, *test_values) == True:
                resulting_flow = flow
                break
        else:
            resulting_flow = flow
    if resulting_flow:
        return resulting_flow.next_question

def next_question(self, reply):
    flows = self.flows.all()
    answer_class = Answer.get_class(self.answer_type)
    resulting_flow = None
    for flow in flows:
        if flow.validation_test:
            test_values = [arg.param for arg in flow.text_arguments]
            if getattr(answer_class, flow.validation_test)(reply, *test_values) is True:
                resulting_flow = flow
                break
        else:
            # a flow without a validation test acts as the default branch
            resulting_flow = flow
    if resulting_flow and resulting_flow.next_question:
        # re-fetch so the returned question knows its concrete type
        return Question.get(id=resulting_flow.next_question.id)

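# Sketch of a hypothetical helper (not part of the source): walk a question flow by
# repeatedly feeding raw replies into next_question() until no follow-up question matches.
def walk_flow(start_question, replies):
    question, path = start_question, []
    for reply in replies:
        if question is None:
            break
        path.append(question)
        question = question.next_question(reply)
    return path
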
@classmethod
def generate_random_samples(cls, from_survey, to_survey, ea):
    """
    Generate random samples from the listing conducted by from_survey, for use by to_survey.
    TODO: optimize this method's queries
    :param from_survey: Survey from which listing was done
    :param to_survey: Survey for whom the random sample is being generated.
    :param ea: the EA where the survey was conducted
    :return: None
    """
    if cls.samples(to_survey, ea).exists():
        raise cls.SamplesAlreadyGenerated('Samples already generated')
    if to_survey.has_sampling is False or from_survey.has_sampling is False:
        raise ValueError('Either source or destination survey does not support sampling')
    # the listed interviews in the EA
    valid_interviews = from_survey.interviews.filter(
        ea=ea,
        question_set=from_survey.listing_form).values_list('id', flat=True)
    # now narrow down to the interviews that meet the randomization criteria
    for criterion in to_survey.randomization_criteria.all():  # need to optimize this
        answer_type = criterion.listing_question.answer_type
        if answer_type == MultiChoiceAnswer.choice_name():
            value_key = 'value__text'
        else:
            value_key = 'value'
        answer_class = Answer.get_class(answer_type)
        kwargs = {
            'question': criterion.listing_question,
            'interview__id__in': valid_interviews,
        }
        valid_interviews = criterion.qs_passes_test(
            value_key,
            answer_class.objects.filter(**kwargs).only(
                'interview__id').values_list('interview__id', flat=True))
    valid_interviews = list(valid_interviews)
    random.shuffle(valid_interviews)
    random_samples = valid_interviews[:to_survey.sample_size]
    samples = []
    for interview_id in random_samples:
        samples.append(ListingSample(survey=to_survey, interview_id=interview_id))
    with transaction.atomic():
        ListingSample.objects.bulk_create(samples)

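# Usage sketch under the assumption that generate_random_samples() is a classmethod on
# ListingSample (the model it bulk-creates): draw samples once per EA and skip EAs that
# already have samples. The helper name ensure_samples is hypothetical.
def ensure_samples(from_survey, to_survey, eas):
    for ea in eas:
        try:
            ListingSample.generate_random_samples(from_survey, to_survey, ea)
        except ListingSample.SamplesAlreadyGenerated:
            pass  # samples for this EA were generated earlier; nothing to do
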
def _get_possible_samples():
    if to_survey.has_sampling is False or from_survey.has_sampling is False:
        raise ValueError(
            'Either source or destination survey does not support sampling')
    # the listed interviews in the EA
    valid_interviews = from_survey.interviews.filter(
        ea=ea,
        question_set=from_survey.listing_form).values_list('id', flat=True)
    # now narrow down to the interviews that meet the randomization criteria
    for criterion in to_survey.randomization_criteria.all():  # need to optimize this
        answer_type = criterion.listing_question.answer_type
        if answer_type == MultiChoiceAnswer.choice_name():
            value_key = 'value__text'
        else:
            value_key = 'value'
        answer_class = Answer.get_class(answer_type)
        kwargs = {
            'question': criterion.listing_question,
            'interview__id__in': valid_interviews,
        }
        valid_interviews = criterion.qs_passes_test(
            value_key,
            answer_class.objects.filter(**kwargs).only(
                'interview__id').values_list('interview__id', flat=True))
    valid_interviews = list(valid_interviews)
    random.shuffle(valid_interviews)
    random_samples = valid_interviews[:to_survey.sample_size]
    samples = []
    for interview_id in random_samples:
        samples.append(ListingSample(survey=to_survey, interview_id=interview_id))
    return samples

def total_answers(self):
    return Answer.get_class(self.answer_type).objects.filter(question=self).count()

def validator_names(self):
    return [v.__name__ for v in Answer.get_class(self.answer_type).validators()]

def qs_passes_test(self, value_key, queryset):
    answer_class = Answer.get_class(self.listing_question.answer_type)
    method = getattr(answer_class, 'fetch_%s' % self.validation_test, None)
    return method(value_key, *self.test_params, qs=queryset)

def get_odk_constraint(self, test_question):
    answer_class = Answer.get_class(test_question.answer_type)
    return mark_safe(answer_class.print_odk_validation(
        '.', self.validation_test, *self.test_params))

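# Hypothetical helper (not from the source): combine the ODK constraints produced by
# several logic conditions for one question into a single expression. Assumes each
# condition exposes get_odk_constraint() as above and that joining with 'and' matches
# the intended ODK constraint semantics.
def combined_odk_constraint(question, conditions):
    parts = [condition.get_odk_constraint(question) for condition in conditions]
    return ' and '.join(part for part in parts if part)
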
def validators(self):
    return Answer.get_class(self.answer_type).validators()

def answers(self):
    return Answer.get_class(self.answer_type).objects.filter(question=self)

def total_answers(self):
    # utility to get the number of times this question has been answered
    return Answer.get_class(self.answer_type).objects.filter(question=self).count()

def validate(self, value, test_question):
    answer_class = Answer.get_class(test_question.answer_type)
    method = getattr(answer_class, self.validation_test, None)
    return method(value, *self.test_params)

def prepped_args(self):
    answer_class = Answer.get_class(self.test_question.answer_type)
    return [answer_class.prep_value(val) for val in self.test_params]

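# Sketch (hypothetical, not from the source): run a condition's configured validator
# against a value using the prep_value()-normalised parameters from prepped_args()
# instead of the raw test_params used by validate() above. Assumes the condition object
# exposes both test_question and validation_test, as the surrounding snippets suggest.
def validate_prepped(condition, value):
    answer_class = Answer.get_class(condition.test_question.answer_type)
    method = getattr(answer_class, condition.validation_test, None)
    if method is None:
        raise ValueError('unsupported validator defined on test condition')
    return method(value, *condition.prepped_args())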