Exemple #1
0
 def __init__(self, *args, **kwargs):
     """Build a rating form for one study question.

     Custom kwargs (all required; popped before the parent ``__init__`` so
     the base form never sees them):
         is_test: discarded here (only removed from kwargs)
         question: the study question being rated
         item_question: optional per-item override of question text/legend/scale
         question_property: optional per-question settings (e.g. scale order)
         feedbacks: feedback objects stored for later use

     NOTE(review): ``self.study`` is read below but never assigned here —
     it must be provided elsewhere (e.g. as a class attribute); confirm.
     """
     kwargs.pop('is_test')  # unused in this form, but must not reach super()
     self.question = kwargs.pop('question')
     item_question = kwargs.pop('item_question')
     question_property = kwargs.pop('question_property')
     self.feedbacks = kwargs.pop('feedbacks')
     super().__init__(*args, **kwargs)
     self.fields['scale_value'].empty_label = None
     # Comment field is hidden, required, or optional per the question setting.
     if self.question.rating_comment == self.question.RATING_COMMENT_NONE:
         self.fields['comment'].widget = forms.HiddenInput()
     elif self.question.rating_comment == self.question.RATING_COMMENT_REQUIRED:
         self.fields['comment'].label = self.study.comment_label
         self.fields['comment'].required = True
     else:
         self.fields['comment'].label = '{} ({})'.format(self.study.comment_label, self.study.optional_label)
     self.fields['question'].initial = self.question.number
     # Direct indexing (not .get) so a missing field fails loudly with KeyError
     # instead of an AttributeError two lines later.
     scale_value = self.fields['scale_value']
     scale_value.queryset = scale_value.queryset.filter(question=self.question)
     # Item-level question text/legend take precedence over the study question's.
     scale_value.label = (
         item_question.question if item_question and item_question.question else self.question.question
     )
     scale_value.help_text = item_question.legend if item_question and item_question.legend else self.question.legend
     choices = scale_value.choices
     if item_question and item_question.scale_labels:
         # Relabel choices in order; zip stops at the shorter sequence.
         custom_choices = []
         for (pk, _), custom_label in zip(scale_value.choices, split_list_string(item_question.scale_labels)):
             custom_choices.append((pk, custom_label))
         choices = custom_choices
     if question_property and question_property.scale_order:
         # scale_order is a comma-separated list of 0-based positions.
         reordered_choices = []
         for pos in question_property.scale_order.split(','):
             reordered_choices.append(list(choices)[int(pos)])
         choices = reordered_choices
     scale_value.choices = choices
Exemple #2
0
 def save_form(self, form, number):
     """Save one question form and synchronize its scale values.

     Sets study/number on the instance, lets the parent save it, then
     creates/relabels one ScaleValue per non-empty label and deletes any
     surplus values.  When the number of scale values changes and the study
     has item questions, the materials' items are invalidated.
     """
     form.instance.study = self.study
     form.instance.number = number
     super().save_form(form, number)
     scale_labels = split_list_string(form.cleaned_data['scale_labels'])
     new_count = len(scale_labels)
     number_changed = False
     old_count = form.instance.scale_values.count()
     if old_count > new_count:
         # BUG FIX: the original sliced the queryset and called delete()
         # ("scale_values.all()[n:].delete()"), which Django rejects
         # ("Cannot use 'limit' or 'offset' with delete"), and the slice
         # start was wrong anyway (it kept n_deleted values instead of
         # new_count).  Delete the surplus values by their position field.
         form.instance.scale_values.filter(number__gte=new_count).delete()
         number_changed = True
     scale_values = []
     for j, scale_label in enumerate(scale_labels):
         if scale_label:  # empty labels are skipped, leaving gaps in numbering
             scale_value, created = models.ScaleValue.objects.get_or_create(
                 number=j,
                 question=form.instance,
             )
             scale_value.label = scale_label
             scale_values.append(scale_value)
             if created:
                 number_changed = True
     if number_changed and self.study.has_item_questions:
         # Item questions reference scale values by position; force revalidation.
         self._invalidate_materials_items()
     for scale_value in scale_values:
         scale_value.save()
Exemple #3
0
 def clean_scale_labels(self):
     """Validate that the edited scale keeps its original number of labels.

     Returns the raw field value unchanged; raises ValidationError when the
     label count differs from the question's existing scale value count.
     """
     data = self.cleaned_data['scale_labels']
     scale_labels = split_list_string(data)
     if len(scale_labels) != self.question.scale_values.count():
         # BUG FIX: the original message ended in a "???" placeholder.
         raise ValidationError(
             'Invalid number of scale labels. Must match the original scale.')
     return data
Exemple #4
0
 def check_upload_form(self, reader, cleaned_data):
     """Assert-validate every CSV row of an item upload.

     Each check raises AssertionError (or ValueError for non-numeric cells)
     on the first offending row; blank rows are skipped.  Column numbers in
     ``cleaned_data`` are 1-based; 0 means "column not present".
     """
     min_columns = contrib_csv.get_min_columns(cleaned_data)
     for row in reader:
         if not row:
             continue
         assert len(row) >= min_columns
         # Item number must parse as an integer.
         int(row[cleaned_data['number_column'] - 1])
         condition = row[cleaned_data['condition_column'] - 1]
         assert condition
         assert len(condition) <= 16
         content = row[cleaned_data['content_column'] - 1]
         assert content
         if self.study.has_audiolink_items:
             validate_urls(content)
         if cleaned_data['block_column'] > 0:
             int(row[cleaned_data['block_column'] - 1])
         for question in self.study.questions.all():
             num = question.number + 1
             question_col = cleaned_data['question_{}_question_column'.format(num)]
             if question_col > 0:
                 assert row[question_col - 1]
             scale_col = cleaned_data['question_{}_scale_column'.format(num)]
             if scale_col > 0:
                 cell = row[scale_col - 1]
                 assert cell
                 scale_values = split_list_string(cell)
                 # Scale override must match the question's scale size and
                 # respect the label length limit.
                 assert len(scale_values) == question.scale_values.count()
                 assert all(
                     len(label) <= study_models.ScaleValue.LABEL_MAX_LENGTH
                     for label in scale_values)
             legend_col = cleaned_data['question_{}_legend_column'.format(num)]
             if legend_col > 0:
                 assert row[legend_col - 1]
Exemple #5
0
 def clean(self):
     """Cross-field validation: every scale value must belong to the question.

     Returns the cleaned data.  Raises ValidationError when any listed scale
     value is not valid for the selected question.
     """
     data = super().clean()
     question = data.get('question')
     raw_scale_values = data.get('scale_values')
     # BUG FIX: when either field already failed its own validation it is
     # absent from cleaned_data; the original then crashed on None instead
     # of letting the field errors surface.
     if question is not None and raw_scale_values is not None:
         scale_values = split_list_string(raw_scale_values)
         if not all(
                 question.is_valid_scale_value(scale_value)
                 for scale_value in scale_values):
             raise ValidationError('Invalid scale values')
     return data
Exemple #6
0
 def validate(self, value):
     """Check that *value* encodes at least two labels, each within the
     ScaleValue label length limit."""
     super().validate(value)
     labels = utils.split_list_string(value)
     if len(labels) < 2:
         raise forms.ValidationError('At least two values must be entered.')
     max_length = models.ScaleValue.LABEL_MAX_LENGTH
     # Named loop variable avoids shadowing the *value* parameter.
     if any(len(label) > max_length for label in labels):
         raise forms.ValidationError(
             'A scale value is too long. Limit: {} characters.'.format(
                 max_length))
Exemple #7
0
 def show_feedback(self, scale_value):
     """Return True when this feedback is configured for *scale_value*'s label."""
     configured_labels = split_list_string(self.scale_values)
     return scale_value.label in configured_labels
Exemple #8
0
    def validate_items(self):
        """Validate the item set of these materials against the study setup.

        Checks, in order: item presence, per-item content (text/markdown/
        URLs), item number/condition sequencing, item-question scale sizes,
        and duplicate stimuli.  Hard errors raise AssertionError; soft
        findings are collected into the returned list of warning strings.
        On success the items are marked validated and item lists are
        (re)created.
        """
        warnings = []
        conditions = []
        self.set_items_validated(False)

        items = self.items.prefetch_related('materials', 'textitem',
                                            'markdownitem', 'audiolinkitem',
                                            'item_questions')
        items = list(items.all())
        if len(items) == 0:
            raise AssertionError('No items.')

        # Collect the condition sequence from the leading run of items; the
        # first repeated condition ends the run (items are assumed ordered
        # item 1/cond 1, item 1/cond 2, ..., item 2/cond 1, ...).
        for item in items:
            if item.condition not in conditions:
                conditions.append(item.condition)
            else:
                break

        n_items = len(items)
        if self.item_list_distribution == self.LIST_DISTRIBUTION_LATIN_SQUARE:
            # Latin-square lists require a full set of conditions per item.
            # NOTE(review): the message interpolates the condition *names*
            # where its text announces a condition *count* — confirm intent.
            if n_items % self.condition_count != 0:
                msg = 'Number of stimuli is not a multiple of the number of conditions (stimuli: {}, conditions: {})'.format(
                    n_items, ', '.join('"{}"'.format(condition)
                                       for condition in conditions))
                raise AssertionError(msg)

        questions = list(self.study.questions.all())
        item_number = 0
        for i, item in enumerate(items):
            # Every item must have content matching the study's item type.
            if self.study.has_text_items:
                if not item.textitem.text:
                    raise AssertionError('Item {} has no text.'.format(item))
            elif self.study.has_markdown_items:
                if not item.markdownitem.text:
                    raise AssertionError('Item {} has no text.'.format(item))
            elif self.study.has_audiolink_items:
                if not item.audiolinkitem.urls:
                    raise AssertionError('Item {} has no URLs.'.format(item))

            # Expected position: item_number advances every condition_count
            # items; the condition cycles through the collected sequence.
            if i % self.condition_count == 0:
                item_number += 1
            if item.number != item_number or item.condition != conditions[
                    i % self.condition_count]:
                msg = 'Item "{}" was not expected. Check whether item number/condition is correct.'.format(
                    item)
                raise AssertionError(msg)

            # Per-item question overrides must reference an existing study
            # question and keep the study question's scale size.
            for item_question in item.item_questions.all():
                if item_question.number >= len(questions):
                    raise AssertionError(
                        'For item question validation the study question(s) must be defined first.'
                    )
                if item_question.scale_labels and \
                        len(split_list_string(item_question.scale_labels)) !=  \
                        questions[item_question.number].scale_values.count():
                    msg = 'Scale of the item question "{}" does not match the study question {} ' \
                          'scale.'.format(item, item_question.number + 1)
                    raise AssertionError(msg)

        # Duplicate detection.  NOTE(review): itertools.groupby only groups
        # *consecutive* equal keys; items are iterated in number/condition
        # order here, so only adjacent duplicates are detected — confirm
        # whether that is intended.
        if self.study.has_text_items or self.study.has_markdown_items:
            if self.study.has_text_items:
                items_by_text = groupby(items, lambda x: x.textitem.text)
            else:
                items_by_text = groupby(items, lambda x: x.markdownitem.text)
            # "items" is rebound to each duplicate group below; n_items and
            # item_number were already captured above.
            for _, items_with_same_text in items_by_text:
                items = list(items_with_same_text)
                if len(items) > 1:
                    warnings.append('Items {} have the same text.'.format(
                        self._warn_items_string(items)))
        elif self.study.has_audiolink_items:
            # Flatten to (url, item) pairs so the same URL can be flagged
            # both across items and within a single item.
            item_links = []
            for item in items:
                item_links.extend([(url, item)
                                   for url in item.audiolinkitem.urls_list])
            items_by_link = groupby(item_links, lambda x: x[0])
            for _, items_with_same_link in items_by_link:
                item_list = [
                    item_link[1] for item_link in items_with_same_link
                ]
                if item_list:
                    items = set(item_list)
                    if len(items) > 1:
                        warnings.append('Items {} have the same URL.'.format(
                            self._warn_items_string(items)))
                    item_counter = Counter(item_list)
                    duplicate_items = [
                        item for item in item_counter if item_counter[item] > 1
                    ]
                    if duplicate_items:
                        warnings.append(
                            'Items {} use the same URL multiple times.'.format(
                                self._warn_items_string(duplicate_items)))
        # Summary is always appended, even when there are no other warnings.
        msg = 'Detected {} items with following conditions: {} (sum: {} stimuli).'.format(
            item_number,
            ', '.join('"{}"'.format(condition) for condition in conditions),
            n_items,
        )
        warnings.append(msg)
        self.items_validated = True
        self.save()
        self.create_item_lists()
        return warnings