Example #1
    def before_import(self,
                      dataset,
                      result,
                      using_transactions,
                      dry_run=True,
                      **kwargs):
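        # Expected columns per row: [1] exam name, [2] question category,
        # [3] question subcategory, [4] figure image or 'n' for a question
        # row, [5]-[10] question text, explanation, correct and wrong answers.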
        session_no = 1
        if len(dataset) > 0:  # seed previous-row trackers from the first row
            prev_exam_name = dataset[0][1]
            prev_figure = dataset[0][4]
        for row in dataset:
            exam_name = row[1]
            q_category = row[2]
            q_subcategory = row[3]
            figure = row[4]
            exam, crt = ELearning.objects.get_or_create(
                name=exam_name, exam_type=Exam.ELEARNING)
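            # A figure row that follows a question row opens a new session;
            # a new exam name resets the session counter.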
            if figure != 'n' and prev_figure == 'n':
                session_no += 1
            if prev_exam_name != exam_name:
                session_no = 1
            prev_exam_name = exam_name
            prev_figure = figure
            session, crt = ELearningSession.objects.get_or_create(
                elearning=exam, number=session_no)
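            # Figure rows become slides attached to the session; 'n' rows
            # carry question data and answers.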
            if figure != 'n':
                s, crt = Slide.objects.get_or_create(elearning=exam,
                                                     image=figure)
                if crt:
                    session.slides.add(s)
                    session.save()
            else:
                q_text = row[5]
                q_explanation = row[6]
                correct_answer_text = row[7]
                wrong_1 = row[8]
                wrong_2 = row[9]
                wrong_3 = row[10]
                q, crt = Question.objects.get_or_create(exam=exam, text=q_text)
                if crt:
                    session.questions.add(q)
                    session.save()
                    q.explanation = q_explanation
                    q.text = q_text
                    q.category = q_category
                    q.subcategory = q_subcategory
                    q.save()
                    Answer.objects.create(question=q,
                                          text=correct_answer_text,
                                          correct=True)
                    Answer.objects.create(question=q, text=wrong_1)
                    Answer.objects.create(question=q, text=wrong_2)
                    Answer.objects.create(question=q, text=wrong_3)

        return Result()
Example #3
    def import_file(self, request, resource_class, skip_update, filename=None):
        """Imports data from file into database."""

        input_file = self.get_file(request, filename)
        dataset, error_msg = self.file_to_dataset(input_file)
        logger.debug('dataset %s', dataset)
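        # A file that could not be converted to a dataset yields an empty
        # Result; error_msg tells the caller what went wrong.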
        if dataset is None:
            result = Result()
        else:
            resource = self.initialize_resource(resource_class, skip_update)
            result = self.dataset_to_database(resource, dataset)

        return result, error_msg
Example #4
    def test_log_entry_is_added(self):
        initial_count = Journal.objects.count()

        user = AidantFactory(is_staff=True, is_superuser=True)
        factory = RequestFactory()
        request = factory.get("/")
        request.user = user

        result = Result()
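        # An empty import Result is enough for generate_log_entries to add
        # a single Journal entry.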

        a = CarteTOTPAdmin(CarteTOTP, self.admin)
        a.generate_log_entries(result, request)

        self.assertEqual(Journal.objects.count(), 1 + initial_count)
    def test_adapt_context(self):
        result = Result()
        result.totals['new'] = 1
        result.totals['update'] = 2
        result.totals['delete'] = 3
        result.totals['skip'] = 4
        result.totals['error'] = 5
        result.totals['invalid'] = 6
        error_msg = "lalala"
        context = self.adapt_context(result, error_msg)

        self.assertEqual(
            context, {
                'new_rows': 1,
                'update_rows': 2,
                'delete_rows': 3,
                'skip_rows': 4,
                'error_rows': 5,
                'invalid_rows': 6,
                'error_msg': error_msg
            })
class ResultTest(TestCase):
    def setUp(self):
        self.result = Result()
        headers = ['id', 'book_name']
        rows = [(1, 'Some book')]
        self.dataset = Dataset(*rows, headers=headers)

    def test_add_dataset_headers(self):
        target = ['Error']
        self.result.add_dataset_headers([])
        self.assertEqual(target, self.result.failed_dataset.headers)

    def test_result_append_failed_row_with_ValidationError(self):
        target = [[1, 'Some book', "['some error']"]]
        self.result.append_failed_row(self.dataset.dict[0],
                                      ValidationError('some error'))
        self.assertEqual(target, self.result.failed_dataset.dict)

    def test_result_append_failed_row_with_wrapped_error(self):
        target = [[1, 'Some book', "['some error']"]]
        row_result = RowResult()
        error = Error(ValidationError('some error'))
        row_result.errors = [error]
        self.result.append_failed_row(self.dataset.dict[0],
                                      row_result.errors[0])
        self.assertEqual(target, self.result.failed_dataset.dict)

    def test_add_instance_info_null_instance(self):
        row_result = RowResult()
        row_result.add_instance_info(None)
        self.assertEqual(None, row_result.object_id)
        self.assertEqual(None, row_result.object_repr)

    def test_add_instance_info_no_instance_pk(self):
        row_result = RowResult()
        row_result.add_instance_info(Book())
        self.assertEqual(None, row_result.object_id)
        self.assertEqual("", row_result.object_repr)

    def test_add_instance_info(self):
        class BookWithObjectRepr(Book):
            def __str__(self):
                return self.name

        row_result = RowResult()
        row_result.add_instance_info(BookWithObjectRepr(pk=1,
                                                        name="some book"))
        self.assertEqual(1, row_result.object_id)
        self.assertEqual("some book", row_result.object_repr)
Example #8
    def test_ResultType(self):
        x = Result()
        self.assertEqual(co.ResultType(x), None)
        x = 'aaa'
        self.assertRaises(ValueError, co.ResultType, x)
Example #9
    def import_data(self,
                    dataset,
                    dry_run=False,
                    raise_errors=False,
                    use_transactions=None,
                    **kwargs):
        result = Result()
        # Diff columns: 'Fabric' followed by one column per storehouse country.
        headers = [storehouse.country for storehouse in self.storehouses]
        headers.insert(0, 'Fabric')
        result.diff_headers = headers

        result.totals = OrderedDict([(RowResult.IMPORT_TYPE_NEW, 0),
                                     (RowResult.IMPORT_TYPE_UPDATE, 0),
                                     (RowResult.IMPORT_TYPE_DELETE, 0),
                                     (RowResult.IMPORT_TYPE_SKIP, 0),
                                     (RowResult.IMPORT_TYPE_ERROR, 0),
                                     ('total', len(dataset))])

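        # With transactions enabled, rows are really written inside a
        # savepoint; a dry run is simulated by rolling it back at the end.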
        if use_transactions is None:
            use_transactions = self.get_use_transactions()

        if use_transactions is True:
            real_dry_run = False
            sp1 = savepoint()
        else:
            real_dry_run = dry_run

        try:
            self.before_import(dataset, real_dry_run, **kwargs)
        except Exception as e:
            logging.exception(e)
            tb_info = traceback.format_exc()
            result.base_errors.append(Error(e, tb_info))
            if raise_errors:
                if use_transactions:
                    savepoint_rollback(sp1)
                raise

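        # Index existing fabrics by code so each row can be matched without
        # a per-row query.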
        fabric_dict = {x.code: x for x in self.get_queryset()}

        for row in dataset.dict:
            try:
                row_result = RowResult()
                code = row['Fabric']
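                # Rows with an invalid fabric code are skipped; valid codes
                # with no matching Fabric create a new instance.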
                if not Fabric.is_valid_code(code):
                    continue
                try:
                    instance = fabric_dict[row['Fabric']]
                    new = False
                except KeyError:
                    instance = self._meta.model(code=row['Fabric'])
                    new = True
                instance.residuals_set = row
                if new:
                    row_result.import_type = RowResult.IMPORT_TYPE_NEW
                else:
                    row_result.import_type = RowResult.IMPORT_TYPE_UPDATE
                row_result.new_record = new
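                # Snapshot the instance so the delete branch can diff the
                # removed values against None.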
                original = deepcopy(instance)
                if self.for_delete(row, instance):
                    if new:
                        row_result.import_type = RowResult.IMPORT_TYPE_SKIP
                        row_result.diff = self.get_diff(
                            None, None, real_dry_run)
                    else:
                        row_result.import_type = RowResult.IMPORT_TYPE_DELETE
                        self.delete_instance(instance, real_dry_run)
                        row_result.diff = self.get_diff(
                            original, None, real_dry_run)
                else:
                    if not real_dry_run:
                        with transaction.atomic():
                            self.save_instance(instance, real_dry_run)
                    row_result.object_repr = force_text(instance)
                    row_result.object_id = instance.pk
                    result.totals[row_result.import_type] += 1
                    row_result.diff = self.get_diff(instance, instance,
                                                    real_dry_run)
            except Exception as e:
                # There is no point logging a transaction error for each row
                # when only the original error is likely to be relevant
                if not isinstance(e, TransactionManagementError):
                    logging.exception(e)
                tb_info = traceback.format_exc()
                row_result.errors.append(Error(e, tb_info, row))
                result.totals[row_result.IMPORT_TYPE_ERROR] += 1
                if raise_errors:
                    if use_transactions:
                        savepoint_rollback(sp1)
                    six.reraise(*sys.exc_info())
            if (row_result.import_type != RowResult.IMPORT_TYPE_SKIP
                    or self._meta.report_skipped):
                result.rows.append(row_result)

        # Reset the SQL sequences when new objects are imported
        # Adapted from django's loaddata
        if not dry_run and any(r.import_type == RowResult.IMPORT_TYPE_NEW
                               for r in result.rows):
            connection = connections[DEFAULT_DB_ALIAS]
            sequence_sql = connection.ops.sequence_reset_sql(
                no_style(), [self.Meta.model])
            if sequence_sql:
                with connection.cursor() as cursor:
                    for line in sequence_sql:
                        cursor.execute(line)

        if use_transactions:
            if dry_run or result.has_errors():
                savepoint_rollback(sp1)
            else:
                savepoint_commit(sp1)
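        # List newly created records first in the returned result.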
        result.rows.sort(key=lambda x: x.new_record, reverse=True)
        return result