Example #1
    def import_csv(self):
        try:
            reader = csv.DictReader(
                self.cleaned_data['csv_file'],
                fieldnames=self.importer_class.Meta.fields,
                dialect=self.dialect,
            )

            reader_iter = enumerate(reader, 1)
            archives_id = -1  # No archives
            if self.cleaned_data['has_headers']:
                six.advance_iterator(reader_iter)
            if self.cleaned_data['archives']:
                archives_id = self.cleaned_data['archives'].id
            old_archives = self.importer_class.Meta.model.objects.filter(archives_id=archives_id)
            if old_archives:
                if self.cleaned_data['archives_check']:
                    self.append_import_error(_("A definition already exists for this archive."))
                    raise CSVImportError()
                else:
                    old_archives.delete()
            self.process_csv(reader_iter, archives_id)
            if not self.is_valid():
                raise CSVImportError()  # Abort the transaction
        except csv.Error:
            self.append_import_error(_("Bad CSV format"))
            raise CSVImportError()
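The key six.advance_iterator() call here discards the header row: the reader is wrapped in enumerate(reader, 1) so rows keep their 1-based line numbers for error reporting, and the first item is thrown away when the uploaded file has headers. Below is a minimal, self-contained sketch of the same pattern; the field list and inline CSV data are made up for illustration.

import csv

import six

# Hypothetical column names and inline CSV data, for illustration only.
FIELDS = ['name', 'email']
lines = [
    "name,email",                 # header row
    "alice,alice@example.com",
    "bob,bob@example.com",
]

reader = csv.DictReader(lines, fieldnames=FIELDS)
rows = enumerate(reader, 1)

# The data starts with a header row, so discard the first item, exactly
# as the form code above does; six.advance_iterator(it) is equivalent
# to the built-in next(it).
six.advance_iterator(rows)

for line_number, row in rows:
    print("%d: %s <%s>" % (line_number, row['name'], row['email']))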
Example #2
    def import_csv(self):
        try:
            reader = csv.DictReader(
                self.cleaned_data['csv_file'],
                fieldnames=self.importer_class._meta.fields,
                dialect=self.dialect,
            )

            reader_iter = enumerate(reader, 1)
            if self.cleaned_data['has_headers']:
                six.advance_iterator(reader_iter)

            self.process_csv(reader_iter)
            if not self.is_valid():
                raise CSVImportError()  # Abort the transaction
        except csv.Error:
            self.append_import_error(_("Bad CSV format"))
            raise CSVImportError()
Example #3
    def test_convert_fileline_to_dict(self):
        check_against_dict = {
            'city_id': '1',
            'city_name': 'Хмельницкий',
            'region_name': 'Хмельницкая область',
            'district_name': 'Центральная Украина',
            'longitude': '49.416668',
            'latitude': '27.000000'
        }

        backend = IpGeobase()
        generator = backend._line_to_dict(
            file=io.open(os.path.join(TEST_STATIC_DIR, 'cities.txt'),
                         encoding=settings.IPGEOBASE_FILE_ENCODING),
            field_names=settings.IPGEOBASE_CITIES_FIELDS)
        result = six.advance_iterator(generator)
        self.assertEqual(result, check_against_dict)
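Here six.advance_iterator() pulls the first item out of the _line_to_dict generator. The sketch below is a rough, hypothetical illustration of what such a generator can look like; it is not the actual IpGeobase._line_to_dict implementation, and the helper name, tab separator, and sample values are assumptions.

import six


def line_to_dict(lines, field_names, separator='\t'):
    # Hypothetical helper: yield one dict per line, pairing field_names
    # with the separated values. This is not the real ipgeobase API.
    for line in lines:
        values = line.rstrip('\r\n').split(separator)
        yield dict(zip(field_names, values))


gen = line_to_dict(['1\tKyiv\t50.45\t30.52'],
                   field_names=['city_id', 'city_name', 'latitude', 'longitude'])
first_row = six.advance_iterator(gen)  # equivalent to next(gen)
# first_row == {'city_id': '1', 'city_name': 'Kyiv',
#               'latitude': '50.45', 'longitude': '30.52'}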
Example #4
def parse_tag_input(input):
    """
    Parses tag input. Multiple-word tags are delimited by commas or
    enclosed in double quotes; quotes take precedence, so quoted tags
    may contain commas.

    Returns a sorted list of unique tag names.
    """
    if not input:
        return []

    input = force_text(input)

    # Special case: if there are no commas or double quotes in the
    # input, we only need to split on spaces.
    if u',' not in input and u'"' not in input:
        words = list(set(split_strip(input, u' ')))
        words.sort()
        return words

    words = []
    buffer = []
    # Defer splitting of non-quoted sections until we know if there are
    # any unquoted commas.
    to_be_split = []
    saw_loose_comma = False
    open_quote = False
    i = iter(input)
    try:
        while 1:
            c = six.advance_iterator(i)
            if c == u'"':
                if buffer:
                    to_be_split.append(u''.join(buffer))
                    buffer = []
                # Find the matching quote
                open_quote = True
                c = six.advance_iterator(i)
                while c != u'"':
                    buffer.append(c)
                    c = six.advance_iterator(i)
                if buffer:
                    word = u''.join(buffer).strip()
                    if word:
                        words.append(word)
                    buffer = []
                open_quote = False
            else:
                if not saw_loose_comma and c == u',':
                    saw_loose_comma = True
                buffer.append(c)
    except StopIteration:
        # If we were parsing an open quote which was never closed,
        # treat the buffer as unquoted.
        if buffer:
            if open_quote and u',' in buffer:
                saw_loose_comma = True
            to_be_split.append(u''.join(buffer))
    if to_be_split:
        if saw_loose_comma:
            delimiter = u','
        else:
            delimiter = u' '
        for chunk in to_be_split:
            words.extend(split_strip(chunk, delimiter))
    words = list(set(words))
    words.sort()
    return words
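For reference, a few input/output examples for the parser above, assuming split_strip splits on the given delimiter, strips whitespace, and drops empty pieces (as django-tagging's helper does):

parse_tag_input('apple fruit banana')
# ['apple', 'banana', 'fruit']

parse_tag_input('apple, "stone fruit", banana')
# ['apple', 'banana', 'stone fruit']

parse_tag_input('"red delicious" apple')
# ['apple', 'red delicious']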