Example #1
def cache_first_rows(import_file, parser):
    """Cache headers, and rows 2-6 for validation/viewing.

    :param import_file: ImportFile inst.
    :param parser: unicode-csv.Reader instance.

    Unfortunately, this is duplicated logic from data_importer,
    but since data_importer makes many faulty assumptions we need to do
    it differently.

    """
    parser.seek_to_beginning()
    rows = parser.next()

    validation_rows = []
    for i in range(5):
        try:
            row = rows.next()
            if row:
                validation_rows.append(row)
        except StopIteration:
            # Fewer than five data rows in the file.
            break

    import_file.cached_second_to_fifth_row = "\n".join(
        ROW_DELIMITER.join(map(str, r.values()))
        for r in validation_rows
    )
    # Every row dict shares the header names as its keys, so read them from
    # a row we already cached instead of consuming (and possibly exhausting)
    # the iterator.
    first_row = validation_rows[0].keys() if validation_rows else None
    if first_row:
        first_row = ROW_DELIMITER.join(first_row)
    import_file.cached_first_row = first_row or ''

    import_file.save()
    # Reset our file pointer for mapping.
    parser.seek_to_beginning()
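
The serialization step above is worth seeing in isolation: each cached row is flattened into one delimited string, and the per-row strings are then joined with newlines. A minimal, self-contained sketch of that pattern; the delimiter value and the sample rows are assumptions for illustration, since the real ROW_DELIMITER constant is defined elsewhere in the codebase and the rows come from the parser:

ROW_DELIMITER = "|#*#|"  # assumed value, for illustration only

sample_rows = [
    {"Address": "1 Main St", "City": "Springfield"},
    {"Address": "2 Oak Ave", "City": "Shelbyville"},
]

# Flatten each row's values into one delimited string, one line per row,
# mirroring how cached_second_to_fifth_row is built above.
cached = "\n".join(
    ROW_DELIMITER.join(str(v) for v in row.values())
    for row in sample_rows
)
print(cached)
# 1 Main St|#*#|Springfield
# 2 Oak Ave|#*#|Shelbyville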
Example #2
def cache_first_rows(import_file, parser):
    """Cache headers, and rows 2-6 for validation/viewing.

    :param import_file: ImportFile inst.
    :param parser: unicode-csv.Reader instance.

    Unfortunately, this is duplicated logic from data_importer,
    but since data_importer makes many faulty assumptions we need to do
    it differently.

    """
    parser.seek_to_beginning()
    rows = parser.next()

    validation_rows = []
    for i in range(5):
        try:
            row = rows.next()
            if row:
                validation_rows.append(row)
        except StopIteration:
            # Fewer than five data rows in the file.
            break

    # This is a fix for issue #24 to use original field order when importing
    # This is ultimately not the correct place for this fix.  The correct fix
    # is to update the mcm code to a newer version where the readers in mcm/reader.py
    # have a headers() function defined and then just do
    # first_row = parser.headers()
    # But until we can patch the mcm code this should fix the issue.
    local_reader = parser.reader
    if isinstance(local_reader, reader.ExcelParser):
        first_row = local_reader.sheet.row_values(local_reader.header_row)
    elif isinstance(local_reader, reader.CSVParser):
        first_row = local_reader.csvreader.fieldnames
        first_row = [local_reader._clean_super(x) for x in first_row]
    else:
        # For an unrecognized parser type, fall back to the original
        # behavior for lack of anything better: read the headers from the
        # keys of a row we already cached.
        first_row = validation_rows[0].keys() if validation_rows else []

    import_file.cached_second_to_fifth_row = "\n".join(
        ROW_DELIMITER.join(str(r[x]) for x in first_row)
        for r in validation_rows
    )

    if first_row:
        first_row = ROW_DELIMITER.join(first_row)
    import_file.cached_first_row = first_row or ''

    import_file.save()
    # Reset our file pointer for mapping.
    parser.seek_to_beginning()
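
The try/except guard in this version takes up to five rows without assuming the file has that many. In modern Python the same behavior can be written with itertools.islice, which stops quietly when the iterator is exhausted; a sketch, with a plain list iterator standing in for the parser's row generator:

from itertools import islice

rows = iter([{"a": 1}, {"a": 2}, {"a": 3}])  # only three rows available

# islice yields at most five items and ends silently on exhaustion, so no
# StopIteration handling is needed.
validation_rows = [row for row in islice(rows, 5) if row]
print(len(validation_rows))  # 3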
Example #3
def cache_first_rows(import_file, parser):
    """Cache headers, and rows 2-6 for validation/viewing.

    :param import_file: ImportFile inst.
    :param parser: unicode-csv.Reader instance.

    Unfortunately, this is duplicated logic from data_importer,
    but since data_importer makes many faulty assumptions we need to do
    it differently.

    """
    parser.seek_to_beginning()
    rows = parser.next()

    validation_rows = []
    for i in range(5):
        try:
            row = rows.next()
            if row:
                validation_rows.append(row)
        except StopIteration:
            # Fewer than five data rows in the file.
            break

    # Fetch the header row; the parser returns it already cleaned.
    first_row = parser.headers()

    import_file.cached_second_to_fifth_row = "\n".join(
        ROW_DELIMITER.join(str(r[x]) for x in first_row)
        for r in validation_rows
    )

    if first_row:
        first_row = ROW_DELIMITER.join(first_row)
    import_file.cached_first_row = first_row or ''

    import_file.save()

    # Reset our file pointer for mapping.
    parser.seek_to_beginning()
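
For readers on Python 3 (the examples above are Python 2), here is a compact sketch of this final version's flow. StubParser, the dict standing in for ImportFile, and the delimiter value are all assumptions made so the example runs standalone; they are not the project's actual API:

from itertools import islice

ROW_DELIMITER = "|#*#|"  # assumed value, for illustration only


class StubParser(object):
    """Minimal stand-in exposing only the calls the function relies on."""

    def __init__(self, headers, rows):
        self._headers = headers
        self._rows = rows
        self._iter = iter(rows)

    def headers(self):
        return self._headers

    def rows(self):
        # Python 3 counterpart of the Python 2 parser.next() call above.
        return self._iter

    def seek_to_beginning(self):
        self._iter = iter(self._rows)


def cache_first_rows(import_file, parser):
    """Cache the header row and up to five data rows on import_file."""
    parser.seek_to_beginning()
    rows = parser.rows()

    # Take up to five rows, skipping any falsy ones.
    validation_rows = [row for row in islice(rows, 5) if row]

    first_row = parser.headers()
    import_file["cached_second_to_fifth_row"] = "\n".join(
        ROW_DELIMITER.join(str(r[x]) for x in first_row)
        for r in validation_rows
    )
    import_file["cached_first_row"] = (
        ROW_DELIMITER.join(first_row) if first_row else ""
    )
    # Reset the file pointer, as the originals do before mapping.
    parser.seek_to_beginning()


parser = StubParser(
    ["Address", "City"],
    [{"Address": "1 Main St", "City": "Springfield"}],
)
import_file = {}
cache_first_rows(import_file, parser)
print(import_file["cached_first_row"])            # Address|#*#|City
print(import_file["cached_second_to_fifth_row"])  # 1 Main St|#*#|Springfield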