Example #1
    def test_get_column_mappings(self):
        """We produce appropriate data structure for mapping"""
        expected = dict(
            sorted(
                [
                    (u"example_9", u"mapped_9"),
                    (u"example_8", u"mapped_8"),
                    (u"example_7", u"mapped_7"),
                    (u"example_6", u"mapped_6"),
                    (u"example_5", u"mapped_5"),
                    (u"example_4", u"mapped_4"),
                    (u"example_3", u"mapped_3"),
                    (u"example_2", u"mapped_2"),
                    (u"example_1", u"mapped_1"),
                    (u"example_0", u"mapped_0"),
                ]
            )
        )
        org = Organization.objects.create()

        raw = []
        mapped = []
        for x in range(10):
            raw.append(seed_models.Column.objects.create(column_name="example_{0}".format(x), organization=org))
            mapped.append(seed_models.Column.objects.create(column_name="mapped_{0}".format(x), organization=org))

        for x in range(10):
            column_mapping = seed_models.ColumnMapping.objects.create(super_organization=org)

            column_mapping.column_raw.add(raw[x])
            column_mapping.column_mapped.add(mapped[x])

        test_mapping, _ = seed_models.get_column_mappings(org)
        self.assertDictEqual(test_mapping, expected)
Example #2
    def test_get_column_mappings(self):
        """We produce appropriate data structure for mapping"""
        expected = dict(
            sorted([(u'example_9', u'mapped_9'), (u'example_8', u'mapped_8'),
                    (u'example_7', u'mapped_7'), (u'example_6', u'mapped_6'),
                    (u'example_5', u'mapped_5'), (u'example_4', u'mapped_4'),
                    (u'example_3', u'mapped_3'), (u'example_2', u'mapped_2'),
                    (u'example_1', u'mapped_1'), (u'example_0', u'mapped_0')]))
        org = Organization.objects.create()

        raw = []
        mapped = []
        for x in range(10):
            raw.append(
                seed_models.Column.objects.create(
                    column_name='example_{0}'.format(x), organization=org))
            mapped.append(
                seed_models.Column.objects.create(
                    column_name='mapped_{0}'.format(x), organization=org))

        for x in range(10):
            column_mapping = seed_models.ColumnMapping.objects.create(
                super_organization=org, )

            column_mapping.column_raw.add(raw[x])
            column_mapping.column_mapped.add(mapped[x])

        test_mapping, _ = seed_models.get_column_mappings(org)
        self.assertDictEqual(test_mapping, expected)
Example #3
    def test_get_column_mappings(self):
        """We produce appropriate data structure for mapping"""
        expected = dict(sorted([
            (u'example_9', u'mapped_9'),
            (u'example_8', u'mapped_8'),
            (u'example_7', u'mapped_7'),
            (u'example_6', u'mapped_6'),
            (u'example_5', u'mapped_5'),
            (u'example_4', u'mapped_4'),
            (u'example_3', u'mapped_3'),
            (u'example_2', u'mapped_2'),
            (u'example_1', u'mapped_1'),
            (u'example_0', u'mapped_0')
        ]))
        org = Organization.objects.create()
        for x in range(10):
            seed_models.ColumnMapping.objects.create(
                super_organization=org,
                source_type=seed_models.ASSESSED_RAW,
                column_raw='example_{0}'.format(x),
                column_mapped='mapped_{0}'.format(x)
            )
        test_mapping = seed_models.get_column_mappings(org)

        self.assertDictEqual(test_mapping, expected)
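Note the two API shapes shown so far: in Examples #1 and #2 the raw and mapped names live in Column rows attached to a ColumnMapping through its column_raw / column_mapped relations, and get_column_mappings returns a (mapping, concat_configs) pair, while in Example #3 the names are plain fields on ColumnMapping and only the mapping dict is returned. A minimal sketch of the newer return shape, inferred from the assertions above (org and seed_models are reused from the tests; the empty concat list is an assumption, since these tests configure no concatenations):

mapping, concats = seed_models.get_column_mappings(org)
# mapping maps each raw column name to its mapped name, e.g.
#   {u'example_0': u'mapped_0', ..., u'example_9': u'mapped_9'}
# concats would hold any concatenation configs; none are set up in these
# tests, so it is assumed to be empty here.
assert mapping[u'example_0'] == u'mapped_0'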
Example #4
def map_row_chunk(chunk, file_pk, source_type, prog_key, increment, *args,
                  **kwargs):
    """Does the work of matching a mapping to a source type and saving

    :param chunk: list of dict of str. One row's worth of parse data.
    :param file_pk: int, the PK for an ImportFile obj.
    :param source_type: int, represented by either ASSESSED_RAW, or
        PORTFOLIO_RAW.
    :param prog_key: string, key of the progress cache entry.
    :param increment: double, value by which to increment the progress key.
    :param cleaner: (optional), the cleaner class you want to send
        to mapper.map_row (e.g. to turn numbers into floats).
    :param raw_ids: (optional kwarg), the list of ids in chunk order.

    """
    import_file = ImportFile.objects.get(pk=file_pk)
    save_type = PORTFOLIO_BS
    if source_type == ASSESSED_RAW:
        save_type = ASSESSED_BS

    concats = []

    org = Organization.objects.get(
        pk=import_file.import_record.super_organization.pk)

    mapping, concats = get_column_mappings(org)
    map_cleaner = _build_cleaner(org)

    # For those column mappings which are not db columns, we need to
    # let MCM know to apply our mapping function to them.
    apply_columns = []

    mappable_columns = get_mappable_columns()
    for item in mapping:
        if mapping[item] not in mappable_columns:
            apply_columns.append(item)

    apply_func = apply_data_func(mappable_columns)

    # Guard against an empty chunk so the check after the loop is safe.
    model = None
    for row in chunk:
        model = mapper.map_row(row,
                               mapping,
                               BuildingSnapshot,
                               cleaner=map_cleaner,
                               concat=concats,
                               apply_columns=apply_columns,
                               apply_func=apply_func,
                               *args,
                               **kwargs)

        model.import_file = import_file
        model.source_type = save_type
        model.clean()
        model.super_organization = import_file.import_record.super_organization
        model.save()
    if model:
        # Make sure that we've saved all of the extra_data column names
        save_column_names(model, mapping=mapping)

    increment_cache(prog_key, increment)
Example #5
def map_row_chunk(
    chunk, file_pk, source_type, prog_key, increment, *args, **kwargs
):
    """Does the work of matching a mapping to a source type and saving

    :param chunk: list of dict of str. One row's worth of parse data.
    :param file_pk: int, the PK for an ImportFile obj.
    :param source_type: int, represented by either ASSESSED_RAW, or
        PORTFOLIO_RAW.
    :param prog_key: string, key of the progress cache entry.
    :param increment: double, value by which to increment the progress key.
    :param cleaner: (optional), the cleaner class you want to send
        to mapper.map_row (e.g. to turn numbers into floats).
    :param raw_ids: (optional kwarg), the list of ids in chunk order.

    """
    import_file = ImportFile.objects.get(pk=file_pk)
    save_type = PORTFOLIO_BS
    mapping = espm_mapping.MAP
    map_cleaner = PORTFOLIO_CLEANER

    # Default to PM so we don't unnecessarily query for mapping
    if source_type == ASSESSED_RAW:
        org = Organization.objects.filter(
            users=import_file.import_record.owner
        )[0]
        mapping = get_column_mappings(org)
        save_type = ASSESSED_BS
        map_cleaner = ASSESSED_CLEANER

    # Pull out any columns meant to be concatenated together.
    mapping, concats = filter_concat_configs(mapping)

    for row in chunk:
        model = mapper.map_row(
            row,
            mapping,
            BuildingSnapshot,
            cleaner=map_cleaner,
            concat=concats,
            *args,
            **kwargs
        )

        model.import_file = import_file
        model.source_type = save_type
        model.clean()
        model.super_organization = import_file.import_record.super_organization
        model.save()

    increment_cache(prog_key, increment)
Example #6
    def test_get_column_mappings(self):
        """We produce appropriate data structure for mapping"""
        expected = dict(sorted([
            (u'example_9', u'mapped_9'),
            (u'example_8', u'mapped_8'),
            (u'example_7', u'mapped_7'),
            (u'example_6', u'mapped_6'),
            (u'example_5', u'mapped_5'),
            (u'example_4', u'mapped_4'),
            (u'example_3', u'mapped_3'),
            (u'example_2', u'mapped_2'),
            (u'example_1', u'mapped_1'),
            (u'example_0', u'mapped_0')
        ]))
        org = Organization.objects.create()

        raw = []
        mapped = []
        for x in range(10):
            raw.append(seed_models.Column.objects.create(
                column_name='example_{0}'.format(x), organization=org
            ))
            mapped.append(seed_models.Column.objects.create(
                column_name='mapped_{0}'.format(x), organization=org
            ))

        for x in range(10):
            column_mapping = seed_models.ColumnMapping.objects.create(
                super_organization=org,
            )

            column_mapping.column_raw.add(raw[x])
            column_mapping.column_mapped.add(mapped[x])

        test_mapping, _ = seed_models.get_column_mappings(org)
        self.assertDictEqual(test_mapping, expected)
Example #7
def map_row_chunk(chunk, file_pk, source_type, prog_key, increment, *args, **kwargs):
    """Does the work of matching a mapping to a source type and saving

    :param chunk: list of dict of str. One row's worth of parse data.
    :param file_pk: int, the PK for an ImportFile obj.
    :param source_type: int, represented by either ASSESSED_RAW, or
        PORTFOLIO_RAW.
    :param prog_key: string, key of the progress cache entry.
    :param increment: double, value by which to increment the progress key.
    :param cleaner: (optional), the cleaner class you want to send
        to mapper.map_row (e.g. to turn numbers into floats).
    :param raw_ids: (optional kwarg), the list of ids in chunk order.

    """

    import_file = ImportFile.objects.get(pk=file_pk)
    save_type = PORTFOLIO_BS
    if source_type == ASSESSED_RAW:
        save_type = ASSESSED_BS

    concats = []

    org = Organization.objects.get(
        pk=import_file.import_record.super_organization.pk
    )

    mapping, concats = get_column_mappings(org)
    map_cleaner = _build_cleaner(org)

    # For those column mappings which are not db columns, we need to
    # let MCM know to apply our mapping function to them.
    apply_columns = []

    mappable_columns = get_mappable_columns()
    for item in mapping:
        if mapping[item] not in mappable_columns:
            apply_columns.append(item)

    apply_func = apply_data_func(mappable_columns)

    # Guard against an empty chunk so the check after the loop is safe.
    model = None
    for row in chunk:
        model = mapper.map_row(
            row,
            mapping,
            BuildingSnapshot,
            cleaner=map_cleaner,
            concat=concats,
            apply_columns=apply_columns,
            apply_func=apply_func,
            *args,
            **kwargs
        )

        model.import_file = import_file
        model.source_type = save_type
        model.clean()
        model.super_organization = import_file.import_record.super_organization
        model.save()
    if model:
        # Make sure that we've saved all of the extra_data column names
        save_column_names(model, mapping=mapping)

    increment_cache(prog_key, increment)
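For context, here is a minimal driver sketch showing how parsed rows might be chunked and handed to map_row_chunk. The _chunks helper, the map_import_file name, the prog_key format, and the 100-row chunk size are all illustrative assumptions, not taken from the examples above.

def _chunks(rows, size):
    # Yield successive slices of rows, each at most `size` rows long.
    for i in range(0, len(rows), size):
        yield rows[i:i + size]


def map_import_file(file_pk, source_type, parsed_rows, chunk_size=100):
    # Hypothetical progress key; the real key format is not shown above.
    prog_key = 'map_data:{0}'.format(file_pk)
    chunks = list(_chunks(parsed_rows, chunk_size))
    # Split 100% of the progress evenly across the chunks so that the
    # increment_cache calls inside map_row_chunk add up to completion.
    increment = 100.0 / max(len(chunks), 1)
    for chunk in chunks:
        map_row_chunk(chunk, file_pk, source_type, prog_key, increment)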