def map_row_chunk(chunk, file_pk, source_type, prog_key, increment, *args, **kwargs):
    """Map a chunk of parsed rows onto BuildingSnapshot models and save them.

    :param chunk: list of dict of str. One row's worth of parse data.
    :param file_pk: int, the PK for an ImportFile obj.
    :param source_type: int, represented by either ASSESSED_RAW, or
        PORTFOLIO_RAW.
    :param prog_key: string, cache key used to track mapping progress.
    :param increment: double, value by which to increment the progress key.
    :param cleaner: (optional), the cleaner class you want to send
        to mapper.map_row. (e.g. turn numbers into floats.).
    :param raw_ids: (optional kwarg), the list of ids in chunk order.
    """
    import_file = ImportFile.objects.get(pk=file_pk)
    # Raw rows are persisted as their "BS" (BuildingSnapshot) counterpart.
    save_type = PORTFOLIO_BS
    if source_type == ASSESSED_RAW:
        save_type = ASSESSED_BS

    org = Organization.objects.get(
        pk=import_file.import_record.super_organization.pk)
    mapping, concats = get_column_mappings(org)
    map_cleaner = _build_cleaner(org)

    # For those column mappings which are not db columns, we
    # need to let MCM know that we apply our mapping function to those
    # (their values end up in extra_data rather than model fields).
    apply_columns = []
    mappable_columns = get_mappable_columns()
    for item in mapping:
        if mapping[item] not in mappable_columns:
            apply_columns.append(item)

    apply_func = apply_data_func(mappable_columns)

    # Guard against an empty chunk: without this, the `if model:` check
    # below would raise NameError because `model` was never bound.
    model = None
    for row in chunk:
        model = mapper.map_row(
            row,
            mapping,
            BuildingSnapshot,
            cleaner=map_cleaner,
            concat=concats,
            apply_columns=apply_columns,
            apply_func=apply_func,
            *args,
            **kwargs
        )
        model.import_file = import_file
        model.source_type = save_type
        model.clean()
        model.super_organization = import_file.import_record.super_organization
        model.save()

    if model:
        # Make sure that we've saved all of the extra_data column names.
        # NOTE(review): uses the last row's model; presumably any row in the
        # chunk carries the full set of mapped column names — confirm.
        save_column_names(model, mapping=mapping)

    increment_cache(prog_key, increment)
def map_row_chunk(chunk, file_pk, source_type, prog_key, increment, *args, **kwargs):
    """Does the work of matching a mapping to a source type and saving

    :param chunk: list of dict of str. One row's worth of parse data.
    :param file_pk: int, the PK for an ImportFile obj.
    :param source_type: int, represented by either ASSESSED_RAW, or
        PORTFOLIO_RAW.
    :param prog_key: string, key of the progress key
    :param increment: double, value by which to increment progress key
    :param cleaner: (optional), the cleaner class you want to send
        to mapper.map_row. (e.g. turn numbers into floats.).
    :param raw_ids: (optional kwarg), the list of ids in chunk order.
    """
    import_file = ImportFile.objects.get(pk=file_pk)
    # Raw rows are persisted as their "BS" (BuildingSnapshot) counterpart.
    save_type = PORTFOLIO_BS
    if source_type == ASSESSED_RAW:
        save_type = ASSESSED_BS

    org = Organization.objects.get(
        pk=import_file.import_record.super_organization.pk
    )
    mapping, concats = get_column_mappings(org)
    map_cleaner = _build_cleaner(org)

    # For those column mappings which are not db columns, we
    # need to let MCM know that we apply our mapping function to those
    # (their values end up in extra_data rather than model fields).
    apply_columns = []
    mappable_columns = get_mappable_columns()
    for item in mapping:
        if mapping[item] not in mappable_columns:
            apply_columns.append(item)

    apply_func = apply_data_func(mappable_columns)

    # Guard against an empty chunk: without this, the `if model:` check
    # below would raise NameError because `model` was never bound.
    model = None
    for row in chunk:
        model = mapper.map_row(
            row,
            mapping,
            BuildingSnapshot,
            cleaner=map_cleaner,
            concat=concats,
            apply_columns=apply_columns,
            apply_func=apply_func,
            *args,
            **kwargs
        )
        model.import_file = import_file
        model.source_type = save_type
        model.clean()
        model.super_organization = import_file.import_record.super_organization
        model.save()

    if model:
        # Make sure that we've saved all of the extra_data column names.
        # NOTE(review): uses the last row's model; presumably any row in the
        # chunk carries the full set of mapped column names — confirm.
        save_column_names(model, mapping=mapping)

    increment_cache(prog_key, increment)