def clean_unique_together_values(self, row_data: Dict, row: List[Any], row_index: int) -> Dict:
    """Reject rows whose unique-together column values were already seen in an earlier row."""
    is_not_unique_row = False
    if not self._unique_together:
        return row_data
    for unique_together_columns in self._unique_together:
        # Only check the constraint when every column in it has a value.
        values = tuple(
            row_data[column_name]
            for column_name in unique_together_columns
            if row_data[column_name] is not None
        )
        if len(values) == len(unique_together_columns):
            duplicate_row = self._unique_together_values[unique_together_columns].get(values)
            if duplicate_row is not None:
                error = ', '.join(
                    f'{column_name} ({column_value})'
                    for column_name, column_value in zip(unique_together_columns, values)
                )
                self.add_errors(
                    f'{error} is a duplicate of row {duplicate_row}',
                    row_index=row_index,
                )
                is_not_unique_row = True
            else:
                # Remember where these values first appeared so later duplicates can point to it.
                self._unique_together_values[unique_together_columns][values] = row_index
    if is_not_unique_row:
        raise SkipRow(f'Row {row_index} is not unique.')
    return row_data
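
# The bookkeeping attributes used above are not defined in this section; the
# initializer below is a hedged sketch of one possible shape (attribute names
# follow the code above, everything else is an assumption, and the real
# __init__ presumably also sets up self.columns and the error collection):
# `_unique_together` holds tuples of column names, and `_unique_together_values`
# maps each of those tuples to {seen value-tuple: row index of its first occurrence}.
def __init__(self, unique_together=()):
    from collections import defaultdict  # local import keeps the sketch self-contained
    self._unique_together = tuple(unique_together)
    self._unique_together_values = defaultdict(dict)
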
def parse_row(self, row: List[Any], row_index: int, worksheet_title: str = None) -> Dict:
    """Parse every column of a row, collect per-column errors, and clean the result."""
    row_data = {}
    if worksheet_title:
        row_data['worksheet'] = worksheet_title
    row_has_errors = False
    for column in self.columns:
        try:
            row_data[column.name] = self.parse_column(row, column, row_index)
        except ColumnError as e:
            row_has_errors = True
            self.add_errors(
                e.messages,
                row_index=row_index,
                col_index=column.index,
                worksheet_title=worksheet_title,
            )
    if row_has_errors:
        raise SkipRow('Not processed because the row contains errors.')
    return self.clean_row(row_data, row, row_index, worksheet_title=worksheet_title)
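
# clean_row itself is not shown in this section; a minimal sketch, assuming it
# simply chains the clean_* helpers defined here (the real method may do more,
# e.g. use worksheet_title when reporting errors).
def clean_row(self, row_data: Dict, row: List[Any], row_index: int, worksheet_title: str = None) -> Dict:
    row_data = self.clean_row_required_columns(row_data, row, row_index)
    row_data = self.clean_unique_together_values(row_data, row, row_index)
    return row_data
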
def clean_row_required_columns(self, row_data: Dict, row: List[Any], row_index: int) -> Dict:
    """Reject rows that are missing a value in any required column."""
    has_empty_required_columns = False
    for column in self.columns:
        if column.required and row_data.get(column.name) is None:
            self.add_errors(
                f'Column {column.header or column.name} is required.',
                row_index=row_index,
                col_index=column.index,
            )
            has_empty_required_columns = True
    if has_empty_required_columns:
        raise SkipRow(f'Row {row_index} contains blank required columns.')
    return row_data
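
# A hedged usage sketch (the name `parse_rows` and the 1-based row numbering
# are assumptions, not from the source): rows that raise SkipRow are dropped
# from the result, while their errors remain recorded via add_errors.
def parse_rows(self, rows: List[List[Any]], worksheet_title: str = None) -> List[Dict]:
    parsed_rows = []
    for row_index, row in enumerate(rows, start=1):
        try:
            parsed_rows.append(self.parse_row(row, row_index, worksheet_title=worksheet_title))
        except SkipRow:
            continue  # errors were already collected by parse_row / the clean_* helpers
    return parsed_rows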