def test_resolve_orm_path_m2m(self):
    """Following a m2m lookup ("relateds__name") lands on RelatedM2MModel.name."""
    resolved = resolve_orm_path(ExampleModel, "relateds__name")
    expected = RelatedM2MModel._meta.get_field("name")
    self.assertEqual(resolved, expected)
def test_resolve_orm_path_local(self):
    """A plain local field name resolves to that field on the model itself."""
    resolved = resolve_orm_path(ExampleModel, "name")
    self.assertEqual(resolved, ExampleModel._meta.get_field("name"))
def apply_queryset_options(self, queryset):
    """
    Interprets the datatable options.

    Options requiring manual massaging of the queryset are handled here.  The output of this
    method should be treated as a list, since complex options might convert it out of the
    original queryset form.

    Returns a 3-tuple: (object_list, total_initial_record_count, unpaged_total), where
    ``total_initial_record_count`` is the size of the queryset before any filtering (for the
    benefit of the frontend datatables.js) and ``unpaged_total`` is the size after
    filtering/searching but before pagination.
    """
    options = self._get_datatable_options()

    # These will hold residue operations that cannot be handled at the database level.
    # Anything left in these variables by the end will be handled manually (read: less
    # efficiently).
    sort_fields = []
    searches = []

    # This count is for the benefit of the frontend datatables.js
    total_initial_record_count = queryset.count()

    if options.ordering:
        db_fields, sort_fields = split_real_fields(self.model, options.ordering)
        queryset = queryset.order_by(*db_fields)

    if options.search:
        def key_function(item):
            """
            Converts items in the 'columns' definition to field names for determining if
            it's concrete or not.
            """
            if isinstance(item, (tuple, list)):
                item = item[1]
                if item is None:
                    return item
                if not isinstance(item, (tuple, list)):
                    item = (item,)
                return item[0].split('__')[0]
            return item

        db_fields, searches = filter_real_fields(self.model, options.columns,
                                                 key=key_function)
        db_fields.extend(options.search_fields)

        queries = []  # Queries generated to search all fields for all terms
        search_terms = map(unicode.strip, options.search.split())

        for term in search_terms:
            term_queries = []  # Queries generated to search all fields for this term

            # Every concrete database lookup string in 'columns' is followed to its trailing
            # field descriptor.  For example, "subdivision__name" terminates in a CharField.
            # The field type determines how it is probed for search.
            for name in db_fields:
                if isinstance(name, (tuple, list)):
                    name = name[1]
                if not isinstance(name, (tuple, list)):
                    name = (name,)

                for component_name in name:
                    # Queries generated to search this database field for the search term
                    field_queries = []

                    try:
                        field = resolve_orm_path(self.model, component_name)
                    except models.fields.FieldDoesNotExist:
                        # Virtual columns won't be found on the model, so this is the escape
                        # hatch.
                        # FIXME: Should this even happen if we can promise that this includes
                        # only database fields?
                        continue

                    if isinstance(field, (models.CharField, models.TextField)):
                        field_queries = [{component_name + '__icontains': term}]
                    elif isinstance(field, models.DateField):
                        try:
                            date_obj = dateutil.parser.parse(term)
                        except ValueError:
                            # This exception is theoretical, but it doesn't seem to raise.
                            pass
                        except TypeError:
                            # Failed conversions can lead to the parser adding ints to None.
                            pass
                        else:
                            field_queries.append({component_name: date_obj})

                        # Add queries for more granular date field lookups
                        try:
                            numerical_value = int(term)
                        except ValueError:
                            pass
                        else:
                            if 0 < numerical_value < 3000:
                                field_queries.append(
                                    {component_name + '__year': numerical_value})
                            if 0 < numerical_value <= 12:
                                field_queries.append(
                                    {component_name + '__month': numerical_value})
                            if 0 < numerical_value <= 31:
                                field_queries.append(
                                    {component_name + '__day': numerical_value})
                    elif isinstance(field, models.BooleanField):
                        # BUGFIX: bind the parsed value to a local name instead of rebinding
                        # ``term``.  Rebinding poisoned the remaining fields inspected for
                        # this same term — they would then receive a bool (a second
                        # BooleanField would crash on ``term.lower()``, and a CharField
                        # would search for the literal string "True"/"False").
                        if term.lower() in ('true', 'yes'):
                            bool_value = True
                        elif term.lower() in ('false', 'no'):
                            bool_value = False
                        else:
                            continue
                        field_queries = [{component_name: bool_value}]
                    elif isinstance(field, models.IntegerField):
                        try:
                            field_queries = [{component_name: int(term)}]
                        except ValueError:
                            pass
                    elif isinstance(field, (models.FloatField, models.DecimalField)):
                        try:
                            field_queries = [{component_name: float(term)}]
                        except ValueError:
                            pass
                    else:
                        raise ValueError("Unhandled field type for %s (%r) in search." % (
                                name, type(field)))

                    # Append each field inspection for this term
                    term_queries.extend(map(lambda q: Q(**q), field_queries))

            # Append the logical OR of all field inspections for this term
            if len(term_queries):
                queries.append(reduce(operator.or_, term_queries))

        # Apply the logical AND of all term inspections
        if len(queries):
            queryset = queryset.filter(reduce(operator.and_, queries))

    if not sort_fields and not searches:
        # We can shortcut and speed up the process if all operations are database-backed.
        object_list = queryset
        unpaged_total = queryset.count()
    else:
        object_list = list(queryset)

        # Sort the results manually for whatever remaining sort options are left over
        def data_getter_orm(field_name):
            """Key function following an ORM-style "a__b__c" attribute path on the object."""
            def key(obj):
                try:
                    return reduce(getattr, [obj] + field_name.split('__'))
                except (AttributeError, ObjectDoesNotExist):
                    return None
            return key

        def data_getter_custom(i):
            """Key function that sorts by the column's plain (non-rich) rendered value."""
            def key(obj):
                rich_value, plain_value = self.get_column_data(i, options.columns[i], obj)
                return plain_value
            return key

        # Sort the list using the manual sort fields, back-to-front.  `sort` is a stable
        # operation, meaning that multiple passes can be made on the list using different
        # criteria.  The only catch is that the passes must be made in reverse order so that
        # the "first" sort field with the most priority ends up getting applied last.
        for sort_field in sort_fields[::-1]:
            if sort_field.startswith('-'):
                reverse = True
                sort_field = sort_field[1:]
            else:
                reverse = False

            if sort_field.startswith('!'):
                # "!N" marks a virtual column sorted via its rendered data, by column index.
                key_function = data_getter_custom
                sort_field = int(sort_field[1:])
            else:
                key_function = data_getter_orm

            try:
                object_list.sort(key=key_function(sort_field), reverse=reverse)
            except TypeError as err:
                log.error("Unable to sort on {} - {}".format(sort_field, err))

        # NOTE(review): manual (non-database) searches over virtual columns are deliberately
        # not implemented here — doing so would require inspecting every row in Python and
        # would cripple performance on large datasets.

        unpaged_total = len(object_list)

    if options.page_length != -1:
        i_begin = options.start_offset
        i_end = options.start_offset + options.page_length
        object_list = object_list[i_begin:i_end]

    return object_list, total_initial_record_count, unpaged_total
def apply_queryset_options(self, queryset):
    """
    Interprets the datatable options.

    Options requiring manual massaging of the queryset are handled here.  The output of this
    method should be treated as a list, since complex options might convert it out of the
    original queryset form.

    Returns a 3-tuple: (object_list, total_initial_record_count, unpaged_total), where
    ``total_initial_record_count`` is the size of the queryset before any filtering (for the
    benefit of the frontend datatables.js) and ``unpaged_total`` is the size after
    filtering/searching but before pagination.
    """
    options = self._get_datatable_options()

    # These will hold residue operations that cannot be handled at the database level.
    # Anything left in these variables by the end will be handled manually (read: less
    # efficiently).
    sort_fields = []
    searches = []

    # This count is for the benefit of the frontend datatables.js
    total_initial_record_count = queryset.count()

    if options.ordering:
        db_fields, sort_fields = split_real_fields(self.model, options.ordering)
        queryset = queryset.order_by(*db_fields)

    if options.search:
        def key_function(item):
            """
            Converts items in the 'columns' definition to field names for determining if
            it's concrete or not.
            """
            if isinstance(item, (tuple, list)):
                item = item[1]
                if item is None:
                    return item
                if not isinstance(item, (tuple, list)):
                    item = (item,)
                return item[0].split('__')[0]
            return item

        db_fields, searches = filter_real_fields(self.model, options.columns,
                                                 key=key_function)
        db_fields.extend(options.search_fields)

        queries = []  # Queries generated to search all fields for all terms
        search_terms = map(unicode.strip, options.search.split())

        for term in search_terms:
            term_queries = []  # Queries generated to search all fields for this term

            # Every concrete database lookup string in 'columns' is followed to its trailing
            # field descriptor.  For example, "subdivision__name" terminates in a CharField.
            # The field type determines how it is probed for search.
            for name in db_fields:
                if isinstance(name, (tuple, list)):
                    name = name[1]
                if not isinstance(name, (tuple, list)):
                    name = (name,)

                for component_name in name:
                    # Queries generated to search this database field for the search term
                    field_queries = []

                    try:
                        field = resolve_orm_path(self.model, component_name)
                    except models.fields.FieldDoesNotExist:
                        # Virtual columns won't be found on the model, so this is the escape
                        # hatch.
                        # FIXME: Should this even happen if we can promise that this includes
                        # only database fields?
                        continue

                    if isinstance(field, (models.CharField, models.TextField)):
                        field_queries = [{component_name + '__icontains': term}]
                    elif isinstance(field, models.DateField):
                        try:
                            date_obj = dateutil.parser.parse(term)
                        except ValueError:
                            # This exception is theoretical, but it doesn't seem to raise.
                            pass
                        except TypeError:
                            # Failed conversions can lead to the parser adding ints to None.
                            pass
                        else:
                            field_queries.append({component_name: date_obj})

                        # Add queries for more granular date field lookups
                        try:
                            numerical_value = int(term)
                        except ValueError:
                            pass
                        else:
                            if 0 < numerical_value < 3000:
                                field_queries.append(
                                    {component_name + '__year': numerical_value})
                            if 0 < numerical_value <= 12:
                                field_queries.append(
                                    {component_name + '__month': numerical_value})
                            if 0 < numerical_value <= 31:
                                field_queries.append(
                                    {component_name + '__day': numerical_value})
                    elif isinstance(field, models.BooleanField):
                        # BUGFIX: bind the parsed value to a local name instead of rebinding
                        # ``term``.  Rebinding poisoned the remaining fields inspected for
                        # this same term — they would then receive a bool (a second
                        # BooleanField would crash on ``term.lower()``, and a CharField
                        # would search for the literal string "True"/"False").
                        if term.lower() in ('true', 'yes'):
                            bool_value = True
                        elif term.lower() in ('false', 'no'):
                            bool_value = False
                        else:
                            continue
                        field_queries = [{component_name: bool_value}]
                    elif isinstance(field, models.IntegerField):
                        try:
                            field_queries = [{component_name: int(term)}]
                        except ValueError:
                            pass
                    elif isinstance(field, (models.FloatField, models.DecimalField)):
                        try:
                            field_queries = [{component_name: float(term)}]
                        except ValueError:
                            pass
                    else:
                        raise ValueError("Unhandled field type for %s (%r) in search." % (
                                name, type(field)))

                    # Append each field inspection for this term
                    term_queries.extend(map(lambda q: Q(**q), field_queries))

            # Append the logical OR of all field inspections for this term
            if len(term_queries):
                queries.append(reduce(operator.or_, term_queries))

        # Apply the logical AND of all term inspections
        if len(queries):
            queryset = queryset.filter(reduce(operator.and_, queries))

    if not sort_fields and not searches:
        # We can shortcut and speed up the process if all operations are database-backed.
        object_list = queryset
        unpaged_total = queryset.count()
    else:
        object_list = list(queryset)

        # Sort the results manually for whatever remaining sort options are left over
        def data_getter_orm(field_name):
            """Key function following an ORM-style "a__b__c" attribute path on the object."""
            def key(obj):
                try:
                    return reduce(getattr, [obj] + field_name.split('__'))
                except (AttributeError, ObjectDoesNotExist):
                    return None
            return key

        def data_getter_custom(i):
            """Key function that sorts by the column's plain (non-rich) rendered value."""
            def key(obj):
                rich_value, plain_value = self.get_column_data(i, options.columns[i], obj)
                return plain_value
            return key

        # Sort the list using the manual sort fields, back-to-front.  `sort` is a stable
        # operation, meaning that multiple passes can be made on the list using different
        # criteria.  The only catch is that the passes must be made in reverse order so that
        # the "first" sort field with the most priority ends up getting applied last.
        for sort_field in sort_fields[::-1]:
            if sort_field.startswith('-'):
                reverse = True
                sort_field = sort_field[1:]
            else:
                reverse = False

            if sort_field.startswith('!'):
                # "!N" marks a virtual column sorted via its rendered data, by column index.
                key_function = data_getter_custom
                sort_field = int(sort_field[1:])
            else:
                key_function = data_getter_orm

            try:
                object_list.sort(key=key_function(sort_field), reverse=reverse)
            except TypeError as err:
                log.error("Unable to sort on {} - {}".format(sort_field, err))

        # NOTE(review): manual (non-database) searches over virtual columns are deliberately
        # not implemented here — doing so would require inspecting every row in Python and
        # would cripple performance on large datasets.

        unpaged_total = len(object_list)

    if options.page_length != -1:
        i_begin = options.start_offset
        i_end = options.start_offset + options.page_length
        object_list = object_list[i_begin:i_end]

    return object_list, total_initial_record_count, unpaged_total
def test_resolve_orm_path_reverse_fk(self):
    """A reverse-FK lookup path resolves to the field on the related model."""
    resolved = resolve_orm_path(ExampleModel, 'reverserelatedmodel__name')
    expected = ReverseRelatedModel._meta.get_field('name')
    self.assertEqual(resolved, expected)