def add_items_upsert(self, content_type_pk, indexers):
    compiler = InsertQuery(IndexEntry).get_compiler(connection=self.connection)
    title_sql = []
    autocomplete_sql = []
    body_sql = []
    data_params = []

    for indexer in indexers:
        data_params.extend((content_type_pk, indexer.id))

        # Compile title value
        value = compiler.prepare_value(IndexEntry._meta.get_field('title'), indexer.title)
        sql, params = value.as_sql(compiler, self.connection)
        title_sql.append(sql)
        data_params.extend(params)

        # Compile autocomplete value
        value = compiler.prepare_value(IndexEntry._meta.get_field('autocomplete'), indexer.autocomplete)
        sql, params = value.as_sql(compiler, self.connection)
        autocomplete_sql.append(sql)
        data_params.extend(params)

        # Compile body value
        value = compiler.prepare_value(IndexEntry._meta.get_field('body'), indexer.body)
        sql, params = value.as_sql(compiler, self.connection)
        body_sql.append(sql)
        data_params.extend(params)

    data_sql = ', '.join([
        '(%%s, %%s, %s, %s, %s, 1.0)' % (a, b, c)
        for a, b, c in zip(title_sql, autocomplete_sql, body_sql)
    ])

    with self.connection.cursor() as cursor:
        cursor.execute("""
            INSERT INTO %s (content_type_id, object_id, title, autocomplete, body, title_norm)
            (VALUES %s)
            ON CONFLICT (content_type_id, object_id)
            DO UPDATE SET title = EXCLUDED.title,
                          autocomplete = EXCLUDED.autocomplete,
                          body = EXCLUDED.body
            """ % (IndexEntry._meta.db_table, data_sql), data_params)

    self._refresh_title_norms()
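
# A minimal, database-free sketch of the two-stage placeholder trick used in
# add_items_upsert() above: the first %-formatting pass turns '%%s' into '%s',
# so the object identifiers stay as bind parameters for cursor.execute(),
# while the compiled value SQL is spliced in directly. The to_tsvector(%s)
# strings are illustrative stand-ins for whatever SQL the compiler produces,
# not its exact output.
def _demo_values_sql():
    title_sql = ["to_tsvector(%s)"]
    autocomplete_sql = ["to_tsvector(%s)"]
    body_sql = ["to_tsvector(%s)"]
    return ', '.join(
        '(%%s, %%s, %s, %s, %s, 1.0)' % (a, b, c)
        for a, b, c in zip(title_sql, autocomplete_sql, body_sql)
    )

# _demo_values_sql() returns:
#   "(%s, %s, to_tsvector(%s), to_tsvector(%s), to_tsvector(%s), 1.0)"
# which still contains five %s placeholders for the driver to bind.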
from django.db import connection
from django.db.models.sql.subqueries import InsertQuery


def inheritance_bulk_create(models):
    """A workaround for https://code.djangoproject.com/ticket/28821

    Args:
        models: a list of models to insert

    Note that this handles fewer edge cases than the official bulk_create.
    For relatively simple models it will work well though.
    """
    model_class = models[0]._meta.model
    # The "chain" is a list of models leading up to and including the provided model
    chain = model_class._meta.get_parent_list() + [model_class]

    # Determine if we are setting primary keys from an auto ID or not
    top_pk_attname = chain[0]._meta.pk.attname
    if all(getattr(m, top_pk_attname) is None for m in models):
        pk_given_in_model = False
    elif all(getattr(m, top_pk_attname) is not None for m in models):
        pk_given_in_model = True
    else:
        raise ValueError("You can set all PKs or no PKs")

    parent_done = False
    last_id = 0
    for model in chain:
        meta = model._meta

        if parent_done:
            # Assign inherited primary keys
            pk_attname = meta.pk.attname
            for i, m in enumerate(models):
                if pk_given_in_model:
                    setattr(m, pk_attname, getattr(m, top_pk_attname))
                else:
                    setattr(m, pk_attname, last_id + i)

        if pk_given_in_model or last_id:
            fields = list(meta.local_concrete_fields)
        else:
            # Let the database assign the auto PK on the first (parent) insert
            fields = [f for f in meta.local_concrete_fields if not f.primary_key]

        query_gen = InsertQuery(model)
        query_gen.insert_values(fields, models)
        compiler = query_gen.get_compiler(connection=connection)
        sql_statements = compiler.as_sql()
        assert (
            len(sql_statements) == 1 and len(sql_statements[0]) == 2
        ), "We don't know how to deal with multiple queries here."
        sql_str, sql_values = sql_statements[0]

        with connection.cursor() as cursor:
            cursor.execute(sql_str, sql_values)
            # NOTE: assumes lastrowid reports the first auto ID of the batch
            # (MySQL semantics); SQLite reports the last one instead.
            last_id = cursor.lastrowid

        if not pk_given_in_model:
            for i, m in enumerate(models):
                m.pk = last_id + i

        parent_done = True

    return models
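
# A hedged usage sketch for inheritance_bulk_create(). Place and Restaurant
# are hypothetical multi-table-inheritance models, named here purely for
# illustration:
#
#   class Place(models.Model):
#       name = models.CharField(max_length=100)
#
#   class Restaurant(Place):  # child table rows share the parent's PK
#       serves_pizza = models.BooleanField(default=False)
#
#   restaurants = [Restaurant(name="r%d" % i, serves_pizza=True)
#                  for i in range(1000)]
#   inheritance_bulk_create(restaurants)
#
# This issues one INSERT per model in the chain (two here), where a plain
# save() loop would issue one query per table per object.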
def search(self, config, start, stop, score_field=None):
    normalized_query = normalize(self.query)

    if isinstance(normalized_query, MatchAll):
        return self.queryset[start:stop]
    elif isinstance(normalized_query, Not) and isinstance(
        normalized_query.subquery, MatchAll
    ):
        return self.queryset.none()

    if isinstance(normalized_query, Not):
        normalized_query = normalized_query.subquery
        negated = True
    else:
        negated = False

    # Build the search query, for example: "%s MATCH '(hello AND world)'"
    search_query = self.build_search_query(normalized_query, config=config)
    vectors = self.get_search_vectors()
    rank_expression = self._build_rank_expression(vectors, config)

    # Build a combined vector for the search results queryset: start with the
    # first vector and fold in the rest.
    combined_vector = vectors[0][0]
    for vector, boost in vectors[1:]:
        combined_vector = combined_vector._combine(vector, " ", False)

    # Build the FTS match expression and perform the FTS search; matches come
    # back as rows of the SQLiteFTSIndexEntry model.
    expr = MatchExpression(self.fields or ["title", "body"], search_query)
    objs = SQLiteFTSIndexEntry.objects.filter(expr).select_related("index_entry")

    if self.order_by_relevance:
        objs = objs.order_by(BM25().desc())
    elif not objs.query.order_by:
        # Adds a default ordering to avoid issue #3729.
        objs = objs.order_by("-pk")
        rank_expression = F("pk")

    from django.db import OperationalError, connection
    from django.db.models.sql.subqueries import InsertQuery

    # The compiler is only used to render the failing query in the error
    # message below.
    compiler = InsertQuery(IndexEntry).get_compiler(connection=connection)
    try:
        # Get the IDs of the objects that matched; they live on the related
        # IndexEntry rows, so we go through index_entry.
        obj_ids = [obj.index_entry.object_id for obj in objs]
    except OperationalError as e:
        sql, params = compiler.compile(objs.query)
        raise OperationalError(
            "%s The original query was: %s %s" % (e, sql, params)
        ) from e

    # Filter the source queryset down to the objects that matched the search
    # query, or exclude them if the query is negated.
    if not negated:
        queryset = self.queryset.filter(id__in=obj_ids)
    else:
        queryset = self.queryset.exclude(id__in=obj_ids)

    if score_field is not None:
        queryset = queryset.annotate(**{score_field: rank_expression})

    return queryset[start:stop]
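
# A self-contained sketch of the normalisation branches at the top of
# search(). These stand-in classes only mirror the shapes of query tree the
# method handles; they are illustrative, not Wagtail's real query classes.
class _MatchAll:
    pass

class _Not:
    def __init__(self, subquery):
        self.subquery = subquery

def _classify(query):
    if isinstance(query, _MatchAll):
        return "everything"            # return self.queryset[start:stop]
    if isinstance(query, _Not) and isinstance(query.subquery, _MatchAll):
        return "nothing"               # return self.queryset.none()
    if isinstance(query, _Not):
        return "search, then exclude"  # negated: exclude matching IDs
    return "search, then filter"       # plain: filter to matching IDs

assert _classify(_MatchAll()) == "everything"
assert _classify(_Not(_MatchAll())) == "nothing"
assert _classify(_Not(object())) == "search, then exclude"
assert _classify(object()) == "search, then filter"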