def query(self, parser):
    fieldname = self.fieldname or parser.fieldname
    start = self.start
    end = self.end

    if parser.schema and fieldname in parser.schema:
        field = parser.schema[fieldname]
        if field.self_parsing():
            try:
                q = field.parse_range(fieldname, start, end,
                                      self.startexcl, self.endexcl,
                                      boost=self.boost)
                if q is not None:
                    return attach(q, self)
            except QueryParserError:
                e = sys.exc_info()[1]
                return attach(query.error_query(e), self)

        if start:
            start = get_single_text(field, start, tokenize=False,
                                    removestops=False)
        if end:
            end = get_single_text(field, end, tokenize=False,
                                  removestops=False)

    q = query.TermRange(fieldname, start, end, self.startexcl,
                        self.endexcl, boost=self.boost)
    return attach(q, self)
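
# A hedged usage sketch for the range node above (schema and query
# string are illustrative, not from this changeset): a self-parsing
# field such as NUMERIC gets first crack via parse_range() and can
# return a NumericRange; a plain text field falls through to
# get_single_text() and a TermRange.
from whoosh import fields, qparser

schema = fields.Schema(path=fields.ID(stored=True),
                       price=fields.NUMERIC(int))
parser = qparser.QueryParser("path", schema)
q = parser.parse(u"price:[10 to 100]")  # NUMERIC.parse_range -> NumericRange
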
def query(self, parser):
    if self.node:
        q = self.node.query(parser)
    else:
        q = query.NullQuery
    return attach(query.error_query(self.message, q), self)
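
# Hedged sketch of the error-handling contract shown above: instead of
# raising mid-parse, a bad value is wrapped by query.error_query() (as
# introduced in this changeset) so the caller still gets a query tree
# back. The schema and query string below are illustrative.
from whoosh import fields, qparser

schema = fields.Schema(when=fields.DATETIME())
parser = qparser.QueryParser("when", schema)
q = parser.parse(u"bogus-date")  # an error (null) query, not an exception
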
def term_query(self, fieldname, text, termclass, boost=1.0, tokenize=True,
               removestops=True):
    """Returns the appropriate query object for a single term in the query
    string.
    """

    if self.schema and fieldname in self.schema:
        field = self.schema[fieldname]

        # If this field type wants to parse queries itself, let it do so
        # and return early
        if field.self_parsing():
            try:
                q = field.parse_query(fieldname, text, boost=boost)
                return q
            except Exception:
                e = sys.exc_info()[1]
                return query.error_query(e)

        # Otherwise, ask the field to process the text into a list of
        # tokenized strings
        texts = list(field.process_text(text, mode="query",
                                        tokenize=tokenize,
                                        removestops=removestops))

        # If the analyzer returned more than one token, use the field's
        # multitoken_query attribute to decide what query class, if any, to
        # use to put the tokens together
        if len(texts) > 1:
            return self.multitoken_query(field.multitoken_query, texts,
                                         fieldname, termclass, boost)

        # It's possible field.process_text() will return an empty list (for
        # example, on a stop word)
        if not texts:
            return None

        text = texts[0]

    return termclass(fieldname, text, boost=boost)
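
# Hedged sketch of the empty-token branch above: TEXT's default
# analyzer drops stop words in query mode, so term_query() can return
# None and the caller simply omits that term from the parsed tree.
# The schema below is illustrative.
from whoosh import fields, qparser
from whoosh.query import Term

schema = fields.Schema(body=fields.TEXT)
parser = qparser.QueryParser("body", schema)
assert parser.term_query("body", u"the", Term) is None  # stop word -> None
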
def parse_query(self, fieldname, qstring, boost=1.0):
    from whoosh import query

    if qstring == "*":
        return query.Every(fieldname, boost=boost)

    try:
        text = self.to_text(qstring)
    except Exception:
        e = sys.exc_info()[1]
        return query.error_query(e)

    return query.Term(fieldname, text, boost=boost)
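
# Hedged sketch for the NUMERIC parse_query above: "*" matches every
# document with the field, a good value becomes a Term on the encoded
# number, and a value that fails numeric conversion comes back as an
# error query instead of raising. The field name is illustrative.
from whoosh import fields

num = fields.NUMERIC(int)
every = num.parse_query("count", u"*")    # -> query.Every
term = num.parse_query("count", u"42")    # -> query.Term on encoded text
bad = num.parse_query("count", u"forty")  # -> error query, no exception
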
def parse_query(self, fieldname, qstring, boost=1.0):
    from whoosh import query
    from whoosh.support.times import datetime_to_long, is_ambiguous

    try:
        at = self._parse_datestring(qstring)
    except Exception:
        e = sys.exc_info()[1]
        return query.error_query(e)

    if is_ambiguous(at):
        startnum = datetime_to_long(at.floor())
        endnum = datetime_to_long(at.ceil())
        return query.NumericRange(fieldname, startnum, endnum)
    else:
        return query.Term(fieldname, self.to_text(at), boost=boost)
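
# Hedged sketch for the DATETIME parse_query above: a partially
# specified date is "ambiguous", so it is floor()ed and ceil()ed into
# a NumericRange covering the whole period; only a timestamp specified
# all the way down takes the single-Term branch. Names illustrative.
from whoosh import fields

dt = fields.DATETIME()
year_q = dt.parse_query("when", u"2010")     # whole year -> NumericRange
day_q = dt.parse_query("when", u"20100203")  # whole day -> NumericRange
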