Example #1
    def query(self, parser):
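        # Build a range query for this node: if the schema field can parse
        # ranges itself, delegate to it; otherwise normalize the endpoint
        # texts and fall back to a plain TermRange query.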
        fieldname = self.fieldname or parser.fieldname
        start = self.start
        end = self.end

        if parser.schema and fieldname in parser.schema:
            field = parser.schema[fieldname]
            if field.self_parsing():
                try:
                    q = field.parse_range(fieldname, start, end,
                                          self.startexcl, self.endexcl,
                                          boost=self.boost)
                    if q is not None:
                        return attach(q, self)
                except QueryParserError as e:
                    return attach(query.error_query(e), self)

            if start:
                start = get_single_text(field, start, tokenize=False,
                                        removestops=False)
            if end:
                end = get_single_text(field, end, tokenize=False,
                                      removestops=False)

        q = query.TermRange(fieldname, start, end, self.startexcl,
                            self.endexcl, boost=self.boost)
        return attach(q, self)
Example #2
    def query(self, parser):
        # Resolve the field and term class, then let the parser build the
        # term query, applying the field's analysis chain as configured.
        fieldname = self.fieldname or parser.fieldname
        termclass = self.qclass or parser.termclass
        q = parser.term_query(fieldname, self.text, termclass,
                              boost=self.boost, tokenize=self.tokenize,
                              removestops=self.removestops)
        return attach(q, self)
Example #3
    def query(self, parser):
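        # An error node: build the child's query if there is one (else
        # NullQuery) and wrap it in an error query carrying the message.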
        if self.node:
            q = self.node.query(parser)
        else:
            q = cylleneus.engine.query.qcore.NullQuery

        return attach(whoosh.query.error_query(self.message, q), self)
Example #4
    def query(self, parser):
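        # Same pattern as the previous example: wrap the child's query (or
        # NullQuery) in an error query that records the message.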
        if self.node:
            q = self.node.query(parser)
        else:
            q = query.NullQuery

        return attach(query.error_query(self.message, q), self)
Example #5
    def query(self, parser):
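        # A group node: build a subquery for each child, skip the ones that
        # produced nothing, and combine the rest with this group's query
        # class (e.g. And/Or).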
        subs = []
        for node in self.nodes:
            subq = node.query(parser)
            if subq is not None:
                subs.append(subq)

        q = self.qclass(subs, boost=self.boost, **self.kwargs)
        return attach(q, self)
Example #6
    def query(self, parser):
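        # A binary operator node with exactly two children: degrade to the
        # surviving side (or NullQuery) when a child produces no query.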
        assert len(self.nodes) == 2

        qa = self.nodes[0].query(parser)
        qb = self.nodes[1].query(parser)
        if qa is None and qb is None:
            q = query.NullQuery
        elif qa is None:
            q = qb
        elif qb is None:
            q = qa
        else:
            q = self.qclass(qa, qb)

        return attach(q, self)
Example #7
    def query(self, parser):
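        # Same binary-node handling as the previous example.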
        assert len(self.nodes) == 2

        qa = self.nodes[0].query(parser)
        qb = self.nodes[1].query(parser)
        if qa is None and qb is None:
            q = query.NullQuery
        elif qa is None:
            q = qb
        elif qb is None:
            q = qa
        else:
            q = self.qclass(qa, qb)

        return attach(q, self)
Example #8
        def query(self, parser):
            text = self.text
            fieldname = self.fieldname or parser.fieldname

            # We want to process the text of the phrase into "words" (tokens),
            # and also record the startchar and endchar of each word

            sc = self.textstartchar
            if parser.schema and fieldname in parser.schema:
                field = parser.schema[fieldname]
                if field.analyzer:
                    # We have a field with an analyzer, so use it to parse
                    # the phrase into tokens
                    tokens = field.tokenize(text, mode="query", chars=True)
                    words = []
                    char_ranges = []
                    for t in tokens:
                        words.append(t.text)
                        char_ranges.append((sc + t.startchar, sc + t.endchar))
                else:
                    # We have a field but it doesn't have an analyzer, for
                    # some reason (it's self-parsing?), so use process_text
                    # to get the texts (we won't know the start/end chars)
                    words = list(field.process_text(text, mode="query"))
                    char_ranges = [(None, None)] * len(words)
            else:
                # We're parsing without a schema, so just use the default
                # regular expression to break the text into words
                words = []
                char_ranges = []
                for match in PhrasePlugin.wordexpr.finditer(text):
                    words.append(match.group(0))
                    char_ranges.append((sc + match.start(), sc + match.end()))

            qclass = parser.phraseclass
            q = qclass(fieldname,
                       words,
                       slop=self.slop,
                       boost=self.boost,
                       char_ranges=char_ranges)
            return attach(q, self)
Example #9
        def query(self, parser):
            text = self.text
            fieldname = self.fieldname or parser.fieldname

            # We want to process the text of the phrase into "words" (tokens),
            # and also record the startchar and endchar of each word

            sc = self.textstartchar
            if parser.schema and fieldname in parser.schema:
                field = parser.schema[fieldname]
                if field.analyzer:
                    # We have a field with an analyzer, so use it to parse
                    # the phrase into tokens
                    tokens = field.tokenize(text, mode="query", chars=True)
                    words = []
                    char_ranges = []
                    for t in tokens:
                        words.append(t.text)
                        char_ranges.append((sc + t.startchar, sc + t.endchar))
                else:
                    # We have a field but it doesn't have an analyzer, for
                    # some reason (it's self-parsing?), so use process_text
                    # to get the texts (we won't know the start/end chars)
                    words = list(field.process_text(text, mode="query"))
                    char_ranges = [(None, None)] * len(words)
            else:
                # We're parsing without a schema, so just use the default
                # regular expression to break the text into words
                words = []
                char_ranges = []
                for match in PhrasePlugin.wordexpr.finditer(text):
                    words.append(match.group(0))
                    char_ranges.append((sc + match.start(), sc + match.end()))

            qclass = parser.phraseclass
            q = qclass(fieldname, words, slop=self.slop, boost=self.boost,
                       char_ranges=char_ranges)
            return attach(q, self)
Example #10
    def query(self, parser):
        # A simple wrapper node: wrap the single child's query in this
        # node's query class.
        return attach(self.qclass(self.nodes[0].query(parser)), self)
Example #11
    def query(self, parser):
        # A binary node with no None-handling: both children are assumed
        # to produce a query.
        assert len(self.nodes) == 2
        q = self.qclass(self.nodes[0].query(parser),
                        self.nodes[1].query(parser))
        return attach(q, self)
Example #12
    def query(self, parser):
        # Wrap the child's query only if one was produced; otherwise fall
        # through and return None.
        q = self.nodes[0].query(parser)
        if q:
            return attach(self.qclass(q), self)
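For context, the following is a minimal sketch of how these query() methods end up being called. It assumes a stock Whoosh setup; the schema, field name, and query string are invented for illustration. QueryParser.parse() builds a tree of syntax nodes from the text and then calls query(parser) on the root node, which recurses into per-node methods like the ones above.

    from whoosh.fields import Schema, TEXT
    from whoosh.qparser import QueryParser

    # Hypothetical schema and query string, for illustration only.
    schema = Schema(body=TEXT(stored=True))
    parser = QueryParser("body", schema=schema)

    # parse() tokenizes the string into syntax nodes, then asks the root
    # node for its query; group, term, and range nodes like those above
    # each contribute their piece of the final query tree.
    q = parser.parse("alpha AND [beta TO delta]")
    print(q)  # roughly: And([Term('body', 'alpha'), TermRange('body', 'beta', 'delta', ...)])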