Example #1
Score: 0
 def row_to_index(self, row):
     """Enrich an entity row with alias, normalized and metadata fields so it
     can be submitted to the search index.

     Looks up the row's aliases via ``self.alias.traverse``, attaches plain
     and normalized alias/title fields, and stamps the index bookkeeping
     keys (``__type__``, ``__key__``, ``__id__``). Returns the mutated row.
     """
     key = row.get(self.entity_key)
     q = {self.alias_key: key}
     # Materialize the alias texts into a list: under Python 3 ``map``
     # returns a one-shot iterator, so storing it in row['alias'] and then
     # iterating it again for row['alias.n'] would leave one field empty.
     aliases = [a.get(self.alias_text) for a in self.alias.traverse(**q)]
     row['alias'] = aliases
     row['title.n'] = normalize(row.get('title'))
     row['alias.n'] = [normalize(a) for a in aliases]
     row['__type__'] = self.name
     row['__key__'] = key
     row['__id__'] = self.name + ':' + key
     # Progress indicator for long batch-indexing runs.
     sys.stdout.write('.')
     sys.stdout.flush()
     return row
Example #2
Score: 0
 def row_to_index(self, row):
     """Prepare an entity row for indexing: attach alias lists, normalized
     title/alias variants, and the ``__type__``/``__key__``/``__id__``
     bookkeeping fields. Returns the mutated row.
     """
     key = row.get(self.entity_key)
     q = {self.alias_key: key}
     # Build a concrete list instead of a bare ``map``: in Python 3 a map
     # object is a single-use iterator, and it is needed twice below
     # (stored as row['alias'] and normalized into row['alias.n']).
     aliases = [a.get(self.alias_text) for a in self.alias.traverse(**q)]
     row['alias'] = aliases
     row['title.n'] = normalize(row.get('title'))
     row['alias.n'] = [normalize(a) for a in aliases]
     row['__type__'] = self.name
     row['__key__'] = key
     row['__id__'] = self.name + ':' + key
     # Emit a progress dot per indexed row.
     sys.stdout.write('.')
     sys.stdout.flush()
     return row
Example #3
Score: 0
def query(solr, q, filters=(), **kw):
    """Execute a boosted full-text query against Solr.

    ``filters`` is an iterable of (field, value) pairs that become required
    filter queries. A non-trivial ``q`` is expanded into an OR of per-field
    clauses with descending boosts (title > alias > text), each matched both
    verbatim and in normalized form. Returns the decoded JSON response.
    """
    fq = []
    for filter_field, filter_value in filters:
        fq.append('+' + field(filter_field, filter_value))
    if len(q) and q != '*:*':
        nq = normalize(q)
        boosted = [
            field('title', q, boost=10),
            field('title.n', nq, boost=7),
            field('alias', q, boost=8),
            field('alias.n', nq, boost=5),
            field('text', q, boost=2),
            field('text', nq),
        ]
        q = ' OR '.join(boosted)
    raw = solr.raw_query(q=q, fq=fq, wt='json',
            sort='score desc, title desc', fl='*,score', **kw)
    return json.loads(raw)
Example #4
Score: 0
 def find_fuzzy(cls, text, filters=(), facet_type=False, **kw):
     """Full-text search with per-field boosts plus fuzzy token matching.

     ``filters`` is an iterable of (field, value) pairs turned into required
     filter queries. A non-trivial ``text`` is expanded into an OR of boosted
     clauses (title > alias > text, verbatim and normalized), and each token
     of the normalized text is additionally matched fuzzily against title and
     alias. When ``facet_type`` is set, a facet over ``__type__`` is
     requested. Delegates the actual search to ``cls.find``.
     """
     fq = []
     for filter_field, filter_value in filters:
         fq.append('+' + query_filter(filter_field, filter_value))
     if len(text) and text != '*:*':
         ntext = normalize(text)
         clauses = [
             query_filter('title', text, boost=10),
             query_filter('title.n', ntext, boost=7),
             query_filter('alias', text, boost=8),
             query_filter('alias.n', ntext, boost=5),
             query_filter('text', text, boost=2),
             query_filter('text', ntext),
         ]
         for token in tokenize(ntext):
             clauses.append(query_filter('title', token, fuzzy=True, boost=4))
             clauses.append(query_filter('alias', token, fuzzy=True, boost=3))
         text = ' OR '.join(clauses)
     if facet_type:
         kw['facet'] = 'true'
         kw['facet.field'] = kw.get('facet.field', []) + ['__type__']
         kw['facet.limit'] = 50
     return cls.find(text, fq, **kw)
Example #5
Score: 0
 def find_fuzzy(cls, text, filters=(), facet_type=False, **kw):
     """Boosted, fuzzy full-text search delegated to ``cls.find``.

     Required filter queries are built from the (field, value) pairs in
     ``filters``. For a real query string the text is rewritten into an OR
     of weighted clauses over title/alias/text — both as typed and in
     normalized form — plus fuzzy per-token matches on title and alias.
     ``facet_type`` additionally requests faceting on ``__type__``.
     """
     required = ['+' + query_filter(k, v) for (k, v) in filters]
     if len(text) and text != '*:*':
         normalized = normalize(text)
         parts = [
             query_filter('title', text, boost=10),
             query_filter('title.n', normalized, boost=7),
             query_filter('alias', text, boost=8),
             query_filter('alias.n', normalized, boost=5),
             query_filter('text', text, boost=2),
             query_filter('text', normalized),
         ]
         for tok in tokenize(normalized):
             parts.extend((
                 query_filter('title', tok, fuzzy=True, boost=4),
                 query_filter('alias', tok, fuzzy=True, boost=3),
             ))
         text = ' OR '.join(parts)
     if facet_type:
         kw['facet'] = 'true'
         kw['facet.field'] = kw.get('facet.field', []) + ['__type__']
         kw['facet.limit'] = 50
     return cls.find(text, required, **kw)