def home(request):
    """Render the main page, optionally filtering News by a full-text query.

    Reads the ``q`` GET parameter, rewrites '+'/'-' into Whoosh AND/NOT
    operators, searches the Whoosh index and collects the matching News
    rows for the template context.
    """
    template = 'main_content.html'
    ix = index.open_dir(settings.WHOOSH_INDEX)
    newsfound = []
    query = request.GET.get('q', None)
    if query is not None and query != u"":
        # Whoosh doesn't understand '+' or '-' but we can replace
        # them with 'AND' and 'NOT'.
        query = query.replace('+', ' AND ').replace(' -', ' NOT ')
        parser = QueryParser("content", schema=ix.schema, group=OrGroup)
        try:
            qry = parser.parse(query)
        except Exception:
            # Don't show the user weird errors only because we don't
            # understand the query (parser.parse("") would return None
            # anyway). Narrowed from a bare except so that e.g.
            # KeyboardInterrupt is not swallowed.
            qry = None
        if qry is not None:
            # Context manager closes the searcher (the original leaked it).
            with ix.searcher() as searcher:
                hits = searcher.search(qry)
                for h in hits:
                    # Single query per hit instead of exists() + get()
                    # (the original hit the database twice per result).
                    news = News.objects.filter(pk=int(h["id"])).first()
                    if news is not None:
                        newsfound.append(news)
    return render(request, template, {'query': query, 'hits': newsfound})
def search(self, query: str, frequencies=False):
    """
    Get the indices of the documents matching the query

    :param query: The whoosh query string
    :param frequencies: If true, return pairs of (docnum, frequency) rather than only docnum
    :return: sequence of document numbers (and freqs, if frequencies is True)
    """
    parser = QueryParser("text", self.index.schema)
    with self.index.searcher(weighting=scoring.Frequency) as searcher:
        if not frequencies:
            matches = searcher.search(parser.parse(query), limit=None,
                                      scored=False, sortedby=None)
            return [matches[i]['doc_i'] for i in range(len(matches))]
        # searcher.search over-counts individual occurrences of the terms in a
        # phrase ("term1 term2") once the phrase occurs at least once, so for
        # frequencies we accumulate counts manually by walking each
        # sub-query's matcher. (A searcher.search setting probably exists that
        # would fix this directly, but it hasn't been found yet.)
        freq_per_doc = defaultdict(lambda: float(0))
        for part in divide_query(query):
            matcher = parser.parse(part).matcher(searcher)
            while matcher.is_active():
                docnum = searcher.reader().stored_fields(matcher.id())['doc_i']
                boosts = boostdict(matcher)
                for span in matcher.spans():
                    freq_per_doc[docnum] += boosts[span] if span in boosts else 1
                matcher.next()
        return list(freq_per_doc.items())
def buildQueryParser(self):
    """ Build the query parser """
    # OR terms by default; explicit '+'/'-' markers are handled by the plugin.
    parser = QueryParser('content', schema=self.indexSchema, group=OrGroup)
    parser.add_plugin(PlusMinusPlugin)
    return parser
def buildQueryParser(self, feature):
    """ Build the query parser that parses the specified feature

    @param feature The feature on which this ranking will be based
    """
    # OR terms by default; explicit '+'/'-' markers are handled by the plugin.
    parser = QueryParser(feature, schema=self.indexSchema, group=OrGroup)
    parser.add_plugin(PlusMinusPlugin)
    return parser
def main(argv):
    """Interactive console search loop over a Whoosh index (Python 2 script).

    Prompts for a query string, searches the 'content' field, and pages
    through results twenty hits at a time until the user quits.
    """
    basicConfig(level="INFO")
    getLogger().setLevel("WARN")
    logger.setLevel("INFO")
    config = load_config()
    index_path = config['index_path']
    ix = open_dir(index_path)
    # NOTE(review): this searcher is only used for doc_count() and is never
    # closed; each query below opens its own searcher via `with`.
    searcher = ix.searcher()
    print "Doc count=%d" % searcher.doc_count()
    while True:
        try:
            querystring = raw_input("find something? >")
        except KeyboardInterrupt:
            # Bare print emits a trailing newline before leaving the loop.
            print
            break
        with ix.searcher() as searcher:
            querystring = querystring.strip()
            if querystring == "":
                # An empty query matches every document.
                q = query.Every()
            else:
                parser = QueryParser("content", ix.schema)
                q = parser.parse(querystring)
            results = searcher.search_page(q, 1, pagelen=20)
            if len(results) == 0:
                print "No result"
            else:
                print "Found %d results" % len(results)
                quit_ = False
                # Paging: Enter advances to the next page, 'q' quits.
                # NOTE(review): `p` drives both the for loop and the page
                # fetched inside the while, which makes the page arithmetic
                # hard to follow — presumably intentional; verify paging past
                # page 2 behaves as expected.
                for p in range(1, results.pagecount + 1):
                    while not quit_:
                        for i, hit in enumerate(results):
                            # Global 1-based result number across pages.
                            print "%d >> %s" % (i + (p - 1) * 20 + 1, hit)
                        inp = raw_input(
                            "Page %d/%d, (Enter: next page|q: quit) ? >"
                            % (p, results.pagecount))
                        if inp.strip() == 'q':
                            quit_ = True
                        else:
                            if p < results.pagecount:
                                results = searcher.search_page(q, p + 1, pagelen=20)
                            break
                    if quit_:
                        break
def searchTime(dir, query, lim):
    """Search the index using a 50/50 blend of BM25 and TF-IDF scores.

    :param dir: directory of the whoosh index
    :param query: UTF-8 encoded query string (Python 2 str)
    :param lim: maximum number of hits to return
    :return: list of the stored "id" values of the matching documents
    """
    index = open_dir(dir)

    class TimeScorer(scoring.BaseScorer):
        # Per-term scorer blending BM25 and TF-IDF equally.
        def __init__(self, idfScorer, bm25Scorer):
            self.idfScorer = idfScorer
            self.bm25Scorer = bm25Scorer

        def score(self, matcher):
            s = self.bm25Scorer.score(matcher) * 0.5 + self.idfScorer.score(matcher) * 0.5
            return s

    class TimeWeight(scoring.WeightingModel):
        # Weighting model that hands out a TimeScorer per (field, term).
        def scorer(self, searcher, fieldname, text, qf=1):
            # BM25
            bm25Scorer = BM25F().scorer(searcher, fieldname, text, qf)
            tfidfScorer = TF_IDF().scorer(searcher, fieldname, text, qf)
            return TimeScorer(tfidfScorer, bm25Scorer)

    res = []
    # BUG FIX: the original passed weighting=TimeScorer(). TimeScorer is a
    # per-term BaseScorer whose __init__ requires two arguments, so that call
    # raised TypeError; the searcher expects the WeightingModel, TimeWeight.
    with index.searcher(weighting=TimeWeight()) as searcher:
        query = QueryParser("content", index.schema, group=OrGroup).parse(unicode(query, "UTF-8"))
        results = searcher.search(query, limit=lim)
        for r in results:
            res.append(r["id"])
    return res
def searchPageRank(dir, query, lim, rank):
    """Search the index, scoring hits by term weight * IDF * external PageRank.

    :param dir: directory of the whoosh index
    :param query: UTF-8 encoded query string (Python 2 str)
    :param lim: maximum number of hits to return
    :param rank: dict mapping document number strings (1-based) to rank scores
    :return: list of the stored "id" values of the matching documents
    """
    index = open_dir(dir)

    class PageRankScorer(scoring.BaseScorer):
        def __init__(self, idf):
            self.idf = idf

        def score(self, matcher):
            # Documents absent from the rank table score zero.
            doc = str(matcher.id() + 1)
            r = rank[doc] if doc in rank.keys() else 0
            return matcher.weight() * self.idf * r

    class pageRankWeight(scoring.WeightingModel):
        def scorer(self, searcher, fieldname, text, qf=1):
            # IDF is a global statistic, so get it from the top-level searcher.
            parent = searcher.get_parent()  # Returns self if no parent
            return PageRankScorer(parent.idf(fieldname, text))

    found = []
    with index.searcher(weighting=pageRankWeight()) as searcher:
        parsed = QueryParser("content", index.schema, group=OrGroup).parse(unicode(query, "UTF-8"))
        for hit in searcher.search(parsed, limit=lim):
            found.append(hit["id"])
    return found
def listar(event):
    """Populate the results listbox according to the selected search mode.

    Modes (module-level `pattern`): "texto" searches subject/body and resolves
    sender names from the address-book index; "spam" OR-searches subjects;
    "fecha" lists messages dated after the typed YYYYMMDD date.
    """
    lista.delete(0, END)
    ixc = open_dir(dirindex1)
    ixa = open_dir(dirindex2)
    if pattern == "texto":
        with ixc.searcher() as searcher:
            consulta = MultifieldParser(["asunto", "cuerpo"], ixc.schema).parse(
                unicode(entrada.get()))
            for hit in searcher.search(consulta):
                lista.insert(END, hit['remitente'])
                # Look up the sender's name in the address-book index.
                with ixa.searcher() as namesearch:
                    consulta_nombre = QueryParser('email', ixa.schema).parse(
                        unicode(hit['remitente']))
                    for entry in namesearch.search(consulta_nombre):
                        lista.insert(END, entry['nombre'])
                lista.insert(END, '')
    elif pattern == "spam":
        with ixc.searcher() as searcher:
            # OR together the typed words so any of them matches the subject.
            asuntos = entrada.get().strip().replace(" ", " OR ")
            consulta = QueryParser("asunto", ixc.schema).parse(unicode(asuntos))
            for hit in searcher.search(consulta):
                lista.insert(END, hit['fichero'])
                lista.insert(END, '')
    elif pattern == "fecha":
        with ixc.searcher() as searcher:
            # Parse the typed date; datetime alone causes errors downstream,
            # so keep only the date part (time is not wanted).
            biggerthan = datetime.datetime.strptime(
                entrada.get().strip(), "%Y%m%d").date()
            # "{fecha to]" is whoosh range syntax: open interval to the end.
            consulta = QueryParser("fecha", ixc.schema).parse(
                unicode("{" + str(biggerthan) + " to]"))
            for hit in searcher.search(consulta):
                lista.insert(END, hit['remitente'])
                lista.insert(END, hit['destinatario'])
                lista.insert(END, hit['asunto'])
                lista.insert(END, '')
def main(argv):
    """Interactive console search loop over a Whoosh index (Python 2 script).

    NOTE(review): near-duplicate of the other `main` in this file — consider
    consolidating into one implementation.
    """
    basicConfig(level="INFO")
    getLogger().setLevel("WARN")
    logger.setLevel("INFO")
    config = load_config()
    index_path = config['index_path']
    ix = open_dir(index_path)
    # NOTE(review): this searcher is only used for doc_count() and is never
    # closed; each query below opens its own searcher via `with`.
    searcher = ix.searcher()
    print "Doc count=%d"%searcher.doc_count()
    while True:
        try:
            querystring = raw_input("find something? >")
        except KeyboardInterrupt:
            # Bare print emits a trailing newline before leaving the loop.
            print
            break
        with ix.searcher() as searcher:
            querystring = querystring.strip()
            if querystring == "":
                # An empty query matches every document.
                q = query.Every()
            else:
                parser = QueryParser("content", ix.schema)
                q = parser.parse(querystring)
            results = searcher.search_page(q, 1, pagelen=20)
            if len(results) == 0:
                print "No result"
            else:
                print "Found %d results"%len(results)
                quit_ =False
                # Paging: Enter advances to the next page, 'q' quits.
                for p in range(1, results.pagecount+1):
                    while not quit_:
                        for i, hit in enumerate(results):
                            # Global 1-based result number across pages.
                            print "%d >> %s" %(i + (p-1)*20 + 1,hit)
                        inp = raw_input("Page %d/%d, (Enter: next page|q: quit) ? >" % (p, results.pagecount))
                        if inp.strip() == 'q':
                            quit_ = True
                        else:
                            if p < results.pagecount:
                                results = searcher.search_page(q, p+1, pagelen=20)
                            break
                    if quit_:
                        break
def buscar(pattern, texto):
    """Run a whoosh query on field `pattern` and return the matching Anime objects."""
    ix = open_dir(dirindex)
    encontrados = []
    with ix.searcher() as searcher:
        consulta = QueryParser(pattern, ix.schema).parse(unicode(texto))
        for hit in searcher.search(consulta):
            # Resolve each hit back to its database row by title.
            encontrados.append(Anime.objects.get(titulo=hit['titulo']))
    return encontrados
def searchCOS(dir, query, lim):
    """TF-IDF ranked search; returns the stored "id" values of the top hits.

    :param dir: directory of the whoosh index
    :param query: UTF-8 encoded query string (Python 2 str)
    :param lim: maximum number of hits to return
    """
    index = open_dir(dir)
    matches = []
    with index.searcher(weighting=scoring.TF_IDF()) as searcher:
        parsed = QueryParser("content", index.schema, group=OrGroup).parse(unicode(query, "UTF-8"))
        for hit in searcher.search(parsed, limit=lim):
            matches.append(hit["id"])
    return matches
def get_context(self, query: str, window: int = 30):
    """
    Get the words in the context (n-word window) of all locations of the string

    :param query: search query
    :param window: window size (in words)
    :return: a generator of (id, text) pairs
    """

    def window_tokens(tokens, spans):
        # Yield the tokens around each span; `last` remembers the highest
        # index already yielded so overlapping windows don't repeat tokens.
        last = -1
        for span in spans:
            lo = max(last + 1, span.start - window)
            hi = min(len(tokens), span.end + window + 1)
            for last in range(lo, hi):
                yield tokens[last]

    parsed = QueryParser("text", self.index.schema).parse(query)
    with self.index.searcher() as searcher:
        matcher = parsed.matcher(searcher)
        while matcher.is_active():
            docnum = searcher.reader().stored_fields(matcher.id())['doc_i']
            yield docnum, list(window_tokens(self.tokens[docnum], matcher.spans()))
            matcher.next()
def searchL2R(dir, query, lim, rank, w):
    """Learning-to-rank style search: score = w[0]*BM25 + w[1]*TF-IDF + w[2]*rank.

    :param dir: directory of the whoosh index
    :param query: UTF-8 encoded query string (Python 2 str)
    :param lim: maximum number of hits to return
    :param rank: dict mapping stored document id (as str) -> external rank score
    :param w: 3-element weight sequence for (BM25, TF-IDF, rank)
    :return: list of the stored "id" values of the matching documents
    """
    index = open_dir(dir)
    # `sss` is captured by L2RScorer.score via closure; it is rebound to the
    # live searcher below so the scorer can map matcher ids to stored fields.
    sss = None;
    class L2RScorer(scoring.BaseScorer):
        def __init__(self, idfScorer,bm25Scorer):
            self.idfScorer = idfScorer
            self.bm25Scorer = bm25Scorer
        def score(self, matcher):
            # Look up the external rank by the document's stored id;
            # documents absent from `rank` contribute 0 for that component.
            doc = str(sss.stored_fields(matcher.id())["id"])
            r = 0
            if doc in rank.keys():
                r = rank[doc]
            # Weighted blend of BM25, TF-IDF and the external rank signal.
            s = self.bm25Scorer.score(matcher)*w[0]+self.idfScorer.score(matcher)*w[1]+r*w[2]
            return s
    class L2RWeight(scoring.WeightingModel):
        def scorer(self, searcher, fieldname, text, qf=1):
            # BM25
            bm25Scorer = BM25F().scorer(searcher, fieldname, text, qf)
            tfidfScorer = TF_IDF().scorer(searcher, fieldname, text, qf)
            return L2RScorer(tfidfScorer,bm25Scorer)
    res = []
    with index.searcher(weighting=L2RWeight()) as searcher:
        # Rebind the closure variable before searching (see note above).
        sss = searcher
        query = QueryParser("content", index.schema, group=OrGroup).parse(unicode(query,"UTF-8"))
        results = searcher.search(query, limit=lim)
        for r in results:
            res.append(r["id"])
    return res
def _journal_filter(user_log, search_term):
    """
    Filters sqlalchemy user_log based on search_term with whoosh Query language
    http://packages.python.org/Whoosh/querylang.html

    :param user_log: sqlalchemy query over UserLog to be narrowed
    :param search_term: whoosh query string (may be empty/None)
    """
    # Lazy %-style args instead of eager string interpolation (consistent
    # with the logging convention used by the other _journal_filter variant).
    log.debug('Initial search term: %r', search_term)
    qry = None
    if search_term:
        qp = QueryParser('repository', schema=JOURNAL_SCHEMA)
        # Allow date literals/ranges inside the query string.
        qp.add_plugin(DateParserPlugin())
        qry = qp.parse(unicode(search_term))
        log.debug('Filtering using parsed query %r', qry)

    def wildcard_handler(col, wc_term):
        # Translate whoosh wildcard terms into case-insensitive SQL matches.
        if wc_term.startswith('*') and not wc_term.endswith('*'):
            #postfix == endswith
            wc_term = remove_prefix(wc_term, prefix='*')
            # BUG FIX: lower-case the term too — the column is lower-cased,
            # so a mixed-case term could never match otherwise.
            return func.lower(col).endswith(func.lower(wc_term))
        elif wc_term.startswith('*') and wc_term.endswith('*'):
            #wildcard == ilike
            wc_term = remove_prefix(wc_term, prefix='*')
            wc_term = remove_suffix(wc_term, suffix='*')
            # BUG FIX: same case-insensitivity fix as above.
            return func.lower(col).contains(func.lower(wc_term))

    def get_filterion(field, val, term):
        # Map whoosh field names onto the corresponding UserLog columns.
        if field == 'repository':
            field = getattr(UserLog, 'repository_name')
        elif field == 'ip':
            field = getattr(UserLog, 'user_ip')
        elif field == 'date':
            field = getattr(UserLog, 'action_date')
        elif field == 'username':
            field = getattr(UserLog, 'username')
        else:
            field = getattr(UserLog, field)
        log.debug('filter field: %s val=>%s', field, val)

        #sql filtering
        if isinstance(term, query.Wildcard):
            return wildcard_handler(field, val)
        elif isinstance(term, query.Prefix):
            return func.lower(field).startswith(func.lower(val))
        elif isinstance(term, query.DateRange):
            # val is [startdate, enddate]
            return and_(field >= val[0], field <= val[1])
        return func.lower(field) == func.lower(val)

    if isinstance(qry, (query.And, query.Term, query.Prefix, query.Wildcard,
                        query.DateRange)):
        if not isinstance(qry, query.And):
            # Treat a single term as a one-element AND for uniform handling.
            qry = [qry]
        for term in qry:
            field = term.fieldname
            val = (term.text if not isinstance(term, query.DateRange)
                   else [term.startdate, term.enddate])
            user_log = user_log.filter(get_filterion(field, val, term))
    elif isinstance(qry, query.Or):
        filters = []
        for term in qry:
            field = term.fieldname
            val = (term.text if not isinstance(term, query.DateRange)
                   else [term.startdate, term.enddate])
            filters.append(get_filterion(field, val, term))
        user_log = user_log.filter(or_(*filters))

    return user_log
def search(request):
    """Site search view: full-text query over the page index filtered by
    region, menu category and creation-date range; renders 'search.html'."""
    # Init the query string
    q = request.GET.get('q')
    # Where search content or title?
    search_in = request.GET.get('search_in', 'content')
    if search_in not in ('content', 'name',):
        search_in = 'content'
    # No need show a publication content if q is empty
    if not q:
        search_in = 'name'
    # Init the region search param; fall back to the default region when the
    # requested one does not exist.
    region_id = request.GET.get('region_search', get_region().id)
    try:
        region = Region.objects.get(pk=region_id)
    except Region.DoesNotExist:
        region = Region.objects.get(pk=get_region().id)
    # Init the category search param
    menu_search = request.GET.get('menu_search', 0)
    try:
        menu = Menu.objects.get(pk=menu_search)
        if menu.region != region:
            # Change region and find similar category (same name, new region).
            request.session['region_id'] = region.id
            menu = Menu.objects.root_nodes().filter(
                region=region).get(name=menu.name)
    except Menu.DoesNotExist:
        # Default to the region's first root category.
        menu = Menu.objects.root_nodes().filter(region=region)[0]
    from datetime import datetime
    # Init date_start and date_end search params; invalid dates are ignored.
    date_start = request.GET.get('date_start', '')
    date_end = request.GET.get('date_end', '')
    try:
        if date_start:
            date_start = datetime.strptime(date_start, '%d.%m.%Y')
    except ValueError:
        date_start = ''
    try:
        if date_end:
            date_end = datetime.strptime(date_end, '%d.%m.%Y')
    except ValueError:
        date_end = ''
    # Search in index
    hits = None
    if q is not None:
        # Open index dir
        ix = whoosh_index.open_dir(settings.PAGE_SEARCH_INDEX)
        # Make parser
        parser = QueryParser(search_in, schema=ix.schema)
        # Configure filter: restrict hits to the current region.
        filter = Term('region', region.id)
        # Make query string: map +/-/| onto whoosh AND/NOT/OR operators.
        qstr = q.replace('+', ' AND ').replace(' -', ' NOT ').replace(' | ', ' OR ')
        # Parse query string
        query = parser.parse(qstr)
        # And... search in index!
        hits = ix.searcher().search(query, filter=filter, limit=None)
    pages = Page.objects.filter(region=region).filter(visible=True)
    # Apply filter of category
    pages = pages.filter(
        menu__in=menu.get_descendants(include_self=True))
    # Apply filter of date range
    if date_start and date_end:
        pages = pages.filter(
            create_date__gte=date_start
        ).filter(
            create_date__lte=date_end)
    elif date_start and not date_end:
        pages = pages.filter(create_date__gte=date_start)
    elif not date_start and date_end:
        pages = pages.filter(create_date__lte=date_end)
    from django.utils.html import strip_tags, strip_entities
    from cms.views.utils import paginate
    # If not the q param
    # NOTE(review): `hits is None and not hits` is equivalent to
    # `hits is None` — an empty (non-None) result set takes the else branch
    # and yields zero pages. Confirm that is the intended behavior.
    if hits is None and not hits:
        # Total count
        hits_count = pages.count()
        # Numbered
        for num, page in enumerate(pages):
            page.num = num + 1
        # Paginate it
        pages = paginate(request, pages, 20)
    else:
        # Merge hits and filtered publications
        pages = pages.filter(pk__in=[h.get('id') for h in hits])
        # Numbered
        for num, page in enumerate(pages):
            page.num = num + 1
        # Total count
        hits_count = pages.count()
        # Paginate it
        pages = paginate(request, pages, 20)
        # Highlight results in the searched field for the rendered page.
        for hit in hits:
            for page in pages:
                if page.id == hit['id']:
                    if search_in == 'name':
                        page.name = hit.highlights('name', text=strip_entities(strip_tags(page.name)))
                    if search_in == 'content':
                        page.content = hit.highlights('content', text=strip_entities(strip_tags(page.content)))
    # Close the index only when it was actually opened above.
    if 'ix' in locals():
        ix.close()
    # Re-format the parsed dates back to DD.MM.YYYY for the template.
    if date_start:
        date_start = '%s.%s.%s' % (date_start.day, date_start.month, date_start.year)
    if date_end:
        date_end = '%s.%s.%s' % (date_end.day, date_end.month, date_end.year)
    return render(request, 'search.html', {
        'q': q,
        'menu_search': menu.id,
        'region_search': region.id,
        'search_in': search_in,
        'date_start': date_start,
        'date_end': date_end,
        'pages': pages,
        'hits_count': hits_count
    })
try: description = post["d"] title = post["t"] l = post["l"] dpt = title + ". " + description t = time.strftime(post["p"]) print title tags = {} for word in list(set(dpt.split())): with entities_index.searcher() as searcher: parser = QueryParser("name", entities_index.schema, group=OrGroup).parse(word) results = searcher.search(parser, limit=100) for e in results: name = e["name"] url = e["url"] opt = e["opt"] if word == name and len(opt) == 0: tags[name] = url elif name not in tags.keys() and name in dpt and ( len(opt) == 0 or opt in dpt): tags[name] = url print tags news_writer.add_document(id=unicode(str(post['_id'])),
def _journal_filter(user_log, search_term):
    """
    Filters sqlalchemy user_log based on search_term with whoosh Query language
    http://packages.python.org/Whoosh/querylang.html

    :param user_log: sqlalchemy query over UserLog to be narrowed
    :param search_term: whoosh query string (may be empty/None)
    """
    log.debug('Initial search term: %r', search_term)
    qry = None
    if search_term:
        qp = QueryParser('repository', schema=JOURNAL_SCHEMA)
        # Allow date literals/ranges inside the query string.
        qp.add_plugin(DateParserPlugin())
        qry = qp.parse(unicode(search_term))
        log.debug('Filtering using parsed query %r', qry)

    def wildcard_handler(col, wc_term):
        # Translate whoosh wildcard terms into case-insensitive SQL matches.
        if wc_term.startswith('*') and not wc_term.endswith('*'):
            #postfix == endswith
            wc_term = remove_prefix(wc_term, prefix='*')
            return func.lower(col).endswith(func.lower(wc_term))
        elif wc_term.startswith('*') and wc_term.endswith('*'):
            #wildcard == ilike
            wc_term = remove_prefix(wc_term, prefix='*')
            wc_term = remove_suffix(wc_term, suffix='*')
            return func.lower(col).contains(func.lower(wc_term))

    def get_filterion(field, val, term):
        # Map whoosh field names onto the corresponding UserLog columns.
        if field == 'repository':
            field = getattr(UserLog, 'repository_name')
        elif field == 'ip':
            field = getattr(UserLog, 'user_ip')
        elif field == 'date':
            field = getattr(UserLog, 'action_date')
        elif field == 'username':
            field = getattr(UserLog, 'username')
        else:
            field = getattr(UserLog, field)
        log.debug('filter field: %s val=>%s', field, val)

        #sql filtering
        if isinstance(term, query.Wildcard):
            return wildcard_handler(field, val)
        elif isinstance(term, query.Prefix):
            return func.lower(field).startswith(func.lower(val))
        elif isinstance(term, query.DateRange):
            # val is [startdate, enddate]
            return and_(field >= val[0], field <= val[1])
        return func.lower(field) == func.lower(val)

    if isinstance(qry, (query.And, query.Term, query.Prefix, query.Wildcard,
                        query.DateRange)):
        if not isinstance(qry, query.And):
            # Treat a single term as a one-element AND for uniform handling.
            qry = [qry]
        for term in qry:
            field = term.fieldname
            val = (term.text if not isinstance(term, query.DateRange)
                   else [term.startdate, term.enddate])
            user_log = user_log.filter(get_filterion(field, val, term))
    elif isinstance(qry, query.Or):
        filters = []
        for term in qry:
            field = term.fieldname
            val = (term.text if not isinstance(term, query.DateRange)
                   else [term.startdate, term.enddate])
            filters.append(get_filterion(field, val, term))
        user_log = user_log.filter(or_(*filters))

    return user_log
def search(request, code=None, number=None, operator=None):
    """Look up phone-number Records by area code, number prefix and/or operator.

    :param code: area-code prefix, or None
    :param number: number prefix, optionally with a '*' wildcard, or None
    :param operator: free-text operator name, or None
    :return: tuple (list of Record.asDict() dicts, elapsed seconds)
    """
    log.debug('in {0}: code={1}, number={2}, operator={3}'.format(
        funcname(), code, number, operator))
    start_time = time.time()
    ix = request.registry.settings['ix']

    def pad_number(num):
        # Right-pad a partial number with zeros up to 7 digits, as an int.
        return int(num + ''.join(['0' for z in range(0, 7 - len(num))]))

    def expand_star(rows, pattern):
        # Materialize one synthetic single-number Record for every number in
        # each row's [nfrom, nto) range matching the regex `pattern`.
        expanded = []
        for r in rows:
            for x in range(r.nfrom, r.nto):
                if re.match(r'%s' % pattern, str(x)):
                    c = Record()
                    c.code = r.code
                    c.nfrom = x
                    c.nto = x
                    c.region = r.region
                    c.operator = r.operator
                    expanded.append(c)
        return expanded

    def find_by_operator(op):
        # Full-text search on the whoosh 'operator' field, resolving each hit
        # back to its Record row via the stored id.
        parser = QueryParser('operator', schema=ix.schema)
        tokens = re.sub(r'[,-/;\'.*:@!#$%^&?()№{}\[\]+=]', ' ', op).split(' ')
        tstr1 = ' and '.join(['operator:*%s*' % x for x in tokens])
        parsed = parser.parse(tstr1)
        found = []
        for res in ix.searcher().search(parsed, limit=None):
            found.append(
                meta.session.query(Record).filter(Record.id == res['id']).one())
        return found

    if code and not number and not operator:
        rows = meta.session.query(Record).filter(Record.code.like(code + '%'))
        return [r.asDict() for r in rows], time.time() - start_time

    if code and number and not operator:
        if number.find('*') >= 1:
            # Wildcard lookup: expand matching ranges into single numbers.
            tmpnum = number[0:number.find('*')]
            full_number = number.replace('*', '.') + ''.join(
                ['.' for c in range(0, 7 - len(number))])
            rows = meta.session.query(Record).filter(Record.code == code).filter(
                Record.nfrom.like(tmpnum + '%'))
            results = expand_star(rows, full_number)
            padded = pad_number(tmpnum)
            rows = meta.session.query(Record).filter(Record.code == code).filter(
                and_(Record.nfrom <= padded, Record.nto >= padded))
            results1 = expand_star(rows, full_number)
            return [r.asDict() for r in set(results + results1)], time.time() - start_time
        else:
            results = []
            rows = meta.session.query(Record).filter(Record.code == code).filter(
                Record.nfrom.like(number + '%'))
            for r in rows:
                results.append(r)
            full_number = pad_number(number)
            rows = meta.session.query(Record).filter(Record.code == code).filter(
                and_(Record.nfrom <= full_number, Record.nto >= full_number))
            for r in rows:
                if r not in results:
                    results.append(r)
            return [r.asDict() for r in results], time.time() - start_time

    if not code and not number and operator:
        results = find_by_operator(operator)
        return [r.asDict() for r in results], time.time() - start_time

    if not code and number and operator:
        full_number = pad_number(number)
        by_number = list(meta.session.query(Record).filter(
            and_(Record.nfrom <= full_number, Record.nto >= full_number)))
        by_operator = find_by_operator(operator)
        results = []
        results.extend(set(by_number).intersection(set(by_operator)))
        return [r.asDict() for r in results], time.time() - start_time

    if not code and number and not operator:
        full_number = pad_number(number)
        rows = meta.session.query(Record).filter(
            and_(Record.nfrom <= full_number, Record.nto >= full_number))
        results = [r for r in rows]
        # BUG FIX: the original computed `results` here but fell through
        # without returning, so the caller always got [] for this case.
        return [r.asDict() for r in results], time.time() - start_time

    if code and not number and operator:
        # BUG FIX: the original also filtered on Record.nfrom.like(number + '%')
        # even though `number` is None in this branch, raising TypeError;
        # filter by code alone.
        by_code = list(meta.session.query(Record).filter(Record.code == code))
        by_operator = find_by_operator(operator)
        # BUG FIX: the original returned `time.time - start_time` (missing
        # call parentheses), raising TypeError instead of the elapsed time.
        return ([r.asDict() for r in [val for val in by_code if val in by_operator]],
                time.time() - start_time)

    if code and number and operator:
        if number.find('*') >= 1:
            tmpnum = number[0:number.find('*')]
            full_number = number.replace('*', '.') + ''.join(
                ['.' for c in range(0, 7 - len(number))])
            rows = meta.session.query(Record).filter(Record.code == code).filter(
                Record.nfrom.like(tmpnum + '%'))
            expanded = expand_star(rows, full_number)
            padded = pad_number(tmpnum)
            rows = meta.session.query(Record).filter(Record.code == code).filter(
                and_(Record.nfrom <= padded, Record.nto >= padded))
            expanded += expand_star(rows, full_number)
            results = set(expanded)
        else:
            results = []
            rows = meta.session.query(Record).filter(Record.code == code).filter(
                Record.nfrom.like(number + '%'))
            for r in rows:
                results.append(r)
            full_number = pad_number(number)
            rows = meta.session.query(Record).filter(Record.code == code).filter(
                and_(Record.nfrom <= full_number, Record.nto >= full_number))
            for r in rows:
                if r not in results:
                    results.append(r)
        log.debug('in {0}: len(results)={1}'.format(funcname(), len(results)))
        results3 = find_by_operator(operator)
        log.debug('in {0}: len(results3)={1}'.format(funcname(), len(results3)))
        return ([r.asDict() for r in [val for val in results if val in results3]],
                time.time() - start_time)

    return [], time.time() - start_time