def search(text):
    # Split the search text into tokens and escape them properly
    tokens = text_to_tokens(text)
    tokens = escape_tokens(tokens)

    # Create combined search query
    query = combined_search_query(MODELS, tokens)

    # Perform query and limit output to 20 items
    results = query.limit(20).all()

    process_result_details(MODELS, results)

    results_json = []
    for r in results:
        result = {
            'type': r.model.lower(),
            'id': r.id,
            'name': r.name,
        }

        if hasattr(r, 'website') and r.website:
            result['website'] = r.website

        if hasattr(r, 'icao') and r.icao:
            result['icao'] = r.icao

        if hasattr(r, 'frequency') and r.frequency:
            result['frequency'] = '%.3f' % (float(r.frequency))

        results_json.append(result)

    return results_json

def search(text):
    # Split the search text into tokens and escape them properly
    tokens = text_to_tokens(text)
    tokens = escape_tokens(tokens)

    # Create combined search query
    query = combined_search_query(MODELS, tokens)

    # Perform query and limit output to 20 items
    results = query.limit(20).all()
    results = process_results_details(MODELS, results)

    results_json = []
    for r in results:
        result = {
            'type': r['model'].lower(),
            'id': r['id'],
            'name': r['name'],
        }

        if r.get('website'):
            result['website'] = r['website']

        if r.get('icao'):
            result['icao'] = r['icao']

        if r.get('frequency'):
            result['frequency'] = '%.3f' % (float(r['frequency']))

        results_json.append(result)

    return results_json

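# A minimal sketch of the `escape_tokens` helper used above, assuming the
# tokens end up inside SQL LIKE/ILIKE patterns; the backslash-escaping of
# the `%` and `_` wildcards is an assumption, not the confirmed behavior.
def escape_tokens(tokens):
    # Escape LIKE wildcards so user input matches literally.
    return [
        token.replace('\\', '\\\\').replace('%', '\\%').replace('_', '\\_')
        for token in tokens
    ]
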
def search(text):
    # Split the search text into tokens and escape them properly
    tokens = text_to_tokens(text)
    tokens = escape_tokens(tokens)

    # Create combined search query
    return combined_search_query(MODELS, tokens)

def search(text):
    # Split the search text into tokens and escape them properly
    tokens = text_to_tokens(text)
    tokens = escape_tokens(tokens)

    # Create combined search query
    query = combined_search_query(MODELS, tokens)

    # Perform query and limit output to 20 items
    results = query.limit(20).all()
    results = process_results_details(MODELS, results)

    results_json = []
    for r in results:
        result = {"type": r["model"].lower(), "id": r["id"], "name": r["name"]}

        if r.get("website"):
            result["website"] = r["website"]

        if r.get("icao"):
            result["icao"] = r["icao"]

        if r.get("frequency"):
            result["frequency"] = "%.3f" % (float(r["frequency"]))

        results_json.append(result)

    return results_json

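# A rough sketch of what `combined_search_query(MODELS, tokens)` might do,
# assuming Flask-SQLAlchemy models that all expose `id` and `name` columns.
# The `db` session handle, the per-token ILIKE filters, and the UNION shape
# are all assumptions, not the project's confirmed implementation.
from sqlalchemy import literal


def combined_search_query(models, tokens):
    queries = []
    for model in models:
        # Select a uniform (id, name, model) row shape so the per-model
        # queries can be combined with UNION.
        query = db.session.query(  # `db` is the assumed Flask-SQLAlchemy handle
            model.id.label('id'),
            model.name.label('name'),
            literal(model.__name__).label('model'),
        )
        # Require every token to match somewhere in the name.
        for token in tokens:
            query = query.filter(model.name.ilike('%' + token + '%'))
        queries.append(query)
    if len(queries) == 1:
        return queries[0]
    return queries[0].union(*queries[1:])
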
from flask import jsonify


def index(args):
    search_text = args['q']

    # Split the search text into tokens and escape them properly
    tokens = text_to_tokens(search_text)
    tokens = escape_tokens(tokens)

    # Create combined search query
    query = combined_search_query(MODELS, tokens)

    # Perform query and limit output to 20 items
    results = query.limit(20).all()

    # Materialize the converted rows; a lazy map() object would not be
    # JSON-serializable under Python 3.
    results = [convert(r) for r in results]

    return jsonify(results)

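# `convert` is not shown in this section; a plausible sketch that mirrors
# the dict built inline by the search() variants above (the exact field
# handling is an assumption).
def convert(r):
    result = {
        'type': r.model.lower(),
        'id': r.id,
        'name': r.name,
    }
    if getattr(r, 'website', None):
        result['website'] = r.website
    if getattr(r, 'icao', None):
        result['icao'] = r.icao
    if getattr(r, 'frequency', None):
        result['frequency'] = '%.3f' % float(r.frequency)
    return result
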
from flask import render_template, request


def index():
    search_text = request.values.get('text', '').strip()
    if not search_text:
        return render_template('search/list.jinja')

    # Split the search text into tokens and escape them properly
    tokens = text_to_tokens(search_text)
    tokens = escape_tokens(tokens)

    # Create combined search query
    query = combined_search_query(MODELS, tokens)

    # Perform query and limit output to 20 items
    results = query.limit(20).all()

    process_result_details(MODELS, results)

    return render_template('search/list.jinja',
                           search_text=search_text,
                           results=results)

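# How a view like this is typically wired up; the blueprint name and URL
# rule below are illustrative assumptions, not taken from the source.
from flask import Blueprint

search_blueprint = Blueprint('search', __name__)
search_blueprint.add_url_rule('/search/', 'index', index,
                              methods=['GET', 'POST'])
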
def test_tokenizer():
    # Check that this does not throw exceptions
    text_to_tokens(u'\\')
    text_to_tokens(u'blabla \\')
    text_to_tokens(u'"')
    text_to_tokens(u'"blabla \\')

    # Check that the tokenizer returns expected results
    assert text_to_tokens(u'a b c') == [u'a', u'b', u'c']
    assert text_to_tokens(u'a \'b c\'') == [u'a', u'b c']
    assert text_to_tokens(u'a "b c" d') == [u'a', u'b c', u'd']
    assert text_to_tokens(u'old "mac donald" has a FARM') == \
        [u'old', u'mac donald', u'has', u'a', u'FARM']

def test_tokenizer():
    # Check that this does not throw exceptions
    text_to_tokens(u"\\")
    text_to_tokens(u"blabla \\")
    text_to_tokens(u'"')
    text_to_tokens(u'"blabla \\')

    # Check that the tokenizer returns expected results
    assert text_to_tokens(u"a b c") == [u"a", u"b", u"c"]
    assert text_to_tokens(u"a 'b c'") == [u"a", u"b c"]
    assert text_to_tokens(u'a "b c" d') == [u"a", u"b c", u"d"]
    assert text_to_tokens(u'old "mac donald" has a FARM') == [
        u"old",
        u"mac donald",
        u"has",
        u"a",
        u"FARM",
    ]

from nose.tools import eq_


def test_tokenizer():
    # Check that this does not throw exceptions
    text_to_tokens('\\')
    text_to_tokens('blabla \\')
    text_to_tokens('"')
    text_to_tokens('"blabla \\')

    # Check that the tokenizer returns expected results
    eq_(text_to_tokens('a b c'), ['a', 'b', 'c'])
    eq_(text_to_tokens('a \'b c\''), ['a', 'b c'])
    eq_(text_to_tokens('a "b c" d'), ['a', 'b c', 'd'])
    eq_(text_to_tokens('old "mac donald" has a FARM'),
        ['old', 'mac donald', 'has', 'a', 'FARM'])
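
# A minimal sketch of a `text_to_tokens` implementation that would satisfy
# the tests above, assuming shlex-style quoting is the intended behavior;
# the plain-split fallback on malformed input is an assumption.
import shlex


def text_to_tokens(text):
    try:
        # Split on whitespace while keeping quoted phrases together.
        return shlex.split(text)
    except ValueError:
        # Unbalanced quotes or a trailing backslash must not raise (see the
        # first four test cases above), so degrade to a whitespace split.
        return text.split()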