def test_signal_based_update(self):
    """
    Registering signals keeps the autocompleter in sync with model
    saves and deletes automatically.
    """
    signal_registry.register(Stock)
    stock = Stock(symbol='AAPL', name='Apple', market_cap=50)
    stock.save()

    completer = Autocompleter("stock")
    self.assertEqual(len(completer.suggest('aapl')), 1)

    # Renaming the stock should drop the old term and index the new one.
    stock.symbol = 'XYZ'
    stock.name = 'XYZ & Co.'
    stock.save()
    self.assertEqual(len(completer.suggest('aapl')), 0)
    self.assertEqual(len(completer.suggest('xyz')), 1)

    # Deleting the object should purge every related redis key.
    stock.delete()
    self.assertEqual(len(self.redis.keys('djac.test.stock*')), 0)

    signal_registry.unregister(Stock)
def get(self):
    """Serve autocomplete results for the ``q`` query parameter as JSON."""
    self.response.headers['Content-Type'] = 'text/plain; charset=utf-8'
    query = self.request.get('q')
    logging.info(u"Autocomplete search for '{}'".format(query))

    autocompleter = Autocompleter()
    results = autocompleter.get_results(query)

    json_results = []
    for record in results:
        if record is None:
            # get_results can hand back missing records; log and skip them.
            logging.warning(
                u"Autocompleter result for '{}' returned None.".format(
                    query))
            continue
        assert isinstance(record, BookRecord)
        json_results.append({
            'author': record.author,
            'title': record.title,
            'year': record.year,
            'count': record.count,
            'item_ids': record.key.string_id(),
        })

    json_object = {
        'query': query,
        'version': AutocompleteJson.CURRENT_VERSION,
        'status': 'completed',
        'suggestions': json_results,
    }
    self.response.write(json.dumps(json_object, indent=2))
def __init__(self, root, filename, text):
    """Initialize editor state and pre-compute the autocomplete TF-IDF model."""
    self.textEditor = text
    self.root = root
    self.filename = filename

    # Per-step widget/lookup tables, keyed by step index.
    self.StepType = {}
    self.StepDetail = {}
    self.StepDelete = {}
    self.StepDTButton = {}
    self.StepDTTable = {}
    self.sImages = {}

    # Widgets created lazily by the UI-building code.
    self.sExamplesTable = None
    self.AddBtn = None
    self.tags = None
    self.scrollFrame = None
    self.SubmitBtn = None
    self.CancelBtn = None
    self.scenarioname = None
    self.scenarioType = None
    self.AllStatements = []

    # NOTE(review): both results below look unused, but the calls may have
    # side effects (XML parsing / config read), so they are kept as-is.
    oXml = cXml(self.filename)
    sKeys = self.read_config("XML", "keys")

    # Build the TF-IDF matrix used for suggestion ranking.
    self.autocompl = Autocompleter()
    df = self.autocompl.import_xml(filename)
    self.new_df = self.autocompl.process_data(df)
    self.model_tf, self.tfidf_matrice = self.autocompl.calc_matrice(self.new_df)
def test_hashing_order(self):
    """
    Facets with identical key/values in different order should still have same hash
    """
    ordering_a = [{
        'type': 'or',
        'facets': [
            {'key': 'sector', 'value': 'Technology'},
            {'key': 'industry', 'value': 'Software'},
        ],
    }]
    ordering_b = [{
        'type': 'or',
        'facets': [
            {'key': 'industry', 'value': 'Software'},
            {'key': 'sector', 'value': 'Technology'},
        ],
    }]
    self.assertEqual(
        Autocompleter.hash_facets(ordering_a),
        Autocompleter.hash_facets(ordering_b),
    )
def test_hash_identical_values_different_facet_type(self):
    """
    Facets with same key/values but different facet type in different
    order shouldn't have same hash
    """
    and_variant = [{
        'type': 'and',
        'facets': [
            {'key': 'sector', 'value': 'Technology'},
            {'key': 'industry', 'value': 'Software'},
        ],
    }]
    or_variant = [{
        'type': 'or',
        'facets': [
            {'key': 'industry', 'value': 'Software'},
            {'key': 'sector', 'value': 'Technology'},
        ],
    }]
    self.assertNotEqual(
        Autocompleter.hash_facets(and_variant),
        Autocompleter.hash_facets(or_variant),
    )
def test_signal_based_update(self):
    """
    Registering signals keeps the autocompleter in sync with model
    saves and deletes automatically.
    """
    signal_registry.register(Stock)
    stock = Stock(symbol='AAPL', name='Apple', market_cap=50)
    stock.save()

    completer = Autocompleter("stock")
    self.assertEqual(len(completer.suggest('aapl')), 1)

    # Renaming the stock should drop the old term and index the new one.
    stock.symbol = 'XYZ'
    stock.name = 'XYZ & Co.'
    stock.save()
    self.assertEqual(len(completer.suggest('aapl')), 0)
    self.assertEqual(len(completer.suggest('xyz')), 1)

    # Deleting the object should purge every related redis key.
    stock.delete()
    self.assertEqual(len(self.redis.keys('djac.stock*')), 0)

    signal_registry.unregister(Stock)
def test_hashing_order(self):
    """
    Facets with identical key/values in different order should still have same hash
    """
    tech_first = [{'type': 'or', 'facets': [
        {'key': 'sector', 'value': 'Technology'},
        {'key': 'industry', 'value': 'Software'},
    ]}]
    software_first = [{'type': 'or', 'facets': [
        {'key': 'industry', 'value': 'Software'},
        {'key': 'sector', 'value': 'Technology'},
    ]}]
    hash_a = Autocompleter.hash_facets(tech_first)
    hash_b = Autocompleter.hash_facets(software_first)
    self.assertEqual(hash_a, hash_b)
def test_hash_identical_values_different_facet_type(self):
    """
    Facets with same key/values but different facet type in different
    order shouldn't have same hash
    """
    with_and = [{'type': 'and', 'facets': [
        {'key': 'sector', 'value': 'Technology'},
        {'key': 'industry', 'value': 'Software'},
    ]}]
    with_or = [{'type': 'or', 'facets': [
        {'key': 'industry', 'value': 'Software'},
        {'key': 'sector', 'value': 'Technology'},
    ]}]
    hash_a = Autocompleter.hash_facets(with_and)
    hash_b = Autocompleter.hash_facets(with_or)
    self.assertNotEqual(hash_a, hash_b)
def get(self):
    """Handle an autocomplete request and write the JSON payload."""
    self.response.headers['Content-Type'] = 'text/plain; charset=utf-8'
    query = self.request.get('q')
    logging.info(u"Autocomplete search for '{}'".format(query))

    results = Autocompleter().get_results(query)

    suggestions = []
    for book in results:
        if book is None:
            # Missing records are logged and skipped rather than crashing.
            logging.warning(u"Autocompleter result for '{}' returned None."
                            .format(query))
            continue
        assert isinstance(book, BookRecord)
        suggestions.append({
            'author': book.author,
            'title': book.title,
            'year': book.year,
            'count': book.count,
            'item_ids': book.key.string_id(),
        })

    payload = {
        'query': query,
        'version': AutocompleteJson.CURRENT_VERSION,
        'status': 'completed',
        'suggestions': suggestions,
    }
    self.response.write(json.dumps(payload, indent=2))
class MixedFacetProvidersMatchingTestCase(AutocompleterTestCase):
    """Tests for an autocompleter mixing facet-aware and facet-less providers."""
    fixtures = ['stock_test_data_small.json', 'indicator_test_data_small.json']

    def setUp(self):
        super(MixedFacetProvidersMatchingTestCase, self).setUp()
        self.autocomp = Autocompleter('facet_stock_no_facet_ind')
        self.autocomp.store_all()

    def test_autocompleter_with_facet_and_non_facet_providers(self):
        """
        Autocompleter with facet and non-facet providers works correctly
        """
        registry.set_autocompleter_setting('facet_stock_no_facet_ind', 'MAX_RESULTS', 100)
        sector_filter = [{
            'type': 'and',
            'facets': [{'key': 'sector', 'value': 'Financial Services'}],
        }]
        unfiltered = self.autocomp.suggest('a')
        filtered = self.autocomp.suggest('a', facets=sector_filter)
        # The faceted stock provider honors facets, so filtering shrinks its
        # result set.
        self.assertEqual(len(unfiltered['faceted_stock']), 25)
        self.assertEqual(len(filtered['faceted_stock']), 2)
        # The indicator provider does not support facets, so faceted and
        # non-faceted searches return the same results for it.
        self.assertEqual(len(unfiltered['ind']), 16)
        self.assertEqual(len(unfiltered['ind']), len(filtered['ind']))
        registry.del_autocompleter_setting('facet_stock_no_facet_ind', 'MAX_RESULTS')
def test_rounding_works_correctly(self):
    """
    Rounding works correctly
    """
    # (input, expected) pairs covering both sides of .5 in both signs.
    cases = [
        (.51, 1),
        (.49, 0),
        (-.51, -1),
        (-.49, 0),
    ]
    for value, expected in cases:
        self.assertEqual(expected, Autocompleter.normalize_rounding(value))
def test_rounding_half(self):
    """
    Rounding a number that ends in .5 should produce a number with a
    greater absolute value (round half away from zero).
    """
    cases = [
        (.5, 1),
        (1.5, 2),
        (-.5, -1),
        (-1.5, -2),
    ]
    for value, expected in cases:
        self.assertEqual(expected, Autocompleter.normalize_rounding(value))
def suggest(request, name):
    """Return suggestions for the named autocompleter as a JSON response."""
    # Guard clause: the suggest term is mandatory.
    if settings.SUGGEST_PARAMETER_NAME not in request.GET:
        return HttpResponseServerError("Search parameter not found.")
    term = request.GET[settings.SUGGEST_PARAMETER_NAME]
    results = Autocompleter(name).suggest(term)
    return HttpResponse(json.dumps(results), content_type='application/json')
def test_exact_matches_not_stored_by_default(self):
    """
    Exact matches are not stored by default
    """
    completer = Autocompleter("stock")
    completer.store_all()
    # Neither the per-term exact-match keys nor the exact-match set index
    # should exist when the feature is off.
    self.assertEqual(len(self.redis.keys('djac.test.stock.e.*')), 0)
    self.assertFalse(self.redis.exists('djac.test.stock.es'))
    completer.remove_all()
def get_autocomplete():
    """Flask endpoint: return autocomplete suggestions for the ``query`` arg.

    Response shape matches the jQuery-Autocomplete contract:
    ``{"suggestions": [{"value": ..., "data": ...}, ...]}``.
    """
    query = request.args.get('query')
    autocompleter = Autocompleter()
    results = autocompleter.autocomplete(query)
    # Idiomatic comprehension instead of a manual append loop.
    formatted = [{"value": result, "data": "AE"} for result in results]
    return jsonify({"suggestions": formatted})
def test_multiple_facets_hashing_order(self):
    """
    A facet list with multiple facets should have same hash when
    key/values are identical regardless of order
    """
    tech_group = {'type': 'or', 'facets': [
        {'value': 'Technology', 'key': 'sector'},
        {'value': 'Software', 'key': 'industry'},
    ]}
    energy_group = {'type': 'and', 'facets': [
        {'key': 'sector', 'value': 'Energy'},
        {'key': 'industry', 'value': 'Oil & Gas Integrated'},
    ]}
    energy_group_flipped = {'type': 'and', 'facets': [
        {'key': 'industry', 'value': 'Oil & Gas Integrated'},
        {'key': 'sector', 'value': 'Energy'},
    ]}
    # Reverse the group order and shuffle sub-facets; hash must not change.
    hash_a = Autocompleter.hash_facets([tech_group, energy_group])
    hash_b = Autocompleter.hash_facets([energy_group_flipped, tech_group])
    self.assertEqual(hash_a, hash_b)
class DictProviderMatchingTestCase(AutocompleterTestCase):
    """Matching tests for the dictionary-backed 'metric' provider."""

    def setUp(self):
        super(DictProviderMatchingTestCase, self).setUp()
        self.autocomp = Autocompleter("metric")
        self.autocomp.store_all()

    def tearDown(self):
        self.autocomp.remove_all()

    def test_basic_match(self):
        """A single-character prefix finds exactly one stored metric."""
        self.assertEqual(len(self.autocomp.suggest('m')), 1)
def test_remove_intermediate_results_exact_suggest(self):
    """
    After exact_suggest call, all intermediate result sets are removed
    """
    setattr(auto_settings, 'MAX_EXACT_MATCH_WORDS', 2)
    completer = Autocompleter('stock')
    completer.store_all()
    completer.exact_suggest('aapl')
    # No temporary 'djac.results.*' keys should survive the call.
    self.assertEqual(len(self.redis.keys('djac.results.*')), 0)
    # Restore the setting; it persists across tests otherwise.
    setattr(auto_settings, 'MAX_EXACT_MATCH_WORDS', 0)
def test_store_all_facet_data(self):
    """
    Calling store_all stores all facet data
    """
    completer = Autocompleter("faceted_stock")
    completer.store_all()
    # The Technology sector facet set should contain 12 members.
    sector_set = base.FACET_SET_BASE_NAME % ('faceted_stock', 'sector', 'Technology',)
    self.assertEqual(self.redis.zcard(sector_set), 12)
    # The provider-wide facet map should hold an entry per stored object.
    facet_map = base.FACET_MAP_BASE_NAME % ('faceted_stock',)
    self.assertEqual(len(self.redis.hkeys(facet_map)), 104)
def test_facet_identity_hash(self):
    """
    Hashing a facet should equal the hash of an earlier call
    """
    facets = [{'type': 'or', 'facets': [{'key': 'sector', 'value': 'Technology'}]}]
    self.assertEqual(
        Autocompleter.hash_facets(facets),
        Autocompleter.hash_facets(facets),
    )
def test_store_and_remove_all_basic(self):
    """
    Storing and removing items all at once works for a dictionary obj autocompleter.
    """
    completer = Autocompleter("stock")
    completer.store_all()
    self.assertEqual(len(self.redis.hkeys('djac.test.stock')), 104)
    completer.remove_all()
    # Wildcard check so any auxiliary keys are caught as well.
    self.assertEqual(len(self.redis.keys('djac.test.stock*')), 0)
def get(self):
    """Render a quick HTML debug page exercising the autocompleter.

    Defaults the query to "Tolkien" when no ``q`` parameter is supplied.
    """
    autocompleter = Autocompleter()
    query = self.request.get('q', default_value=u"Tolkien")
    suggestions = autocompleter.get_results(query)
    # Collect fragments and join once: repeated ``+=`` on a string inside a
    # loop is quadratic in CPython-independent terms.
    parts = [u"<p>Behold autocomplete stuff:</p>", u"<pre>"]
    for suggestion in suggestions:
        assert isinstance(suggestion, BookRecord)
        parts.append(u"{} - {} ({})\n".format(
            suggestion.author, suggestion.title, suggestion.year))
    parts.append(u"</pre>")
    render_html(self, "admin_generic.html", u"Testing BQ", u"".join(parts))
def test_store_and_remove_all_basic(self):
    """
    Storing and removing items all the once works for a single-model autocompleter.
    """
    completer = Autocompleter("stock")
    completer.store_all()
    self.assertEqual(len(self.redis.hkeys('djac.stock')), 101)
    completer.remove_all()
    # Wildcard check so any auxiliary keys are caught as well.
    self.assertEqual(len(self.redis.keys('djac.stock*')), 0)
class CalcAutocompleteProviderTestCase(AutocompleterTestCase):
    """One-way and two-way phrase aliasing behavior for the aliased metric provider."""
    fixtures = ['indicator_test_data_small.json']

    def setUp(self):
        super(CalcAutocompleteProviderTestCase, self).setUp()
        self.autocomp = Autocompleter("metric_aliased")
        self.autocomp.store_all()

    def tearDown(self):
        self.autocomp.remove_all()

    def test_one_way_alias_list_creation(self):
        """
        Test that oneway alias lists are created properly
        """
        provider = registry._providers_by_ac["metric_aliased"][0]
        aliases = provider.get_norm_phrase_aliases()
        # 'revenue' maps to aliases; the reverse direction must be absent.
        self.assertTrue('revenue' in aliases)
        self.assertFalse('turnover' in aliases)

    def test_one_way_aliasing(self):
        """
        Aliases in get_one_way_phrase_aliases are not aliased both ways.
        """
        self.assertEqual(len(self.autocomp.suggest('revenue')), 1)
        self.assertEqual(len(self.autocomp.suggest('Turnover')), 2)

    def test_one_way_with_two_way_alias_list_creation(self):
        """
        Two way and one way aliases are both included/treated properly
        """
        provider = registry._providers_by_ac["metric_aliased"][0]
        aliases = provider.get_norm_phrase_aliases()
        for phrase in ('ev', 'enterprise value', 'revenue'):
            self.assertTrue(phrase in aliases)
        self.assertFalse('turnover' in aliases)

    def test_one_way_with_two_way_aliasing(self):
        """
        Aliases in get_one_way_phrase_aliases are not aliased both ways.
        """
        rev_matches = self.autocomp.suggest('revenue')
        turn_matches = self.autocomp.suggest('Turnover')
        self.assertFalse(rev_matches == turn_matches)
        # Two-way aliases must be symmetric.
        ev_matches = self.autocomp.suggest('EV')
        ent_val_matches = self.autocomp.suggest('Enterprise Value')
        self.assertEqual(ev_matches, ent_val_matches)
def test_facet_match_with_move_exact_matches(self):
    """
    Exact matching still works with facet suggest
    """
    setattr(auto_settings, 'MAX_EXACT_MATCH_WORDS', 10)
    completer = Autocompleter('faceted_stock')
    completer.store_all()
    tech_or_software = [{
        'type': 'or',
        'facets': [
            {'key': 'sector', 'value': 'Technology'},
            {'key': 'industry', 'value': 'Software'},
        ],
    }]
    before = completer.suggest('Ma', facets=tech_or_software)
    setattr(auto_settings, 'MOVE_EXACT_MATCHES_TO_TOP', True)
    after = completer.suggest('Ma', facets=tech_or_software)
    # Flipping the setting should reorder the top faceted result.
    self.assertNotEqual(before[0]['search_name'], after[0]['search_name'])
    # Must set the setting back to where it was as it will persist
    setattr(auto_settings, 'MOVE_EXACT_MATCHES_TO_TOP', False)
    completer.remove_all()
def test_facet_identity_hash(self):
    """
    Hashing a facet should equal the hash of an earlier call
    """
    facets = [{
        'type': 'or',
        'facets': [{'key': 'sector', 'value': 'Technology'}],
    }]
    first_hash = Autocompleter.hash_facets(facets)
    # The same input must always hash to the same value.
    self.assertEqual(first_hash, Autocompleter.hash_facets(facets))
class MultiExactMatchTestCase(AutocompleterTestCase):
    """Exact-match behavior for a multi-provider ('mixed') autocompleter."""
    fixtures = ['stock_test_data_small.json', 'indicator_test_data_small.json']

    def setUp(self):
        super(MultiExactMatchTestCase, self).setUp()
        setattr(auto_settings, 'MAX_EXACT_MATCH_WORDS', 10)
        self.autocomp = Autocompleter("mixed")
        self.autocomp.store_all()

    def tearDown(self):
        # Settings persist across tests, so restore the default here.
        setattr(auto_settings, 'MAX_EXACT_MATCH_WORDS', 0)
        self.autocomp.remove_all()

    def test_exact_suggest(self):
        """
        Exact matching works in multi-provider autocompleters
        """
        self.assertEqual(len(self.autocomp.exact_suggest('ma')['stock']), 1)
        self.assertEqual(
            len(self.autocomp.exact_suggest('US Unemployment Rate')['ind']), 1)

    def test_move_exact_matches_to_top(self):
        """
        MOVE_EXACT_MATCHES_TO_TOP works in multi-provider autocompleters
        """
        before = self.autocomp.suggest('Ma')
        setattr(auto_settings, 'MOVE_EXACT_MATCHES_TO_TOP', True)
        after = self.autocomp.suggest('Ma')
        self.assertNotEqual(before['stock'][0]['search_name'],
                            after['stock'][0]['search_name'])
        setattr(auto_settings, 'MOVE_EXACT_MATCHES_TO_TOP', False)
class IndicatorAliasedMatchTestCase(AutocompleterTestCase):
    """Alias-based matching for the aliased indicator provider."""
    fixtures = ['indicator_test_data_small.json']

    def setUp(self):
        # Call super() first so base-class setup (redis connection, etc.) is in
        # place before data is stored; every sibling test case uses this order.
        super(IndicatorAliasedMatchTestCase, self).setUp()
        self.autocomp = Autocompleter("indicator_aliased")
        self.autocomp.store_all()

    def tearDown(self):
        self.autocomp.remove_all()

    def test_aliasing(self):
        """
        Various permutations of aliased matching work
        """
        matches = self.autocomp.suggest('us consumer price index')
        self.assertNotEqual(len(matches), 0)
        matches = self.autocomp.suggest('united states consumer price index')
        self.assertNotEqual(len(matches), 0)
        matches = self.autocomp.suggest('us cpi')
        self.assertNotEqual(len(matches), 0)
        # The original test repeated 'united states consumer price index';
        # the fourth permutation (aliases applied to both phrases) is
        # 'united states cpi'.
        matches = self.autocomp.suggest('united states cpi')
        self.assertNotEqual(len(matches), 0)
def handle(self, *args, **options):
    """Management-command entry point: store/remove/clear the named autocompleter.

    Options: name (autocompleter to act on), remove, store, delete_old,
    clear_cache.
    """
    # Configure logging level. Django accepts verbosity 0-3, so clamp unknown
    # values to DEBUG instead of raising KeyError as a plain dict lookup would.
    verbosity = int(options.get('verbosity', 0))
    level = {
        0: logging.WARN,
        1: logging.INFO,
        2: logging.DEBUG,
    }.get(verbosity, logging.DEBUG)
    logging.basicConfig(level=level, format="%(name)s: %(levelname)s: %(message)s")
    self.log = logging.getLogger('commands.autocompleter_init')

    autocomp = Autocompleter(options["name"])
    if options['remove']:
        self.log.info("Removing all objects for autocompleter: %s" % (options['name']))
        autocomp.remove_all()
    if options['store']:
        delete_old = options['delete_old']
        self.log.info("Storing all objects for autocompleter: %s" % (options['name']))
        autocomp.store_all(delete_old=delete_old)
    if options['clear_cache']:
        self.log.info("Clearing cache for autocompleter: %s" % (options['name']))
        autocomp.clear_cache()
def test_exact_matches_stored_when_turned_on(self):
    """
    We store exact matches when MAX_EXACT_MATCH_WORDS is turned on
    """
    setattr(auto_settings, 'MAX_EXACT_MATCH_WORDS', 10)
    completer = Autocompleter("stock")
    completer.store_all()
    # Exact-match keys and the exact-match set index should now exist.
    self.assertNotEqual(len(self.redis.keys('djac.test.stock.e.*')), 0)
    self.assertTrue(self.redis.exists('djac.test.stock.es'))
    completer.remove_all()
    # Must set the setting back to where it was as it will persist
    setattr(auto_settings, 'MAX_EXACT_MATCH_WORDS', 0)
def get(self, request, name):
    """Suggest view supporting an optional JSON-encoded facets parameter."""
    # Guard clause: the suggest term is mandatory.
    if settings.SUGGEST_PARAMETER_NAME not in request.GET:
        return HttpResponseServerError('Search parameter not found.')
    term = request.GET[settings.SUGGEST_PARAMETER_NAME]
    ac = Autocompleter(name)
    if settings.FACET_PARAMETER_NAME in request.GET:
        facets = json.loads(request.GET[settings.FACET_PARAMETER_NAME])
        if not self.validate_facets(facets):
            return HttpResponseBadRequest('Malformed facet parameter.')
        results = ac.suggest(term, facets=facets)
    else:
        results = ac.suggest(term)
    return HttpResponse(json.dumps(results), content_type='application/json')
def save_consolidated_autocomplete_data(data_slice, offset):
    """Persist one slice of consolidated book data to the search index and NDB.

    data_slice -- rows of raw book data; index 0 corresponds to global row
                  `offset` in the consolidated hashmap.
    offset     -- global index of data_slice[0]. Rows that fall outside this
                  slice are skipped and handled by sibling processes.
    """
    consolidated_records = _ConsolidationHashMap.query().fetch(1000)
    consolidated_books = {}
    for rec in consolidated_records:
        consolidated_books.update(rec.hashmap)
    logging.info("Running save subprocess for {} records, offset={}. "
                 "consolidated_books={}".format(len(data_slice), offset,
                                                len(consolidated_books)))
    autocompleter = Autocompleter()
    docs = []
    records = []
    for book_hash in consolidated_books:
        book = consolidated_books[book_hash]
        # Expected shape: (global_index, item_ids, count)
        assert len(book) == 3
        index = book[0] - offset
        if not (0 <= index < len(data_slice)):
            continue  # not found in slice - a sibling process will take care of this
        row = data_slice[index]
        year = None
        try:
            year = int(unicode(row[3]))
        except (TypeError, ValueError):
            # Missing or non-numeric publication year: keep None.
            # (Was a bare except, which also swallowed unrelated errors.)
            pass
        doc, record = Autocompleter.create_instances_to_be_saved(
            book[1],  # item_ids
            row[1],   # author
            row[2],   # title
            year,     # year
            book[2]   # count
        )
        docs.append(doc)
        if len(docs) >= 200:
            # Flush in batches to stay within API request limits.
            autocompleter.index.put(docs)
            logging.info("{} docs were put into index".format(len(docs)))
            docs = []
        records.append(record)
        if len(records) >= 200:
            ndb.put_multi(records)
            logging.info("{} records were put into storage".format(
                len(records)))
            records = []
    # Flush whatever remains from the batching loop.
    autocompleter.index.put(docs)
    logging.info("{} docs were put into index".format(len(docs)))
    ndb.put_multi(records)
    logging.info("{} records were put into storage".format(len(records)))
class StockExactMatchTestCase(AutocompleterTestCase):
    """Exact-match suggest behavior for the single-provider stock completer."""
    fixtures = ['stock_test_data_small.json']

    def setUp(self):
        super(StockExactMatchTestCase, self).setUp()
        setattr(auto_settings, 'MAX_EXACT_MATCH_WORDS', 10)
        self.autocomp = Autocompleter("stock")
        self.autocomp.store_all()

    def tearDown(self):
        setattr(auto_settings, 'MAX_EXACT_MATCH_WORDS', 0)
        self.autocomp.remove_all()

    def test_exact_suggest(self):
        """
        Exact matching works
        """
        self.assertEqual(len(self.autocomp.exact_suggest('ma')), 1)

    def test_move_exact_matches_to_top_setting(self):
        """
        MOVE_EXACT_MATCHES_TO_TOP works
        """
        before = self.autocomp.suggest('Ma')
        setattr(auto_settings, 'MOVE_EXACT_MATCHES_TO_TOP', True)
        after = self.autocomp.suggest('Ma')
        self.assertNotEqual(before[0]['search_name'], after[0]['search_name'])
        # Must set the setting back to where it was as it will persist
        setattr(auto_settings, 'MOVE_EXACT_MATCHES_TO_TOP', False)

    def test_exact_caching(self):
        """
        Exact caching works
        """
        baseline = self.autocomp.exact_suggest('aapl')
        setattr(auto_settings, 'CACHE_TIMEOUT', 3600)
        # Repeated cached calls must keep returning the same number of results.
        for _ in range(10):
            self.assertEqual(len(baseline), len(self.autocomp.exact_suggest('aapl')))
        # Must set the setting back to where it was as it will persist
        setattr(auto_settings, 'CACHE_TIMEOUT', 0)
def test_provider_specific_max_exact_match_words_setting(self):
    """
    We can store exact matches for 1 individual provider, and not others
    """
    setattr(auto_settings, 'MAX_EXACT_MATCH_WORDS', 10)
    registry.set_provider_setting(IndicatorAutocompleteProvider, 'MAX_EXACT_MATCH_WORDS', 0)
    completer = Autocompleter("mixed")
    completer.store_all()
    # The stock provider inherits the global setting, so exact keys exist...
    self.assertNotEqual(len(self.redis.keys('djac.test.stock.e.*')), 0)
    self.assertTrue(self.redis.exists('djac.test.stock.es'))
    # ...while the indicator provider's override suppresses them.
    self.assertEqual(len(self.redis.keys('djac.test.ind.e.*')), 0)
    self.assertFalse(self.redis.exists('djac.test.ind.es'))
    completer.remove_all()
    registry.del_provider_setting(IndicatorAutocompleteProvider, 'MAX_EXACT_MATCH_WORDS')
    setattr(auto_settings, 'MAX_EXACT_MATCH_WORDS', 0)
def test_store_and_remove_all_multi(self):
    """
    Storing and removing items all the once works for a multi-model autocompleter.
    """
    completer = Autocompleter("mixed")
    completer.store_all()
    self.assertEqual(len(self.redis.hkeys('djac.stock')), 101)
    self.assertEqual(len(self.redis.hkeys('djac.ind')), 100)
    completer.remove_all()
    # Every provider's keys plus the combined completer's keys must be gone.
    for pattern in ('djac.stock*', 'djac.ind*', 'djac.mixed*'):
        self.assertEqual(len(self.redis.keys(pattern)), 0)
def test_hashing_facet_type(self):
    """
    Facet list with same sub facets but different type should not have equal hashes
    """
    sub_facets = [{'key': 'sector', 'value': 'Technology'}]
    and_hash = Autocompleter.hash_facets([{'type': 'and', 'facets': sub_facets}])
    or_hash = Autocompleter.hash_facets([{'type': 'or', 'facets': sub_facets}])
    self.assertNotEqual(and_hash, or_hash)
def test_multiple_facets_hashing_order(self):
    """
    A facet list with multiple facets should have same hash when
    key/values are identical regardless of order
    """
    tech = {'type': 'or', 'facets': [
        {'key': 'sector', 'value': 'Technology'},
        {'key': 'industry', 'value': 'Software'},
    ]}
    energy = {'type': 'and', 'facets': [
        {'key': 'sector', 'value': 'Energy'},
        {'key': 'industry', 'value': 'Oil & Gas Integrated'},
    ]}
    energy_flipped = {'type': 'and', 'facets': [
        {'key': 'industry', 'value': 'Oil & Gas Integrated'},
        {'key': 'sector', 'value': 'Energy'},
    ]}
    # Reversed group order with shuffled sub-facets must hash identically.
    self.assertEqual(
        Autocompleter.hash_facets([tech, energy]),
        Autocompleter.hash_facets([energy_flipped, tech]),
    )
def decompress(self, value):
    """
    Decompress the field's DB value to both widgets <input> fields.
    returns [`display_name_field`, `database_field`]
    """
    if not value:
        # if DB field is empty, return blank values.
        return [None, None]
    provider = self._get_provider()
    completer = Autocompleter(self.autocompleter_name)
    object_data = completer.get_provider_result_from_id(
        provider_name=provider.provider_name,
        object_id=self._get_object_id(value, provider),
    )
    if not object_data:
        raise forms.ValidationError('Unable to retrieve data for "{}"'.format(value))
    # show the `display_name_field` value in the search field.
    return [object_data.get(self.display_name_field), value]
def decompress(self, value):
    """
    Decompress the field's DB value to both widgets <input> fields.
    returns [`display_name_field`, `database_field`]
    """
    if not value:
        # if DB field is empty, return blank values.
        return [None, None]
    provider = self._get_provider()
    object_id = self._get_object_id(value, provider)
    object_data = Autocompleter(self.autocompleter_name).get_provider_result_from_id(
        provider_name=provider.provider_name,
        object_id=object_id,
    )
    if not object_data:
        raise forms.ValidationError(
            'Unable to retrieve data for "{}"'.format(value))
    # show the `display_name_field` value in the search field.
    name = object_data.get(self.display_name_field)
    return [name, value]
def test_hashing_facet_type(self):
    """
    Facet list with same sub facets but different type should not have equal hashes
    """
    tech_sub_facet = {'key': 'sector', 'value': 'Technology'}
    and_facet = [{'type': 'and', 'facets': [tech_sub_facet]}]
    or_facet = [{'type': 'or', 'facets': [tech_sub_facet]}]
    self.assertNotEqual(
        Autocompleter.hash_facets(and_facet),
        Autocompleter.hash_facets(or_facet),
    )
def test_facet_mismatch_with_move_exact_matches(self):
    """
    Exact matching shouldn't move an object that doesn't have a matching facet value
    """
    # This test case depends on very specific fixture data, which is why it
    # issues multiple asserts to check our assumptions along the way.
    setattr(auto_settings, 'MAX_EXACT_MATCH_WORDS', 10)
    completer = Autocompleter('faceted_stock')
    completer.store_all()
    healthcare = [{
        'type': 'and',
        'facets': [
            {'key': 'sector', 'value': 'Healthcare'},
            {'key': 'industry', 'value': 'Healthcare Plans'},
        ],
    }]
    # With the Healthcare facets, 'Un' should match only UnitedHealth Group Inc.
    matches = completer.suggest('Un', facets=healthcare)
    self.assertEqual(len(matches), 1)
    self.assertEqual(matches[0]['search_name'], "UNH")
    # Without facets, MOVE_EXACT_MATCHES_TO_TOP floats Unilever to the top.
    setattr(auto_settings, 'MOVE_EXACT_MATCHES_TO_TOP', True)
    matches = completer.suggest('Un')
    self.assertEqual(matches[0]['search_name'], "UN")
    # With the Healthcare facets plus MOVE_EXACT_MATCHES_TO_TOP we still
    # expect UnitedHealth, since Unilever is in the Consumer Defensive sector.
    matches = completer.suggest('Un', facets=healthcare)
    self.assertEqual(matches[0]['search_name'], "UNH")
    # Must set the setting back to where it was as it will persist
    setattr(auto_settings, 'MOVE_EXACT_MATCHES_TO_TOP', False)
    completer.remove_all()
def test_remove_intermediate_results_suggest(self):
    """
    After suggest call, all intermediate result sets are removed
    """
    completer = Autocompleter('stock')
    completer.store_all()
    completer.suggest('aapl')
    # No temporary 'djac.results.*' keys should survive the call.
    self.assertEqual(len(self.redis.keys('djac.results.*')), 0)
def test_orphan_removal(self):
    """
    Renaming an object removes its stale ("orphan") search terms.
    """
    signal_registry.register(Indicator)
    autocomp = Autocompleter("indicator")
    autocomp.store_all()
    unemployment = Indicator.objects.get(internal_name='unemployment_rate')
    unemployment.name = 'free parking'
    unemployment.save()
    # assertEqual instead of assertTrue(x == y): same check, but a failure
    # reports both values instead of just "False is not true".
    self.assertEqual(autocomp.suggest('free parking')[0]['id'], 1)
    self.assertEqual(len(autocomp.suggest('US Unemployment Rate')), 0)
    autocomp.remove_all()
    signal_registry.unregister(Indicator)
def test_store_and_remove_all_basic(self):
    """
    Storing and removing items all at once works for a dictionary obj autocompleter.
    """
    completer = Autocompleter("stock")
    completer.store_all()
    self.assertEqual(len(self.redis.hkeys('djac.test.stock')), 101)
    completer.remove_all()
    # Wildcard check so any auxiliary keys are caught as well.
    self.assertEqual(len(self.redis.keys('djac.test.stock*')), 0)
def test_dict_store_and_remove_all_basic(self):
    """
    Storing and removing items all at once works for a single-model autocompleter.
    """
    autocomp = Autocompleter("metric")
    autocomp.store_all()
    keys = self.redis.hkeys('djac.test.metric')
    self.assertEqual(len(keys), 8)
    autocomp.remove_all()
    # Use a wildcard pattern — as the other remove_all tests do — so any
    # leftover auxiliary keys (e.g. 'djac.test.metric.*') fail the assertion;
    # the original exact-match pattern could miss them.
    keys = self.redis.keys('djac.test.metric*')
    self.assertEqual(len(keys), 0)