def do_test(self):
    """Index one source and one target document, then verify search hits."""
    fulltext = Fulltext()
    fulltext.storage = self.storage

    source_index = fulltext.get_source_index()
    self.assertIsNotNone(source_index)
    target_index = fulltext.get_target_index('cs')
    self.assertIsNotNone(target_index)

    # Store a single source-side document under pk 1.
    source_writer = source_index.writer()
    source_writer.update_document(
        pk=1,
        source="source",
        context="context",
        location="location",
    )
    source_writer.commit()

    # Store the matching target-side document under the same pk.
    target_writer = target_index.writer()
    target_writer.update_document(
        pk=1,
        target="target",
        comment="comment"
    )
    target_writer.commit()

    # Searching any indexed field for its own name must hit pk 1.
    for field in ('source', 'context', 'location', 'target'):
        self.assertEqual(
            fulltext.search(field, ['cs'], {field: True}),
            set([1])
        )
def handle(self, *args, **options):
    """Update the fulltext indices, optionally optimizing or rebuilding first."""
    fulltext = Fulltext()

    # Optimization is a standalone operation; nothing else runs after it.
    if options['optimize']:
        self.optimize_index(fulltext)
        return

    # Drop existing indices when a clean rebuild was requested.
    if options['clean']:
        fulltext.cleanup()

    source_writer = fulltext.get_source_index().writer()
    # Target writers are opened lazily, keyed by language code.
    target_writers = {}
    try:
        for unit in self.iterate_units(**options):
            lang = unit.translation.language.code
            if lang not in target_writers:
                target_writers[lang] = fulltext.get_target_index(
                    lang).writer()
            if unit.translation:
                fulltext.update_target_unit_index(target_writers[lang], unit)
            fulltext.update_source_unit_index(source_writer, unit)
    finally:
        # Commit every open writer even if iteration failed midway.
        source_writer.commit()
        for writer in target_writers.values():
            writer.commit()
def optimize_fulltext():
    """Optimize the source index and every per-language target index.

    Logs start/completion of each optimization step, matching the logging
    done by the other optimize_fulltext implementation in this file, so
    long-running optimizations are traceable.
    """
    SEARCH_LOGGER.info("starting optimizing source index")
    fulltext = Fulltext()
    index = fulltext.get_source_index()
    index.optimize()
    SEARCH_LOGGER.info("completed optimizing source index")
    # Only languages that actually have translations carry a target index.
    languages = Language.objects.have_translation()
    for lang in languages:
        SEARCH_LOGGER.info("starting optimizing %s index", lang.code)
        index = fulltext.get_target_index(lang.code)
        index.optimize()
        SEARCH_LOGGER.info("completed optimizing %s index", lang.code)
def optimize_fulltext():
    """Optimize all fulltext indices, logging progress per index."""
    fulltext = Fulltext()

    SEARCH_LOGGER.info("starting optimizing source index")
    fulltext.get_source_index().optimize()
    SEARCH_LOGGER.info("completed optimizing source index")

    # One target index exists per language that has translations.
    for language in Language.objects.have_translation():
        code = language.code
        SEARCH_LOGGER.info("starting optimizing %s index", code)
        fulltext.get_target_index(code).optimize()
        SEARCH_LOGGER.info("completed optimizing %s index", code)
def test_cleanup(self):
    """cleanuptrans drops index entries once all translations are removed."""
    saved_fake = Fulltext.FAKE
    # Force a real index so stored fields can actually be counted.
    Fulltext.FAKE = False
    fulltext = Fulltext()
    try:
        self.create_component()
        index = fulltext.get_source_index()
        stored = list(index.reader().all_stored_fields())
        self.assertEqual(len(stored), 16)
        # Remove all translations
        Translation.objects.all().delete()
        call_command('cleanuptrans')
        self.assertEqual(len(list(index.reader().all_stored_fields())), 0)
    finally:
        # Restore the original fake-index setting for other tests.
        Fulltext.FAKE = saved_fake
def cleanup_fulltext():
    """Remove stale units from fulltext"""
    fulltext = Fulltext()
    # Walk every per-language target index plus the source index,
    # which is marked by a None language code.
    languages = list(Language.objects.values_list('code', flat=True)) + [None]
    for lang in languages:
        if lang is None:
            index = fulltext.get_source_index()
        else:
            index = fulltext.get_target_index(lang)
        try:
            fields = index.reader().all_stored_fields()
        except EmptyIndexError:
            # Nothing has been indexed for this language yet.
            continue
        for item in fields:
            pk = item['pk']
            # Drop entries whose unit no longer exists in the database.
            if not Unit.objects.filter(pk=pk).exists():
                fulltext.clean_search_unit(pk, lang)
def test_cleanup(self):
    """cleanuptrans removes dangling suggestions and stale index entries."""
    saved_fake = Fulltext.FAKE
    # Use a real index so stored fields can be counted.
    Fulltext.FAKE = False
    fulltext = Fulltext()
    try:
        component = self.create_component()
        index = fulltext.get_source_index()
        stored = list(index.reader().all_stored_fields())
        self.assertEqual(len(stored), 16)
        # Create dangling suggestion
        Suggestion.objects.create(
            project=component.project,
            content_hash=1,
            language=component.translation_set.all()[0].language,
        )
        # Remove all translations
        Translation.objects.all().delete()
        call_command('cleanuptrans')
        self.assertEqual(Suggestion.objects.count(), 0)
        self.assertEqual(len(list(index.reader().all_stored_fields())), 0)
    finally:
        # Restore the original fake-index setting for other tests.
        Fulltext.FAKE = saved_fake
def test_cleanup(self):
    """cleanuptrans purges suggestions, Source rows and index entries."""
    saved_fake = Fulltext.FAKE
    # Use a real index so stored fields can be counted.
    Fulltext.FAKE = False
    fulltext = Fulltext()
    try:
        component = self.create_component()
        index = fulltext.get_source_index()
        stored = list(index.reader().all_stored_fields())
        self.assertEqual(len(stored), 12)
        # Create dangling suggestion
        Suggestion.objects.create(
            project=component.project,
            content_hash=1,
            language=component.translation_set.all()[0].language,
        )
        # Remove all translations
        Translation.objects.all().delete()
        call_command('cleanuptrans')
        self.assertEqual(Suggestion.objects.count(), 0)
        self.assertEqual(Source.objects.count(), 0)
        self.assertEqual(len(list(index.reader().all_stored_fields())), 0)
    finally:
        # Restore the original fake-index setting for other tests.
        Fulltext.FAKE = saved_fake
def handle(self, *args, **options):
    """Rebuild or optimize the fulltext indices for all processed units."""
    fulltext = Fulltext()

    if options['optimize']:
        # Optimization replaces the regular update run entirely.
        self.optimize_index(fulltext)
        return

    if options['clean']:
        # Start over from empty indices.
        fulltext.cleanup()

    source_writer = fulltext.get_source_index().writer()
    target_writers = {}

    def writer_for(code):
        # Open a target-index writer on first use for the given language.
        if code not in target_writers:
            target_writers[code] = fulltext.get_target_index(code).writer()
        return target_writers[code]

    try:
        for unit in self.iterate_units(**options):
            lang = unit.translation.language.code
            writer = writer_for(lang)
            if unit.translation:
                fulltext.update_target_unit_index(writer, unit)
            fulltext.update_source_unit_index(source_writer, unit)
    finally:
        # Writers must be committed even when iteration fails midway.
        source_writer.commit()
        for target_writer in target_writers.values():
            target_writer.commit()