def refresh(self, timesleep=0):
    """Refresh the ES index so pending writes become queryable.

    A refresh is almost always preceded by making sure the index is
    ready; with live indexing we'd also run all generated tasks first.
    """
    target_index = get_index()
    # TODO: uncomment this when we have live indexing.
    # generate_tasks()
    get_indexing_es().refresh(target_index, timesleep=timesleep)
def setup_indexes(self, empty=False, wait=True):
    """(Re-)create ES indexes.

    :arg empty: if True, the new index is created with no documents in
        it (by abusing the ``percent`` argument); otherwise existing
        data is reindexed into it.
    :arg wait: if True, block until the cluster reports at least
        yellow health.
    """
    from fjord.search.index import es_reindex_cmd

    # percent=0 drops the index and rebuilds it with nothing in it;
    # omitting the argument reindexes existing data as well.
    reindex_kwargs = {'percent': 0} if empty else {}
    es_reindex_cmd(**reindex_kwargs)

    self.refresh()

    if wait:
        get_indexing_es().health(wait_for_status='yellow')
def setUpClass(cls):
    """Skip ES tests unless ES is configured and reachable, then
    switch to a test-specific index prefix."""
    super(ElasticTestCase, cls).setUpClass()

    # Skip when ES_URLS is absent/empty, or when the cluster doesn't
    # answer a health check.
    skip = not getattr(settings, 'ES_URLS', None)
    if not skip:
        try:
            get_indexing_es().health()
        except (Timeout, ConnectionError):
            skip = True
    if skip:
        cls.skipme = True
        return

    # Remember the real prefix so teardown can restore it.
    cls._old_es_index_prefix = settings.ES_INDEX_PREFIX
    settings.ES_INDEX_PREFIX = settings.ES_INDEX_PREFIX + 'test'
def setUpClass(cls):
    """Skip ES tests unless ES is configured and reachable, then
    switch to a test-specific index prefix."""
    super(ElasticTestCase, cls).setUpClass()
    # Use a None default: without it, a settings module that doesn't
    # define ES_HOSTS would raise AttributeError here instead of
    # skipping the ElasticTestCases (the ES_URLS variant of this
    # method already does this correctly).
    if not getattr(settings, 'ES_HOSTS', None):
        cls.skipme = True
        return

    # try to connect to ES and if it fails, skip ElasticTestCases.
    try:
        get_indexing_es().collect_info()
    except pyes.urllib3.MaxRetryError:
        cls.skipme = True
        return

    # Remember the real prefix so teardown can restore it.
    cls._old_es_index_prefix = settings.ES_INDEX_PREFIX
    settings.ES_INDEX_PREFIX = settings.ES_INDEX_PREFIX + 'test'
def teardown_indexes(self):
    """Delete the ES index, tolerating it already being gone."""
    try:
        get_indexing_es().delete_index(get_index())
    except ElasticHttpNotFoundError:
        # The index didn't exist, so there's nothing to delete.
        pass
def teardown_indexes(self):
    """Remove the ES index if it exists."""
    index_name = get_index()
    es_client = get_indexing_es()
    es_client.delete_index_if_exists(index_name)