def reindex(doc_type=None):
    '''
    Reindex models.

    :param doc_type: optional model/doc-type name; when given, only that
        model is fully reindexed and every other model's documents are
        copied as-is into the new index. When ``None``, all models are
        reindexed from the database.
    '''
    doc_types_names = [m.__name__.lower() for m in adapter_catalog.keys()]
    if doc_type and doc_type.lower() not in doc_types_names:
        # NOTE(review): original behavior only logs and keeps going;
        # sibling commands use sys.exit(-1) here — confirm intent.
        log.error('Unknown type %s', doc_type)
    index_name = default_index_name()
    log.info('Initializing index "{0}"'.format(index_name))
    es.initialize(index_name)
    for adapter in iter_adapters():
        # Guard against doc_type being None (the original crashed with
        # AttributeError on doc_type.lower()); no doc_type means
        # "reindex everything", matching the other index commands.
        if not doc_type or adapter.doc_type().lower() == doc_type.lower():
            index_model(index_name, adapter)
        else:
            log.info('Copying {0} objects to the new index'.format(
                adapter.model.__name__))
            # Need upgrade to Elasticsearch-py 5.0.0
            # es.reindex({
            #     'source': {'index': es.index_name, 'type': adapter.doc_type()},
            #     'dest': {'index': index_name}
            # })
            # http://elasticsearch-py.readthedocs.io/en/master/api.html#elasticsearch.Elasticsearch.reindex
            # This method (introduced in Elasticsearch 2.3 but only in Elasticsearch-py 5.0.0)
            # triggers a server-side documents copy.
            # Instead we use this helper meant for backward compatibility
            # but with poor performance as copy is client-side (scan+bulk)
            es_reindex(es.client, es.index_name, index_name,
                       scan_kwargs={'doc_type': adapter.doc_type()})
    # Point the production alias at the freshly-built index
    set_alias(index_name)
def init():
    '''Initialize or update data and indexes'''
    # print() with a single argument behaves identically under Python 2
    # (as an expression in parentheses) and Python 3 — the original used
    # Python 2 print statements.
    print('Initialize or update ElasticSearch mappings')
    es.initialize()
    # The remaining steps are placeholders not yet implemented
    print('TODO: Apply DB migrations if needed')
    print('TODO: Feed initial data if needed')
    print('TODO: create an user if needed')
def autoindex(app, clean_db):
    '''
    Fixture providing auto-indexing against a clean ElasticSearch.

    Clears ElasticSearch before and after the test, initializes the
    mappings and waits for the cluster to be usable.
    '''
    app.config.update(AUTO_INDEX=True)
    _clean_es()
    es.initialize()
    # Block until the cluster reaches at least "yellow" health
    health_kwargs = dict(wait_for_status='yellow', request_timeout=10)
    es.cluster.health(**health_kwargs)
    yield AutoIndex()
    # Teardown: wipe everything the test indexed
    _clean_es()
def autoindex(app, clean_db):
    '''
    Fixture providing auto-indexing for tests.

    Enables automatic indexing, prepares the mappings, waits for the
    cluster, then deletes the test index on teardown if it was created.
    '''
    app.config.update(AUTO_INDEX=True)
    es.initialize()
    # Block until the cluster reaches at least "yellow" health
    es.cluster.health(wait_for_status='yellow', request_timeout=10)
    yield AutoIndex()
    # Teardown: remove the index only if the test actually created it
    index = es.index_name
    if es.indices.exists(index=index):
        es.indices.delete(index=index)
def run(self):
    '''
    Drop the current search index and rebuild it from scratch by
    serializing every object of every registered model.
    '''
    # Unified on print() calls — the original mixed the Python 2 print
    # statement with the function form in the same body. Single-argument
    # print() is equivalent under both Python 2 and 3.
    print('Deleting index {0}'.format(es.index_name))
    if es.indices.exists(index=es.index_name):
        es.indices.delete(index=es.index_name)
    es.initialize()
    for model, adapter in adapter_catalog.items():
        print('Reindexing {0} objects'.format(model.__name__))
        for obj in model.objects:
            es.index(index=es.index_name, doc_type=adapter.doc_type(),
                     id=obj.id, body=adapter.serialize(obj))
    # Make the freshly indexed documents visible to searches
    es.indices.refresh(index=es.index_name)
def init():
    '''Initialize or update data and indexes'''
    # Run database migrations first so the schema matches the code
    log.info('Apply DB migrations if needed')
    migrate(record=True)
    # Then (re)create the search mappings on top of the migrated data
    log.info('Initialize or update ElasticSearch mappings')
    es.initialize()
    # Remaining bootstrap steps are not implemented yet
    log.info('%s: Feed initial data if needed', yellow('TODO'))
    log.info('%s: Create an administrator', yellow('TODO'))
def index(models=None, name=None, force=False, keep=False, timeout=None):
    '''
    Initialize or rebuild the search index

    Models to reindex can optionally be specified as arguments.
    If not, all models are reindexed.

    :param models: optional list of model names to reindex (others are copied)
    :param name: optional explicit index name (defaults to a dated name)
    :param force: skip the interactive confirmation before deleting
    :param keep: keep the previous index instead of deleting it
    :param timeout: ElasticSearch request timeout in seconds
    '''
    index_name = name or default_index_name()
    timeout = timeout or current_app.config['ELASTICSEARCH_INDEX_TIMEOUT']
    doc_types_names = [m.__name__.lower() for m in adapter_catalog.keys()]
    # Accept plural forms on the command line (e.g. "datasets" -> "dataset")
    models = [model.lower().rstrip('s') for model in (models or [])]
    for model in models:
        if model not in doc_types_names:
            log.error('Unknown model %s', model)
            sys.exit(-1)
    # Typo fix: message previously read "Initiliazing"
    log.info('Initializing index "%s"', index_name)
    if es.indices.exists(index_name):
        if IS_TTY and not force:
            msg = 'Index {0} will be deleted, are you sure?'
            click.confirm(msg.format(index_name), abort=True)
        es.indices.delete(index_name, request_timeout=timeout)
    es.initialize(index_name)
    with handle_error(index_name, keep, timeout):
        # Refresh is disabled during bulk indexing for performance
        disable_refresh(index_name, timeout)
        for adapter in iter_adapters():
            if not models or adapter.doc_type().lower() in models:
                index_model(index_name, adapter, timeout)
            else:
                log.info('Copying %s objects to the new index',
                         adapter.model.__name__)
                # Need upgrade to Elasticsearch-py 5.0.0 to write:
                # es.reindex({
                #     'source': {'index': es.index_name, 'type': adapter.doc_type()},
                #     'dest': {'index': index_name}
                # })
                #
                # http://elasticsearch-py.readthedocs.io/en/master/api.html#elasticsearch.Elasticsearch.reindex
                # This method (introduced in Elasticsearch 2.3 but only in Elasticsearch-py 5.0.0)
                # triggers a server-side documents copy.
                # Instead we use this helper meant for backward compatibility
                # but with poor performance as copy is client-side (scan+bulk)
                es_reindex(es.client, es.index_name, index_name,
                           scan_kwargs={'doc_type': adapter.doc_type()},
                           bulk_kwargs={'request_timeout': timeout})
        enable_refresh(index_name, timeout)
    # At this step, we don't want error handler to delete the index
    # in case of error
    set_alias(index_name, delete=not keep, timeout=timeout)
def index(models=None, name=None, force=False, keep=False):
    '''
    Initialize or rebuild the search index.

    :param models: optional list of model names to reindex (others are copied)
    :param name: optional explicit index name (defaults to a dated name)
    :param force: skip the interactive confirmation before deleting
    :param keep: keep the previous index instead of deleting it
    '''
    index_name = name or default_index_name()
    doc_types_names = [m.__name__.lower() for m in adapter_catalog.keys()]
    # Accept plural forms on the command line (e.g. "datasets" -> "dataset")
    models = [model.lower().rstrip('s') for model in (models or [])]
    for model in models:
        if model not in doc_types_names:
            log.error('Unknown model %s', model)
            sys.exit(-1)
    # Typo fix: message previously read "Initiliazing"
    log.info('Initializing index "{0}"'.format(index_name))
    if es.indices.exists(index_name):
        if IS_INTERACTIVE and not force:
            msg = 'Index {0} will be deleted, are you sure?'
            delete = prompt_bool(msg.format(index_name))
        else:
            delete = True
        if delete:
            es.indices.delete(index_name)
        else:
            sys.exit(-1)
    es.initialize(index_name)
    with handle_error(index_name, keep):
        # Refresh is disabled during bulk indexing for performance
        disable_refresh(index_name)
        for adapter in iter_adapters():
            if not models or adapter.doc_type().lower() in models:
                index_model(index_name, adapter)
            else:
                log.info('Copying {0} objects to the new index'.format(
                    adapter.model.__name__))
                # Need upgrade to Elasticsearch-py 5.0.0 to write:
                # es.reindex({
                #     'source': {'index': es.index_name, 'type': adapter.doc_type()},
                #     'dest': {'index': index_name}
                # })
                #
                # http://elasticsearch-py.readthedocs.io/en/master/api.html#elasticsearch.Elasticsearch.reindex
                # This method (introduced in Elasticsearch 2.3 but only in Elasticsearch-py 5.0.0)
                # triggers a server-side documents copy.
                # Instead we use this helper meant for backward compatibility
                # but with poor performance as copy is client-side (scan+bulk)
                es_reindex(es.client, es.index_name, index_name,
                           scan_kwargs={'doc_type': adapter.doc_type()})
        enable_refresh(index_name)
    # At this step, we don't want error handler to delete the index
    # in case of error
    set_alias(index_name, delete=not keep)
def index(models=None, name=None, force=False, keep=False):
    '''
    Initialize or rebuild the search index

    Models to reindex can optionally be specified as arguments.
    If not, all models are reindexed.

    :param models: optional list of model names to reindex (others are copied)
    :param name: optional explicit index name (defaults to a dated name)
    :param force: skip the interactive confirmation before deleting
    :param keep: keep the previous index instead of deleting it
    '''
    index_name = name or default_index_name()
    doc_types_names = [m.__name__.lower() for m in adapter_catalog.keys()]
    # Accept plural forms on the command line (e.g. "datasets" -> "dataset")
    models = [model.lower().rstrip('s') for model in (models or [])]
    for model in models:
        if model not in doc_types_names:
            log.error('Unknown model %s', model)
            sys.exit(-1)
    # Typo fix: message previously read "Initiliazing"
    log.info('Initializing index "{0}"'.format(index_name))
    if es.indices.exists(index_name):
        if IS_TTY and not force:
            msg = 'Index {0} will be deleted, are you sure?'
            click.confirm(msg.format(index_name), abort=True)
        es.indices.delete(index_name)
    es.initialize(index_name)
    with handle_error(index_name, keep):
        # Refresh is disabled during bulk indexing for performance
        disable_refresh(index_name)
        for adapter in iter_adapters():
            if not models or adapter.doc_type().lower() in models:
                index_model(index_name, adapter)
            else:
                log.info('Copying {0} objects to the new index'.format(
                    adapter.model.__name__))
                # Need upgrade to Elasticsearch-py 5.0.0 to write:
                # es.reindex({
                #     'source': {'index': es.index_name, 'type': adapter.doc_type()},
                #     'dest': {'index': index_name}
                # })
                #
                # http://elasticsearch-py.readthedocs.io/en/master/api.html#elasticsearch.Elasticsearch.reindex
                # This method (introduced in Elasticsearch 2.3 but only in Elasticsearch-py 5.0.0)
                # triggers a server-side documents copy.
                # Instead we use this helper meant for backward compatibility
                # but with poor performance as copy is client-side (scan+bulk)
                es_reindex(es.client, es.index_name, index_name, scan_kwargs={
                    'doc_type': adapter.doc_type()
                })
        enable_refresh(index_name)
    # At this step, we don't want error handler to delete the index
    # in case of error
    set_alias(index_name, delete=not keep)
def init():
    '''Initialize or update data and indexes'''
    # (Re)create the search mappings first
    log.info('Initialize or update ElasticSearch mappings')
    es.initialize()
    # Populate the development environment with sample data
    log.info('Build sample fixture data')
    generate_fixtures()
    # Bring the database schema up to date
    log.info('Apply DB migrations if needed')
    migrate(record=True)
    # Not implemented yet
    log.info('%s: Feed initial data if needed', yellow('TODO'))
def autoindex(app, clean_db):
    '''
    Fixture enabling auto-indexing for tests.

    Yields a context-manager factory: code run inside the returned
    context is followed by an index refresh, making freshly indexed
    documents visible to subsequent searches.
    '''
    app.config.update(AUTO_INDEX=True)
    es.initialize()
    # Block until the cluster reaches at least "yellow" health
    es.cluster.health(wait_for_status='yellow', request_timeout=10)

    @contextmanager
    def refresher():
        # Let the caller's block run first, then force a refresh
        yield
        es.indices.refresh(index=es.index_name)

    yield refresher

    # Teardown: drop the test index if it was created
    if es.indices.exists(index=es.index_name):
        es.indices.delete(index=es.index_name)
def init(name=None, delete=False, force=False):
    '''
    Initialize or rebuild the search index.

    :param name: optional explicit index name (defaults to a dated name)
    :param delete: delete the previously aliased index after switching
    :param force: skip the interactive confirmation before deleting
    '''
    index_name = name or default_index_name()
    # Typo fix: message previously read "Initiliazing"
    log.info('Initializing index "{0}"'.format(index_name))
    if es.indices.exists(index_name):
        if force or prompt_bool(
                ('Index {0} will be deleted, are you sure ?'.format(index_name))):
            es.indices.delete(index_name)
        else:
            # NOTE(review): builtin exit() is meant for interactive use;
            # sibling commands call sys.exit(-1) — confirm and align.
            exit(-1)
    es.initialize(index_name)
    for adapter in iter_adapters():
        index_model(index_name, adapter)
    # Point the production alias at the freshly-built index
    set_alias(index_name, delete=delete)
def init_search(self):
    '''Enable auto-indexing and wait for a usable ElasticSearch cluster.'''
    self._used_search = True
    config = self.app.config
    config['AUTO_INDEX'] = True
    es.initialize()
    # Block until the cluster reaches at least "yellow" health
    es.cluster.health(wait_for_status='yellow', request_timeout=10)
def autoindex(self):
    '''
    Context manager enabling auto-indexing.

    Code run inside the context is followed by an index refresh so
    newly indexed documents become visible to searches.
    '''
    self._used_search = True
    config = self.app.config
    config['AUTO_INDEX'] = True
    es.initialize()
    # Let the caller's block run, then force the refresh
    yield
    es.indices.refresh(index=es.index_name)
def init_search(self):
    '''Enable automatic indexing for the wrapped application.'''
    self._used_search = True
    self.app.config.update(AUTO_INDEX=True)
    es.initialize()