def refresh(self, run_tasks=True):
    es = get_es()

    if run_tasks:
        # Any time we're doing a refresh, we're making sure that
        # the index is ready to be queried. Given that, it's
        # almost always the case that we want to run all the
        # generated tasks, then refresh.
        generate_tasks()

    for index in es_utils.all_write_indexes():
        es.indices.refresh(index=index)

    es.cluster.health(wait_for_status='yellow')
def teardown_indexes(self):
    """Tear down write index"""
    for index in es_utils.all_write_indexes():
        es_utils.delete_index(index)
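# refresh() and teardown_indexes() read as mixin methods for an
# Elasticsearch-backed test case: the first runs any queued indexing tasks and
# refreshes every write index so queries see fresh documents, the second drops
# the write indexes between tests. A minimal sketch of pairing them with a
# Django test case follows; the class name and wiring are assumptions for
# illustration, not taken from this module.

from django.test import TestCase


class ElasticTestCase(TestCase):
    """Hypothetical base class pairing the helpers above with tearDown."""

    # Reuse the module-level helpers as methods of the test case.
    refresh = refresh
    teardown_indexes = teardown_indexes

    def tearDown(self):
        self.teardown_indexes()  # drop the write indexes created during the test
        super(ElasticTestCase, self).tearDown()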
def search(request):
    """Render the admin view containing search tools"""
    if not request.user.has_perm('search.reindex'):
        raise PermissionDenied

    error_messages = []
    stats = {}

    if 'reset' in request.POST:
        try:
            return handle_reset(request)
        except ReindexError as e:
            error_messages.append(u'Error: %s' % e.message)

    if 'reindex' in request.POST:
        try:
            return handle_reindex(request)
        except ReindexError as e:
            error_messages.append(u'Error: %s' % e.message)

    if 'recreate_index' in request.POST:
        try:
            return handle_recreate_index(request)
        except ReindexError as e:
            error_messages.append(u'Error: %s' % e.message)

    if 'delete_index' in request.POST:
        try:
            return handle_delete(request)
        except DeleteError as e:
            error_messages.append(u'Error: %s' % e.message)
        except ES_EXCEPTIONS as e:
            error_messages.append('Error: {0}'.format(repr(e)))

    stats = None
    write_stats = None
    es_deets = None
    indexes = []
    outstanding_chunks = None

    try:
        # TODO: SUMO has a single ES_URL and that's the ZLB and does
        # the balancing. If that ever changes and we have multiple
        # ES_URLs, then this should get fixed.
        es_deets = requests.get(settings.ES_URLS[0]).json()
    except requests.exceptions.RequestException:
        pass

    stats = {}
    for index in all_read_indexes():
        try:
            stats[index] = get_doctype_stats(index)
        except ES_EXCEPTIONS:
            stats[index] = None

    write_stats = {}
    for index in all_write_indexes():
        try:
            write_stats[index] = get_doctype_stats(index)
        except ES_EXCEPTIONS:
            write_stats[index] = None

    try:
        indexes = get_indexes()
        indexes.sort(key=lambda m: m[0])
    except ES_EXCEPTIONS as e:
        error_messages.append('Error: {0}'.format(repr(e)))

    try:
        client = redis_client('default')
        outstanding_chunks = int(client.get(OUTSTANDING_INDEX_CHUNKS))
    except (RedisError, TypeError):
        pass

    recent_records = Record.uncached.order_by('-starttime')[:100]
    outstanding_records = (Record.uncached.filter(endtime__isnull=True)
                           .order_by('-starttime'))

    index_groups = set(settings.ES_INDEXES.keys())
    index_groups |= set(settings.ES_WRITE_INDEXES.keys())

    index_group_data = [[group, read_index(group), write_index(group)]
                        for group in index_groups]

    return render(
        request,
        'admin/search_maintenance.html',
        {'title': 'Search',
         'es_deets': es_deets,
         'doctype_stats': stats,
         'doctype_write_stats': write_stats,
         'indexes': indexes,
         'index_groups': index_groups,
         'index_group_data': index_group_data,
         'read_indexes': all_read_indexes,
         'write_indexes': all_write_indexes,
         'error_messages': error_messages,
         'recent_records': recent_records,
         'outstanding_records': outstanding_records,
         'outstanding_chunks': outstanding_chunks,
         'now': datetime.now(),
         'read_index': read_index,
         'write_index': write_index,
         })
def search(request):
    """Render the admin view containing search tools"""
    if not request.user.has_perm('search.reindex'):
        raise PermissionDenied

    error_messages = []
    stats = {}

    if 'reset' in request.POST:
        try:
            return handle_reset(request)
        except ReindexError as e:
            error_messages.append('Error: %s' % e.message)

    if 'reindex' in request.POST:
        try:
            return handle_reindex(request)
        except ReindexError as e:
            error_messages.append('Error: %s' % e.message)

    if 'recreate_index' in request.POST:
        try:
            return handle_recreate_index(request)
        except ReindexError as e:
            error_messages.append('Error: %s' % e.message)

    if 'delete_index' in request.POST:
        try:
            return handle_delete(request)
        except DeleteError as e:
            error_messages.append('Error: %s' % e.message)
        except ES_EXCEPTIONS as e:
            error_messages.append('Error: {0}'.format(repr(e)))

    stats = None
    write_stats = None
    es_deets = None
    indexes = []

    try:
        # TODO: SUMO has a single ES_URL and that's the ZLB and does
        # the balancing. If that ever changes and we have multiple
        # ES_URLs, then this should get fixed.
        es_deets = requests.get(settings.ES_URLS[0]).json()
    except requests.exceptions.RequestException:
        pass

    stats = {}
    for index in all_read_indexes():
        try:
            stats[index] = get_doctype_stats(index)
        except ES_EXCEPTIONS:
            stats[index] = None

    write_stats = {}
    for index in all_write_indexes():
        try:
            write_stats[index] = get_doctype_stats(index)
        except ES_EXCEPTIONS:
            write_stats[index] = None

    try:
        indexes = get_indexes()
        indexes.sort(key=lambda m: m[0])
    except ES_EXCEPTIONS as e:
        error_messages.append('Error: {0}'.format(repr(e)))

    recent_records = Record.objects.all()[:100]
    outstanding_records = Record.objects.outstanding()

    index_groups = set(settings.ES_INDEXES.keys())
    index_groups |= set(settings.ES_WRITE_INDEXES.keys())

    index_group_data = [[group, read_index(group), write_index(group)]
                        for group in index_groups]

    return render(
        request,
        'admin/search_maintenance.html',
        {
            'title': 'Search',
            'es_deets': es_deets,
            'doctype_stats': stats,
            'doctype_write_stats': write_stats,
            'indexes': indexes,
            'index_groups': index_groups,
            'index_group_data': index_group_data,
            'read_indexes': all_read_indexes,
            'write_indexes': all_write_indexes,
            'error_messages': error_messages,
            'recent_records': recent_records,
            'outstanding_records': outstanding_records,
            'now': datetime.now(),
            'read_index': read_index,
            'write_index': write_index,
        })
def search(request):
    """Render the admin view containing search tools"""
    if not request.user.has_perm("search.reindex"):
        raise PermissionDenied

    error_messages = []
    stats = {}

    if "reset" in request.POST:
        try:
            return handle_reset(request)
        except ReindexError as e:
            error_messages.append(u"Error: %s" % e.message)

    if "reindex" in request.POST:
        try:
            return handle_reindex(request)
        except ReindexError as e:
            error_messages.append(u"Error: %s" % e.message)

    if "recreate_index" in request.POST:
        try:
            return handle_recreate_index(request)
        except ReindexError as e:
            error_messages.append(u"Error: %s" % e.message)

    if "delete_index" in request.POST:
        try:
            return handle_delete(request)
        except DeleteError as e:
            error_messages.append(u"Error: %s" % e.message)
        except ES_EXCEPTIONS as e:
            error_messages.append("Error: {0}".format(repr(e)))

    stats = None
    write_stats = None
    es_deets = None
    indexes = []
    outstanding_chunks = None

    try:
        # TODO: SUMO has a single ES_URL and that's the ZLB and does
        # the balancing. If that ever changes and we have multiple
        # ES_URLs, then this should get fixed.
        es_deets = requests.get(settings.ES_URLS[0]).json()
    except requests.exceptions.RequestException:
        pass

    stats = {}
    for index in all_read_indexes():
        try:
            stats[index] = get_doctype_stats(index)
        except ES_EXCEPTIONS:
            stats[index] = None

    write_stats = {}
    for index in all_write_indexes():
        try:
            write_stats[index] = get_doctype_stats(index)
        except ES_EXCEPTIONS:
            write_stats[index] = None

    try:
        indexes = get_indexes()
        indexes.sort(key=lambda m: m[0])
    except ES_EXCEPTIONS as e:
        error_messages.append("Error: {0}".format(repr(e)))

    try:
        client = redis_client("default")
        outstanding_chunks = int(client.get(OUTSTANDING_INDEX_CHUNKS))
    except (RedisError, TypeError):
        pass

    recent_records = Record.objects.order_by("-starttime")[:100]
    outstanding_records = Record.objects.filter(endtime__isnull=True).order_by("-starttime")

    index_groups = set(settings.ES_INDEXES.keys())
    index_groups |= set(settings.ES_WRITE_INDEXES.keys())

    index_group_data = [
        [group, read_index(group), write_index(group)] for group in index_groups
    ]

    return render(
        request,
        "admin/search_maintenance.html",
        {
            "title": "Search",
            "es_deets": es_deets,
            "doctype_stats": stats,
            "doctype_write_stats": write_stats,
            "indexes": indexes,
            "index_groups": index_groups,
            "index_group_data": index_group_data,
            "read_indexes": all_read_indexes,
            "write_indexes": all_write_indexes,
            "error_messages": error_messages,
            "recent_records": recent_records,
            "outstanding_records": outstanding_records,
            "outstanding_chunks": outstanding_chunks,
            "now": datetime.now(),
            "read_index": read_index,
            "write_index": write_index,
        },
    )
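# The search() view above is a plain permission-guarded Django view rather than
# a ModelAdmin, so it has to be routed explicitly. A minimal sketch of exposing
# it through a URLconf follows; the URL path and name are assumptions, and the
# real project may register it through its admin tooling instead.

from django.urls import path

urlpatterns = [
    path("admin/search-maintenance/", search, name="search-maintenance"),
]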