Example #1
    def test_mappings(self):
        # This is more of a linter than a test. If it passes,
        # everything is fine; if it fails, two mappings that share an
        # index define a field with the same name but different
        # types, which Elasticsearch doesn't allow.

        # Running this check as a test catches these serious problems
        # early without adding overhead to the runtime code.

        # Verify mappings that share the same index don't conflict
        for index in es_utils.all_read_indexes():
            merged_mapping = {}

            for cls_name, mapping in list(es_utils.get_mappings(index).items()):
                mapping = mapping['properties']

                for key, val in list(mapping.items()):
                    if key not in merged_mapping:
                        merged_mapping[key] = (val, [cls_name])
                        continue

                    # FIXME - We're comparing two dicts here. This might
                    # not work for non-trivial dicts.
                    if merged_mapping[key][0] != val:
                        raise es_utils.MappingMergeError(
                            '%s key different for %s and %s' %
                            (key, cls_name, merged_mapping[key][1]))

                    merged_mapping[key][1].append(cls_name)
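
For context, a minimal sketch of the conflict this test guards against, using two hypothetical doctype mappings (the {'properties': ...} shape is taken from the code above; the field name and types are invented for illustration):

# Two doctypes that would share one index, each defining 'created'
# with a different type. Elasticsearch cannot merge these, so the
# test's merged_mapping pass raises MappingMergeError on the mismatch.
question_mapping = {'properties': {'created': {'type': 'date'}}}
answer_mapping = {'properties': {'created': {'type': 'long'}}}

assert (question_mapping['properties']['created'] !=
        answer_mapping['properties']['created'])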
Example #2
    def test_delete(self, _out):
        # Note: The read indexes and the write indexes are the same in
        # the tests, so we only have to do this once.
        indexes = es_utils.all_read_indexes()
        indexes.append('cupcakerainbow_index')
        for index in indexes:
            call_command('esdelete', index, noinput=True)
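
call_command is Django's API for running a management command from Python; positional arguments and keyword options are forwarded to the command, so noinput=True reaches the command as an option. The esdelete command itself is not shown in these examples; the following is only a plausible sketch of its shape (Django 1.8+ argparse style assumed):

from django.core.management.base import BaseCommand


class Command(BaseCommand):
    # Hypothetical sketch of an 'esdelete' command; the real one is
    # not included in these examples.
    help = 'Delete an Elasticsearch index.'

    def add_arguments(self, parser):
        parser.add_argument('index')
        parser.add_argument('--noinput', action='store_true', default=False)

    def handle(self, *args, **options):
        if not options['noinput']:
            # The real command would presumably prompt for confirmation.
            raise SystemExit('Refusing to delete without noinput.')
        # es_utils.delete_index(options['index'])  # assumed helper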
Example #3
    def test_mappings(self):
        # This is more of a linter than a test. If it passes,
        # everything is fine; if it fails, two mappings that share an
        # index define a field with the same name but different
        # types, which Elasticsearch doesn't allow.

        # Running this check as a test catches these serious problems
        # early without adding overhead to the runtime code.

        # Verify mappings that share the same index don't conflict
        for index in es_utils.all_read_indexes():
            merged_mapping = {}

            for cls_name, mapping in es_utils.get_mappings(index).items():
                mapping = mapping['properties']

                for key, val in mapping.items():
                    if key not in merged_mapping:
                        merged_mapping[key] = (val, [cls_name])
                        continue

                    # FIXME - We're comparing two dicts here. This might
                    # not work for non-trivial dicts.
                    if merged_mapping[key][0] != val:
                        raise es_utils.MappingMergeError(
                            '%s key different for %s and %s' %
                            (key, cls_name, merged_mapping[key][1]))

                    merged_mapping[key][1].append(cls_name)
Example #4
def monitor(request):
    """View for services monitor."""
    status = {}

    # Note: To add a new component to the services monitor, test it
    # and then add an entry to status mapping its name to a list of
    # (level, message) output tuples.

    # Check memcached.
    memcache_results = []
    try:
        for cache_name, cache_props in settings.CACHES.items():
            result = True
            backend = cache_props['BACKEND']
            location = cache_props['LOCATION']

            # LOCATION can be a string or a list of strings
            if isinstance(location, basestring):
                location = location.split(';')

            if 'memcache' in backend:
                for loc in location:
                    # TODO: this doesn't handle unix: variant
                    ip, port = loc.split(':')
                    result = test_memcached(ip, int(port))
                    memcache_results.append(
                        (INFO, '%s:%s %s' % (ip, port, result)))

        if not memcache_results:
            memcache_results.append((ERROR, 'memcache is not configured.'))

        elif len(memcache_results) < 2:
            memcache_results.append(
                (ERROR, ('You should have at least 2 memcache servers. '
                         'You have %s.' % len(memcache_results))))

        else:
            memcache_results.append((INFO, 'memcached servers look good.'))

    except Exception as exc:
        memcache_results.append(
            (ERROR, 'Exception while looking at memcached: %s' % str(exc)))

    status['memcached'] = memcache_results

    # Check Libraries and versions
    libraries_results = []
    try:
        Image.new('RGB', (16, 16)).save(StringIO.StringIO(), 'JPEG')
        libraries_results.append((INFO, 'PIL+JPEG: Got it!'))
    except Exception as exc:
        libraries_results.append(
            (ERROR,
             'PIL+JPEG: Probably missing: '
             'Failed to create a jpeg image: %s' % exc))

    status['libraries'] = libraries_results

    # Check file paths.
    msg = 'We want read + write.'
    filepaths = (
        (settings.USER_AVATAR_PATH, os.R_OK | os.W_OK, msg),
        (settings.IMAGE_UPLOAD_PATH, os.R_OK | os.W_OK, msg),
        (settings.THUMBNAIL_UPLOAD_PATH, os.R_OK | os.W_OK, msg),
        (settings.GALLERY_IMAGE_PATH, os.R_OK | os.W_OK, msg),
        (settings.GALLERY_IMAGE_THUMBNAIL_PATH, os.R_OK | os.W_OK, msg),
        (settings.GALLERY_VIDEO_PATH, os.R_OK | os.W_OK, msg),
        (settings.GALLERY_VIDEO_THUMBNAIL_PATH, os.R_OK | os.W_OK, msg),
        (settings.GROUP_AVATAR_PATH, os.R_OK | os.W_OK, msg),
    )

    filepath_results = []
    for path, perms, notes in filepaths:
        path = os.path.join(settings.MEDIA_ROOT, path)
        path_exists = os.path.isdir(path)
        path_perms = os.access(path, perms)

        if path_exists and path_perms:
            filepath_results.append(
                (INFO, '%s: %s %s %s' % (path, path_exists, path_perms,
                                         notes)))

    status['filepaths'] = filepath_results

    # Check RabbitMQ.
    rabbitmq_results = []
    try:
        rabbit_conn = establish_connection(connect_timeout=2)
        rabbit_conn.connect()
        rabbitmq_results.append(
            (INFO, 'Successfully connected to RabbitMQ.'))

        rabbitmq_results.append(
            (INFO, 'Queue size: %s' % rabbitmq_queue_size()))
    except (socket.error, IOError) as exc:
        rabbitmq_results.append(
            (ERROR, 'Error connecting to RabbitMQ: %s' % str(exc)))

    except Exception as exc:
        rabbitmq_results.append(
            (ERROR, 'Exception while looking at RabbitMQ: %s' % str(exc)))

    status['RabbitMQ'] = rabbitmq_results

    # Check ES.
    es_results = []
    try:
        es_utils.get_doctype_stats(es_utils.all_read_indexes()[0])
        es_results.append(
            (INFO, ('Successfully connected to ElasticSearch and index '
                    'exists.')))

    except es_utils.ES_EXCEPTIONS as exc:
        es_results.append(
            (ERROR, 'ElasticSearch problem: %s' % str(exc)))

    except Exception as exc:
        es_results.append(
            (ERROR, 'Exception while looking at ElasticSearch: %s' % str(exc)))

    status['ElasticSearch'] = es_results

    # Check Celery.
    # start = time.time()
    # pong = celery.task.ping()
    # rabbit_results = r = {'duration': time.time() - start}
    # status_summary['rabbit'] = pong == 'pong' and r['duration'] < 1

    # Check Redis.
    redis_results = []
    if hasattr(settings, 'REDIS_BACKENDS'):
        for backend in settings.REDIS_BACKENDS:
            try:
                redis_client(backend)
                redis_results.append((INFO, '%s: Pass!' % backend))
            except RedisError:
                redis_results.append((ERROR, '%s: Fail!' % backend))
    status['Redis'] = redis_results

    status_code = 200

    status_summary = {}
    for component, output in status.items():
        if ERROR in [item[0] for item in output]:
            status_code = 500
            status_summary[component] = False
        else:
            status_summary[component] = True

    return render(request, 'services/monitor.html', {
        'component_status': status,
        'status_summary': status_summary},
        status=status_code)
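
Each component's value in status is a list of (level, message) tuples, and a single ERROR anywhere turns the response into a 500. A minimal, self-contained sketch of that reduction (the INFO/ERROR constants are assumed to be simple markers):

INFO, ERROR = 'INFO', 'ERROR'  # assumed stand-ins for the real constants

status = {
    'memcached': [(INFO, '127.0.0.1:11211 True')],
    'RabbitMQ': [(ERROR, 'Error connecting to RabbitMQ: timed out')],
}

status_code = 200
status_summary = {}
for component, output in status.items():
    if ERROR in [item[0] for item in output]:
        status_code = 500
        status_summary[component] = False
    else:
        status_summary[component] = True

assert status_code == 500
assert status_summary == {'memcached': True, 'RabbitMQ': False}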
Example #5
def monitor(request):
    """View for services monitor."""
    status = {}

    # Note: To add a new component to the services monitor, test it
    # and then add an entry to status mapping its name to a list of
    # (level, message) output tuples.

    # Check memcached.
    memcache_results = []
    try:
        for cache_name, cache_props in settings.CACHES.items():
            result = True
            backend = cache_props['BACKEND']
            location = cache_props['LOCATION']

            # LOCATION can be a string or a list of strings
            if isinstance(location, basestring):
                location = location.split(';')

            if 'memcache' in backend:
                for loc in location:
                    # TODO: this doesn't handle unix: variant
                    ip, port = loc.split(':')
                    result = test_memcached(ip, int(port))
                    memcache_results.append(
                        (INFO, '%s:%s %s' % (ip, port, result)))

        if not memcache_results:
            memcache_results.append((ERROR, 'memcache is not configured.'))

        elif len(memcache_results) < 2:
            memcache_results.append(
                (ERROR, ('You should have at least 2 memcache servers. '
                         'You have %s.' % len(memcache_results))))

        else:
            memcache_results.append((INFO, 'memcached servers look good.'))

    except Exception as exc:
        memcache_results.append(
            (ERROR, 'Exception while looking at memcached: %s' % str(exc)))

    status['memcached'] = memcache_results

    # Check Libraries and versions
    libraries_results = []
    try:
        Image.new('RGB', (16, 16)).save(StringIO.StringIO(), 'JPEG')
        libraries_results.append((INFO, 'PIL+JPEG: Got it!'))
    except Exception as exc:
        libraries_results.append((ERROR, 'PIL+JPEG: Probably missing: '
                                  'Failed to create a jpeg image: %s' % exc))

    status['libraries'] = libraries_results

    # Check file paths.
    msg = 'We want read + write.'
    filepaths = (
        (settings.USER_AVATAR_PATH, os.R_OK | os.W_OK, msg),
        (settings.IMAGE_UPLOAD_PATH, os.R_OK | os.W_OK, msg),
        (settings.THUMBNAIL_UPLOAD_PATH, os.R_OK | os.W_OK, msg),
        (settings.GALLERY_IMAGE_PATH, os.R_OK | os.W_OK, msg),
        (settings.GALLERY_IMAGE_THUMBNAIL_PATH, os.R_OK | os.W_OK, msg),
        (settings.GALLERY_VIDEO_PATH, os.R_OK | os.W_OK, msg),
        (settings.GALLERY_VIDEO_THUMBNAIL_PATH, os.R_OK | os.W_OK, msg),
        (settings.GROUP_AVATAR_PATH, os.R_OK | os.W_OK, msg),
    )

    filepath_results = []
    for path, perms, notes in filepaths:
        path = os.path.join(settings.MEDIA_ROOT, path)
        path_exists = os.path.isdir(path)
        path_perms = os.access(path, perms)

        if path_exists and path_perms:
            filepath_results.append(
                (INFO,
                 '%s: %s %s %s' % (path, path_exists, path_perms, notes)))

    status['filepaths'] = filepath_results

    # Check RabbitMQ.
    rabbitmq_results = []
    try:
        rabbit_conn = establish_connection(connect_timeout=5)
        rabbit_conn.connect()
        rabbitmq_results.append((INFO, 'Successfully connected to RabbitMQ.'))
    except (socket.error, IOError) as exc:
        rabbitmq_results.append(
            (ERROR, 'Error connecting to RabbitMQ: %s' % str(exc)))

    except Exception as exc:
        rabbitmq_results.append(
            (ERROR, 'Exception while looking at RabbitMQ: %s' % str(exc)))

    status['RabbitMQ'] = rabbitmq_results

    # Check ES.
    es_results = []
    try:
        es_utils.get_doctype_stats(es_utils.all_read_indexes()[0])
        es_results.append(
            (INFO, ('Successfully connected to ElasticSearch and index '
                    'exists.')))

    except es_utils.ES_EXCEPTIONS as exc:
        es_results.append((ERROR, 'ElasticSearch problem: %s' % str(exc)))

    except Exception as exc:
        es_results.append(
            (ERROR, 'Exception while looking at ElasticSearch: %s' % str(exc)))

    status['ElasticSearch'] = es_results

    # Check Celery.
    # start = time.time()
    # pong = celery.task.ping()
    # rabbit_results = r = {'duration': time.time() - start}
    # status_summary['rabbit'] = pong == 'pong' and r['duration'] < 1

    # Check Redis.
    redis_results = []
    if hasattr(settings, 'REDIS_BACKENDS'):
        for backend in settings.REDIS_BACKENDS:
            try:
                redis_client(backend)
                redis_results.append((INFO, '%s: Pass!' % backend))
            except RedisError:
                redis_results.append((ERROR, '%s: Fail!' % backend))
    status['Redis'] = redis_results

    status_code = 200

    status_summary = {}
    for component, output in status.items():
        if ERROR in [item[0] for item in output]:
            status_code = 500
            status_summary[component] = False
        else:
            status_summary[component] = True

    return render(request,
                  'services/monitor.html', {
                      'component_status': status,
                      'status_summary': status_summary
                  },
                  status=status_code)
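
The test_memcached helper the view calls is not shown in these examples. A plausible sketch of one, assuming the python-memcached client (the helper name comes from the code above; its body here is an assumption):

import memcache  # python-memcached client; an assumed dependency


def test_memcached(host, port):
    # Return True if a memcached server at host:port answers a
    # round-trip set/get.
    client = memcache.Client(['%s:%s' % (host, port)])
    try:
        client.set('monitor_test', 1)
        return client.get('monitor_test') == 1
    finally:
        client.disconnect_all()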
Example #6
def search(request):
    """Render the admin view containing search tools"""
    if not request.user.has_perm('search.reindex'):
        raise PermissionDenied

    error_messages = []
    stats = {}

    if 'reset' in request.POST:
        try:
            return handle_reset(request)
        except ReindexError as e:
            error_messages.append(u'Error: %s' % e.message)

    if 'reindex' in request.POST:
        try:
            return handle_reindex(request)
        except ReindexError as e:
            error_messages.append(u'Error: %s' % e.message)

    if 'recreate_index' in request.POST:
        try:
            return handle_recreate_index(request)
        except ReindexError as e:
            error_messages.append(u'Error: %s' % e.message)

    if 'delete_index' in request.POST:
        try:
            return handle_delete(request)
        except DeleteError as e:
            error_messages.append(u'Error: %s' % e.message)
        except ES_EXCEPTIONS as e:
            error_messages.append('Error: {0}'.format(repr(e)))

    stats = None
    write_stats = None
    es_deets = None
    indexes = []
    outstanding_chunks = None

    try:
        # TODO: SUMO has a single ES_URL and that's the ZLB and does
        # the balancing. If that ever changes and we have multiple
        # ES_URLs, then this should get fixed.
        es_deets = requests.get(settings.ES_URLS[0]).json()
    except requests.exceptions.RequestException:
        pass

    stats = {}
    for index in all_read_indexes():
        try:
            stats[index] = get_doctype_stats(index)
        except ES_EXCEPTIONS:
            stats[index] = None

    write_stats = {}
    for index in all_write_indexes():
        try:
            write_stats[index] = get_doctype_stats(index)
        except ES_EXCEPTIONS:
            write_stats[index] = None

    try:
        indexes = get_indexes()
        indexes.sort(key=lambda m: m[0])
    except ES_EXCEPTIONS as e:
        error_messages.append('Error: {0}'.format(repr(e)))

    try:
        client = redis_client('default')
        outstanding_chunks = int(client.get(OUTSTANDING_INDEX_CHUNKS))
    except (RedisError, TypeError):
        pass

    recent_records = Record.uncached.order_by('-starttime')[:100]

    outstanding_records = (Record.uncached.filter(endtime__isnull=True)
                                          .order_by('-starttime'))

    index_groups = set(settings.ES_INDEXES.keys())
    index_groups |= set(settings.ES_WRITE_INDEXES.keys())

    index_group_data = [[group, read_index(group), write_index(group)]
                        for group in index_groups]

    return render(
        request,
        'admin/search_maintenance.html',
        {'title': 'Search',
         'es_deets': es_deets,
         'doctype_stats': stats,
         'doctype_write_stats': write_stats,
         'indexes': indexes,
         'index_groups': index_groups,
         'index_group_data': index_group_data,
         'read_indexes': all_read_indexes,
         'write_indexes': all_write_indexes,
         'error_messages': error_messages,
         'recent_records': recent_records,
         'outstanding_records': outstanding_records,
         'outstanding_chunks': outstanding_chunks,
         'now': datetime.now(),
         'read_index': read_index,
         'write_index': write_index,
         })
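
The view leans on a few settings: ES_URLS for the cluster endpoint, plus ES_INDEXES and ES_WRITE_INDEXES mapping index groups to read and write index names, which is what read_index(group) and write_index(group) resolve. A plausible shape for those settings (the names come from the code above; the values are invented):

ES_URLS = ['http://localhost:9200']
ES_INDEXES = {'default': 'main_read_index'}
ES_WRITE_INDEXES = {'default': 'main_write_index'}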
Example #7
def search(request):
    """Render the admin view containing search tools"""
    if not request.user.has_perm('search.reindex'):
        raise PermissionDenied

    error_messages = []
    stats = {}

    if 'reset' in request.POST:
        try:
            return handle_reset(request)
        except ReindexError as e:
            error_messages.append('Error: %s' % e.message)

    if 'reindex' in request.POST:
        try:
            return handle_reindex(request)
        except ReindexError as e:
            error_messages.append('Error: %s' % e.message)

    if 'recreate_index' in request.POST:
        try:
            return handle_recreate_index(request)
        except ReindexError as e:
            error_messages.append('Error: %s' % e.message)

    if 'delete_index' in request.POST:
        try:
            return handle_delete(request)
        except DeleteError as e:
            error_messages.append('Error: %s' % e.message)
        except ES_EXCEPTIONS as e:
            error_messages.append('Error: {0}'.format(repr(e)))

    stats = None
    write_stats = None
    es_deets = None
    indexes = []

    try:
        # TODO: SUMO has a single ES_URL and that's the ZLB and does
        # the balancing. If that ever changes and we have multiple
        # ES_URLs, then this should get fixed.
        es_deets = requests.get(settings.ES_URLS[0]).json()
    except requests.exceptions.RequestException:
        pass

    stats = {}
    for index in all_read_indexes():
        try:
            stats[index] = get_doctype_stats(index)
        except ES_EXCEPTIONS:
            stats[index] = None

    write_stats = {}
    for index in all_write_indexes():
        try:
            write_stats[index] = get_doctype_stats(index)
        except ES_EXCEPTIONS:
            write_stats[index] = None

    try:
        indexes = get_indexes()
        indexes.sort(key=lambda m: m[0])
    except ES_EXCEPTIONS as e:
        error_messages.append('Error: {0}'.format(repr(e)))

    recent_records = Record.objects.all()[:100]
    outstanding_records = Record.objects.outstanding()

    index_groups = set(settings.ES_INDEXES.keys())
    index_groups |= set(settings.ES_WRITE_INDEXES.keys())

    index_group_data = [[group, read_index(group),
                         write_index(group)] for group in index_groups]

    return render(
        request, 'admin/search_maintenance.html', {
            'title': 'Search',
            'es_deets': es_deets,
            'doctype_stats': stats,
            'doctype_write_stats': write_stats,
            'indexes': indexes,
            'index_groups': index_groups,
            'index_group_data': index_group_data,
            'read_indexes': all_read_indexes,
            'write_indexes': all_write_indexes,
            'error_messages': error_messages,
            'recent_records': recent_records,
            'outstanding_records': outstanding_records,
            'now': datetime.now(),
            'read_index': read_index,
            'write_index': write_index,
        })
Example #8
def search(request):
    """Render the admin view containing search tools"""
    if not request.user.has_perm("search.reindex"):
        raise PermissionDenied

    error_messages = []
    stats = {}

    if "reset" in request.POST:
        try:
            return handle_reset(request)
        except ReindexError as e:
            error_messages.append(u"Error: %s" % e.message)

    if "reindex" in request.POST:
        try:
            return handle_reindex(request)
        except ReindexError as e:
            error_messages.append(u"Error: %s" % e.message)

    if "recreate_index" in request.POST:
        try:
            return handle_recreate_index(request)
        except ReindexError as e:
            error_messages.append(u"Error: %s" % e.message)

    if "delete_index" in request.POST:
        try:
            return handle_delete(request)
        except DeleteError as e:
            error_messages.append(u"Error: %s" % e.message)
        except ES_EXCEPTIONS as e:
            error_messages.append("Error: {0}".format(repr(e)))

    stats = None
    write_stats = None
    es_deets = None
    indexes = []
    outstanding_chunks = None

    try:
        # TODO: SUMO has a single ES_URL and that's the ZLB and does
        # the balancing. If that ever changes and we have multiple
        # ES_URLs, then this should get fixed.
        es_deets = requests.get(settings.ES_URLS[0]).json()
    except requests.exceptions.RequestException:
        pass

    stats = {}
    for index in all_read_indexes():
        try:
            stats[index] = get_doctype_stats(index)
        except ES_EXCEPTIONS:
            stats[index] = None

    write_stats = {}
    for index in all_write_indexes():
        try:
            write_stats[index] = get_doctype_stats(index)
        except ES_EXCEPTIONS:
            write_stats[index] = None

    try:
        indexes = get_indexes()
        indexes.sort(key=lambda m: m[0])
    except ES_EXCEPTIONS as e:
        error_messages.append("Error: {0}".format(repr(e)))

    try:
        client = redis_client("default")
        outstanding_chunks = int(client.get(OUTSTANDING_INDEX_CHUNKS))
    except (RedisError, TypeError):
        pass

    recent_records = Record.objects.order_by("-starttime")[:100]

    outstanding_records = Record.objects.filter(endtime__isnull=True).order_by("-starttime")

    index_groups = set(settings.ES_INDEXES.keys())
    index_groups |= set(settings.ES_WRITE_INDEXES.keys())

    index_group_data = [[group, read_index(group), write_index(group)] for group in index_groups]

    return render(
        request,
        "admin/search_maintenance.html",
        {
            "title": "Search",
            "es_deets": es_deets,
            "doctype_stats": stats,
            "doctype_write_stats": write_stats,
            "indexes": indexes,
            "index_groups": index_groups,
            "index_group_data": index_group_data,
            "read_indexes": all_read_indexes,
            "write_indexes": all_write_indexes,
            "error_messages": error_messages,
            "recent_records": recent_records,
            "outstanding_records": outstanding_records,
            "outstanding_chunks": outstanding_chunks,
            "now": datetime.now(),
            "read_index": read_index,
            "write_index": write_index,
        },
    )
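
outstanding_chunks is read back as a plain integer counter stored in Redis under the OUTSTANDING_INDEX_CHUNKS key; the TypeError branch covers the key being absent, since get then returns None. A minimal sketch of how a reindexing job might maintain such a counter with redis-py (only the key name comes from the code above; its value and the incr/decr protocol are assumptions):

from redis import Redis  # the view goes through redis_client() instead

OUTSTANDING_INDEX_CHUNKS = 'search:outstanding-index-chunks'  # value assumed

client = Redis()
client.set(OUTSTANDING_INDEX_CHUNKS, 0)
client.incr(OUTSTANDING_INDEX_CHUNKS, 10)  # ten chunks scheduled
client.decr(OUTSTANDING_INDEX_CHUNKS)      # one chunk finished
assert int(client.get(OUTSTANDING_INDEX_CHUNKS)) == 9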
Example #9
def monitor(request):
    """View for services monitor."""
    status = {}

    # Note: To add a new component to the services monitor, test it
    # and then add an entry to status mapping its name to a list of
    # (level, message) output tuples.

    # Check memcached.
    memcache_results = []
    try:
        for cache_name, cache_props in settings.CACHES.items():
            result = True
            backend = cache_props["BACKEND"]
            location = cache_props["LOCATION"]

            # LOCATION can be a string or a list of strings
            if isinstance(location, basestring):
                location = location.split(";")

            if "memcache" in backend:
                for loc in location:
                    # TODO: this doesn't handle unix: variant
                    ip, port = loc.split(":")
                    result = test_memcached(ip, int(port))
                    memcache_results.append((INFO, "{0!s}:{1!s} {2!s}".format(ip, port, result)))

        if not memcache_results:
            memcache_results.append((ERROR, "memcache is not configured."))

        elif len(memcache_results) < 2:
            memcache_results.append(
                (ERROR, ("You should have at least 2 memcache servers. " "You have %s." % len(memcache_results)))
            )

        else:
            memcache_results.append((INFO, "memcached servers look good."))

    except Exception as exc:
        memcache_results.append((ERROR, "Exception while looking at memcached: {0!s}".format(str(exc))))

    status["memcached"] = memcache_results

    # Check Libraries and versions
    libraries_results = []
    try:
        Image.new("RGB", (16, 16)).save(StringIO.StringIO(), "JPEG")
        libraries_results.append((INFO, "PIL+JPEG: Got it!"))
    except Exception as exc:
        libraries_results.append((ERROR, "PIL+JPEG: Probably missing: " "Failed to create a jpeg image: %s" % exc))

    status["libraries"] = libraries_results

    # Check file paths.
    msg = "We want read + write."
    filepaths = (
        (settings.USER_AVATAR_PATH, os.R_OK | os.W_OK, msg),
        (settings.IMAGE_UPLOAD_PATH, os.R_OK | os.W_OK, msg),
        (settings.THUMBNAIL_UPLOAD_PATH, os.R_OK | os.W_OK, msg),
        (settings.GALLERY_IMAGE_PATH, os.R_OK | os.W_OK, msg),
        (settings.GALLERY_IMAGE_THUMBNAIL_PATH, os.R_OK | os.W_OK, msg),
        (settings.GALLERY_VIDEO_PATH, os.R_OK | os.W_OK, msg),
        (settings.GALLERY_VIDEO_THUMBNAIL_PATH, os.R_OK | os.W_OK, msg),
        (settings.GROUP_AVATAR_PATH, os.R_OK | os.W_OK, msg),
    )

    filepath_results = []
    for path, perms, notes in filepaths:
        path = os.path.join(settings.MEDIA_ROOT, path)
        path_exists = os.path.isdir(path)
        path_perms = os.access(path, perms)

        if path_exists and path_perms:
            filepath_results.append((INFO, "{0!s}: {1!s} {2!s} {3!s}".format(path, path_exists, path_perms, notes)))

    status["filepaths"] = filepath_results

    # Check RabbitMQ.
    rabbitmq_results = []
    try:
        rabbit_conn = establish_connection(connect_timeout=5)
        rabbit_conn.connect()
        rabbitmq_results.append((INFO, "Successfully connected to RabbitMQ."))
    except (socket.error, IOError) as exc:
        rabbitmq_results.append((ERROR, "Error connecting to RabbitMQ: {0!s}".format(str(exc))))

    except Exception as exc:
        rabbitmq_results.append((ERROR, "Exception while looking at RabbitMQ: {0!s}".format(str(exc))))

    status["RabbitMQ"] = rabbitmq_results

    # Check ES.
    es_results = []
    try:
        es_utils.get_doctype_stats(es_utils.all_read_indexes()[0])
        es_results.append((INFO, ("Successfully connected to ElasticSearch and index " "exists.")))

    except es_utils.ES_EXCEPTIONS as exc:
        es_results.append((ERROR, "ElasticSearch problem: {0!s}".format(str(exc))))

    except Exception as exc:
        es_results.append((ERROR, "Exception while looking at ElasticSearch: {0!s}".format(str(exc))))

    status["ElasticSearch"] = es_results

    # Check Celery.
    # start = time.time()
    # pong = celery.task.ping()
    # rabbit_results = r = {'duration': time.time() - start}
    # status_summary['rabbit'] = pong == 'pong' and r['duration'] < 1

    # Check Redis.
    redis_results = []
    if hasattr(settings, "REDIS_BACKENDS"):
        for backend in settings.REDIS_BACKENDS:
            try:
                redis_client(backend)
                redis_results.append((INFO, "{0!s}: Pass!".format(backend)))
            except RedisError:
                redis_results.append((ERROR, "{0!s}: Fail!".format(backend)))
    status["Redis"] = redis_results

    status_code = 200

    status_summary = {}
    for component, output in status.items():
        if ERROR in [item[0] for item in output]:
            status_code = 500
            status_summary[component] = False
        else:
            status_summary[component] = True

    return render(
        request,
        "services/monitor.html",
        {"component_status": status, "status_summary": status_summary},
        status=status_code,
    )