Example #1
def search(request):
    """Render the admin view containing search tools.

    It's a dirty little secret that you can fire off 2 concurrent reindexing
    jobs; the disabling of the buttons while one is running is advisory only.
    This lets us recover if celery crashes and doesn't clear the memcached
    token.

    """
    if not request.user.has_perm('search.reindex'):
        raise PermissionDenied

    reindex_requested = 'reindex' in request.POST
    if reindex_requested:
        reindex_with_progress.delay(
            waffle_when_done='waffle_when_done' in request.POST)

    es_error_message = ''
    stats = {}
    try:
        # This gets index stats, but also tells us whether ES is in
        # a bad state.
        stats = get_doctype_stats()
    except ESMaxRetryError:
        es_error_message = ('Elastic Search is not set up on this machine '
                            'or is not responding. (MaxRetryError)')
    except ESIndexMissingException:
        es_error_message = ('Index is missing. Press the reindex button '
                            'below. (IndexMissingException)')
    except ESTimeoutError:
        es_error_message = ('Connection to Elastic Search timed out. '
                            '(TimeoutError)')

    return render_to_response(
        'search/admin/search.html',
        {
            'title': 'Search',
            'doctype_stats': stats,
            'es_error_message': es_error_message,
            # Dim the buttons even if the form loads before the task fires:
            'progress': cache.get(ES_REINDEX_PROGRESS,
                                  '0.001' if reindex_requested else ''),
            'waffle_when_done': (request.POST.get('waffle_when_done')
                                 if reindex_requested
                                 else cache.get(ES_WAFFLE_WHEN_DONE)),
            'progress_url': reverse('search.reindex_progress'),
            'interval': settings.ES_REINDEX_PROGRESS_BAR_INTERVAL * 1000,
        },
        RequestContext(request, {}))
Example #2
def search(request):
    """Render the admin view containing search tools.

    It's a dirty little secret that you can fire off 2 concurrent reindexing
    jobs; the disabling of the buttons while one is running is advisory only.
    This lets us recover if celery crashes and doesn't clear the memcached
    token.

    """
    if not request.user.has_perm('search.reindex'):
        raise PermissionDenied

    reindex_requested = 'reindex' in request.POST
    if reindex_requested:
        reindex_with_progress.delay(
                waffle_when_done='waffle_when_done' in request.POST)

    es_error_message = ''
    stats = {}
    try:
        # This gets index stats, but also tells us whether ES is in
        # a bad state.
        stats = get_doctype_stats()
    except ESMaxRetryError:
        es_error_message = ('Elastic Search is not set up on this machine '
                            'or is not responding. (MaxRetryError)')
    except ESIndexMissingException:
        es_error_message = ('Index is missing. Press the reindex button '
                            'below. (IndexMissingException)')
    except ESTimeoutError:
        es_error_message = ('Connection to Elastic Search timed out. '
                            '(TimeoutError)')

    return render_to_response(
        'search/admin/search.html',
        {'title': 'Search',
         'doctype_stats': stats,
         'es_error_message': es_error_message,
         # Dim the buttons even if the form loads before the task fires:
         'progress': cache.get(ES_REINDEX_PROGRESS,
                               '0.001' if reindex_requested else ''),
         'waffle_when_done':
             request.POST.get('waffle_when_done') if reindex_requested else
             cache.get(ES_WAFFLE_WHEN_DONE),
         'progress_url': reverse('search.reindex_progress'),
         'interval': settings.ES_REINDEX_PROGRESS_BAR_INTERVAL * 1000},
        RequestContext(request, {}))
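
The two variants above only enqueue reindex_with_progress and then read a progress token back out of the cache; the recovery trick described in the docstring works because the task itself owns that token. Below is a minimal sketch of what such a task could look like, assuming a Celery shared_task, an ES_REINDEX_PROGRESS cache key string, and a stand-in loop in place of the real indexing work; none of this is taken from the actual project code.

from celery import shared_task
from django.core.cache import cache

ES_REINDEX_PROGRESS = 'es-reindex-progress'  # assumed cache key name


@shared_task
def reindex_with_progress(waffle_when_done=False):
    """Hedged sketch, not the project's task: record progress in the cache
    while reindexing, then clear the token so the admin buttons re-enable."""
    total = 100  # stand-in for the real number of documents to index
    for done in range(1, total + 1):
        # ... index one batch of documents here ...
        cache.set(ES_REINDEX_PROGRESS, '%.3f' % (done / float(total)))
    # A crashed worker never reaches this line, which is why the view above
    # tolerates a stale token instead of trusting the disabled buttons.
    cache.delete(ES_REINDEX_PROGRESS)
    # waffle_when_done handling is omitted in this sketch.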
Example #3
def monitor(request):
    """View for services monitor."""
    status = {}

    # Note: To add a new component to the services monitor, do your
    # testing and then add a name -> list of output tuples map to
    # status.

    # Check memcached.
    memcache_results = []
    try:
        for cache_name, cache_props in settings.CACHES.items():
            result = True
            backend = cache_props['BACKEND']
            location = cache_props['LOCATION']

            # LOCATION can be a string or a list of strings
            if isinstance(location, basestring):
                location = location.split(';')

            if 'memcache' in backend:
                for loc in location:
                    # TODO: this doesn't handle unix: variant
                    ip, port = loc.split(':')
                    result = test_memcached(ip, int(port))
                    memcache_results.append(
                        (INFO, '%s:%s %s' % (ip, port, result)))

        if not memcache_results:
            memcache_results.append((ERROR, 'memcache is not configured.'))

        elif len(memcache_results) < 2:
            memcache_results.append(
                (ERROR, ('You should have at least 2 memcache servers. '
                         'You have %s.' % len(memcache_results))))

        else:
            memcache_results.append((INFO, 'memcached servers look good.'))

    except Exception as exc:
        memcache_results.append(
            (ERROR, 'Exception while looking at memcached: %s' % str(exc)))

    status['memcached'] = memcache_results

    # Check Libraries and versions
    libraries_results = []
    try:
        Image.new('RGB', (16, 16)).save(StringIO.StringIO(), 'JPEG')
        libraries_results.append((INFO, 'PIL+JPEG: Got it!'))
    except Exception as exc:
        libraries_results.append(
            (ERROR,
             'PIL+JPEG: Probably missing: '
             'Failed to create a jpeg image: %s' % exc))

    status['libraries'] = libraries_results

    # Check file paths.
    msg = 'We want read + write.'
    filepaths = (
        (settings.USER_AVATAR_PATH, os.R_OK | os.W_OK, msg),
        (settings.IMAGE_UPLOAD_PATH, os.R_OK | os.W_OK, msg),
        (settings.THUMBNAIL_UPLOAD_PATH, os.R_OK | os.W_OK, msg),
        (settings.GALLERY_IMAGE_PATH, os.R_OK | os.W_OK, msg),
        (settings.GALLERY_IMAGE_THUMBNAIL_PATH, os.R_OK | os.W_OK, msg),
        (settings.GALLERY_VIDEO_PATH, os.R_OK | os.W_OK, msg),
        (settings.GALLERY_VIDEO_THUMBNAIL_PATH, os.R_OK | os.W_OK, msg),
        (settings.GROUP_AVATAR_PATH, os.R_OK | os.W_OK, msg),
    )

    filepath_results = []
    for path, perms, notes in filepaths:
        path = os.path.join(settings.MEDIA_ROOT, path)
        path_exists = os.path.isdir(path)
        path_perms = os.access(path, perms)

        if path_exists and path_perms:
            filepath_results.append(
                (INFO, '%s: %s %s %s' % (path, path_exists, path_perms,
                                         notes)))

    status['filepaths'] = filepath_results

    # Check RabbitMQ.
    rabbitmq_results = []
    try:
        rabbit_conn = establish_connection(connect_timeout=2)
        rabbit_conn.connect()
        rabbitmq_results.append(
            (INFO, 'Successfully connected to RabbitMQ.'))

    except (socket.error, IOError) as exc:
        rabbitmq_results.append(
            (ERROR, 'Error connecting to RabbitMQ: %s' % str(exc)))

    except Exception as exc:
        rabbitmq_results.append(
            (ERROR, 'Exception while looking at RabbitMQ: %s' % str(exc)))

    status['RabbitMQ'] = rabbitmq_results

    # Check ES.
    es_results = []
    try:
        es_utils.get_doctype_stats(es_utils.READ_INDEX)
        es_results.append(
            (INFO, ('Successfully connected to ElasticSearch and index '
                    'exists.')))

    except pyes.urllib3.MaxRetryError as exc:
        es_results.append(
            (ERROR, 'Cannot connect to ElasticSearch: %s' % str(exc)))

    except pyes.exceptions.IndexMissingException:
        es_results.append(
            (ERROR, 'Index "%s" missing.' % es_utils.READ_INDEX))

    except Exception as exc:
        es_results.append(
            (ERROR, 'Exception while looking at ElasticSearch: %s' % str(exc)))

    status['ElasticSearch'] = es_results

    # Check Celery.
    # start = time.time()
    # pong = celery.task.ping()
    # rabbit_results = r = {'duration': time.time() - start}
    # status_summary['rabbit'] = pong == 'pong' and r['duration'] < 1

    # Check Redis.
    redis_results = []
    if hasattr(settings, 'REDIS_BACKENDS'):
        for backend in settings.REDIS_BACKENDS:
            try:
                redis_client(backend)
                redis_results.append((INFO, '%s: Pass!' % backend))
            except RedisError:
                redis_results.append((ERROR, '%s: Fail!' % backend))
    status['Redis'] = redis_results

    status_code = 200

    status_summary = {}
    for component, output in status.items():
        if ERROR in [item[0] for item in output]:
            status_code = 500
            status_summary[component] = False
        else:
            status_summary[component] = True

    return jingo.render(request, 'services/monitor.html',
                        {'component_status': status,
                         'status_summary': status_summary},
                        status=status_code)
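
The memcached block above leans on a test_memcached(ip, port) helper that is not shown. Here is a minimal sketch of such a check, assuming a plain TCP connection attempt is enough to call a server reachable; the real helper may do more, such as a set/get round trip.

import socket


def test_memcached(host, port, timeout=2):
    """Hypothetical helper: return True if a TCP connection to host:port
    succeeds within `timeout` seconds, False otherwise."""
    sock = None
    try:
        sock = socket.create_connection((host, port), timeout=timeout)
        return True
    except (socket.error, socket.timeout):
        return False
    finally:
        if sock is not None:
            sock.close()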
Example #4
def monitor(request):
    """View for services monitor."""
    status = {}

    # Note: To add a new component to the services monitor, do your
    # testing and then add a name -> list of output tuples map to
    # status.

    # Check memcached.
    memcache_results = []
    try:
        for cache_name, cache_props in settings.CACHES.items():
            result = True
            backend = cache_props['BACKEND']
            location = cache_props['LOCATION']

            # LOCATION can be a string or a list of strings
            if isinstance(location, basestring):
                location = location.split(';')

            if 'memcache' in backend:
                for loc in location:
                    # TODO: this doesn't handle unix: variant
                    ip, port = loc.split(':')
                    result = test_memcached(ip, int(port))
                    memcache_results.append(
                        (INFO, '%s:%s %s' % (ip, port, result)))

        if not memcache_results:
            memcache_results.append((ERROR, 'memcache is not configured.'))

        elif len(memcache_results) < 2:
            memcache_results.append(
                (ERROR, ('You should have at least 2 memcache servers. '
                         'You have %s.' % len(memcache_results))))

        else:
            memcache_results.append((INFO, 'memcached servers look good.'))

    except Exception as exc:
        memcache_results.append(
            (ERROR, 'Exception while looking at memcached: %s' % str(exc)))

    status['memcached'] = memcache_results

    # Check Libraries and versions
    libraries_results = []
    try:
        Image.new('RGB', (16, 16)).save(StringIO.StringIO(), 'JPEG')
        libraries_results.append((INFO, 'PIL+JPEG: Got it!'))
    except Exception as exc:
        libraries_results.append((ERROR, 'PIL+JPEG: Probably missing: '
                                  'Failed to create a jpeg image: %s' % exc))

    status['libraries'] = libraries_results

    # Check file paths.
    msg = 'We want read + write.'
    filepaths = (
        (settings.USER_AVATAR_PATH, os.R_OK | os.W_OK, msg),
        (settings.IMAGE_UPLOAD_PATH, os.R_OK | os.W_OK, msg),
        (settings.THUMBNAIL_UPLOAD_PATH, os.R_OK | os.W_OK, msg),
        (settings.GALLERY_IMAGE_PATH, os.R_OK | os.W_OK, msg),
        (settings.GALLERY_IMAGE_THUMBNAIL_PATH, os.R_OK | os.W_OK, msg),
        (settings.GALLERY_VIDEO_PATH, os.R_OK | os.W_OK, msg),
        (settings.GALLERY_VIDEO_THUMBNAIL_PATH, os.R_OK | os.W_OK, msg),
        (settings.GROUP_AVATAR_PATH, os.R_OK | os.W_OK, msg),
    )

    filepath_results = []
    for path, perms, notes in filepaths:
        path = os.path.join(settings.MEDIA_ROOT, path)
        path_exists = os.path.isdir(path)
        path_perms = os.access(path, perms)

        if path_exists and path_perms:
            filepath_results.append(
                (INFO,
                 '%s: %s %s %s' % (path, path_exists, path_perms, notes)))

    status['filepaths'] = filepath_results

    # Check RabbitMQ.
    rabbitmq_results = []
    try:
        rabbit_conn = establish_connection(connect_timeout=2)
        rabbit_conn.connect()
        rabbitmq_results.append((INFO, 'Successfully connected to RabbitMQ.'))

    except (socket.error, IOError) as exc:
        rabbitmq_results.append(
            (ERROR, 'Error connecting to RabbitMQ: %s' % str(exc)))

    except Exception as exc:
        rabbitmq_results.append(
            (ERROR, 'Exception while looking at RabbitMQ: %s' % str(exc)))

    status['RabbitMQ'] = rabbitmq_results

    # Check ES.
    es_results = []
    try:
        es_utils.get_doctype_stats(es_utils.READ_INDEX)
        es_results.append(
            (INFO, ('Successfully connected to ElasticSearch and index '
                    'exists.')))

    except es_utils.ES_EXCEPTIONS as exc:
        es_results.append((ERROR, 'ElasticSearch problem: %s' % str(exc)))

    except Exception as exc:
        es_results.append(
            (ERROR, 'Exception while looking at ElasticSearch: %s' % str(exc)))

    status['ElasticSearch'] = es_results

    # Check Celery.
    # start = time.time()
    # pong = celery.task.ping()
    # rabbit_results = r = {'duration': time.time() - start}
    # status_summary['rabbit'] = pong == 'pong' and r['duration'] < 1

    # Check Redis.
    redis_results = []
    if hasattr(settings, 'REDIS_BACKENDS'):
        for backend in settings.REDIS_BACKENDS:
            try:
                redis_client(backend)
                redis_results.append((INFO, '%s: Pass!' % backend))
            except RedisError:
                redis_results.append((ERROR, '%s: Fail!' % backend))
    status['Redis'] = redis_results

    status_code = 200

    status_summary = {}
    for component, output in status.items():
        if ERROR in [item[0] for item in output]:
            status_code = 500
            status_summary[component] = False
        else:
            status_summary[component] = True

    return render(request,
                  'services/monitor.html', {
                      'component_status': status,
                      'status_summary': status_summary
                  },
                  status=status_code)
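
Compared with Example #3, this variant folds the individual pyes exceptions into a single es_utils.ES_EXCEPTIONS clause. A minimal sketch of what such a tuple might contain, using only the exception paths that appear verbatim in Example #3 (the real definition in es_utils may include more, e.g. a timeout error):

import pyes

# Hedged sketch, not the project's definition: grouping the client errors that
# mean "ElasticSearch is unhealthy" lets callers write one `except
# ES_EXCEPTIONS` handler instead of a clause per exception type.
ES_EXCEPTIONS = (
    pyes.urllib3.MaxRetryError,             # cluster unreachable
    pyes.exceptions.IndexMissingException,  # index has not been created yet
)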
Example #5
    delete_requested = 'delete_index' in request.POST
    if delete_requested:
        try:
            return handle_delete(request)
        except DeleteError as e:
            error_messages.append(u'Error: %s' % e.message)
        except ES_EXCEPTIONS as e:
            error_messages.append('Error: {0}'.format(repr(e)))

    stats = None
    write_stats = None
    indexes = []

    try:
        stats = get_doctype_stats(es_utils.READ_INDEX)
    except ES_EXCEPTIONS:
        stats = None

    try:
        write_stats = get_doctype_stats(es_utils.WRITE_INDEX)
    except ES_EXCEPTIONS:
        write_stats = None

    try:
        indexes = get_indexes()
        indexes.sort(key=lambda m: m[0])
    except ES_EXCEPTIONS as e:
        error_messages.append('Error: {0}'.format(repr(e)))

    try:
Example #6
        except ESMaxRetryError:
            error_messages.append('Error: Elastic Search is not set up on '
                                  'this machine or is not responding. '
                                  '(MaxRetryError)')
        except ESIndexMissingException:
            error_messages.append('Error: Index is missing. Press the reindex '
                                  'button below. (IndexMissingException)')
        except ESTimeoutError:
            error_messages.append('Error: Connection to Elastic Search timed '
                                  'out. (TimeoutError)')

    stats = None
    write_stats = None
    indexes = []
    try:
        # This gets index stats, but also tells us whether ES is in
        # a bad state.
        try:
            stats = get_doctype_stats(es_utils.READ_INDEX)
        except ESIndexMissingException:
            stats = None
        try:
            write_stats = get_doctype_stats(es_utils.WRITE_INDEX)
        except ESIndexMissingException:
            write_stats = None
        indexes = get_indexes()
        indexes.sort(key=lambda m: m[0])
    except ESMaxRetryError:
        error_messages.append('Error: Elastic Search is not set up on this '
                              'machine or is not responding. (MaxRetryError)')
    except ESIndexMissingException:
        error_messages.append('Error: Index is missing. Press the reindex '
                              'button below. (IndexMissingException)')
    except ESTimeoutError:
        error_messages.append('Error: Connection to Elastic Search timed '
                              'out. (TimeoutError)')
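
All of these snippets revolve around get_doctype_stats, which both returns per-doctype document counts and doubles as a health probe, since it raises when the cluster or the index is unavailable. Below is a minimal sketch of that idea, written against the elasticsearch-py client rather than the pyes client the original code uses, with the cluster address and pre-7.x doc types assumed purely for illustration.

from elasticsearch import Elasticsearch  # sketch only; the real helper uses pyes


def get_doctype_stats(index):
    """Hedged sketch: map each doctype in `index` to its document count.

    Any connection or missing-index problem surfaces as an exception, which
    is what lets the admin views above use this call as a health check.
    """
    es = Elasticsearch(['localhost:9200'])  # assumed cluster address
    mappings = es.indices.get_mapping(index=index)[index]['mappings']
    return dict((doctype, es.count(index=index, doc_type=doctype)['count'])
                for doctype in mappings)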