Example 1
    def setUp(self, switch_is_active):
        switch_is_active.return_value = True

        super(KarmaManagerTests, self).setUp()

        try:
            self.mgr = KarmaManager()
            redis_client('karma').flushdb()
        except RedisError:
            raise SkipTest

        self.user1 = user(save=True)
        self.user2 = user(save=True)
        self.user3 = user(save=True)

        today = date.today()

        # user1 actions (3 + 3 + 7):
        TestAction1(user=self.user1, day=today).save()
        TestAction1(user=self.user1, day=today).save()
        TestAction2(user=self.user1, day=today).save()

        # user2 actions (3 + 7 + 7):
        TestAction1(user=self.user2, day=today - timedelta(days=8)).save()
        TestAction2(user=self.user2, day=today - timedelta(days=32)).save()
        TestAction2(user=self.user2, day=today - timedelta(days=360)).save()

        # user3 actions (3 + 3 + 3 + 7):
        TestAction1(user=self.user3, day=today - timedelta(days=10)).save()
        TestAction1(user=self.user3, day=today - timedelta(days=40)).save()
        TestAction1(user=self.user3, day=today - timedelta(days=190)).save()
        TestAction2(user=self.user3, day=today - timedelta(days=3)).save()
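The try/flushdb/raise SkipTest dance above repeats in most of the test fixtures in this collection. A minimal sketch of how it could be factored into a shared mixin (the RedisTestMixin name and the redis_backend attribute are illustrative, not part of kitsune):

from unittest import SkipTest

from sumo.redis_utils import redis_client, RedisError


class RedisTestMixin(object):
    """Hypothetical mixin: skip a test class when Redis is unavailable."""
    redis_backend = 'default'  # override with 'karma', 'helpfulvotes', ...

    def setUp(self):
        super(RedisTestMixin, self).setUp()
        try:
            self.redis = redis_client(self.redis_backend)
            self.redis.flushdb()
        except RedisError:
            raise SkipTest('Redis is not running')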
Example 2
def handle_reindex(request):
    """Calculates and kicks off indexing tasks"""
    write_index = es_utils.WRITE_INDEX

    # This is truthy if the user wants us to delete and recreate
    # the index first.
    delete_index_first = bool(request.POST.get("delete_index"))

    if delete_index_first:
        # Coming from the delete form, so we reindex all models.
        models_to_index = None
    else:
        # Coming from the reindex form, so we reindex whatever we're
        # told.
        models_to_index = [name.replace("check_", "") for name in request.POST.keys() if name.startswith("check_")]

    # TODO: If this gets fux0rd, then it's possible this could be
    # non-zero and we really want to just ignore it. Need the ability
    # to ignore it.
    try:
        client = redis_client("default")
        val = client.get(OUTSTANDING_INDEX_CHUNKS)
        if val is not None and int(val) > 0:
            raise ReindexError("There are %s outstanding chunks." % val)

        # We don't know how many chunks we're building, but we do want
        # to make sure another reindex request doesn't slide in here
        # and kick off a bunch of chunks.
        #
        # There is a race condition here.
        client.set(OUTSTANDING_INDEX_CHUNKS, 1)
    except RedisError:
        log.warning("Redis not running. Cannot check if there are outstanding tasks.")

    batch_id = create_batch_id()

    # Break up all the things we want to index into chunks. This
    # chunkifies by class then by chunk size.
    chunks = []
    for cls, indexable in get_indexable(search_models=models_to_index):
        chunks.extend((cls, chunk) for chunk in chunked(indexable, CHUNK_SIZE))

    if delete_index_first:
        # The previous lines do a lot of work and take some time to
        # execute.  So we wait until here to wipe and rebuild the
        # index. That reduces the time that there is no index by a little.
        recreate_index()

    chunks_count = len(chunks)

    try:
        client = redis_client("default")
        client.set(OUTSTANDING_INDEX_CHUNKS, chunks_count)
    except RedisError:
        log.warning("Redis not running. Can't denote outstanding tasks.")

    for chunk in chunks:
        index_chunk_task.delay(write_index, batch_id, chunk)

    return HttpResponseRedirect(request.path)
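The comment above concedes a race between reading and writing OUTSTANDING_INDEX_CHUNKS. A sketch of how it could be narrowed, assuming redis_client() hands back a standard redis-py client, is a WATCH/MULTI transaction in place of the separate GET and SET:

from redis.exceptions import WatchError  # assumes a redis-py client


def claim_scoreboard(client):
    """Set OUTSTANDING_INDEX_CHUNKS to 1 atomically, or raise if work is pending."""
    with client.pipeline() as pipe:
        while True:
            try:
                pipe.watch(OUTSTANDING_INDEX_CHUNKS)
                val = pipe.get(OUTSTANDING_INDEX_CHUNKS)
                if val is not None and int(val) > 0:
                    raise ReindexError('There are %s outstanding chunks.' % val)
                pipe.multi()
                pipe.set(OUTSTANDING_INDEX_CHUNKS, 1)
                pipe.execute()
                return
            except WatchError:
                continue  # key changed between WATCH and EXEC; retry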
Example 3
    def setUp(self, switch_is_active):
        switch_is_active.return_value = True

        super(KarmaAPITests, self).setUp()

        try:
            self.mgr = KarmaManager()
            redis_client('karma').flushdb()
        except RedisError:
            raise SkipTest

        self.user1 = user(save=True)
        self.user2 = user(save=True)
        self.user3 = user(save=True)

        TestAction1(user=self.user1).save()
        TestAction2(user=self.user2).save()
        TestAction2(user=self.user2).save()
        TestAction1(user=self.user3).save()
        TestAction1(user=self.user3).save()
        TestAction1(user=self.user3).save()
        self.mgr.update_top()

        self.client.login(username=self.user1.username, password='******')
        add_permission(self.user1, models.Title, 'view_dashboard')
Example 4
 def setUp(self):
     super(KarmaActionTests, self).setUp()
     self.user = user(save=True)
     try:
         self.mgr = KarmaManager()
         redis_client('karma').flushdb()
     except RedisError:
         raise SkipTest
Example 5
def redis_info(request):
    """Admin view that displays redis INFO+CONFIG output for all backends."""
    redis_info = {}
    for key in django_settings.REDIS_BACKENDS.keys():
        redis_info[key] = {}
        client = redis_client(key)
        redis_info[key]['connection'] = django_settings.REDIS_BACKENDS[key]
        try:
            cfg = client.config_get()
            redis_info[key]['config'] = [{
                'key': k,
                'value': cfg[k]
            } for k in sorted(cfg)]
            info = client.info()
            redis_info[key]['info'] = [{
                'key': k,
                'value': info[k]
            } for k in sorted(info)]
        except ConnectionError:
            redis_info[key]['down'] = True

    return render_to_response('kadmin/redis.html', {
        'redis_info': redis_info,
        'title': 'Redis Information'
    }, RequestContext(request, {}))
Example 6
def find_related_documents(doc):
    """
    Returns a QuerySet of related_documents or of the
    parent's related_documents in the case of translations
    """
    if doc.locale == settings.WIKI_DEFAULT_LANGUAGE:
        return doc.related_documents.order_by('-related_to__in_common')[0:5]

    # Not English, so may need related docs which are
    # stored on the English version.
    try:
        redis = redis_client('default')
    except RedisError as e:
        # Problem with Redis. Log and return the related docs.
        statsd.incr('redis.error')
        log.error('Redis error: %s' % e)
        return related_translated_documents(doc)

    doc_key = 'translated_doc_id:%s' % doc.id
    related_ids = redis.lrange(doc_key, 0, -1)
    if related_ids == ['0']:
        return Document.objects.get_empty_query_set()
    if related_ids:
        return Document.objects.filter(id__in=related_ids)

    related = related_translated_documents(doc)
    if not related:
        # Add '0' to prevent recalculation on a known empty set.
        redis.lpush(doc_key, 0)
    else:
        for r in related:
            redis.lpush(doc_key, r.id)
    # Cache expires in 2 hours.
    redis.expire(doc_key, 60 * 60 * 2)
    return related
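The lone '0' entry is a negative-cache sentinel: a known-empty related set is recorded so the expensive related_translated_documents() call is not repeated until the key expires. One key's lifecycle, sketched with an invented document id:

# key = 'translated_doc_id:42'
# lrange -> []          first call computes related docs, finds none, lpush '0'
# lrange -> ['0']       later calls short-circuit to an empty queryset
# after EXPIRE (2 hrs)  the key vanishes and the next call recomputes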
Example 7
 def setUp(self):
     super(TestDocumentLocking, self).setUp()
     try:
         self.redis = redis_client('default')
         self.redis.flushdb()
     except RedisError:
         raise SkipTest
Example 8
 def __init__(self, redis=None):
     if not redis:
         try:
             redis = redis_client(name='karma')
         except RedisError as e:
             log.error('Redis error: %s' % e)
     self.redis = redis
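The optional redis argument lets callers inject their own client; only when it is omitted does the manager build one, and if that fails self.redis stays None, which callers must tolerate. A sketch (FakeRedis is a made-up test double):

class FakeRedis(object):
    """Stand-in that records data instead of talking to Redis."""
    def __init__(self):
        self.data = {}


mgr = KarmaManager(redis=FakeRedis())  # no connection attempted
offline = KarmaManager()  # if Redis is down, offline.redis is None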
Example 9
 def setUp(self):
     super(TopUnhelpfulArticlesCronTests, self).setUp()
     self.REDIS_KEY = settings.HELPFULVOTES_UNHELPFUL_KEY
     try:
         self.redis = redis_client('helpfulvotes')
         self.redis.flushdb()
     except RedisError:
         raise SkipTest
Example 10
def init_karma():
    """Flushes the karma redis backend and populates with fresh data.

    Goes through all questions/answers/votes and save karma actions for them.
    """
    if not waffle.switch_is_active('karma'):
        return

    redis_client('karma').flushdb()

    questions = Question.objects.all()
    for chunk in chunked(questions.values_list('pk', flat=True), 200):
        _process_question_chunk.apply_async(args=[chunk])

    votes = AnswerVote.objects.all()
    for chunk in chunked(votes.values_list('pk', flat=True), 1000):
        _process_answer_vote_chunk.apply_async(args=[chunk])
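Both loops lean on a chunked() helper to split the flat list of pks into fixed-size batches for the Celery tasks. A minimal stand-in consistent with how it is called here (kitsune ships its own helper):

def chunked(iterable, n):
    """Yield successive lists of at most n items from iterable."""
    chunk = []
    for item in iterable:
        chunk.append(item)
        if len(chunk) == n:
            yield chunk
            chunk = []
    if chunk:
        yield chunk

# list(chunked(range(5), 2)) -> [[0, 1], [2, 3], [4]]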
Example 11
    def test_creator_nums_redis(self, switch_is_active):
        """Test creator_num_* pulled from karma data."""
        try:
            KarmaManager()
            redis_client('karma').flushdb()
        except RedisError:
            raise SkipTest

        switch_is_active.return_value = True
        answer = Answer.objects.all()[0]

        AnswerAction(answer.creator).save()
        AnswerAction(answer.creator).save()
        SolutionAction(answer.creator).save()

        eq_(answer.creator_num_solutions, 1)
        eq_(answer.creator_num_answers, 2)
Example 12
 def __init__(self, redis=None):
     if not redis:
         try:
             redis = redis_client(name='karma')
         except RedisError as e:
             statsd.incr('redis.error')
             log.error('Redis error: %s' % e)
     self.redis = redis
Example 13
def get_helpful_graph_async(request):
    doc_data = []
    REDIS_KEY = settings.HELPFULVOTES_UNHELPFUL_KEY

    try:
        redis = redis_client('helpfulvotes')
        length = redis.llen(REDIS_KEY)
        output = redis.lrange(REDIS_KEY, 0, length)
    except RedisError as e:
        log.error('Redis error: %s' % e)
        output = []

    def _format_r(strresult):
        result = strresult.split('::')
        dic = dict(title=result[6].decode('utf-8'),
                   id=result[0],
                   url=reverse('wiki.document_revisions',
                               args=[result[5].decode('utf-8')],
                               locale=settings.WIKI_DEFAULT_LANGUAGE),
                   total=int(float(result[1])),
                   currperc=float(result[2]),
                   diffperc=float(result[3]),
                   colorsize=float(result[4]))

        # Blue #418CC8 = HSB 207/67/78
        # Go from blue to light grey. Grey => smaller number.
        r, g, b = colorsys.hsv_to_rgb(0.575, 1 - dic['colorsize'], .75)
        color_shade = '#%02x%02x%02x' % (255 * r, 255 * g, 255 * b)

        size = math.pow(dic['total'], 0.33) * 1.5

        return {
            'x': 100 * dic['currperc'],
            'y': 100 * dic['diffperc'],
            'total': dic['total'],
            'title': dic['title'],
            'url': dic['url'],
            'currperc': '%.2f' % (100 * dic['currperc']),
            'diffperc': '%+.2f' % (100 * dic['diffperc']),
            'colorsize': dic['colorsize'],
            'marker': {
                'radius': size,
                'fillColor': color_shade
            }
        }

    doc_data = [_format_r(r) for r in output]

    # Format data for Highcharts
    send = {
        'data': [{
            'name': _('Document'),
            'id': 'doc_data',
            'data': doc_data
        }]
    }

    return HttpResponse(json.dumps(send), mimetype='application/json')
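Each list entry parsed by _format_r is a '::'-delimited record written by cache_most_unhelpful_kb_articles (Examples 19 and 20): id::total::currperc::diffperc::colorsize::slug::title. A sample record (values invented) and how the indexes map:

record = '42::12.0::0.25::-0.05::0.8::firefox-crashes::Firefox crashes'
result = record.split('::')
# result[0] -> document id          '42'
# result[1] -> total votes          int(float('12.0')) == 12
# result[2] -> current helpfulness  0.25, rendered as '25.00'
# result[3] -> percentage change    -0.05, rendered as '-5.00'
# result[4] -> marker color weight  0.8 (greyer marker = fewer votes)
# result[5] -> document slug        'firefox-crashes'
# result[6] -> document title       'Firefox crashes'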
Example 14
def _process_answer_vote_chunk(data, **kwargs):
    """Save karma data for a chunk of answer votes."""
    redis = redis_client(name="karma")
    v_qs = AnswerVote.objects.select_related("answer")
    for vote in v_qs.filter(pk__in=data):
        if vote.helpful:
            action_class = AnswerMarkedHelpfulAction
        else:
            action_class = AnswerMarkedNotHelpfulAction
        action_class(vote.answer.creator_id, vote.created).save(async=False, redis=redis)
Example 15
def _process_answer_vote_chunk(data):
    """Save karma data for a chunk of answer votes."""
    redis = redis_client(name='karma')
    v_qs = AnswerVote.objects.select_related('answer')
    for vote in v_qs.filter(pk__in=data):
        if vote.helpful:
            action_class = AnswerMarkedHelpfulAction
        else:
            action_class = AnswerMarkedNotHelpfulAction
        action_class(vote.answer.creator_id, vote.created).save(async=False,
                                                                redis=redis)
Example 16
def get_helpful_graph_async(request):
    doc_data = []
    REDIS_KEY = settings.HELPFULVOTES_UNHELPFUL_KEY

    try:
        redis = redis_client('helpfulvotes')
        length = redis.llen(REDIS_KEY)
        output = redis.lrange(REDIS_KEY, 0, length)
    except RedisError as e:
        log.error('Redis error: %s' % e)
        output = []

    def _format_r(strresult):
        result = strresult.split('::')
        dic = dict(title=result[6].decode('utf-8'),
                   id=result[0],
                   url=reverse('wiki.document_revisions',
                               args=[result[5].decode('utf-8')],
                               locale=settings.WIKI_DEFAULT_LANGUAGE),
                   total=int(float(result[1])),
                   currperc=float(result[2]),
                   diffperc=float(result[3]),
                   colorsize=float(result[4])
                   )

        # Blue #418CC8 = HSB 207/67/78
        # Go from blue to light grey. Grey => smaller number.
        r, g, b = colorsys.hsv_to_rgb(0.575, 1 - dic['colorsize'], .75)
        color_shade = '#%02x%02x%02x' % (255 * r, 255 * g, 255 * b)

        size = math.pow(dic['total'], 0.33) * 1.5

        return {'x': 100 * dic['currperc'],
                'y': 100 * dic['diffperc'],
                'total': dic['total'],
                'title': dic['title'],
                'url': dic['url'],
                'currperc': '%.2f' % (100 * dic['currperc']),
                'diffperc': '%+.2f' % (100 * dic['diffperc']),
                'colorsize': dic['colorsize'],
                'marker': {'radius': size,
                           'fillColor': color_shade}}

    doc_data = [_format_r(r) for r in output]

    # Format data for Highcharts
    send = {'data': [{
                'name': _('Document'),
                'id': 'doc_data',
                'data': doc_data
                }]}

    return HttpResponse(json.dumps(send),
                        mimetype='application/json')
Example 17
 def setUp(self):
     super(HelpfulVotesGraphTests, self).setUp()
     self.user = user(save=True)
     self.client.login(username=self.user.username, password='******')
     self.group = group(name='Contributors', save=True)
     # Without this, there were unrelated failures with l10n dashboard
     self.REDIS_KEY = settings.HELPFULVOTES_UNHELPFUL_KEY
     try:
         self.redis = redis_client('helpfulvotes')
         self.redis.flushdb()
     except RedisError:
         raise SkipTest
Example 18
    def rows(self, max=None):
        REDIS_KEY = settings.HELPFULVOTES_UNHELPFUL_KEY
        try:
            redis = redis_client('helpfulvotes')
            length = redis.llen(REDIS_KEY)
            max_get = max or length
            output = redis.lrange(REDIS_KEY, 0, max_get - 1)  # LRANGE's stop index is inclusive
        except RedisError as e:
            log.error('Redis error: %s' % e)
            output = []

        return [self._format_row(r) for r in output]
Example 19
def cache_most_unhelpful_kb_articles():
    """Calculate and save the most unhelpful KB articles in the past month."""

    REDIS_KEY = settings.HELPFULVOTES_UNHELPFUL_KEY

    old_formatted = _get_old_unhelpful()
    final = _get_current_unhelpful(old_formatted)

    if final == {}:
        return

    def _mean(vals):
        """Argument: List of floats"""
        if len(vals) == 0:
            return None
        return sum(vals) / len(vals)

    def _bayes_avg(C, m, R, v):
        # Bayesian Average
        # C = mean vote, v = number of votes,
        # R = mean rating, m = minimum votes to list in topranked
        return (C * m + R * v) / (m + v)

    mean_perc = _mean([float(final[key]['currperc']) for key in final.keys()])
    mean_total = _mean([float(final[key]['total']) for key in final.keys()])

    #  TODO: Make this into namedtuples
    sorted_final = [(key,
                     final[key]['total'],
                     final[key]['currperc'],
                     final[key]['diffperc'],
                     _bayes_avg(mean_perc, mean_total,
                                final[key]['currperc'],
                                final[key]['total']))
                    for key in final.keys()]
    sorted_final.sort(key=lambda entry: entry[4])  # Sort by Bayesian Avg

    redis = redis_client('helpfulvotes')

    redis.delete(REDIS_KEY)

    max_total = max([b[1] for b in sorted_final])

    for entry in sorted_final:
        doc = Document.objects.get(pk=entry[0])
        redis.rpush(REDIS_KEY, (u'%s::%s::%s::%s::%s::%s::%s' %
                                  (entry[0],  # Document ID
                                   entry[1],  # Total Votes
                                   entry[2],  # Current Percentage
                                   entry[3],  # Difference in Percentage
                                   1 - (entry[1] / max_total),  # Graph Color
                                   doc.slug,  # Document slug
                                   doc.title)))  # Document title
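To make the _bayes_avg weighting concrete: an article's raw helpfulness R is pulled toward the site-wide mean C in proportion to how few votes v it has relative to the mean vote count m. A quick worked check with invented numbers:

def _bayes_avg(C, m, R, v):
    return (C * m + R * v) / (m + v)

# Site-wide mean helpfulness 0.60 over a mean of 20.0 votes per article:
_bayes_avg(0.60, 20.0, 0.10, 2)    # -> ~0.555: two votes barely move the prior
_bayes_avg(0.60, 20.0, 0.10, 200)  # -> ~0.145: 200 votes dominate it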
Example 20
File: cron.py Project: ibai/kitsune
def cache_most_unhelpful_kb_articles():
    """Calculate and save the most unhelpful KB articles in the past month."""

    REDIS_KEY = settings.HELPFULVOTES_UNHELPFUL_KEY

    old_formatted = _get_old_unhelpful()
    final = _get_current_unhelpful(old_formatted)

    if final == {}:
        return

    def _mean(vals):
        """Argument: List of floats"""
        if len(vals) == 0:
            return None
        return sum(vals) / len(vals)

    def _bayes_avg(C, m, R, v):
        # Bayesian Average
        # C = mean vote, v = number of votes,
        # R = mean rating, m = minimum votes to list in topranked
        return (C * m + R * v) / (m + v)

    mean_perc = _mean([float(final[key]['currperc']) for key in final.keys()])
    mean_total = _mean([float(final[key]['total']) for key in final.keys()])

    #  TODO: Make this into namedtuples
    sorted_final = [(key, final[key]['total'], final[key]['currperc'],
                     final[key]['diffperc'],
                     _bayes_avg(mean_perc, mean_total, final[key]['currperc'],
                                final[key]['total'])) for key in final.keys()]
    sorted_final.sort(key=lambda entry: entry[4])  # Sort by Bayesian Avg

    redis = redis_client('helpfulvotes')

    redis.delete(REDIS_KEY)

    max_total = max([b[1] for b in sorted_final])

    for entry in sorted_final:
        doc = Document.objects.get(pk=entry[0])
        redis.rpush(
            REDIS_KEY,
            (
                u'%s::%s::%s::%s::%s::%s::%s' % (
                    entry[0],  # Document ID
                    entry[1],  # Total Votes
                    entry[2],  # Current Percentage
                    entry[3],  # Difference in Percentage
                    1 - (entry[1] / max_total),  # Graph Color
                    doc.slug,  # Document slug
                    doc.title)))  # Document title
Example 21
def _document_lock_check(document_id):
    """Check for a lock on a document.

    Returns the username of the user that has the page locked, or ``None`` if
    no user has a lock.
    """
    try:
        redis = redis_client(name='default')
        key = _document_lock_key.format(id=document_id)
        return redis.get(key)
    except RedisError as e:
        statsd.incr('redis.error')
        log.error('Redis error: %s' % e)
        return None
Example 22
def handle_reset(request):
    """Resets the redis scoreboard we use

    Why? The reason you'd want to reset it is if the system gets
    itself into a hosed state where the redis scoreboard says there
    are outstanding tasks, but there aren't. If you enter that state,
    this lets you reset the scoreboard.
    """
    try:
        client = redis_client("default")
        client.set(OUTSTANDING_INDEX_CHUNKS, 0)
    except RedisError:
        log.warning("Redis not running. Cannot check if there are outstanding tasks.")
    return HttpResponseRedirect(request.path)
Example 23
def landing(request):
    """Customer Care Landing page."""

    # Get a redis client
    redis = None
    try:
        redis = redis_client(name='default')
    except RedisError as e:
        statsd.incr('redis.error')
        log.error('Redis error: %s' % e)

    contributor_stats = redis and redis.get(settings.CC_TOP_CONTRIB_CACHE_KEY)
    if contributor_stats:
        contributor_stats = json.loads(contributor_stats)
        statsd.incr('customercare.stats.contributors.hit')
    else:
        statsd.incr('customercare.stats.contributors.miss')

    try:
        twitter_user = (request.twitter.api.auth.get_username()
                        if request.twitter.authed else None)
    except tweepy.TweepError:
        # Bad oauth token. Create a new session so user re-auths.
        twitter_user = None
        request.twitter = twitter.Session()

    yesterday = datetime.now() - timedelta(days=1)

    recent_replied_count = _count_answered_tweets(since=yesterday)

    return render(request, 'customercare/landing.html', {
        'contributor_stats': contributor_stats,
        'canned_responses': get_common_replies(request.LANGUAGE_CODE),
        'tweets': _get_tweets(locale=request.LANGUAGE_CODE,
                              https=request.is_secure()),
        'authed': request.twitter.authed,
        'twitter_user': twitter_user,
        'filters': FILTERS,
        'goal': settings.CC_REPLIES_GOAL,
        'recent_replied_count': recent_replied_count,
    })
Example 24
    def test_stored_in_redis(self):
        key = settings.CC_TOP_CONTRIB_CACHE_KEY
        try:
            redis = redis_client(name='default')
            # Other tests are lame and don't clean up after themselves.
            # This also verifies that Redis is alive and well.
            redis.delete(key)
        except RedisError:
            raise SkipTest

        get_customercare_stats()

        blob = redis.get(key)
        stats = json.loads(blob)
        eq_(len(stats), 2)
Example 25
def handle_reset(request):
    """Resets the redis scoreboard we use

    Why? The reason you'd want to reset it is if the system gets
    itself into a hosed state where the redis scoreboard says there
    are outstanding tasks, but there aren't. If you enter that state,
    this lets you reset the scoreboard.
    """
    try:
        client = redis_client('default')
        client.set(OUTSTANDING_INDEX_CHUNKS, 0)
    except RedisError:
        log.warning('Redis not running. Cannot check if there are '
                    'outstanding tasks.')
    return HttpResponseRedirect(request.path)
Example 26
def _process_question_chunk(data, **kwargs):
    """Save karma data for a chunk of questions."""
    redis = redis_client(name="karma")
    q_qs = Question.objects.select_related("solution").defer("content")
    for question in q_qs.filter(pk__in=data):
        first = True
        a_qs = question.answers.order_by("created").select_related("creator")
        for answer in a_qs.values_list("creator", "created"):
            AnswerAction(answer[0], answer[1]).save(async=False, redis=redis)
            if first:
                FirstAnswerAction(answer[0], answer[1]).save(async=False, redis=redis)
                first = False
        soln = question.solution
        if soln:
            SolutionAction(soln.creator, soln.created).save(async=False, redis=redis)
Example 27
def _document_lock_steal(document_id, user_name, expire_time=60 * 15):
    """Lock a document for a user.

    Note that this does not check if the page is already locked, and simply
    sets the lock on the page.
    """
    try:
        redis = redis_client(name='default')
        key = _document_lock_key.format(id=document_id)
        it_worked = redis.set(key, user_name)
        redis.expire(key, expire_time)
        return it_worked
    except RedisError as e:
        statsd.incr('redis.error')
        log.error('Redis error: %s' % e)
        return False
Example 28
def index_chunk_task(write_index, batch_id, chunk):
    """Index a chunk of things.

    :arg write_index: the name of the index to index to
    :arg batch_id: the name for the batch this chunk belongs to
    :arg chunk: a (class, id_list) of things to index
    """
    # Need to import Record here to prevent circular import
    from search.models import Record

    cls, id_list = chunk

    task_name = '{0} {1} -> {2}'.format(
        cls.get_model_name(), id_list[0], id_list[-1])

    rec = Record(
        starttime=datetime.datetime.now(),
        text=(u'Batch: %s Task: %s: Reindexing into %s' % (
                batch_id, task_name, write_index)))
    rec.save()

    try:
        # Pin to master db to avoid replication lag issues and stale
        # data.
        pin_this_thread()

        index_chunk(cls, id_list, reraise=True)

    except Exception:
        rec.text = (u'%s: Errored out %s %s' % (
                rec.text, sys.exc_type, sys.exc_value))
        # Some exceptions aren't pickleable and we need this to throw
        # things that are pickleable.
        raise IndexingTaskError()

    finally:
        unpin_this_thread()
        rec.endtime = datetime.datetime.now()
        rec.save()

        try:
            client = redis_client('default')
            client.decr(OUTSTANDING_INDEX_CHUNKS, 1)
        except RedisError:
            # If Redis isn't running, then we just log that the task
            # was completed.
            log.info('Index task %s completed.', task_name)
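The chunk argument is one of the (class, id_list) tuples built by handle_reindex (Example 2). In outline, with invented models and ids:

# Built in handle_reindex, CHUNK_SIZE ids per tuple:
#   chunks = [(Document, [1, 2, 3]), (Document, [4, 5]), (Question, [7, 9])]
#   for chunk in chunks:
#       index_chunk_task.delay(write_index, batch_id, chunk)
#
# Inside the task each tuple is unpacked again:
#   cls, id_list = chunk
#   task_name -> 'document 1 -> 3' (assuming get_model_name() returns 'document')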
Example 29
def index_chunk_task(write_index, batch_id, chunk):
    """Index a chunk of things.

    :arg write_index: the name of the index to index to
    :arg batch_id: the name for the batch this chunk belongs to
    :arg chunk: a (class, id_list) of things to index
    """
    # Need to import Record here to prevent circular import
    from search.models import Record

    cls, id_list = chunk

    task_name = '{0} {1} -> {2}'.format(
        cls.get_mapping_type_name(), id_list[0], id_list[-1])

    rec = Record(
        starttime=datetime.datetime.now(),
        text=(u'Batch: %s Task: %s: Reindexing into %s' % (
                batch_id, task_name, write_index)))
    rec.save()

    try:
        # Pin to master db to avoid replication lag issues and stale
        # data.
        pin_this_thread()

        index_chunk(cls, id_list, reraise=True)

    except Exception:
        rec.text = (u'%s: Errored out %s %s' % (
                rec.text, sys.exc_type, sys.exc_value))
        # Some exceptions aren't pickleable and we need this to throw
        # things that are pickleable.
        raise IndexingTaskError()

    finally:
        unpin_this_thread()
        rec.endtime = datetime.datetime.now()
        rec.save()

        try:
            client = redis_client('default')
            client.decr(OUTSTANDING_INDEX_CHUNKS, 1)
        except RedisError:
            # If Redis isn't running, then we just log that the task
            # was completed.
            log.info('Index task %s completed.', task_name)
Example 30
def _process_question_chunk(data):
    """Save karma data for a chunk of questions."""
    redis = redis_client(name='karma')
    q_qs = Question.objects.select_related('solution').defer('content')
    for question in q_qs.filter(pk__in=data):
        first = True
        a_qs = question.answers.order_by('created').select_related('creator')
        for answer in a_qs.values_list('creator', 'created'):
            AnswerAction(answer[0], answer[1]).save(async=False, redis=redis)
            if first:
                FirstAnswerAction(answer[0], answer[1]).save(async=False,
                                                             redis=redis)
                first = False
        soln = question.solution
        if soln:
            SolutionAction(soln.creator, soln.created).save(async=False,
                                                            redis=redis)
Example 31
class UnhelpfulReadout(Readout):
    title = _lazy(u'Unhelpful Documents')

    short_title = _lazy(u'Unhelpful', 'document')
    details_link_text = _lazy(u'All unhelpful articles...')
    slug = 'unhelpful'
    column3_label = _lazy(u'Total Votes')
    column4_label = _lazy(u'Helpfulness')
    modes = []
    default_mode = None

    # This class is a namespace and doesn't get instantiated.
    key = settings.HELPFULVOTES_UNHELPFUL_KEY
    try:
        hide_readout = redis_client('helpfulvotes').llen(key) == 0
    except RedisError as e:
        log.error('Redis error: %s' % e)
        hide_readout = True

    def rows(self, max=None):
        REDIS_KEY = settings.HELPFULVOTES_UNHELPFUL_KEY
        try:
            redis = redis_client('helpfulvotes')
            length = redis.llen(REDIS_KEY)
            max_get = max or length
            output = redis.lrange(REDIS_KEY, 0, max_get - 1)  # LRANGE's stop index is inclusive
        except RedisError as e:
            log.error('Redis error: %s' % e)
            output = []

        return [self._format_row(r) for r in output]

    def _format_row(self, strresult):
        result = strresult.split('::')
        helpfulness = Markup('<span title="%+.1f%%">%.1f%%</span>' %
                             (float(result[3]) * 100, float(result[2]) * 100))
        return dict(title=result[6].decode('utf-8'),
                    url=reverse('wiki.document_revisions',
                                args=[unicode(result[5], "utf-8")],
                                locale=self.locale),
                    visits=int(float(result[1])),
                    custom=True,
                    column4_data=helpfulness)
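Note that the try/except sits directly in the class body, so it runs once at import time (hence the comment that the class is a namespace): hide_readout is computed when the module loads, not per request. The same shape in miniature:

def _probe():
    raise RuntimeError('backend down')


class ImportTimeDemo(object):
    # Class bodies execute when the module is imported, so any failure
    # must be caught here or importing the module blows up.
    try:
        hide = _probe()
    except RuntimeError:
        hide = True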
Example 32
def landing(request):
    """Customer Care Landing page."""

    # Get a redis client
    redis = None
    try:
        redis = redis_client(name='default')
    except RedisError as e:
        statsd.incr('redis.error')
        log.error('Redis error: %s' % e)

    contributor_stats = redis and redis.get(settings.CC_TOP_CONTRIB_CACHE_KEY)
    if contributor_stats:
        contributor_stats = json.loads(contributor_stats)
        statsd.incr('customercare.stats.contributors.hit')
    else:
        statsd.incr('customercare.stats.contributors.miss')

    try:
        twitter_user = (request.twitter.api.auth.get_username() if
                        request.twitter.authed else None)
    except tweepy.TweepError:
        # Bad oauth token. Create a new session so user re-auths.
        twitter_user = None
        request.twitter = twitter.Session()

    yesterday = datetime.now() - timedelta(days=1)

    recent_replied_count = _count_answered_tweets(since=yesterday)

    return jingo.render(request, 'customercare/landing.html', {
        'contributor_stats': contributor_stats,
        'canned_responses': get_common_replies(request.locale),
        'tweets': _get_tweets(locale=request.locale,
                              https=request.is_secure()),
        'authed': request.twitter.authed,
        'twitter_user': twitter_user,
        'filters': FILTERS,
        'goal': settings.CC_REPLIES_GOAL,
        'recent_replied_count': recent_replied_count,
    })
Example 33
def redis_info(request):
    """Admin view that displays redis INFO+CONFIG output for all backends."""
    redis_info = {}
    for key in django_settings.REDIS_BACKENDS.keys():
        redis_info[key] = {}
        client = redis_client(key)
        redis_info[key]['connection'] = django_settings.REDIS_BACKENDS[key]
        try:
            cfg = client.config_get()
            redis_info[key]['config'] = [{'key': k, 'value': cfg[k]} for k in
                                         sorted(cfg)]
            info = client.info()
            redis_info[key]['info'] = [{'key': k, 'value': info[k]} for k in
                                       sorted(info)]
        except ConnectionError:
            redis_info[key]['down'] = True

    return render_to_response('kadmin/redis.html',
                              {'redis_info': redis_info,
                               'title': 'Redis Information'},
                              RequestContext(request, {}))
Example 34
def index_chunk_task(write_index, batch_id, chunk):
    """Index a chunk of things.

    :arg write_index: the name of the index to index to
    :arg batch_id: the name for the batch this chunk belongs to
    :arg chunk: a (class, id_list) of things to index
    """
    # Need to import Record here to prevent circular import
    from search.models import Record

    cls, id_list = chunk

    task_name = '%s %d -> %d' % (cls.get_model_name(), id_list[0], id_list[-1])

    rec = Record(
        starttime=datetime.datetime.now(),
        text=(u'Batch: %s Task: %s: Reindexing into %s' % (
                batch_id, task_name, write_index)))
    rec.save()

    try:
        index_chunk(cls, id_list, reraise=True)

    except Exception:
        rec.text = (u'%s: Errored out %s %s' % (
                rec.text, sys.exc_type, sys.exc_value))
        raise
    finally:
        rec.endtime = datetime.datetime.now()
        rec.save()

        try:
            client = redis_client('default')
            client.decr(OUTSTANDING_INDEX_CHUNKS, 1)
        except RedisError:
            # If Redis isn't running, then we just log that the task
            # was completed.
            log.info('Index task %s completed.', task_name)
Example 35
def _document_lock_clear(document_id, user_name):
    """Remove a lock from a document.

    This would be used to indicate the given user no longer wants the page
    locked, so the lock should be cleared.

    If the `user` parameter does not match the current lock, the lock remains
    in place.

    Returns true if the lock was removed, false otherwise.
    """
    try:
        redis = redis_client(name='default')
        key = _document_lock_key.format(id=document_id)
        locked_by = redis.get(key)
        if locked_by == user_name:
            return redis.delete(key)
        else:
            return False
    except RedisError as e:
        statsd.incr('redis.error')
        log.error('Redis error: %s' % e)
        return False
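Together with _document_lock_check and _document_lock_steal (Examples 21 and 27), this completes a soft locking protocol. The intended lifecycle, sketched with invented names; note that the GET-then-DELETE pair above is itself not atomic, so a strict guarantee would need WATCH or a Lua script:

_document_lock_steal(42, 'alice')   # alice holds the lock for up to 15 minutes
_document_lock_check(42)            # -> 'alice'
_document_lock_clear(42, 'bob')     # -> False: bob does not hold the lock
_document_lock_clear(42, 'alice')   # -> truthy: lock released
_document_lock_check(42)            # -> None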
Example 36
from django.conf import settings

from sumo.redis_utils import redis_client, RedisError
from customercare.cron import get_customercare_stats

try:
    print "Removing old data"
    redis = redis_client(name='default')
    redis.delete(settings.CC_TOP_CONTRIB_CACHE_KEY)

    print "Collecting new data."
    get_customercare_stats()

    print "Done"
except RedisError:
    print "This migration needs Redis to be done."
Example 37
def landing(request):
    """Customer Care Landing page."""

    # Get a redis client
    redis = None
    try:
        redis = redis_client(name='default')
    except RedisError as e:
        statsd.incr('redis.error')
        log.error('Redis error: %s' % e)
    # Stats. See customercare.cron.get_customercare_stats.
    activity = redis and redis.get(settings.CC_TWEET_ACTIVITY_CACHE_KEY)
    if activity:
        activity = json.loads(activity)
    if activity and 'resultset' in activity:
        statsd.incr('customercare.stats.activity.hit')
        activity_stats = []
        for act in activity['resultset']:
            if act is None:  # Sometimes we get bad data here.
                continue
            activity_stats.append((act[0], {
                'requests': format_number(act[1], locale='en_US'),
                'replies': format_number(act[2], locale='en_US'),
                'perc': act[3] * 100,
            }))
    else:
        statsd.incr('customercare.stats.activity.miss')
        activity_stats = []

    contributors = redis and redis.get(settings.CC_TOP_CONTRIB_CACHE_KEY)
    if contributors:
        contributors = json.loads(contributors)
    if contributors and 'resultset' in contributors:
        statsd.incr('customercare.stats.contributors.hit')
        contributor_stats = {}
        for contrib in contributors['resultset']:
            # Create one list per time period
            period = contrib[1]
            if not contributor_stats.get(period):
                contributor_stats[period] = []
            elif len(contributor_stats[period]) == 16:
                # Show a max. of 16 people.
                continue

            contributor_stats[period].append({
                'name': contrib[2],
                'username': contrib[3],
                'count': contrib[4],
                'avatar': contributors['avatars'].get(contrib[3]),
            })
    else:
        statsd.incr('customercare.stats.contributors.miss')
        contributor_stats = {}

    try:
        twitter_user = (request.twitter.api.auth.get_username()
                        if request.twitter.authed else None)
    except tweepy.TweepError:
        # Bad oauth token. Create a new session so user re-auths.
        twitter_user = None
        request.twitter = twitter.Session()

    yesterday = datetime.now() - timedelta(days=1)

    recent_replied_count = _count_answered_tweets(since=yesterday)

    return jingo.render(
        request, 'customercare/landing.html', {
            'activity_stats': activity_stats,
            'contributor_stats': contributor_stats,
            'canned_responses': get_common_replies(request.locale),
            'tweets': _get_tweets(locale=request.locale,
                                  https=request.is_secure()),
            'authed': request.twitter.authed,
            'twitter_user': twitter_user,
            'filters': FILTERS,
            'goal': settings.CC_REPLIES_GOAL,
            'recent_replied_count': recent_replied_count,
        })
Example 38
def monitor(request):
    """View for services monitor."""
    status = {}

    # Note: To add a new component to the services monitor, do your
    # testing and then add a name -> list of output tuples map to
    # status.

    # Check memcached.
    memcache_results = []
    try:
        for cache_name, cache_props in settings.CACHES.items():
            result = True
            backend = cache_props['BACKEND']
            location = cache_props['LOCATION']

            # LOCATION can be a string or a list of strings
            if isinstance(location, basestring):
                location = location.split(';')

            if 'memcache' in backend:
                for loc in location:
                    # TODO: this doesn't handle unix: variant
                    ip, port = loc.split(':')
                    result = test_memcached(ip, int(port))
                    memcache_results.append(
                        (INFO, '%s:%s %s' % (ip, port, result)))

        if not memcache_results:
            memcache_results.append((ERROR, 'memcache is not configured.'))

        elif len(memcache_results) < 2:
            memcache_results.append(
                (ERROR, ('You should have at least 2 memcache servers. '
                         'You have %s.' % len(memcache_results))))

        else:
            memcache_results.append((INFO, 'memcached servers look good.'))

    except Exception as exc:
        memcache_results.append(
            (ERROR, 'Exception while looking at memcached: %s' % str(exc)))

    status['memcached'] = memcache_results

    # Check Libraries and versions
    libraries_results = []
    try:
        Image.new('RGB', (16, 16)).save(StringIO.StringIO(), 'JPEG')
        libraries_results.append((INFO, 'PIL+JPEG: Got it!'))
    except Exception as exc:
        libraries_results.append(
            (ERROR,
             'PIL+JPEG: Probably missing: '
             'Failed to create a jpeg image: %s' % exc))

    status['libraries'] = libraries_results

    # Check file paths.
    msg = 'We want read + write.'
    filepaths = (
        (settings.USER_AVATAR_PATH, os.R_OK | os.W_OK, msg),
        (settings.IMAGE_UPLOAD_PATH, os.R_OK | os.W_OK, msg),
        (settings.THUMBNAIL_UPLOAD_PATH, os.R_OK | os.W_OK, msg),
        (settings.GALLERY_IMAGE_PATH, os.R_OK | os.W_OK, msg),
        (settings.GALLERY_IMAGE_THUMBNAIL_PATH, os.R_OK | os.W_OK, msg),
        (settings.GALLERY_VIDEO_PATH, os.R_OK | os.W_OK, msg),
        (settings.GALLERY_VIDEO_THUMBNAIL_PATH, os.R_OK | os.W_OK, msg),
        (settings.GROUP_AVATAR_PATH, os.R_OK | os.W_OK, msg),
    )

    filepath_results = []
    for path, perms, notes in filepaths:
        path = os.path.join(settings.MEDIA_ROOT, path)
        path_exists = os.path.isdir(path)
        path_perms = os.access(path, perms)

        if path_exists and path_perms:
            filepath_results.append(
                (INFO, '%s: %s %s %s' % (path, path_exists, path_perms,
                                         notes)))

    status['filepaths'] = filepath_results

    # Check RabbitMQ.
    rabbitmq_results = []
    try:
        rabbit_conn = establish_connection(connect_timeout=2)
        rabbit_conn.connect()
        rabbitmq_results.append(
            (INFO, 'Successfully connected to RabbitMQ.'))

    except (socket.error, IOError) as exc:
        rabbitmq_results.append(
            (ERROR, 'Error connecting to RabbitMQ: %s' % str(exc)))

    except Exception as exc:
        rabbitmq_results.append(
            (ERROR, 'Exception while looking at RabbitMQ: %s' % str(exc)))

    status['RabbitMQ'] = rabbitmq_results

    # Check ES.
    es_results = []
    try:
        es_utils.get_doctype_stats(es_utils.READ_INDEX)
        es_results.append(
            (INFO, ('Successfully connected to ElasticSearch and index '
                    'exists.')))

    except pyes.urllib3.MaxRetryError as exc:
        es_results.append(
            (ERROR, 'Cannot connect to ElasticSearch: %s' % str(exc)))

    except pyes.exceptions.IndexMissingException:
        es_results.append(
            (ERROR, 'Index "%s" missing.' % es_utils.READ_INDEX))

    except Exception as exc:
        es_results.append(
            (ERROR, 'Exception while looking at ElasticSearch: %s' % str(exc)))

    status['ElasticSearch'] = es_results

    # Check Celery.
    # start = time.time()
    # pong = celery.task.ping()
    # rabbit_results = r = {'duration': time.time() - start}
    # status_summary['rabbit'] = pong == 'pong' and r['duration'] < 1

    # Check Redis.
    redis_results = []
    if hasattr(settings, 'REDIS_BACKENDS'):
        for backend in settings.REDIS_BACKENDS:
            try:
                redis_client(backend)
                redis_results.append((INFO, '%s: Pass!' % backend))
            except RedisError:
                redis_results.append((ERROR, '%s: Fail!' % backend))
    status['Redis'] = redis_results

    status_code = 200

    status_summary = {}
    for component, output in status.items():
        if ERROR in [item[0] for item in output]:
            status_code = 500
            status_summary[component] = False
        else:
            status_summary[component] = True

    return jingo.render(request, 'services/monitor.html',
                        {'component_status': status,
                         'status_summary': status_summary},
                        status=status_code)
Example 39
def monitor(request):
    """View for services monitor."""
    status = {}

    # Note: To add a new component to the services monitor, do your
    # testing and then add a name -> list of output tuples map to
    # status.

    # Check memcached.
    memcache_results = []
    try:
        for cache_name, cache_props in settings.CACHES.items():
            result = True
            backend = cache_props['BACKEND']
            location = cache_props['LOCATION']

            # LOCATION can be a string or a list of strings
            if isinstance(location, basestring):
                location = location.split(';')

            if 'memcache' in backend:
                for loc in location:
                    # TODO: this doesn't handle unix: variant
                    ip, port = loc.split(':')
                    result = test_memcached(ip, int(port))
                    memcache_results.append(
                        (INFO, '%s:%s %s' % (ip, port, result)))

        if not memcache_results:
            memcache_results.append((ERROR, 'memcache is not configured.'))

        elif len(memcache_results) < 2:
            memcache_results.append(
                (ERROR, ('You should have at least 2 memcache servers. '
                         'You have %s.' % len(memcache_results))))

        else:
            memcache_results.append((INFO, 'memcached servers look good.'))

    except Exception as exc:
        memcache_results.append(
            (ERROR, 'Exception while looking at memcached: %s' % str(exc)))

    status['memcached'] = memcache_results

    # Check Libraries and versions
    libraries_results = []
    try:
        Image.new('RGB', (16, 16)).save(StringIO.StringIO(), 'JPEG')
        libraries_results.append((INFO, 'PIL+JPEG: Got it!'))
    except Exception as exc:
        libraries_results.append((ERROR, 'PIL+JPEG: Probably missing: '
                                  'Failed to create a jpeg image: %s' % exc))

    status['libraries'] = libraries_results

    # Check file paths.
    msg = 'We want read + write.'
    filepaths = (
        (settings.USER_AVATAR_PATH, os.R_OK | os.W_OK, msg),
        (settings.IMAGE_UPLOAD_PATH, os.R_OK | os.W_OK, msg),
        (settings.THUMBNAIL_UPLOAD_PATH, os.R_OK | os.W_OK, msg),
        (settings.GALLERY_IMAGE_PATH, os.R_OK | os.W_OK, msg),
        (settings.GALLERY_IMAGE_THUMBNAIL_PATH, os.R_OK | os.W_OK, msg),
        (settings.GALLERY_VIDEO_PATH, os.R_OK | os.W_OK, msg),
        (settings.GALLERY_VIDEO_THUMBNAIL_PATH, os.R_OK | os.W_OK, msg),
        (settings.GROUP_AVATAR_PATH, os.R_OK | os.W_OK, msg),
    )

    filepath_results = []
    for path, perms, notes in filepaths:
        path = os.path.join(settings.MEDIA_ROOT, path)
        path_exists = os.path.isdir(path)
        path_perms = os.access(path, perms)

        if path_exists and path_perms:
            filepath_results.append(
                (INFO,
                 '%s: %s %s %s' % (path, path_exists, path_perms, notes)))

    status['filepaths'] = filepath_results

    # Check RabbitMQ.
    rabbitmq_results = []
    try:
        rabbit_conn = establish_connection(connect_timeout=2)
        rabbit_conn.connect()
        rabbitmq_results.append((INFO, 'Successfully connected to RabbitMQ.'))

    except (socket.error, IOError) as exc:
        rabbitmq_results.append(
            (ERROR, 'Error connecting to RabbitMQ: %s' % str(exc)))

    except Exception as exc:
        rabbitmq_results.append(
            (ERROR, 'Exception while looking at RabbitMQ: %s' % str(exc)))

    status['RabbitMQ'] = rabbitmq_results

    # Check ES.
    es_results = []
    try:
        es_utils.get_doctype_stats(es_utils.READ_INDEX)
        es_results.append(
            (INFO, ('Successfully connected to ElasticSearch and index '
                    'exists.')))

    except es_utils.ES_EXCEPTIONS as exc:
        es_results.append((ERROR, 'ElasticSearch problem: %s' % str(exc)))

    except Exception as exc:
        es_results.append(
            (ERROR, 'Exception while looking at ElasticSearch: %s' % str(exc)))

    status['ElasticSearch'] = es_results

    # Check Celery.
    # start = time.time()
    # pong = celery.task.ping()
    # rabbit_results = r = {'duration': time.time() - start}
    # status_summary['rabbit'] = pong == 'pong' and r['duration'] < 1

    # Check Redis.
    redis_results = []
    if hasattr(settings, 'REDIS_BACKENDS'):
        for backend in settings.REDIS_BACKENDS:
            try:
                redis_client(backend)
                redis_results.append((INFO, '%s: Pass!' % backend))
            except RedisError:
                redis_results.append((ERROR, '%s: Fail!' % backend))
    status['Redis'] = redis_results

    status_code = 200

    status_summary = {}
    for component, output in status.items():
        if ERROR in [item[0] for item in output]:
            status_code = 500
            status_summary[component] = False
        else:
            status_summary[component] = True

    return render(request,
                  'services/monitor.html', {
                      'component_status': status,
                      'status_summary': status_summary
                  },
                  status=status_code)
Example 40
        except ESIndexMissingException:
            write_stats = None
        indexes = get_indexes()
        indexes.sort(key=lambda m: m[0])
    except ESMaxRetryError:
        error_messages.append('Error: Elastic Search is not set up on this '
                              'machine or is not responding. (MaxRetryError)')
    except ESIndexMissingException:
        error_messages.append('Error: Index is missing. Press the reindex '
                              'button below. (IndexMissingException)')
    except ESTimeoutError:
        error_messages.append('Error: Connection to Elastic Search timed out. '
                              '(TimeoutError)')

    try:
        client = redis_client('default')
        outstanding_chunks = int(client.get(OUTSTANDING_INDEX_CHUNKS))
    except (RedisError, TypeError):
        outstanding_chunks = None

    recent_records = Record.uncached.order_by('-starttime')[:20]

    return render_to_response(
        'search/admin/search.html', {
            'title': 'Search',
            'doctype_stats': stats,
            'doctype_write_stats': write_stats,
            'indexes': indexes,
            'read_index': es_utils.READ_INDEX,
            'write_index': es_utils.WRITE_INDEX,
            'error_messages': error_messages,
Example 41
def handle_reindex(request):
    """Calculates and kicks off indexing tasks"""
    write_index = es_utils.WRITE_INDEX

    # This is truthy if the user wants us to delete and recreate
    # the index first.
    delete_index_first = bool(request.POST.get('delete_index'))

    if delete_index_first:
        # Coming from the delete form, so we reindex all models.
        models_to_index = None
    else:
        # Coming from the reindex form, so we reindex whatever we're
        # told.
        models_to_index = [
            name.replace('check_', '') for name in request.POST.keys()
            if name.startswith('check_')
        ]

    # TODO: If this gets fux0rd, then it's possible this could be
    # non-zero and we really want to just ignore it. Need the ability
    # to ignore it.
    try:
        client = redis_client('default')
        val = client.get(OUTSTANDING_INDEX_CHUNKS)
        if val is not None and int(val) > 0:
            raise ReindexError('There are %s outstanding chunks.' % val)

        # We don't know how many chunks we're building, but we do want
        # to make sure another reindex request doesn't slide in here
        # and kick off a bunch of chunks.
        #
        # There is a race condition here.
        client.set(OUTSTANDING_INDEX_CHUNKS, 1)
    except RedisError:
        log.warning('Redis not running. Cannot check if there are '
                    'outstanding tasks.')

    batch_id = create_batch_id()

    # Break up all the things we want to index into chunks. This
    # chunkifies by class then by chunk size.
    chunks = []
    for cls, indexable in get_indexable(search_models=models_to_index):
        chunks.extend((cls, chunk) for chunk in chunked(indexable, CHUNK_SIZE))

    if delete_index_first:
        # The previous lines do a lot of work and take some time to
        # execute.  So we wait until here to wipe and rebuild the
        # index. That reduces the time that there is no index by a little.
        recreate_index()

    chunks_count = len(chunks)

    try:
        client = redis_client('default')
        client.set(OUTSTANDING_INDEX_CHUNKS, chunks_count)
    except RedisError:
        log.warning('Redis not running. Can\'t record outstanding tasks.')

    for chunk in chunks:
        index_chunk_task.delay(write_index, batch_id, chunk)

    return HttpResponseRedirect(request.path)
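
The comment above is candid about the check-then-set race: two overlapping reindex requests can both read a zero (or missing) counter and both kick off chunks. One way to narrow that window, as a sketch rather than the project's actual fix, is to claim the counter atomically with SETNX, which succeeds for at most one caller when the key is absent. The key name here is illustrative.

from redis import RedisError


def try_claim_reindex(client, key='search:outstanding-index-chunks'):
    """Atomically claim the outstanding-chunks counter.

    SETNX sets the key only if it does not already exist, so at most
    one of two concurrent callers gets True back. This assumes the
    counter key is deleted, not set to 0, when a reindex finishes.
    """
    try:
        return bool(client.setnx(key, 1))
    except RedisError:
        # Mirror the original's degraded behaviour: without Redis there
        # is no guard, so let the reindex proceed.
        return True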
Example no. 52
        rabbitmq_results = Markup("There was an error connecting to "
                                  "RabbitMQ!<br/>%s" % str(e))
        rabbitmq_status = False
    status_summary["rabbitmq"] = rabbitmq_status

    # Check Celery.
    # start = time.time()
    # pong = celery.task.ping()
    # rabbit_results = r = {'duration': time.time() - start}
    # status_summary['rabbit'] = pong == 'pong' and r['duration'] < 1

    # Check Redis.
    redis_results = {}
    if hasattr(settings, "REDIS_BACKENDS"):
        for backend in settings.REDIS_BACKENDS:
            try:
                c = redis_client(backend)
                redis_results[backend] = c.info()
            except RedisError:
                redis_results[backend] = False
    status_summary["redis"] = all(redis_results.values())

    if not all(status_summary.values()):
        status = 500

    return jingo.render(
        request,
        "services/monitor.html",
        {
            "memcache_results": memcache_results,
            "libraries_results": libraries_results,
            "filepath_results": filepath_results,
Example no. 53
def landing(request):
    """Customer Care Landing page."""

    # Get a redis client
    redis = None
    try:
        redis = redis_client(name='default')
    except RedisError as e:
        statsd.incr('redis.error')
        log.error('Redis error: %s' % e)
    # Stats. See customercare.cron.get_customercare_stats.
    activity = redis and redis.get(settings.CC_TWEET_ACTIVITY_CACHE_KEY)
    if activity:
        activity = json.loads(activity)
    if activity and 'resultset' in activity:
        statsd.incr('customercare.stats.activity.hit')
        activity_stats = []
        for act in activity['resultset']:
            if act is None:  # Sometimes we get bad data here.
                continue
            activity_stats.append((act[0], {
                'requests': format_number(act[1], locale='en_US'),
                'replies': format_number(act[2], locale='en_US'),
                'perc': act[3] * 100,
            }))
    else:
        statsd.incr('customercare.stats.activity.miss')
        activity_stats = []

    contributors = redis and redis.get(settings.CC_TOP_CONTRIB_CACHE_KEY)
    if contributors:
        contributors = json.loads(contributors)
    if contributors and 'resultset' in contributors:
        statsd.incr('customercare.stats.contributors.hit')
        contributor_stats = {}
        for contrib in contributors['resultset']:
            # Create one list per time period
            period = contrib[1]
            if not contributor_stats.get(period):
                contributor_stats[period] = []
            elif len(contributor_stats[period]) == 16:
                # Show a max. of 16 people.
                continue

            contributor_stats[period].append({
                'name': contrib[2],
                'username': contrib[3],
                'count': contrib[4],
                'avatar': contributors['avatars'].get(contrib[3]),
            })
    else:
        statsd.incr('customercare.stats.contributors.miss')
        contributor_stats = {}

    # reformat stats to be more useful.
    new_contrib_stats = {}
    for time_period, contributors in contributor_stats.items():
        for contributor in contributors:
            username = contributor['username']
            if username not in new_contrib_stats:
                new_contrib_stats[username] = {
                    'username': username,
                    'name': contributor['name'],
                    'avatar': contributor['avatar'],
                }
            assert time_period not in new_contrib_stats[username]
            new_contrib_stats[username][time_period] = contributor['count']

    contributor_stats = sorted(new_contrib_stats.values(), reverse=True,
                               key=lambda c: c.get('Last Week', 0))

    try:
        twitter_user = (request.twitter.api.auth.get_username() if
                        request.twitter.authed else None)
    except tweepy.TweepError:
        # Bad oauth token. Create a new session so user re-auths.
        twitter_user = None
        request.twitter = twitter.Session()

    yesterday = datetime.now() - timedelta(days=1)

    recent_replied_count = _count_answered_tweets(since=yesterday)

    return jingo.render(request, 'customercare/landing.html', {
        'activity_stats': activity_stats,
        'contributor_stats': contributor_stats,
        'canned_responses': get_common_replies(request.locale),
        'tweets': _get_tweets(locale=request.locale,
                              https=request.is_secure()),
        'authed': request.twitter.authed,
        'twitter_user': twitter_user,
        'filters': FILTERS,
        'goal': settings.CC_REPLIES_GOAL,
        'recent_replied_count': recent_replied_count,
    })
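
The `redis and redis.get(key)` idiom above is what lets the landing page render with empty stats when Redis is down. The read-and-decode step repeats for both cache keys, so it could be factored into a small helper; this is a sketch, nothing in the original requires it.

import json


def cached_json(redis, key):
    """Fetch a JSON blob from Redis, degrading to None.

    Returns None when the client is missing (the earlier connection
    attempt failed), the key is absent, or the value is not valid JSON.
    """
    raw = redis and redis.get(key)
    if not raw:
        return None
    try:
        return json.loads(raw)
    except ValueError:
        return None

# Usage mirroring the landing view:
#   activity = cached_json(redis, settings.CC_TWEET_ACTIVITY_CACHE_KEY)
#   contributors = cached_json(redis, settings.CC_TOP_CONTRIB_CACHE_KEY)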
Example no. 54
    except ES_EXCEPTIONS:
        stats = None

    try:
        write_stats = get_doctype_stats(es_utils.WRITE_INDEX)
    except ES_EXCEPTIONS:
        write_stats = None

    try:
        indexes = get_indexes()
        indexes.sort(key=lambda m: m[0])
    except ES_EXCEPTIONS as e:
        error_messages.append('Error: {0}'.format(repr(e)))

    try:
        client = redis_client('default')
        outstanding_chunks = int(client.get(OUTSTANDING_INDEX_CHUNKS))
    except (RedisError, TypeError):
        outstanding_chunks = None

    recent_records = Record.uncached.order_by('-starttime')[:20]

    return render(
        request,
        'admin/search_maintenance.html',
        {'title': 'Search',
         'doctype_stats': stats,
         'doctype_write_stats': write_stats,
         'indexes': indexes,
         'read_index': es_utils.READ_INDEX,
         'write_index': es_utils.WRITE_INDEX,
Example no. 55
def get_customercare_stats():
    """
    Generate customer care stats from the Replies table.

    This gets cached in Redis as a sorted list of contributors, stored as JSON.

    Example Top Contributor data:

    [
        {
            'twitter_username': '******',
            'avatar': 'http://twitter.com/path/to/the/avatar.png',
            'avatar_https': 'https://twitter.com/path/to/the/avatar.png',
            'all': 5211,
            '1m': 230,
            '1w': 33,
            '1d': 3,
        },
        { ... },
        { ... },
    ]
    """

    contributor_stats = {}

    now = datetime.now()
    one_month_ago = now - timedelta(days=30)
    one_week_ago = now - timedelta(days=7)
    yesterday = now - timedelta(days=1)

    for reply in Reply.objects.all():
        raw = json.loads(reply.raw_json)
        user = reply.twitter_username
        if user not in contributor_stats:
            contributor_stats[user] = {
                "twitter_username": user,
                "avatar": raw["profile_image_url"],
                "avatar_https": raw["profile_image_url_https"],
                "all": 0,
                "1m": 0,
                "1w": 0,
                "1d": 0,
            }
        contributor = contributor_stats[reply.twitter_username]

        contributor["all"] += 1
        if reply.created > one_month_ago:
            contributor["1m"] += 1
            if reply.created > one_week_ago:
                contributor["1w"] += 1
                if reply.created > yesterday:
                    contributor["1d"] += 1

    sort_key = settings.CC_TOP_CONTRIB_SORT
    limit = settings.CC_TOP_CONTRIB_LIMIT
    # Sort by whatever is in settings, break ties with 'all'
    contributor_stats = sorted(contributor_stats.values(),
                               key=lambda c: (c[sort_key], c["all"]),
                               reverse=True)[:limit]

    try:
        redis = redis_client(name="default")
        key = settings.CC_TOP_CONTRIB_CACHE_KEY
        redis.set(key, json.dumps(contributor_stats))
    except RedisError as e:
        statsd.incr("redis.error")
        log.error("Redis error: %s" % e)

    return contributor_stats
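
One thing worth noting about the cache write above: the key is stored without an expiry, so if the cron job dies, the landing page keeps serving stale stats indefinitely. A possible variation, sketched as a drop-in for the try block above with the same assumed names (redis_client, settings, statsd, log, contributor_stats), uses SETEX so the entry ages out on its own; the 25-hour TTL is an arbitrary illustration, not a project setting.

    try:
        redis = redis_client(name="default")
        # setex stores the value with a TTL (redis-py 3.x argument
        # order: name, time, value). 25 hours gives a daily cron one
        # hour of slack before the cached stats disappear.
        redis.setex(settings.CC_TOP_CONTRIB_CACHE_KEY,
                    25 * 60 * 60,
                    json.dumps(contributor_stats))
    except RedisError as e:
        statsd.incr("redis.error")
        log.error("Redis error: %s" % e)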