Exemplo n.º 1
0
def _live_index_handler(sender, **kwargs):
    """Signal handler that live-indexes or unindexes a model instance.

    On ``post_save`` the instance is queued for indexing; on
    ``pre_delete`` it is queued for unindexing. Does nothing when live
    indexing is disabled or the expected kwargs are absent.

    :arg sender: the signal sender (unused)
    :arg kwargs: must contain ``signal`` and ``instance``

    """
    if (not settings.ES_LIVE_INDEX
            or 'signal' not in kwargs
            or 'instance' not in kwargs):
        return

    instance = kwargs['instance']

    try:
        if kwargs['signal'] == post_save:
            cls_path = to_class_path(instance.get_doctype())
            index_item_task.delay(cls_path, instance.id)

        elif kwargs['signal'] == pre_delete:
            cls_path = to_class_path(instance.get_doctype())
            unindex_item_task.delay(cls_path, instance.id)

    except Exception:
        # At this point, we're trying to create an indexing task for
        # some response that's changed. When an indexing task is
        # created, it uses amqp to connect to rabbitmq to put the
        # new task in the queue. If a user is leaving feedback and
        # this fails (which it does with some regularity), the user
        # gets an HTTP 500 which stinks.
        #
        # The problem is exacerbated by the fact I don't know the full
        # list of exceptions that can get kicked up here. So what
        # we're going to do is catch them all, look for "amqp" in the
        # frames and if it's there, we'll ignore the exception and
        # send an email. We can collect reasons and narrow this down
        # at some point if that makes sense to do. If "amqp" is not in
        # the frames, then it's some other kind of error that we want
        # to show up, so we'll re-raise it. Sorry, user!
        #
        # In this way, users will stop seeing HTTP 500 errors during
        # rabbitmq outages.
        #
        # Only the traceback is needed here; the type and value that
        # sys.exc_info() also returns were previously bound to unused
        # locals.
        exc_tb = sys.exc_info()[2]
        frames = traceback.extract_tb(exc_tb)
        for filename, lineno, funcname, text in frames:
            if 'amqp' in filename:
                # This is an amqp frame which indicates that we
                # should ignore this and send an email.
                mail_admins(
                    subject='amqp error',
                    message=(
                        'amqp error:\n\n' +
                        traceback.format_exc()
                    )
                )
                return

        # No amqp frames, so re-raise it.
        raise
Exemplo n.º 2
0
def _live_index_handler(sender, **kwargs):
    """Signal handler that live-indexes or unindexes a model instance.

    On ``post_save`` the instance is queued for indexing; on
    ``pre_delete`` it is queued for unindexing. Does nothing when live
    indexing is disabled or the expected kwargs are absent.

    """
    if (not settings.ES_LIVE_INDEX or
        'signal' not in kwargs or 'instance' not in kwargs):
        return

    instance = kwargs['instance']

    if kwargs['signal'] == post_save:
        cls_path = to_class_path(instance.get_mapping_type())
        index_item_task.delay(cls_path, instance.id)

    elif kwargs['signal'] == pre_delete:
        cls_path = to_class_path(instance.get_mapping_type())
        # Fix: this previously called the task function synchronously
        # (``unindex_item_task(...)``). Queue it with .delay like the
        # post_save branch so unindexing goes through celery.
        unindex_item_task.delay(cls_path, instance.id)
Exemplo n.º 3
0
def _live_index_handler(sender, **kwargs):
    """Queue a celery (un)indexing task when a watched model fires a signal.

    ``post_save`` queues indexing; ``pre_delete`` queues unindexing.
    No-op when live indexing is off or the kwargs are incomplete.

    """
    # Bail early unless live indexing is on and both kwargs are present.
    if not settings.ES_LIVE_INDEX:
        return
    if 'signal' not in kwargs or 'instance' not in kwargs:
        return

    obj = kwargs['instance']
    signal = kwargs['signal']

    if signal == post_save:
        path = to_class_path(obj.get_mapping_type())
        index_item_task.delay(path, obj.id)
    elif signal == pre_delete:
        path = to_class_path(obj.get_mapping_type())
        unindex_item_task.delay(path, obj.id)
Exemplo n.º 4
0
def _live_index_handler(sender, **kwargs):
    """Signal handler that live-indexes or unindexes a model instance.

    On ``post_save`` the instance is queued for indexing; on
    ``pre_delete`` it is queued for unindexing. Does nothing when live
    indexing is disabled or the expected kwargs are absent.

    """
    if (not settings.ES_LIVE_INDEX or 'signal' not in kwargs
            or 'instance' not in kwargs):
        return

    instance = kwargs['instance']

    try:
        if kwargs['signal'] == post_save:
            cls_path = to_class_path(instance.get_mapping_type())
            index_item_task.delay(cls_path, instance.id)

        elif kwargs['signal'] == pre_delete:
            cls_path = to_class_path(instance.get_mapping_type())
            unindex_item_task.delay(cls_path, instance.id)

    except Exception:
        # At this point, we're trying to create an indexing task for
        # some response that's changed. When an indexing task is
        # created, it uses amqp to connect to rabbitmq to put the
        # new task in the queue. If a user is leaving feedback and
        # this fails (which it does with some regularity), the user
        # gets an HTTP 500 which stinks.
        #
        # The problem is exacerbated by the fact I don't know the full
        # list of exceptions that can get kicked up here. So what
        # we're going to do is catch them all, look for "amqp" in the
        # frames and if it's there, we'll ignore the exception and
        # send an email. We can collect reasons and narrow this down
        # at some point if that makes sense to do. If "amqp" is not in
        # the frames, then it's some other kind of error that we want
        # to show up, so we'll re-raise it. Sorry, user!
        #
        # In this way, users will stop seeing HTTP 500 errors during
        # rabbitmq outages.
        exc_type, exc_value, exc_tb = sys.exc_info()
        frames = traceback.extract_tb(exc_tb)
        # Each frame tuple is (filename, lineno, funcname, text); the
        # filename is what we sniff for "amqp".
        for fn, ln, fun, text in frames:
            if 'amqp' in fn:
                # This is an amqp frame which indicates that we
                # should ignore this and send an email.
                mail_admins(subject='amqp error',
                            message=('amqp error:\n\n' +
                                     traceback.format_exc()))
                return

        # No amqp frames, so re-raise it.
        raise
Exemplo n.º 5
0
def handle_reindex(request):
    """Calculate chunks and kick off indexing tasks.

    Wipes and recreates the index, then queues one celery task per
    (class, id-chunk) pair, recording each chunk in a Record row so
    progress can be tracked. Redirects back to the requesting page.

    """
    index = get_index()

    # One batch id ties all of this run's Record rows together.
    batch_id = create_batch_id()

    # Break up all the things we want to index into chunks. This
    # chunkifies by class then by chunk size.
    chunks = []
    for cls, indexable in get_indexable():
        chunks.extend(
            (cls, chunk) for chunk in chunked(indexable, CHUNK_SIZE))

    # The previous lines do a lot of work and take some time to
    # execute.  So we wait until here to wipe and rebuild the
    # index. That reduces the time that there is no index by a little.
    recreate_index()

    for cls, id_list in chunks:
        # Human-readable chunk label, e.g. "responses 1 -> 1000".
        chunk_name = '%s %d -> %d' % (cls.get_mapping_type_name(),
                                      id_list[0], id_list[-1])
        rec = Record(batch_id=batch_id, name=chunk_name)
        rec.save()
        index_chunk_task.delay(index, batch_id, rec.id,
                               (to_class_path(cls), id_list))

    return HttpResponseRedirect(request.path)
Exemplo n.º 6
0
    def test_index_chunk_task(self):
        """index_chunk_task indexes a chunk and marks its record succeeded."""
        created = ResponseFactory.create_batch(10)

        # Live indexing already put the new responses in the index, so
        # rebuild an empty index to exercise index_chunk_task from a
        # clean slate.
        self.setup_indexes(empty=True)

        # Sanity check: index starts out empty.
        assert ResponseDocType.docs.search().count() == 0

        # Build the record plus the chunk and push it through celery.
        batch_id = 'ou812'
        record = RecordFactory(batch_id=batch_id)
        chunk = (
            to_class_path(ResponseDocType),
            [resp.id for resp in created]
        )
        index_chunk_task.delay(get_index_name(), batch_id, record.id, chunk)

        self.refresh()

        # All ten responses should now be indexed.
        assert ResponseDocType.docs.search().count() == 10

        # The record should be marked succeeded.
        record = Record.objects.get(pk=record.id)
        assert record.status == Record.STATUS_SUCCESS
Exemplo n.º 7
0
def handle_reindex(request):
    """Calculate chunks and kick off indexing tasks.

    Recreates the index and queues one celery task per (class, chunk)
    pair, tracking each with a Record row. Redirects back to the
    requesting page.

    """
    index = get_index()
    batch_id = create_batch_id()

    # Chunkify everything indexable: first by class, then into
    # CHUNK_SIZE-sized id lists.
    chunks = []
    for cls, indexable in get_indexable():
        for chunk in chunked(indexable, CHUNK_SIZE):
            chunks.append((cls, chunk))

    # Gathering the chunks above is slow, so we delay wiping and
    # rebuilding the index until now to shrink the window during which
    # there is no index at all.
    recreate_index()

    for cls, id_list in chunks:
        chunk_name = '%s %d -> %d' % (
            cls.get_mapping_type_name(), id_list[0], id_list[-1])
        record = Record(batch_id=batch_id, name=chunk_name)
        record.save()
        index_chunk_task.delay(
            index, batch_id, record.id, (to_class_path(cls), id_list))

    return HttpResponseRedirect(request.path)
Exemplo n.º 8
0
    def test_index_chunk_task(self):
        """index_chunk_task indexes a chunk and marks its record succeeded."""
        created = ResponseFactory.create_batch(10)

        # Live indexing already indexed the new responses; rebuild an
        # empty index so index_chunk_task starts from scratch.
        self.setup_indexes(empty=True)

        # Sanity check: the index starts out empty.
        eq_(len(ResponseMappingType.search()), 0)

        # Build the record plus the chunk and push it through celery.
        batch_id = 'ou812'
        record = RecordFactory(batch_id=batch_id)
        chunk = (
            to_class_path(ResponseMappingType),
            [resp.id for resp in created]
        )
        index_chunk_task.delay(get_index(), batch_id, record.id, chunk)

        ResponseMappingType.refresh_index()

        # All ten responses should now be indexed.
        eq_(len(ResponseMappingType.search()), 10)

        # The record should be marked succeeded.
        record = Record.objects.get(pk=record.id)
        eq_(record.status, Record.STATUS_SUCCESS)
Exemplo n.º 9
0
    def test_index_chunk_task(self):
        """index_chunk_task indexes a chunk and marks its record succeeded."""
        created = [response(save=True) for _ in range(10)]

        # Live indexing already indexed the new responses; rebuild an
        # empty index so index_chunk_task starts from scratch.
        self.setup_indexes(empty=True)

        # Sanity check: the index starts out empty.
        eq_(len(ResponseMappingType.search()), 0)

        # Build the record plus the chunk and push it through celery.
        batch_id = 'ou812'
        record_obj = record(batch_id=batch_id, save=True)
        chunk = (
            to_class_path(ResponseMappingType),
            [resp.id for resp in created]
        )
        index_chunk_task.delay(get_index(), batch_id, record_obj.id, chunk)

        ResponseMappingType.refresh_index()

        # All ten responses should now be indexed.
        eq_(len(ResponseMappingType.search()), 10)

        # The record should be marked succeeded.
        record_obj = Record.objects.get(pk=record_obj.id)
        eq_(record_obj.status, Record.STATUS_SUCCESS)
Exemplo n.º 10
0
def _live_index_handler(sender, **kwargs):
    """Queue a celery (un)indexing task when a watched model fires a signal.

    ``post_save`` queues indexing; ``pre_delete`` queues unindexing.
    Connection failures to amqp are deliberately swallowed.

    """
    if (not settings.ES_LIVE_INDEX or
        'signal' not in kwargs or 'instance' not in kwargs):
        return

    obj = kwargs['instance']
    signal = kwargs['signal']

    try:
        if signal == post_save:
            path = to_class_path(obj.get_mapping_type())
            index_item_task.delay(path, obj.id)
        elif signal == pre_delete:
            path = to_class_path(obj.get_mapping_type())
            unindex_item_task.delay(path, obj.id)
    except socket.error:
        # A socket error here means we couldn't connect to amqp.
        # FIXME: Remove this at some point. But we have it here now
        # for cron jobs.
        pass
Exemplo n.º 11
0
Arquivo: admin.py Projeto: xrile/fjord
def reindex():
    """Calculates and creates indexing chunks.

    Queues one celery task per (class, id-chunk) pair, with a Record
    row per chunk tracking progress under a shared batch id.

    """
    index = get_index_name()
    batch_id = create_batch_id()

    # Chunkify everything indexable: first by class, then into
    # CHUNK_SIZE-sized id lists.
    chunks = []
    for cls, indexable in get_indexable():
        for chunk in chunked(indexable, CHUNK_SIZE):
            chunks.append((cls, chunk))

    for cls, id_list in chunks:
        chunk_name = '%s %d -> %d' % (
            cls._doc_type.name, id_list[0], id_list[-1])
        record = Record(batch_id=batch_id, name=chunk_name)
        record.save()
        index_chunk_task.delay(
            index, batch_id, record.id, (to_class_path(cls), id_list))
Exemplo n.º 12
0
def reindex():
    """Calculates and creates indexing chunks.

    Queues one celery task per (class, id-chunk) pair, with a Record
    row per chunk tracking progress under a shared batch id.

    """
    index = get_index()
    batch_id = create_batch_id()

    # Chunkify everything indexable: first by class, then into
    # CHUNK_SIZE-sized id lists.
    chunks = []
    for cls, indexable in get_indexable():
        for chunk in chunked(indexable, CHUNK_SIZE):
            chunks.append((cls, chunk))

    for cls, id_list in chunks:
        chunk_name = '%s %d -> %d' % (
            cls.get_mapping_type_name(), id_list[0], id_list[-1])
        record = Record(batch_id=batch_id, name=chunk_name)
        record.save()
        index_chunk_task.delay(
            index, batch_id, record.id, (to_class_path(cls), id_list))
Exemplo n.º 13
0
def test_to_class_path():
    """to_class_path renders a class as 'module.path:ClassName'."""
    expected = 'fjord.search.tests.test_utils:FooBarClassOfAwesome'
    eq_(to_class_path(FooBarClassOfAwesome), expected)
Exemplo n.º 14
0
def test_to_class_path():
    """to_class_path renders a class as 'module.path:ClassName'."""
    result = to_class_path(FooBarClassOfAwesome)
    assert result == 'fjord.search.tests.test_utils:FooBarClassOfAwesome'
Exemplo n.º 15
0
def test_to_class_path():
    """to_class_path renders a class as 'module.path:ClassName'."""
    expected = 'fjord.search.tests.test__utils:FooBarClassOfAwesome'
    eq_(to_class_path(FooBarClassOfAwesome), expected)
Exemplo n.º 16
0
def test_to_class_path():
    """to_class_path renders a class as 'module.path:ClassName'."""
    result = to_class_path(FooBarClassOfAwesome)
    assert result == 'fjord.search.tests.test_utils:FooBarClassOfAwesome'