Example #1
def threads(request, document_slug):
    """View all the threads in a discussion forum."""
    doc = get_document(document_slug, request)
    try:
        sort = int(request.GET.get('sort', 0))
    except ValueError:
        sort = 0

    try:
        desc = int(request.GET.get('desc', 0))
    except ValueError:
        desc = 0
    desc_toggle = 0 if desc else 1

    threads_ = sort_threads(doc.thread_set, sort, desc)
    threads_ = paginate(request, threads_,
                        per_page=kbforums.THREADS_PER_PAGE)

    feed_urls = ((reverse('wiki.discuss.threads.feed', args=[document_slug]),
                  ThreadsFeed().title(doc)),)

    is_watching_forum = (request.user.is_authenticated() and
                         NewThreadEvent.is_notifying(request.user, doc))
    return render(request, 'kbforums/threads.html', {
        'document': doc, 'threads': threads_,
        'is_watching_forum': is_watching_forum,
        'sort': sort, 'desc_toggle': desc_toggle,
        'feeds': feed_urls})
Example #2
def recent_revisions(request):
    # Make writable
    request.GET = request.GET.copy()

    fragment = request.GET.pop("fragment", None)
    form = RevisionFilterForm(request.GET)
    revs = Revision.objects.order_by("-created")

    # We are going to ignore validation errors for the most part, but
    # this is needed to call the functions that generate `cleaned_data`.
    # This helps in particular when bad user names are typed in.
    form.is_valid()

    # If something has gone very wrong, `cleaned_data` won't be there.
    if hasattr(form, "cleaned_data"):
        if form.cleaned_data.get("locale"):
            revs = revs.filter(document__locale=form.cleaned_data["locale"])
        if form.cleaned_data.get("users"):
            revs = revs.filter(creator__in=form.cleaned_data["users"])
        if form.cleaned_data.get("start"):
            revs = revs.filter(created__gte=form.cleaned_data["start"])
        if form.cleaned_data.get("end"):
            revs = revs.filter(created__lte=form.cleaned_data["end"])

    revs = paginate(request, revs)

    c = {"revisions": revs, "form": form}
    if fragment:
        template = "wiki/includes/recent_revisions_fragment.html"
    else:
        template = "wiki/recent_revisions.html"

    return render(request, template, c)
Example #3
def gallery_async(request):
    """AJAX endpoint to media gallery.

    Returns an HTML list representation of the media.

    """
    # Maybe refactor this into existing views and check request.is_ajax?
    media_type = request.GET.get('type', 'image')
    term = request.GET.get('q')
    media_locale = request.GET.get('locale', settings.WIKI_DEFAULT_LANGUAGE)
    if media_type == 'image':
        media_qs = Image.objects
    elif media_type == 'video':
        media_qs = Video.objects
    else:
        raise Http404

    media_qs = media_qs.filter(locale=media_locale)

    if term:
        media_qs = media_qs.filter(
            Q(title__icontains=term) | Q(description__icontains=term))

    media = paginate(request, media_qs, per_page=ITEMS_PER_PAGE)

    return render(request, 'gallery/includes/media_list.html',
                  {'media_list': media})
Example #4
def gallery(request, media_type='image'):
    """The media gallery.

    Filter can be set to 'image' or 'video'.

    """
    if media_type == 'image':
        media_qs = Image.objects.filter(locale=request.LANGUAGE_CODE)
    elif media_type == 'video':
        media_qs = Video.objects.filter(locale=request.LANGUAGE_CODE)
    else:
        raise Http404

    media = paginate(request, media_qs, per_page=ITEMS_PER_PAGE)

    drafts = _get_drafts(request.user)
    image = drafts['image'][0] if drafts['image'] else None
    image_form = _init_media_form(ImageForm, request, image)
    if request.method == 'POST':
        image_form.is_valid()

    return render(
        request, 'gallery/gallery.html', {
            'media': media,
            'media_type': media_type,
            'image_form': image_form,
            'submitted': request.method == 'POST'
        })
Example #5
def test_invalid_page_param():
    url = '%s?%s' % (reverse('search'), 'page=a')
    request = RequestFactory().get(url)
    queryset = range(100)
    paginated = paginate(request, queryset)
    eq_(paginated.url,
        request.build_absolute_uri(request.path) + '?')
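Every example in this listing goes through the same paginate() helper. The following is a minimal sketch of what such a helper could look like, reconstructed only from the keyword arguments used at the call sites (per_page, count, paginator_cls) and from the .url assertions in the test examples; it builds on Django's standard Paginator and is an illustration, not the project's actual implementation. The per_page default of 20 and the internal structure are assumptions.

from django.core.paginator import EmptyPage, InvalidPage, Paginator


def paginate(request, queryset, per_page=20, count=None, paginator_cls=Paginator):
    """Return one page of `queryset` with a reusable base URL attached."""
    paginator = paginator_cls(queryset, per_page)
    if count is not None:
        # Some callers pass a precomputed count to avoid an extra COUNT(*)
        # query; overriding the cached property works on recent Django.
        paginator.count = count

    # A missing or non-numeric ?page= falls back to page 1 instead of raising.
    try:
        page_number = int(request.GET.get("page", 1))
    except ValueError:
        page_number = 1
    try:
        page = paginator.page(page_number)
    except (EmptyPage, InvalidPage):
        page = paginator.page(1)

    # Expose the request URL with any existing `page` parameter stripped, so
    # templates can append &page=N without duplicating it. This matches the
    # assertions in test_invalid_page_param() above and test_paginated_url()
    # further down.
    query = request.GET.copy()
    query.pop("page", None)
    page.url = request.build_absolute_uri(request.path) + "?" + query.urlencode()
    return page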
Example #6
def posts(request, document_slug, thread_id, form=None, post_preview=None):
    """View all the posts in a thread."""
    doc = get_document(document_slug, request)

    thread = get_object_or_404(Thread, pk=thread_id, document=doc)

    posts_ = thread.post_set.all()
    count = posts_.count()
    if count:
        last_post = posts_[count - 1]
    else:
        last_post = None
    posts_ = paginate(request, posts_, kbforums.POSTS_PER_PAGE)

    if not form:
        form = ReplyForm()

    feed_urls = ((reverse('wiki.discuss.posts.feed',
                          kwargs={'document_slug': document_slug,
                                  'thread_id': thread_id}),
                  PostsFeed().title(thread)),)

    is_watching_thread = (request.user.is_authenticated() and
                          NewPostEvent.is_notifying(request.user, thread))
    return render(request, 'kbforums/posts.html', {
        'document': doc, 'thread': thread,
        'posts': posts_, 'form': form,
        'count': count,
        'last_post': last_post,
        'post_preview': post_preview,
        'is_watching_thread': is_watching_thread,
        'feeds': feed_urls})
Example #7
def posts(request, document_slug, thread_id, form=None, post_preview=None):
    """View all the posts in a thread."""
    doc = get_document(document_slug, request)

    thread = get_object_or_404(Thread, pk=thread_id, document=doc)

    posts_ = thread.post_set.all()
    count = posts_.count()
    if count:
        last_post = posts_[count - 1]
    else:
        last_post = None
    posts_ = paginate(request, posts_, kbforums.POSTS_PER_PAGE)

    if not form:
        form = ReplyForm()

    feed_urls = ((reverse('wiki.discuss.posts.feed',
                          kwargs={'document_slug': document_slug,
                                  'thread_id': thread_id}),
                  PostsFeed().title(thread)),)

    is_watching_thread = (request.user.is_authenticated() and
                          NewPostEvent.is_notifying(request.user, thread))
    return render(request, 'kbforums/posts.html', {
        'document': doc, 'thread': thread,
        'posts': posts_, 'form': form,
        'count': count,
        'last_post': last_post,
        'post_preview': post_preview,
        'is_watching_thread': is_watching_thread,
        'feeds': feed_urls})
Example #8
def threads(request, forum_slug):
    """View all the threads in a forum."""
    forum = get_object_or_404(Forum, slug=forum_slug)
    if not forum.allows_viewing_by(request.user):
        raise Http404  # Pretend there's nothing there.

    try:
        sort = int(request.GET.get('sort', 0))
    except ValueError:
        sort = 0

    try:
        desc = int(request.GET.get('desc', 0))
    except ValueError:
        desc = 0
    desc_toggle = 0 if desc else 1

    threads_ = sort_threads(forum.thread_set, sort, desc)
    count = threads_.count()
    threads_ = threads_.select_related('creator', 'last_post',
                                       'last_post__author')
    threads_ = paginate(request, threads_,
                        per_page=constants.THREADS_PER_PAGE, count=count)

    feed_urls = ((reverse('forums.threads.feed', args=[forum_slug]),
                  ThreadsFeed().title(forum)),)

    is_watching_forum = (request.user.is_authenticated() and
                         NewThreadEvent.is_notifying(request.user, forum))
    return render(request, 'forums/threads.html', {
        'forum': forum, 'threads': threads_,
        'is_watching_forum': is_watching_forum,
        'sort': sort, 'desc_toggle': desc_toggle,
        'feeds': feed_urls})
Example #9
def locale_discussions(request):
    locale_name = LOCALES[request.LANGUAGE_CODE].native
    threads = Thread.objects.filter(document__locale=request.LANGUAGE_CODE,
                                    document__allow_discussion=True)
    try:
        sort = int(request.GET.get('sort', 0))
    except ValueError:
        sort = 0

    try:
        desc = int(request.GET.get('desc', 0))
    except ValueError:
        desc = 0
    desc_toggle = 0 if desc else 1

    threads_ = sort_threads(threads, sort, desc)

    # Ignore sticky-ness:
    threads_ = threads_.order_by('-last_post__created')
    threads_ = paginate(request, threads_, per_page=kbforums.THREADS_PER_PAGE)
    is_watching_locale = (request.user.is_authenticated()
                          and NewThreadInLocaleEvent.is_notifying(
                              request.user, locale=request.LANGUAGE_CODE))
    return render(
        request, 'kbforums/discussions.html', {
            'locale_name': locale_name,
            'threads': threads_,
            'desc_toggle': desc_toggle,
            'is_watching_locale': is_watching_locale
        })
Example #10
def gallery_async(request):
    """AJAX endpoint to media gallery.

    Returns an HTML list representation of the media.

    """
    # Maybe refactor this into existing views and check request.is_ajax?
    media_type = request.GET.get('type', 'image')
    term = request.GET.get('q')
    media_locale = request.GET.get('locale', settings.WIKI_DEFAULT_LANGUAGE)
    if media_type == 'image':
        media_qs = Image.objects
    elif media_type == 'video':
        media_qs = Video.objects
    else:
        raise Http404

    media_qs = media_qs.filter(locale=media_locale)

    if term:
        media_qs = media_qs.filter(Q(title__icontains=term) |
                                   Q(description__icontains=term))

    media = paginate(request, media_qs, per_page=ITEMS_PER_PAGE)

    return render(request, 'gallery/includes/media_list.html', {
        'media_list': media})
Example #11
def posts(request,
          forum_slug,
          thread_id,
          form=None,
          post_preview=None,
          is_reply=False):
    """View all the posts in a thread."""
    thread = get_object_or_404(Thread, pk=thread_id)
    forum = thread.forum

    if forum.slug != forum_slug and not is_reply:
        new_forum = get_object_or_404(Forum, slug=forum_slug)
        if new_forum.allows_viewing_by(request.user):
            return HttpResponseRedirect(thread.get_absolute_url())
        raise Http404  # User has no right to view destination forum.
    elif forum.slug != forum_slug:
        raise Http404

    if not forum.allows_viewing_by(request.user):
        raise Http404

    posts_ = thread.post_set.all()
    count = posts_.count()
    if count:
        last_post = posts_[count - 1]
    else:
        last_post = None
    posts_ = posts_.select_related('author', 'updated_by')
    posts_ = posts_.extra(
        select={
            'author_post_count':
            'SELECT COUNT(*) FROM forums_post WHERE '
            'forums_post.author_id = auth_user.id'
        })
    posts_ = paginate(request, posts_, constants.POSTS_PER_PAGE, count=count)

    if not form:
        form = ReplyForm()

    feed_urls = ((reverse('forums.posts.feed',
                          kwargs={
                              'forum_slug': forum_slug,
                              'thread_id': thread_id
                          }), PostsFeed().title(thread)), )

    is_watching_thread = (request.user.is_authenticated()
                          and NewPostEvent.is_notifying(request.user, thread))
    return render(
        request, 'forums/posts.html', {
            'forum': forum,
            'thread': thread,
            'posts': posts_,
            'form': form,
            'count': count,
            'last_post': last_post,
            'post_preview': post_preview,
            'is_watching_thread': is_watching_thread,
            'feeds': feed_urls,
            'forums': Forum.objects.all()
        })
Example #12
def gallery(request, media_type="image"):
    """The media gallery.

    Filter can be set to 'image' or 'video'.

    """
    if media_type == "image":
        media_qs = Image.objects.filter(locale=request.LANGUAGE_CODE)
    elif media_type == "video":
        media_qs = Video.objects.filter(locale=request.LANGUAGE_CODE)
    else:
        raise Http404

    media = paginate(request, media_qs, per_page=ITEMS_PER_PAGE)

    drafts = _get_drafts(request.user)
    image = drafts["image"][0] if drafts["image"] else None
    image_form = _init_media_form(ImageForm, request, image)
    if request.method == "POST":
        image_form.is_valid()

    return render(
        request,
        "gallery/gallery.html",
        {
            "media": media,
            "media_type": media_type,
            "image_form": image_form,
            "submitted": request.method == "POST",
        },
    )
Example #13
def locale_discussions(request):
    locale_name = LOCALES[request.LANGUAGE_CODE].native
    threads = Thread.objects.filter(document__locale=request.LANGUAGE_CODE,
                                    document__allow_discussion=True)
    try:
        sort = int(request.GET.get('sort', 0))
    except ValueError:
        sort = 0

    try:
        desc = int(request.GET.get('desc', 0))
    except ValueError:
        desc = 0
    desc_toggle = 0 if desc else 1

    threads_ = sort_threads(threads, sort, desc)

    # Ignore sticky-ness:
    threads_ = threads_.order_by('-last_post__created')
    threads_ = paginate(request, threads_,
                        per_page=kbforums.THREADS_PER_PAGE)
    is_watching_locale = (request.user.is_authenticated() and
                          NewThreadInLocaleEvent.is_notifying(
                              request.user, locale=request.LANGUAGE_CODE))
    return render(request, 'kbforums/discussions.html', {
        'locale_name': locale_name, 'threads': threads_,
        'desc_toggle': desc_toggle,
        'is_watching_locale': is_watching_locale})
Example #14
def gallery(request, media_type='image'):
    """The media gallery.

    Filter can be set to 'image' or 'video'.

    """
    if media_type == 'image':
        media_qs = Image.objects.filter(locale=request.LANGUAGE_CODE)
    elif media_type == 'video':
        media_qs = Video.objects.filter(locale=request.LANGUAGE_CODE)
    else:
        raise Http404

    media = paginate(request, media_qs, per_page=ITEMS_PER_PAGE)

    drafts = _get_drafts(request.user)
    image = drafts['image'][0] if drafts['image'] else None
    image_form = _init_media_form(ImageForm, request, image)
    if request.method == 'POST':
        image_form.is_valid()

    return render(request, 'gallery/gallery.html', {
        'media': media,
        'media_type': media_type,
        'image_form': image_form,
        'submitted': request.method == 'POST'})
Example #15
def locale_discussions(request):
    locale_name = LOCALES[request.LANGUAGE_CODE].native
    threads = Thread.objects.filter(document__locale=request.LANGUAGE_CODE,
                                    document__allow_discussion=True)
    try:
        sort = int(request.GET.get("sort", 5))
    except ValueError:
        sort = 5

    try:
        desc = int(request.GET.get("desc", 1))
    except ValueError:
        desc = 1
    desc_toggle = 0 if desc else 1

    threads_ = sort_threads(threads, sort, desc)
    threads_ = paginate(request, threads_, per_page=kbforums.THREADS_PER_PAGE)
    is_watching_locale = request.user.is_authenticated and NewThreadInLocaleEvent.is_notifying(
        request.user, locale=request.LANGUAGE_CODE)
    return render(
        request,
        "kbforums/discussions.html",
        {
            "locale_name": locale_name,
            "threads": threads_,
            "desc_toggle": desc_toggle,
            "is_watching_locale": is_watching_locale,
        },
    )
Example #16
def threads(request, document_slug):
    """View all the threads in a discussion forum."""
    doc = get_document(document_slug, request)
    try:
        sort = int(request.GET.get("sort", 0))
    except ValueError:
        sort = 0

    try:
        desc = int(request.GET.get("desc", 0))
    except ValueError:
        desc = 0
    desc_toggle = 0 if desc else 1

    threads_ = sort_threads(doc.thread_set, sort, desc)
    threads_ = paginate(request, threads_, per_page=kbforums.THREADS_PER_PAGE)

    feed_urls = ((reverse("wiki.discuss.threads.feed",
                          args=[document_slug]), ThreadsFeed().title(doc)), )

    is_watching_forum = request.user.is_authenticated and NewThreadEvent.is_notifying(
        request.user, doc)
    return render(
        request,
        "kbforums/threads.html",
        {
            "document": doc,
            "threads": threads_,
            "is_watching_forum": is_watching_forum,
            "sort": sort,
            "desc_toggle": desc_toggle,
            "feeds": feed_urls,
        },
    )
Example #17
def search(request, forum_slug=None):
    """Search a specific forum."""

    try:
        forum = Forum.objects.get(slug=forum_slug)
    except Forum.DoesNotExist:
        raise Http404

    search_form = BaseSearchForm(request.GET, initial={"forum": forum})
    if not search_form.is_valid():
        messages.add_message(
            request, messages.WARNING,
            _("Something went wrong. Cannot search this forum."))
        return threads(request, forum_slug=forum_slug)

    cdata = search_form.cleaned_data

    search = ForumSearch(query=cdata["q"], thread_forum_id=forum.pk)

    # execute search
    pages = paginate(request, search, paginator_cls=SumoSearchPaginator)
    total = search.total
    results = search.results
    data = {
        "q": cdata["q"],
        "results": results,
        "search_form": search_form,
        "num_results": total,
        "forum": forum,
        "pages": pages,
    }
    return render(request, "search/search-results.html", data)
Example #18
def threads(request, forum_slug):
    """View all the threads in a forum."""
    forum = get_object_or_404(Forum, slug=forum_slug)
    if not forum.allows_viewing_by(request.user):
        raise Http404  # Pretend there's nothing there.

    try:
        sort = int(request.GET.get('sort', 0))
    except ValueError:
        sort = 0

    try:
        desc = int(request.GET.get('desc', 0))
    except ValueError:
        desc = 0
    desc_toggle = 0 if desc else 1

    threads_ = sort_threads(forum.thread_set, sort, desc)
    count = threads_.count()
    threads_ = threads_.select_related('creator', 'last_post',
                                       'last_post__author')
    threads_ = paginate(request, threads_,
                        per_page=constants.THREADS_PER_PAGE, count=count)

    feed_urls = ((reverse('forums.threads.feed', args=[forum_slug]),
                  ThreadsFeed().title(forum)),)

    is_watching_forum = (request.user.is_authenticated() and
                         NewThreadEvent.is_notifying(request.user, forum))
    return render(request, 'forums/threads.html', {
        'forum': forum, 'threads': threads_,
        'is_watching_forum': is_watching_forum,
        'sort': sort, 'desc_toggle': desc_toggle,
        'feeds': feed_urls})
Example #19
def threads(request, document_slug):
    """View all the threads in a discussion forum."""
    doc = get_document(document_slug, request)
    try:
        sort = int(request.GET.get("sort", 0))
    except ValueError:
        sort = 0

    try:
        desc = int(request.GET.get("desc", 0))
    except ValueError:
        desc = 0
    desc_toggle = 0 if desc else 1

    threads_ = sort_threads(doc.thread_set, sort, desc)
    threads_ = paginate(request, threads_, per_page=kbforums.THREADS_PER_PAGE)

    feed_urls = ((reverse("wiki.discuss.threads.feed", args=[document_slug]), ThreadsFeed().title(doc)),)

    is_watching_forum = request.user.is_authenticated() and NewThreadEvent.is_notifying(request.user, doc)
    return render(
        request,
        "kbforums/threads.html",
        {
            "document": doc,
            "threads": threads_,
            "is_watching_forum": is_watching_forum,
            "sort": sort,
            "desc_toggle": desc_toggle,
            "feeds": feed_urls,
        },
    )
Example #20
def threads(request, document_slug):
    """View all the threads in a discussion forum."""
    doc = get_document(document_slug, request)
    try:
        sort = int(request.GET.get('sort', 0))
    except ValueError:
        sort = 0

    try:
        desc = int(request.GET.get('desc', 0))
    except ValueError:
        desc = 0
    desc_toggle = 0 if desc else 1

    threads_ = sort_threads(doc.thread_set, sort, desc)
    threads_ = paginate(request, threads_, per_page=kbforums.THREADS_PER_PAGE)

    feed_urls = ((reverse('wiki.discuss.threads.feed',
                          args=[document_slug]), ThreadsFeed().title(doc)), )

    is_watching_forum = (request.user.is_authenticated()
                         and NewThreadEvent.is_notifying(request.user, doc))
    return render(
        request, 'kbforums/threads.html', {
            'document': doc,
            'threads': threads_,
            'is_watching_forum': is_watching_forum,
            'sort': sort,
            'desc_toggle': desc_toggle,
            'feeds': feed_urls
        })
Example #21
def inbox(request, template):
    user = request.user
    messages = InboxMessage.uncached.filter(to=user).order_by("-created")
    count = messages.count()

    messages = paginate(request, messages, per_page=MESSAGES_PER_PAGE, count=count)

    return render(request, template, {"msgs": messages})
Example #22
def test_paginated_url():
    """Avoid duplicating page param in pagination."""
    url = '%s?%s' % (reverse('search'), 'q=bookmarks&page=2')
    request = RequestFactory().get(url)
    queryset = [{}, {}]
    paginated = paginate(request, queryset)
    eq_(paginated.url,
        request.build_absolute_uri(request.path) + '?q=bookmarks')
Example #23
def test_paginator_filter():

    # Correct number of <li>s on page 1.
    url = reverse('search')
    request = RequestFactory().get(url)
    pager = paginate(request, range(100), per_page=9)
    html = paginator(pager)
    doc = pyquery.PyQuery(html)
    eq_(11, len(doc('li')))

    # Correct number of <li>s in the middle.
    url = '%s?%s' % (reverse('search'), 'page=10')
    request = RequestFactory().get(url)
    pager = paginate(request, range(200), per_page=10)
    html = paginator(pager)
    doc = pyquery.PyQuery(html)
    eq_(13, len(doc('li')))
Example #24
def test_paginated_url():
    """Avoid duplicating page param in pagination."""
    url = '%s?%s' % (reverse('search'), 'q=bookmarks&page=2')
    request = RequestFactory().get(url)
    queryset = [{}, {}]
    paginated = paginate(request, queryset)
    eq_(paginated.url,
        request.build_absolute_uri(request.path) + '?q=bookmarks')
Example #25
def test_paginator_filter():

    # Correct number of <li>s on page 1.
    url = reverse('search')
    request = RequestFactory().get(url)
    pager = paginate(request, range(100), per_page=9)
    html = paginator(pager)
    doc = pyquery.PyQuery(html)
    eq_(11, len(doc('li')))

    # Correct number of <li>s in the middle.
    url = '%s?%s' % (reverse('search'), 'page=10')
    request = RequestFactory().get(url)
    pager = paginate(request, range(200), per_page=10)
    html = paginator(pager)
    doc = pyquery.PyQuery(html)
    eq_(13, len(doc('li')))
Example #26
def test_paginated_url():
    """Avoid duplicating page param in pagination."""
    url = "%s?%s" % (reverse("search"), "q=bookmarks&page=2")
    request = RequestFactory().get(url)
    queryset = [{}, {}]
    paginated = paginate(request, queryset)
    eq_(paginated.url,
        request.build_absolute_uri(request.path) + "?q=bookmarks")
Example #27
def test_paginator_filter():

    # Correct number of <li>s on page 1.
    url = reverse("search")
    request = RequestFactory().get(url)
    pager = paginate(request, list(range(100)), per_page=9)
    html = paginator(pager)
    doc = pyquery.PyQuery(html)
    eq_(11, len(doc("li")))

    # Correct number of <li>s in the middle.
    url = "%s?%s" % (reverse("search"), "page=10")
    request = RequestFactory().get(url)
    pager = paginate(request, list(range(200)), per_page=10)
    html = paginator(pager)
    doc = pyquery.PyQuery(html)
    eq_(13, len(doc("li")))
Example #28
def inbox(request, template):
    user = request.user
    messages = InboxMessage.uncached.filter(to=user).order_by('-created')
    count = messages.count()

    messages = paginate(
        request, messages, per_page=MESSAGES_PER_PAGE, count=count)

    return render(request, template, {'msgs': messages})
Example #29
def inbox(request, template):
    user = request.user
    messages = InboxMessage.objects.filter(to=user).order_by('-created')
    count = messages.count()

    messages = paginate(
        request, messages, per_page=MESSAGES_PER_PAGE, count=count)

    return render(request, template, {'msgs': messages})
Example #30
def forums(request):
    """View all the forums."""
    qs = Forum.objects.filter(is_listed=True)
    qs = qs.select_related('last_post', 'last_post__author')
    qs = qs.extra(select={'thread_count': 'SELECT COUNT(*) FROM forums_thread '
                                          'WHERE forums_thread.forum_id = '
                                          'forums_forum.id'})
    forums_ = [f for f in qs if f.allows_viewing_by(request.user)]
    return render(request, 'forums/forums.html', {
        'forums': paginate(request, forums_)})
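The .extra(select=...) call above attaches a per-forum thread_count by injecting a correlated raw-SQL subquery. On current Django the same count can usually be expressed with annotate(); a minimal sketch, assuming Thread points at Forum through a forum foreign key with the default reverse name:

from django.db.models import Count

# Hypothetical annotate() equivalent of the raw-SQL thread_count above.
# Forum is the same model imported in the views shown in this listing, and
# Count("thread") assumes the default reverse accessor from Thread.forum.
qs = (Forum.objects.filter(is_listed=True)
      .select_related("last_post", "last_post__author")
      .annotate(thread_count=Count("thread")))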
Example #31
def forums(request):
    """View all the forums."""
    qs = Forum.objects.filter(is_listed=True)
    qs = qs.select_related('last_post', 'last_post__author')
    qs = qs.extra(select={'thread_count': 'SELECT COUNT(*) FROM forums_thread '
                                          'WHERE forums_thread.forum_id = '
                                          'forums_forum.id'})
    forums_ = [f for f in qs if f.allows_viewing_by(request.user)]
    return render(request, 'forums/forums.html', {
        'forums': paginate(request, forums_)})
Example #32
def outbox(request, template):
    user = request.user
    messages = OutboxMessage.uncached.filter(sender=user).order_by("-created")
    count = messages.count()

    messages = paginate(request, messages, per_page=MESSAGES_PER_PAGE, count=count)

    for msg in messages.object_list:
        _add_recipients(msg)

    return render(request, template, {"msgs": messages})
Example #33
def inbox(request):
    user = request.user
    messages = InboxMessage.objects.filter(to=user).order_by("-created")
    count = messages.count()

    messages = paginate(request,
                        messages,
                        per_page=MESSAGES_PER_PAGE,
                        count=count)

    return render(request, "messages/inbox.html", {"msgs": messages})
Example #34
def outbox(request, template):
    user = request.user
    messages = OutboxMessage.uncached.filter(sender=user).order_by('-created')
    count = messages.count()

    messages = paginate(
        request, messages, per_page=MESSAGES_PER_PAGE, count=count)

    for msg in messages.object_list:
        _add_recipients(msg)

    return render(request, template, {'msgs': messages})
Example #35
def forums(request):
    """View all the forums."""
    qs = Forum.objects.filter(is_listed=True)
    qs = qs.select_related("last_post", "last_post__author")
    qs = qs.extra(
        select={
            "thread_count": "SELECT COUNT(*) FROM forums_thread "
            "WHERE forums_thread.forum_id = "
            "forums_forum.id"
        }
    )
    forums_ = [f for f in qs if f.allows_viewing_by(request.user)]
    return render(request, "forums/forums.html", {"forums": paginate(request, forums_)})
Example #36
def outbox(request):
    user = request.user
    messages = OutboxMessage.objects.filter(sender=user).order_by("-created")
    count = messages.count()

    messages = paginate(request,
                        messages,
                        per_page=MESSAGES_PER_PAGE,
                        count=count)

    for msg in messages.object_list:
        _add_recipients(msg)

    return render(request, "messages/outbox.html", {"msgs": messages})
Example #37
def questions_contributed(request, username):
    # plus sign (+) is converted to space
    username = username.replace(" ", "+")
    profile = get_object_or_404(Profile, user__username=username, user__is_active=True)

    questions = paginate(request, profile.user.questions.order_by("-created"))

    return render(
        request,
        "users/questions_contributed.html",
        {
            "profile": profile,
            "questions": questions,
        },
    )
Example #38
def posts(request, forum_slug, thread_id, form=None, post_preview=None,
          is_reply=False):
    """View all the posts in a thread."""
    thread = get_object_or_404(Thread, pk=thread_id)
    forum = thread.forum

    if forum.slug != forum_slug and not is_reply:
        new_forum = get_object_or_404(Forum, slug=forum_slug)
        if new_forum.allows_viewing_by(request.user):
            return HttpResponseRedirect(thread.get_absolute_url())
        raise Http404  # User has no right to view destination forum.
    elif forum.slug != forum_slug:
        raise Http404

    if not forum.allows_viewing_by(request.user):
        raise Http404

    posts_ = thread.post_set.all()
    count = posts_.count()
    if count:
        last_post = posts_[count - 1]
    else:
        last_post = None
    posts_ = posts_.select_related('author', 'updated_by')
    posts_ = posts_.extra(
        select={'author_post_count': 'SELECT COUNT(*) FROM forums_post WHERE '
                                     'forums_post.author_id = auth_user.id'})
    posts_ = paginate(request, posts_, constants.POSTS_PER_PAGE, count=count)

    if not form:
        form = ReplyForm()

    feed_urls = ((reverse('forums.posts.feed',
                          kwargs={'forum_slug': forum_slug,
                                  'thread_id': thread_id}),
                  PostsFeed().title(thread)),)

    is_watching_thread = (request.user.is_authenticated() and
                          NewPostEvent.is_notifying(request.user, thread))
    return render(request, 'forums/posts.html', {
        'forum': forum, 'thread': thread,
        'posts': posts_, 'form': form,
        'count': count,
        'last_post': last_post,
        'post_preview': post_preview,
        'is_watching_thread': is_watching_thread,
        'feeds': feed_urls,
        'forums': Forum.objects.all()})
Example #39
def list_documents(request, category=None):
    """List wiki documents."""
    docs = Document.objects.filter(locale=request.LANGUAGE_CODE).order_by("title")
    if category:
        docs = docs.filter(category=category)
        try:
            category_id = int(category)
        except ValueError:
            raise Http404
        try:
            category = unicode(dict(CATEGORIES)[category_id])
        except KeyError:
            raise Http404

    docs = paginate(request, docs, per_page=DOCUMENTS_PER_PAGE)
    return render(request, "wiki/list_documents.html", {"documents": docs, "category": category})
Example #40
def search(request):
    """Find users by username and displayname.

    Uses the ES user's index.
    """

    data = {}

    if q := request.GET.get("q"):
        contributor_group_ids = list(
            Group.objects.filter(name__in=[
                "Contributors",
                CONTRIBUTOR_GROUP,
            ]).values_list("id", flat=True))
        search = ProfileSearch(query=q, group_ids=contributor_group_ids)
        pages = paginate(request, search, paginator_cls=SumoSearchPaginator)
        data = {"q": q, "results": search.results, "pages": pages}
Example #41
def posts(request, document_slug, thread_id, form=None, post_preview=None):
    """View all the posts in a thread."""
    doc = get_document(document_slug, request)

    thread = get_object_or_404(Thread, pk=thread_id, document=doc)

    posts_ = thread.post_set.all()
    count = posts_.count()
    if count:
        last_post = posts_[count - 1]
    else:
        last_post = None
    posts_ = paginate(request, posts_, kbforums.POSTS_PER_PAGE)

    if not form:
        form = ReplyForm()

    feed_urls = ((
        reverse(
            "wiki.discuss.posts.feed",
            kwargs={
                "document_slug": document_slug,
                "thread_id": thread_id
            },
        ),
        PostsFeed().title(thread),
    ), )

    is_watching_thread = request.user.is_authenticated and NewPostEvent.is_notifying(
        request.user, thread)
    return render(
        request,
        "kbforums/posts.html",
        {
            "document": doc,
            "thread": thread,
            "posts": posts_,
            "form": form,
            "count": count,
            "last_post": last_post,
            "post_preview": post_preview,
            "is_watching_thread": is_watching_thread,
            "feeds": feed_urls,
        },
    )
Example #42
def list_documents(request, category=None):
    """List wiki documents."""
    docs = (Document.objects.filter(locale=request.LANGUAGE_CODE)
            .order_by('title'))
    if category:
        docs = docs.filter(category=category)
        try:
            category_id = int(category)
        except ValueError:
            raise Http404
        try:
            category = unicode(dict(CATEGORIES)[category_id])
        except KeyError:
            raise Http404

    docs = paginate(request, docs, per_page=DOCUMENTS_PER_PAGE)
    return render(request, 'wiki/list_documents.html', {
        'documents': docs,
        'category': category})
Example #43
def threads(request, forum_slug):
    """View all the threads in a forum."""
    forum = get_object_or_404(Forum, slug=forum_slug)
    if not forum.allows_viewing_by(request.user):
        raise Http404  # Pretend there's nothing there.

    try:
        sort = int(request.GET.get("sort", 0))
    except ValueError:
        sort = 0

    try:
        desc = int(request.GET.get("desc", 0))
    except ValueError:
        desc = 0
    desc_toggle = 0 if desc else 1

    threads_ = sort_threads(forum.thread_set, sort, desc)
    count = threads_.count()
    threads_ = threads_.select_related("creator", "last_post",
                                       "last_post__author")
    threads_ = paginate(request,
                        threads_,
                        per_page=constants.THREADS_PER_PAGE,
                        count=count)

    feed_urls = ((reverse("forums.threads.feed",
                          args=[forum_slug]), ThreadsFeed().title(forum)), )

    is_watching_forum = request.user.is_authenticated and NewThreadEvent.is_notifying(
        request.user, forum)
    return render(
        request,
        "forums/threads.html",
        {
            "forum": forum,
            "threads": threads_,
            "is_watching_forum": is_watching_forum,
            "sort": sort,
            "desc_toggle": desc_toggle,
            "feeds": feed_urls,
        },
    )
Example #44
def posts(request, document_slug, thread_id, form=None, post_preview=None):
    """View all the posts in a thread."""
    doc = get_document(document_slug, request)

    thread = get_object_or_404(Thread, pk=thread_id, document=doc)

    posts_ = thread.post_set.all()
    count = posts_.count()
    if count:
        last_post = posts_[count - 1]
    else:
        last_post = None
    posts_ = paginate(request, posts_, kbforums.POSTS_PER_PAGE)

    if not form:
        form = ReplyForm()

    feed_urls = (
        (
            reverse("wiki.discuss.posts.feed", kwargs={"document_slug": document_slug, "thread_id": thread_id}),
            PostsFeed().title(thread),
        ),
    )

    is_watching_thread = request.user.is_authenticated() and NewPostEvent.is_notifying(request.user, thread)
    return render(
        request,
        "kbforums/posts.html",
        {
            "document": doc,
            "thread": thread,
            "posts": posts_,
            "form": form,
            "count": count,
            "last_post": last_post,
            "post_preview": post_preview,
            "is_watching_thread": is_watching_thread,
            "feeds": feed_urls,
        },
    )
Example #45
def search(request, media_type):
    """Search the media gallery."""

    term = request.GET.get("q")
    if not term:
        url = reverse("gallery.gallery", args=[media_type])
        return HttpResponseRedirect(url)

    filter = Q(title__icontains=term) | Q(description__icontains=term)

    if media_type == "image":
        media_qs = Image.objects.filter(filter, locale=request.LANGUAGE_CODE)
    elif media_type == "video":
        media_qs = Video.objects.filter(filter, locale=request.LANGUAGE_CODE)
    else:
        raise Http404

    media = paginate(request, media_qs, per_page=ITEMS_PER_PAGE)

    return render(
        request, "gallery/search.html", {"media": media, "media_type": media_type, "q": term}
    )
Example #46
def _answers_data(request,
                  question_id,
                  form=None,
                  watch_form=None,
                  answer_preview=None):
    """Return a map of the minimal info necessary to draw an answers page."""
    question = get_object_or_404(Question, pk=question_id)
    answers_ = question.answers.all()

    # Remove spam flag if an answer passed the moderation queue
    if not settings.READ_ONLY:
        answers_.filter(flags__status=2).update(is_spam=False)

    if not request.user.has_perm("flagit.can_moderate"):
        answers_ = answers_.filter(is_spam=False)

    answers_ = paginate(request, answers_, per_page=config.ANSWERS_PER_PAGE)
    feed_urls = ((
        reverse("questions.answers.feed", kwargs={"question_id": question_id}),
        AnswersFeed().title(question),
    ), )
    frequencies = dict(FREQUENCY_CHOICES)

    is_watching_question = request.user.is_authenticated and (
        QuestionReplyEvent.is_notifying(request.user, question)
        or QuestionSolvedEvent.is_notifying(request.user, question))
    return {
        "question": question,
        "answers": answers_,
        "form": form or AnswerForm(),
        "answer_preview": answer_preview,
        "watch_form": watch_form or _init_watch_form(request, "reply"),
        "feeds": feed_urls,
        "frequencies": frequencies,
        "is_watching_question": is_watching_question,
        "can_tag": request.user.has_perm("questions.tag_question"),
        "can_create_tags": request.user.has_perm("taggit.add_tag"),
    }
Example #47
def search(request, media_type):
    """Search the media gallery."""

    term = request.GET.get('q')
    if not term:
        url = reverse('gallery.gallery', args=[media_type])
        return HttpResponseRedirect(url)

    filter = Q(title__icontains=term) | Q(description__icontains=term)

    if media_type == 'image':
        media_qs = Image.objects.filter(filter, locale=request.LANGUAGE_CODE)
    elif media_type == 'video':
        media_qs = Video.objects.filter(filter, locale=request.LANGUAGE_CODE)
    else:
        raise Http404

    media = paginate(request, media_qs, per_page=ITEMS_PER_PAGE)

    return render(request, 'gallery/search.html', {
        'media': media,
        'media_type': media_type,
        'q': term})
Example #48
def search(request, media_type):
    """Search the media gallery."""

    term = request.GET.get('q')
    if not term:
        url = reverse('gallery.gallery', args=[media_type])
        return HttpResponseRedirect(url)

    filter = Q(title__icontains=term) | Q(description__icontains=term)

    if media_type == 'image':
        media_qs = Image.objects.filter(filter, locale=request.LANGUAGE_CODE)
    elif media_type == 'video':
        media_qs = Video.objects.filter(filter, locale=request.LANGUAGE_CODE)
    else:
        raise Http404

    media = paginate(request, media_qs, per_page=ITEMS_PER_PAGE)

    return render(request, 'gallery/search.html', {
        'media': media,
        'media_type': media_type,
        'q': term})
Example #49
def recent_revisions(request):
    # Make writable
    request.GET = request.GET.copy()

    fragment = request.GET.pop('fragment', None)
    form = RevisionFilterForm(request.GET)
    revs = Revision.objects.order_by('-created')

    # We are going to ignore validation errors for the most part, but
    # this is needed to call the functions that generate `cleaned_data`.
    # This helps in particular when bad user names are typed in.
    form.is_valid()

    # If something has gone very wrong, `cleaned_data` won't be there.
    if hasattr(form, 'cleaned_data'):
        if form.cleaned_data.get('locale'):
            revs = revs.filter(document__locale=form.cleaned_data['locale'])
        if form.cleaned_data.get('users'):
            revs = revs.filter(creator__in=form.cleaned_data['users'])
        if form.cleaned_data.get('start'):
            revs = revs.filter(created__gte=form.cleaned_data['start'])
        if form.cleaned_data.get('end'):
            revs = revs.filter(created__lte=form.cleaned_data['end'])

    revs = paginate(request, revs)

    c = {
        'revisions': revs,
        'form': form,
    }
    if fragment:
        template = 'wiki/includes/recent_revisions_fragment.html'
    else:
        template = 'wiki/recent_revisions.html'

    return render(request, template, c)
Example #50
def gallery(request, media_type='image'):
    """The media gallery.

    Filter can be set to 'image' or 'video'.

    """
    if media_type == 'image':
        media_qs = Image.objects.filter(locale=request.LANGUAGE_CODE)
    elif media_type == 'video':
        media_qs = Video.objects.filter(locale=request.LANGUAGE_CODE)
    else:
        raise Http404

    media = paginate(request, media_qs, per_page=ITEMS_PER_PAGE)

    drafts = _get_drafts(request.user)
    image_form, video_form, upload_type_form = _init_forms(request, drafts)

    return render(request, 'gallery/gallery.html', {
        'media': media,
        'media_type': media_type,
        'upload_type_form': upload_type_form,
        'image_form': image_form,
        'video_form': video_form})
Example #51
def recent_revisions(request):
    # Make writable
    request.GET = request.GET.copy()

    fragment = request.GET.pop('fragment', None)
    form = RevisionFilterForm(request.GET)
    revs = Revision.objects.order_by('-created')

    # We are going to ignore validation errors for the most part, but
    # this is needed to call the functions that generate `cleaned_data`.
    # This helps in particular when bad user names are typed in.
    form.is_valid()

    # If something has gone very wrong, `cleaned_data` won't be there.
    if hasattr(form, 'cleaned_data'):
        if form.cleaned_data.get('locale'):
            revs = revs.filter(document__locale=form.cleaned_data['locale'])
        if form.cleaned_data.get('users'):
            revs = revs.filter(creator__in=form.cleaned_data['users'])
        if form.cleaned_data.get('start'):
            revs = revs.filter(created__gte=form.cleaned_data['start'])
        if form.cleaned_data.get('end'):
            revs = revs.filter(created__lte=form.cleaned_data['end'])

    revs = paginate(request, revs)

    c = {
        'revisions': revs,
        'form': form,
    }
    if fragment:
        template = 'wiki/includes/recent_revisions_fragment.html'
    else:
        template = 'wiki/recent_revisions.html'

    return render(request, template, c)
Example #52
def search(request, template=None):
    """ES-specific search view"""

    # JSON-specific variables
    is_json = (request.GET.get('format') == 'json')
    callback = request.GET.get('callback', '').strip()
    mimetype = 'application/x-javascript' if callback else 'application/json'

    # Search "Expires" header format
    expires_fmt = '%A, %d %B %Y %H:%M:%S GMT'

    # Check callback is valid
    if is_json and callback and not jsonp_is_valid(callback):
        return HttpResponse(
            json.dumps({'error': _('Invalid callback function.')}),
            mimetype=mimetype, status=400)

    language = locale_or_default(
        request.GET.get('language', request.LANGUAGE_CODE))
    r = request.GET.copy()
    a = request.GET.get('a', '0')

    # Search default values
    try:
        category = (map(int, r.getlist('category')) or
                    settings.SEARCH_DEFAULT_CATEGORIES)
    except ValueError:
        category = settings.SEARCH_DEFAULT_CATEGORIES
    r.setlist('category', category)

    # Basic form
    if a == '0':
        r['w'] = r.get('w', constants.WHERE_BASIC)
    # Advanced form
    if a == '2':
        r['language'] = language
        r['a'] = '1'

    # TODO: Rewrite so SearchForm is unbound initially and we can use
    # `initial` on the form fields.
    if 'include_archived' not in r:
        r['include_archived'] = False

    search_form = SearchForm(r)
    search_form.set_allowed_forums(request.user)

    if not search_form.is_valid() or a == '2':
        if is_json:
            return HttpResponse(
                json.dumps({'error': _('Invalid search data.')}),
                mimetype=mimetype,
                status=400)

        t = template if request.MOBILE else 'search/form.html'
        search_ = render(request, t, {
            'advanced': a, 'request': request,
            'search_form': search_form})
        search_['Cache-Control'] = 'max-age=%s' % \
                                   (settings.SEARCH_CACHE_PERIOD * 60)
        search_['Expires'] = (datetime.utcnow() +
                              timedelta(
                                minutes=settings.SEARCH_CACHE_PERIOD)) \
                              .strftime(expires_fmt)
        return search_

    cleaned = search_form.cleaned_data

    if request.MOBILE and cleaned['w'] == constants.WHERE_BASIC:
        cleaned['w'] = constants.WHERE_WIKI

    page = max(smart_int(request.GET.get('page')), 1)
    offset = (page - 1) * settings.SEARCH_RESULTS_PER_PAGE

    lang = language.lower()
    if settings.LANGUAGES.get(lang):
        lang_name = settings.LANGUAGES[lang]
    else:
        lang_name = ''

    # We use a regular S here because we want to search across
    # multiple doctypes.
    searcher = (AnalyzerS().es(urls=settings.ES_URLS)
                           .indexes(es_utils.READ_INDEX))

    wiki_f = F(model='wiki_document')
    question_f = F(model='questions_question')
    discussion_f = F(model='forums_thread')

    # Start - wiki filters

    if cleaned['w'] & constants.WHERE_WIKI:
        # Category filter
        if cleaned['category']:
            wiki_f &= F(document_category__in=cleaned['category'])

        # Locale filter
        wiki_f &= F(document_locale=language)

        # Product filter
        products = cleaned['product']
        for p in products:
            wiki_f &= F(product=p)

        # Topics filter
        topics = cleaned['topics']
        for t in topics:
            wiki_f &= F(topic=t)

        # Archived bit
        if a == '0' and not cleaned['include_archived']:
            # Default to NO for basic search:
            cleaned['include_archived'] = False
        if not cleaned['include_archived']:
            wiki_f &= F(document_is_archived=False)

    # End - wiki filters

    # Start - support questions filters

    if cleaned['w'] & constants.WHERE_SUPPORT:
        # Solved is set by default if using basic search
        if a == '0' and not cleaned['has_helpful']:
            cleaned['has_helpful'] = constants.TERNARY_YES

        # These filters are ternary, they can be either YES, NO, or OFF
        ternary_filters = ('is_locked', 'is_solved', 'has_answers',
                           'has_helpful')
        d = dict(('question_%s' % filter_name,
                  _ternary_filter(cleaned[filter_name]))
                 for filter_name in ternary_filters if cleaned[filter_name])
        if d:
            question_f &= F(**d)

        if cleaned['asked_by']:
            question_f &= F(question_creator=cleaned['asked_by'])

        if cleaned['answered_by']:
            question_f &= F(question_answer_creator=cleaned['answered_by'])

        q_tags = [t.strip() for t in cleaned['q_tags'].split(',')]
        for t in q_tags:
            if t:
                question_f &= F(question_tag=t)

        # Product filter
        products = cleaned['product']
        for p in products:
            question_f &= F(product=p)

        # Topics filter
        topics = cleaned['topics']
        for t in topics:
            question_f &= F(topic=t)

    # End - support questions filters

    # Start - discussion forum filters

    if cleaned['w'] & constants.WHERE_DISCUSSION:
        if cleaned['author']:
            discussion_f &= F(post_author_ord=cleaned['author'])

        if cleaned['thread_type']:
            if constants.DISCUSSION_STICKY in cleaned['thread_type']:
                discussion_f &= F(post_is_sticky=1)

            if constants.DISCUSSION_LOCKED in cleaned['thread_type']:
                discussion_f &= F(post_is_locked=1)

        valid_forum_ids = [
            f.id for f in Forum.authorized_forums_for_user(request.user)]

        forum_ids = None
        if cleaned['forum']:
            forum_ids = [f for f in cleaned['forum'] if f in valid_forum_ids]

        # If we removed all the forums they wanted to look at or if
        # they didn't specify, then we filter on the list of all
        # forums they're authorized to look at.
        if not forum_ids:
            forum_ids = valid_forum_ids

        discussion_f &= F(post_forum_id__in=forum_ids)

    # End - discussion forum filters

    # Created filter
    unix_now = int(time.time())
    interval_filters = (
        ('created', cleaned['created'], cleaned['created_date']),
        ('updated', cleaned['updated'], cleaned['updated_date']))
    for filter_name, filter_option, filter_date in interval_filters:
        if filter_option == constants.INTERVAL_BEFORE:
            before = {filter_name + '__gte': 0,
                      filter_name + '__lte': max(filter_date, 0)}

            discussion_f &= F(**before)
            question_f &= F(**before)
        elif filter_option == constants.INTERVAL_AFTER:
            after = {filter_name + '__gte': min(filter_date, unix_now),
                     filter_name + '__lte': unix_now}

            discussion_f &= F(**after)
            question_f &= F(**after)

    # In basic search, we limit questions from the last
    # SEARCH_DEFAULT_MAX_QUESTION_AGE seconds.
    if a == '0':
        start_date = unix_now - settings.SEARCH_DEFAULT_MAX_QUESTION_AGE
        question_f &= F(created__gte=start_date)

    # Note: num_voted (with a d) is a different field than num_votes
    # (with an s). The former is a dropdown and the latter is an
    # integer value.
    if cleaned['num_voted'] == constants.INTERVAL_BEFORE:
        question_f &= F(question_num_votes__lte=max(cleaned['num_votes'], 0))
    elif cleaned['num_voted'] == constants.INTERVAL_AFTER:
        question_f &= F(question_num_votes__gte=cleaned['num_votes'])

    # Done with all the filtery stuff--time to generate results

    # Combine all the filters and add to the searcher
    doctypes = []
    final_filter = F()
    if cleaned['w'] & constants.WHERE_WIKI:
        doctypes.append(DocumentMappingType.get_mapping_type_name())
        final_filter |= wiki_f

    if cleaned['w'] & constants.WHERE_SUPPORT:
        doctypes.append(QuestionMappingType.get_mapping_type_name())
        final_filter |= question_f

    if cleaned['w'] & constants.WHERE_DISCUSSION:
        doctypes.append(ThreadMappingType.get_mapping_type_name())
        final_filter |= discussion_f

    searcher = searcher.doctypes(*doctypes)
    searcher = searcher.filter(final_filter)

    if 'explain' in request.GET and request.GET['explain'] == '1':
        searcher = searcher.explain()

    documents = ComposedList()

    try:
        cleaned_q = cleaned['q']

        # Set up the highlights. Show the entire field highlighted.
        searcher = searcher.highlight(
            'question_content',  # support forum
            'document_summary',  # kb
            'post_content',  # contributor forum
            pre_tags=['<b>'],
            post_tags=['</b>'],
            number_of_fragments=0)

        # Set up boosts
        searcher = searcher.boost(
            question_title=4.0,
            question_content=3.0,
            question_answer_content=3.0,
            post_title=2.0,
            post_content=1.0,
            document_title=6.0,
            document_content=1.0,
            document_keywords=8.0,
            document_summary=2.0,

            # Text phrases in document titles and content get an extra
            # boost.
            document_title__text_phrase=10.0,
            document_content__text_phrase=8.0)

        # Apply sortby for advanced search of questions
        if cleaned['w'] == constants.WHERE_SUPPORT:
            sortby = cleaned['sortby']
            try:
                searcher = searcher.order_by(
                    *constants.SORT_QUESTIONS[sortby])
            except IndexError:
                # Skip index errors because they imply the user is
                # sending us sortby values that aren't valid.
                pass

        # Apply sortby for advanced search of kb documents
        if cleaned['w'] == constants.WHERE_WIKI:
            sortby = cleaned['sortby_documents']
            try:
                searcher = searcher.order_by(
                    *constants.SORT_DOCUMENTS[sortby])
            except IndexError:
                # Skip index errors because they imply the user is
                # sending us sortby values that aren't valid.
                pass

        # Build the query
        if cleaned_q:
            query_fields = chain(*[cls.get_query_fields()
                                   for cls in get_mapping_types()])
            query = {}
            # Create text and text_phrase queries for every field
            # we want to search.
            for field in query_fields:
                for query_type in ['text', 'text_phrase']:
                    query['%s__%s' % (field, query_type)] = cleaned_q

            # Transform the query to use locale aware analyzers.
            query = es_utils.es_query_with_analyzer(query, language)

            searcher = searcher.query(should=True, **query)

        num_results = min(searcher.count(), settings.SEARCH_MAX_RESULTS)

        # TODO - Can ditch the ComposedList here, but we need
        # something that paginate can use to figure out the paging.
        documents = ComposedList()
        documents.set_count(('results', searcher), num_results)

        results_per_page = settings.SEARCH_RESULTS_PER_PAGE
        pages = paginate(request, documents, results_per_page)

        # If we know there aren't any results, let's cheat and in
        # doing that, not hit ES again.
        if num_results == 0:
            searcher = []
        else:
            # Get the documents we want to show and add them to
            # docs_for_page
            documents = documents[offset:offset + results_per_page]

            if len(documents) == 0:
                # If the user requested a page that's beyond the
                # pagination, then documents is an empty list and
                # there are no results to show.
                searcher = []
            else:
                bounds = documents[0][1]
                searcher = searcher.values_dict()[bounds[0]:bounds[1]]

        results = []
        for i, doc in enumerate(searcher):
            rank = i + offset

            if doc['model'] == 'wiki_document':
                summary = _build_es_excerpt(doc)
                if not summary:
                    summary = doc['document_summary']
                result = {
                    'title': doc['document_title'],
                    'type': 'document'}

            elif doc['model'] == 'questions_question':
                summary = _build_es_excerpt(doc)
                if not summary:
                    # We're excerpting only question_content, so if
                    # the query matched question_title or
                    # question_answer_content, then there won't be any
                    # question_content excerpts. In that case, just
                    # show the question--but only the first 500
                    # characters.
                    summary = bleach.clean(
                        doc['question_content'], strip=True)[:500]

                result = {
                    'title': doc['question_title'],
                    'type': 'question',
                    'is_solved': doc['question_is_solved'],
                    'num_answers': doc['question_num_answers'],
                    'num_votes': doc['question_num_votes'],
                    'num_votes_past_week': doc['question_num_votes_past_week']}

            else:
                summary = _build_es_excerpt(doc, first_only=True)
                result = {
                    'title': doc['post_title'],
                    'type': 'thread'}

            result['url'] = doc['url']
            result['object'] = ObjectDict(doc)
            result['search_summary'] = summary
            result['rank'] = rank
            result['score'] = doc._score
            result['explanation'] = escape(format_explanation(
                    doc._explanation))
            results.append(result)

    except ES_EXCEPTIONS as exc:
        # Handle timeout and all those other transient errors with a
        # "Search Unavailable" rather than a Django error page.
        if is_json:
            return HttpResponse(json.dumps({'error':
                                             _('Search Unavailable')}),
                                mimetype=mimetype, status=503)

        # Cheating here: Convert from 'Timeout()' to 'timeout' so
        # we have less code, but still have good stats.
        exc_bucket = repr(exc).lower().strip('()')
        statsd.incr('search.esunified.{0}'.format(exc_bucket))

        import logging
        logging.exception(exc)

        t = 'search/mobile/down.html' if request.MOBILE else 'search/down.html'
        return render(request, t, {'q': cleaned['q']}, status=503)

    items = [(k, v) for k in search_form.fields for
             v in r.getlist(k) if v and k != 'a']
    items.append(('a', '2'))

    if is_json:
        # Models are not json serializable.
        for r in results:
            del r['object']
        data = {}
        data['results'] = results
        data['total'] = len(results)
        data['query'] = cleaned['q']
        if not results:
            data['message'] = _('No pages matched the search criteria')
        json_data = json.dumps(data)
        if callback:
            json_data = callback + '(' + json_data + ');'
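            # JSONP: wrap the JSON payload in the callback, e.g. "cb({...});".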

        return HttpResponse(json_data, mimetype=mimetype)

    fallback_results = None
    if num_results == 0:
        fallback_results = _fallback_results(language, cleaned['product'])

    results_ = render(request, template, {
        'num_results': num_results,
        'results': results,
        'fallback_results': fallback_results,
        'q': cleaned['q'],
        'w': cleaned['w'],
        'product': Product.objects.filter(slug__in=cleaned['product']),
        'products': Product.objects.filter(visible=True),
        'pages': pages,
        'search_form': search_form,
        'lang_name': lang_name, })
    results_['Cache-Control'] = 'max-age=%s' % \
                                (settings.SEARCH_CACHE_PERIOD * 60)
    results_['Expires'] = (datetime.utcnow() +
                           timedelta(minutes=settings.SEARCH_CACHE_PERIOD)) \
                           .strftime(expires_fmt)
    results_.set_cookie(settings.LAST_SEARCH_COOKIE, urlquote(cleaned['q']),
                        max_age=3600, secure=False, httponly=False)

    return results_
Exemple #53
0
def advanced_search(request, template=None):
    """ES-specific Advanced search view"""

    # JSON-specific variables
    is_json = request.GET.get("format") == "json"
    callback = request.GET.get("callback", "").strip()
    content_type = "application/x-javascript" if callback else "application/json"

    # Check callback is valid
    if is_json and callback and not jsonp_is_valid(callback):
        return HttpResponse(
            json.dumps({"error": _("Invalid callback function.")}), content_type=content_type, status=400
        )

    language = locale_or_default(request.GET.get("language", request.LANGUAGE_CODE))
    r = request.GET.copy()
    # TODO: Figure out how to get rid of 'a' and do it.
    # It basically is used to switch between showing the form or results.
    a = request.GET.get("a", "2")
    # TODO: This is so the 'a=1' stays in the URL for pagination.
    r["a"] = 1

    # Search default values
    try:
        category = map(int, r.getlist("category")) or settings.SEARCH_DEFAULT_CATEGORIES
    except ValueError:
        category = settings.SEARCH_DEFAULT_CATEGORIES
    r.setlist("category", category)

    r["language"] = language

    search_form = AdvancedSearchForm(r, auto_id=False)
    search_form.set_allowed_forums(request.user)

    # Showing the form is all 'a' is used for at the moment.
    if not search_form.is_valid() or a == "2":
        if is_json:
            return HttpResponse(json.dumps({"error": _("Invalid search data.")}), content_type=content_type, status=400)

        t = template if request.MOBILE else "search/form.html"
        search_ = render(request, t, {"advanced": True, "request": request, "search_form": search_form})
        cache_period = settings.SEARCH_CACHE_PERIOD
        search_["Cache-Control"] = "max-age=%s" % (cache_period * 60)
        search_["Expires"] = (datetime.utcnow() + timedelta(minutes=cache_period)).strftime(EXPIRES_FMT)
        return search_

    cleaned = search_form.cleaned_data

    if request.MOBILE and cleaned["w"] == constants.WHERE_BASIC:
        cleaned["w"] = constants.WHERE_WIKI

    page = max(smart_int(request.GET.get("page")), 1)
    offset = (page - 1) * settings.SEARCH_RESULTS_PER_PAGE

    lang = language.lower()
    if settings.LANGUAGES_DICT.get(lang):
        lang_name = settings.LANGUAGES_DICT[lang]
    else:
        lang_name = ""

    # We use a regular S here because we want to search across
    # multiple doctypes.
    searcher = AnalyzerS().es(urls=settings.ES_URLS).indexes(es_utils.read_index("default"))

    wiki_f = F(model="wiki_document")
    question_f = F(model="questions_question")
    discussion_f = F(model="forums_thread")

    # Start - wiki filters

    if cleaned["w"] & constants.WHERE_WIKI:
        # Category filter
        if cleaned["category"]:
            wiki_f &= F(document_category__in=cleaned["category"])

        # Locale filter
        wiki_f &= F(document_locale=language)

        # Product filter
        products = cleaned["product"]
        for p in products:
            wiki_f &= F(product=p)

        # Topics filter
        topics = cleaned["topics"]
        for t in topics:
            wiki_f &= F(topic=t)

        # Archived bit
        if not cleaned["include_archived"]:
            wiki_f &= F(document_is_archived=False)

    # End - wiki filters

    # Start - support questions filters

    if cleaned["w"] & constants.WHERE_SUPPORT:
        # These filters are ternary: they can be YES, NO, or OFF
        ternary_filters = ("is_locked", "is_solved", "has_answers", "has_helpful", "is_archived")
        d = dict(
            ("question_%s" % filter_name, _ternary_filter(cleaned[filter_name]))
            for filter_name in ternary_filters
            if cleaned[filter_name]
        )
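        # _ternary_filter is a helper defined elsewhere in this module;
        # presumably it maps TERNARY_YES/TERNARY_NO to booleans, so `d` ends
        # up looking like {'question_is_solved': True, 'question_is_locked': False}.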
        if d:
            question_f &= F(**d)

        if cleaned["asked_by"]:
            question_f &= F(question_creator=cleaned["asked_by"])

        if cleaned["answered_by"]:
            question_f &= F(question_answer_creator=cleaned["answered_by"])

        q_tags = [t.strip() for t in cleaned["q_tags"].split(",")]
        for t in q_tags:
            if t:
                question_f &= F(question_tag=t)

        # Product filter
        products = cleaned["product"]
        for p in products:
            question_f &= F(product=p)

        # Topics filter
        topics = cleaned["topics"]
        for t in topics:
            question_f &= F(topic=t)

    # End - support questions filters

    # Start - discussion forum filters

    if cleaned["w"] & constants.WHERE_DISCUSSION:
        if cleaned["author"]:
            discussion_f &= F(post_author_ord=cleaned["author"])

        if cleaned["thread_type"]:
            if constants.DISCUSSION_STICKY in cleaned["thread_type"]:
                discussion_f &= F(post_is_sticky=1)

            if constants.DISCUSSION_LOCKED in cleaned["thread_type"]:
                discussion_f &= F(post_is_locked=1)

        valid_forum_ids = [f.id for f in Forum.authorized_forums_for_user(request.user)]

        forum_ids = None
        if cleaned["forum"]:
            forum_ids = [f for f in cleaned["forum"] if f in valid_forum_ids]

        # If we removed all the forums they wanted to look at or if
        # they didn't specify, then we filter on the list of all
        # forums they're authorized to look at.
        if not forum_ids:
            forum_ids = valid_forum_ids

        discussion_f &= F(post_forum_id__in=forum_ids)

    # End - discussion forum filters

    # Created filter
    unix_now = int(time.time())
    interval_filters = (
        ("created", cleaned["created"], cleaned["created_date"]),
        ("updated", cleaned["updated"], cleaned["updated_date"]),
    )
    for filter_name, filter_option, filter_date in interval_filters:
        if filter_option == constants.INTERVAL_BEFORE:
            before = {filter_name + "__gte": 0, filter_name + "__lte": max(filter_date, 0)}

            discussion_f &= F(**before)
            question_f &= F(**before)
        elif filter_option == constants.INTERVAL_AFTER:
            after = {filter_name + "__gte": min(filter_date, unix_now), filter_name + "__lte": unix_now}

            discussion_f &= F(**after)
            question_f &= F(**after)
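    # The interval filters above turn into range filters: INTERVAL_BEFORE
    # becomes {<name>__gte: 0, <name>__lte: <date>} and INTERVAL_AFTER
    # becomes {<name>__gte: <date>, <name>__lte: now}.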

    # Note: num_voted (with a d) is a different field than num_votes
    # (with an s). The former is a dropdown and the latter is an
    # integer value.
    if cleaned["num_voted"] == constants.INTERVAL_BEFORE:
        question_f &= F(question_num_votes__lte=max(cleaned["num_votes"], 0))
    elif cleaned["num_voted"] == constants.INTERVAL_AFTER:
        question_f &= F(question_num_votes__gte=cleaned["num_votes"])

    # Done with all the filtery stuff--time to generate results

    # Combine all the filters and add to the searcher
    doctypes = []
    final_filter = F()
    if cleaned["w"] & constants.WHERE_WIKI:
        doctypes.append(DocumentMappingType.get_mapping_type_name())
        final_filter |= wiki_f

    if cleaned["w"] & constants.WHERE_SUPPORT:
        doctypes.append(QuestionMappingType.get_mapping_type_name())
        final_filter |= question_f

    if cleaned["w"] & constants.WHERE_DISCUSSION:
        doctypes.append(ThreadMappingType.get_mapping_type_name())
        final_filter |= discussion_f

    searcher = searcher.doctypes(*doctypes)
    searcher = searcher.filter(final_filter)
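    # final_filter is the OR of the per-doctype filters (wiki_f, question_f,
    # discussion_f) for whichever doctypes were selected above.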

    if "explain" in request.GET and request.GET["explain"] == "1":
        searcher = searcher.explain()

    documents = ComposedList()

    try:
        cleaned_q = cleaned["q"]

        # Set up the highlights. Show the entire field highlighted.
        searcher = searcher.highlight(
            "question_content",  # support forum
            "document_summary",  # kb
            "post_content",  # contributor forum
            pre_tags=["<b>"],
            post_tags=["</b>"],
            number_of_fragments=0,
        )

        # Set up boosts
        searcher = searcher.boost(
            question_title=4.0,
            question_content=3.0,
            question_answer_content=3.0,
            post_title=2.0,
            post_content=1.0,
            document_title=6.0,
            document_content=1.0,
            document_keywords=8.0,
            document_summary=2.0,
            # Text phrases in document titles and content get an extra
            # boost.
            document_title__match_phrase=10.0,
            document_content__match_phrase=8.0,
        )
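        # Boost values weight matches in those fields more heavily when ES
        # scores results; phrase matches in KB titles and content get the
        # largest boosts.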

        # Apply sortby for advanced search of questions
        if cleaned["w"] == constants.WHERE_SUPPORT:
            sortby = cleaned["sortby"]
            try:
                searcher = searcher.order_by(*constants.SORT_QUESTIONS[sortby])
            except IndexError:
                # Skip index errors because they imply the user is
                # sending us sortby values that aren't valid.
                pass

        # Apply sortby for advanced search of kb documents
        if cleaned["w"] == constants.WHERE_WIKI:
            sortby = cleaned["sortby_documents"]
            try:
                searcher = searcher.order_by(*constants.SORT_DOCUMENTS[sortby])
            except IndexError:
                # Skip index errors because they imply the user is
                # sending us sortby values that aren't valid.
                pass

        # Build the query
        if cleaned_q:
            query_fields = chain(
                *[cls.get_query_fields() for cls in [DocumentMappingType, ThreadMappingType, QuestionMappingType]]
            )
            query = {}
            # Create a simple_query_search query for every field
            # we want to search.
            for field in query_fields:
                query["%s__sqs" % field] = cleaned_q

            # Transform the query to use locale aware analyzers.
            query = es_utils.es_query_with_analyzer(query, language)

            searcher = searcher.query(should=True, **query)

        num_results = min(searcher.count(), settings.SEARCH_MAX_RESULTS)

        # TODO - Can ditch the ComposedList here, but we need
        # something that paginate can use to figure out the paging.
        documents = ComposedList()
        documents.set_count(("results", searcher), num_results)

        results_per_page = settings.SEARCH_RESULTS_PER_PAGE
        pages = paginate(request, documents, results_per_page)

        # If we already know there aren't any results, skip hitting ES again.
        if num_results == 0:
            searcher = []
        else:
            # Get the slice of documents we want to show on this page.
            documents = documents[offset : offset + results_per_page]

            if len(documents) == 0:
                # If the user requested a page that's beyond the
                # pagination, then documents is an empty list and
                # there are no results to show.
                searcher = []
            else:
                bounds = documents[0][1]
                searcher = searcher[bounds[0] : bounds[1]]
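                # documents[0] is a ('results', (start, stop)) pair from the
                # ComposedList slice; the bounds limit the ES query to just
                # this page's hits.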

        results = []
        for i, doc in enumerate(searcher):
            rank = i + offset

            if doc["model"] == "wiki_document":
                summary = _build_es_excerpt(doc)
                if not summary:
                    summary = doc["document_summary"]
                result = {"title": doc["document_title"], "type": "document"}

            elif doc["model"] == "questions_question":
                summary = _build_es_excerpt(doc)
                if not summary:
                    # We're excerpting only question_content, so if
                    # the query matched question_title or
                    # question_answer_content, then there won't be any
                    # question_content excerpts. In that case, just
                    # show the question--but only the first 500
                    # characters.
                    summary = bleach.clean(doc["question_content"], strip=True)[:500]

                result = {
                    "title": doc["question_title"],
                    "type": "question",
                    "is_solved": doc["question_is_solved"],
                    "num_answers": doc["question_num_answers"],
                    "num_votes": doc["question_num_votes"],
                    "num_votes_past_week": doc["question_num_votes_past_week"],
                }

            else:
                summary = _build_es_excerpt(doc, first_only=True)
                result = {"title": doc["post_title"], "type": "thread"}

            result["url"] = doc["url"]
            result["object"] = doc
            result["search_summary"] = summary
            result["rank"] = rank
            result["score"] = doc.es_meta.score
            result["explanation"] = escape(format_explanation(doc.es_meta.explanation))
            results.append(result)

    except ES_EXCEPTIONS as exc:
        # Handle timeout and all those other transient errors with a
        # "Search Unavailable" rather than a Django error page.
        if is_json:
            return HttpResponse(json.dumps({"error": _("Search Unavailable")}), content_type=content_type, status=503)

        # Cheating here: Convert from 'Timeout()' to 'timeout' so
        # we have less code, but still have good stats.
        exc_bucket = repr(exc).lower().strip("()")
        statsd.incr("search.esunified.{0}".format(exc_bucket))

        log.exception(exc)

        t = "search/mobile/down.html" if request.MOBILE else "search/down.html"
        return render(request, t, {"q": cleaned["q"]}, status=503)

    items = [(k, v) for k in search_form.fields for v in r.getlist(k) if v and k != "a"]
    items.append(("a", "2"))

    fallback_results = None
    if num_results == 0:
        fallback_results = _fallback_results(language, cleaned["product"])

    product = Product.objects.filter(slug__in=cleaned["product"])
    if product:
        product_titles = [_(p.title, "DB: products.Product.title") for p in product]
    else:
        product_titles = [_("All Products")]

    product_titles = ", ".join(product_titles)

    data = {
        "num_results": num_results,
        "results": results,
        "fallback_results": fallback_results,
        "product_titles": product_titles,
        "q": cleaned["q"],
        "w": cleaned["w"],
        "lang_name": lang_name,
        "advanced": True,
    }

    if is_json:
        # Models are not json serializable.
        for r in data["results"]:
            del r["object"]
        data["total"] = len(data["results"])

        data["products"] = [{"slug": p.slug, "title": p.title} for p in Product.objects.filter(visible=True)]

        if product:
            data["product"] = product[0].slug

        pages = Paginator(pages)
        data["pagination"] = dict(
            number=pages.pager.number,
            num_pages=pages.pager.paginator.num_pages,
            has_next=pages.pager.has_next(),
            has_previous=pages.pager.has_previous(),
            max=pages.max,
            span=pages.span,
            dotted_upper=pages.pager.dotted_upper,
            dotted_lower=pages.pager.dotted_lower,
            page_range=pages.pager.page_range,
            url=pages.pager.url,
        )
        if not results:
            data["message"] = _("No pages matched the search criteria")
        json_data = json.dumps(data)
        if callback:
            json_data = callback + "(" + json_data + ");"

        return HttpResponse(json_data, content_type=content_type)

    data.update(
        {
            "product": product,
            "products": Product.objects.filter(visible=True),
            "pages": pages,
            "search_form": search_form,
        }
    )
    results_ = render(request, template, data)
    cache_period = settings.SEARCH_CACHE_PERIOD
    results_["Cache-Control"] = "max-age=%s" % (cache_period * 60)
    results_["Expires"] = (datetime.utcnow() + timedelta(minutes=cache_period)).strftime(EXPIRES_FMT)
    results_.set_cookie(settings.LAST_SEARCH_COOKIE, urlquote(cleaned["q"]), max_age=3600, secure=False, httponly=False)

    return results_
Exemple #54
0
def simple_search(request, template=None):
    """ES-specific simple search view.

    This view is for end user searching of the Knowledge Base and
    Support Forum. Filtering options are limited to:
    * product (`product=firefox`, for example, for only Firefox results)
    * document type (`w=2`, for example, for Support Forum questions only)
    """

    # Redirect old Advanced Search URLs (?a={1,2}) to the new URL.
    a = request.GET.get("a")
    if a in ["1", "2"]:
        new_url = reverse("search.advanced") + "?" + request.GET.urlencode()
        return HttpResponseRedirect(new_url)

    # JSON-specific variables
    is_json = request.GET.get("format") == "json"
    callback = request.GET.get("callback", "").strip()
    content_type = "application/x-javascript" if callback else "application/json"

    # Check callback is valid
    if is_json and callback and not jsonp_is_valid(callback):
        return HttpResponse(
            json.dumps({"error": _("Invalid callback function.")}), content_type=content_type, status=400
        )

    language = locale_or_default(request.GET.get("language", request.LANGUAGE_CODE))
    r = request.GET.copy()

    # TODO: Do we really need to add this to the URL if it isn't already there?
    r["w"] = r.get("w", constants.WHERE_BASIC)

    # TODO: Break out a separate simple search form.
    search_form = SimpleSearchForm(r, auto_id=False)

    if not search_form.is_valid():
        if is_json:
            return HttpResponse(json.dumps({"error": _("Invalid search data.")}), content_type=content_type, status=400)

        t = template if request.MOBILE else "search/form.html"
        search_ = render(request, t, {"advanced": False, "request": request, "search_form": search_form})
        cache_period = settings.SEARCH_CACHE_PERIOD
        search_["Cache-Control"] = "max-age=%s" % (cache_period * 60)
        search_["Expires"] = (datetime.utcnow() + timedelta(minutes=cache_period)).strftime(EXPIRES_FMT)
        return search_

    cleaned = search_form.cleaned_data

    # On mobile, we default to just wiki results.
    if request.MOBILE and cleaned["w"] == constants.WHERE_BASIC:
        cleaned["w"] = constants.WHERE_WIKI

    page = max(smart_int(request.GET.get("page")), 1)
    offset = (page - 1) * settings.SEARCH_RESULTS_PER_PAGE

    lang = language.lower()
    if settings.LANGUAGES_DICT.get(lang):
        lang_name = settings.LANGUAGES_DICT[lang]
    else:
        lang_name = ""

    # We use a regular S here because we want to search across
    # multiple doctypes.
    searcher = AnalyzerS().es(urls=settings.ES_URLS).indexes(es_utils.read_index("default"))

    wiki_f = F(model="wiki_document")
    question_f = F(model="questions_question")

    cleaned_q = cleaned["q"]
    products = cleaned["product"]

    if not products and "all_products" not in request.GET:
        lowered_q = cleaned_q.lower()

        if "thunderbird" in lowered_q:
            products.append("thunderbird")
        elif "android" in lowered_q:
            products.append("mobile")
        elif "ios" in lowered_q or "ipad" in lowered_q or "ipod" in lowered_q or "iphone" in lowered_q:
            products.append("ios")
        elif "firefox os" in lowered_q:
            products.append("firefox-os")
        elif "firefox" in lowered_q:
            products.append("firefox")
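        # e.g. "firefox crashes on my ipad" picks the 'ios' product because
        # the iOS terms are checked before the generic 'firefox' match.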

    # Start - wiki filters

    if cleaned["w"] & constants.WHERE_WIKI:
        # Category filter
        wiki_f &= F(document_category__in=settings.SEARCH_DEFAULT_CATEGORIES)

        # Locale filter
        wiki_f &= F(document_locale=language)

        # Product filter
        for p in products:
            wiki_f &= F(product=p)

        # Archived bit
        wiki_f &= F(document_is_archived=False)

    # End - wiki filters

    # Start - support questions filters

    if cleaned["w"] & constants.WHERE_SUPPORT:
        # Has helpful answers is set by default if using basic search
        cleaned["has_helpful"] = constants.TERNARY_YES

        # No archived questions in default search.
        cleaned["is_archived"] = constants.TERNARY_NO

        # These filters are ternary: they can be YES, NO, or OFF
        ternary_filters = ("has_helpful", "is_archived")
        d = dict(
            ("question_%s" % filter_name, _ternary_filter(cleaned[filter_name]))
            for filter_name in ternary_filters
            if cleaned[filter_name]
        )
        if d:
            question_f &= F(**d)

        # Product filter
        for p in products:
            question_f &= F(product=p)

    # End - support questions filters

    # Done with all the filtery stuff--time to generate results

    # Combine all the filters and add to the searcher
    doctypes = []
    final_filter = F()
    if cleaned["w"] & constants.WHERE_WIKI:
        doctypes.append(DocumentMappingType.get_mapping_type_name())
        final_filter |= wiki_f

    if cleaned["w"] & constants.WHERE_SUPPORT:
        doctypes.append(QuestionMappingType.get_mapping_type_name())
        final_filter |= question_f

    searcher = searcher.doctypes(*doctypes)
    searcher = searcher.filter(final_filter)

    if "explain" in request.GET and request.GET["explain"] == "1":
        searcher = searcher.explain()

    documents = ComposedList()

    try:
        # Set up the highlights. Show the entire field highlighted.
        searcher = searcher.highlight(
            "question_content",  # support forum
            "document_summary",  # kb
            pre_tags=["<b>"],
            post_tags=["</b>"],
            number_of_fragments=0,
        )

        # Set up boosts
        searcher = searcher.boost(
            question_title=4.0,
            question_content=3.0,
            question_answer_content=3.0,
            document_title=6.0,
            document_content=1.0,
            document_keywords=8.0,
            document_summary=2.0,
            # Text phrases in document titles and content get an extra
            # boost.
            document_title__match_phrase=10.0,
            document_content__match_phrase=8.0,
        )

        # Build the query
        query_fields = chain(*[cls.get_query_fields() for cls in [DocumentMappingType, QuestionMappingType]])
        query = {}
        # Create match and match_phrase queries for every field
        # we want to search.
        for field in query_fields:
            for query_type in ["match", "match_phrase"]:
                query["%s__%s" % (field, query_type)] = cleaned_q

        # Transform the query to use locale aware analyzers.
        query = es_utils.es_query_with_analyzer(query, language)

        searcher = searcher.query(should=True, **query)

        num_results = min(searcher.count(), settings.SEARCH_MAX_RESULTS)

        # TODO - Can ditch the ComposedList here, but we need
        # something that paginate can use to figure out the paging.
        documents = ComposedList()
        documents.set_count(("results", searcher), num_results)

        results_per_page = settings.SEARCH_RESULTS_PER_PAGE
        pages = paginate(request, documents, results_per_page)

        # If we already know there aren't any results, skip hitting ES again.
        if num_results == 0:
            searcher = []
        else:
            # Get the slice of documents we want to show on this page.
            documents = documents[offset : offset + results_per_page]

            if len(documents) == 0:
                # If the user requested a page that's beyond the
                # pagination, then documents is an empty list and
                # there are no results to show.
                searcher = []
            else:
                bounds = documents[0][1]
                searcher = searcher[bounds[0] : bounds[1]]

        results = []
        for i, doc in enumerate(searcher):
            rank = i + offset

            if doc["model"] == "wiki_document":
                summary = _build_es_excerpt(doc)
                if not summary:
                    summary = doc["document_summary"]
                result = {"title": doc["document_title"], "type": "document"}

            elif doc["model"] == "questions_question":
                summary = _build_es_excerpt(doc)
                if not summary:
                    # We're excerpting only question_content, so if
                    # the query matched question_title or
                    # question_answer_content, then there won't be any
                    # question_content excerpts. In that case, just
                    # show the question--but only the first 500
                    # characters.
                    summary = bleach.clean(doc["question_content"], strip=True)[:500]

                result = {
                    "title": doc["question_title"],
                    "type": "question",
                    "is_solved": doc["question_is_solved"],
                    "num_answers": doc["question_num_answers"],
                    "num_votes": doc["question_num_votes"],
                    "num_votes_past_week": doc["question_num_votes_past_week"],
                }

            result["url"] = doc["url"]
            result["object"] = doc
            result["search_summary"] = summary
            result["rank"] = rank
            result["score"] = doc.es_meta.score
            result["explanation"] = escape(format_explanation(doc.es_meta.explanation))
            result["id"] = doc["id"]
            results.append(result)

    except ES_EXCEPTIONS as exc:
        # Handle timeout and all those other transient errors with a
        # "Search Unavailable" rather than a Django error page.
        if is_json:
            return HttpResponse(json.dumps({"error": _("Search Unavailable")}), content_type=content_type, status=503)

        # Cheating here: Convert from 'Timeout()' to 'timeout' so
        # we have less code, but still have good stats.
        exc_bucket = repr(exc).lower().strip("()")
        statsd.incr("search.esunified.{0}".format(exc_bucket))

        log.exception(exc)

        t = "search/mobile/down.html" if request.MOBILE else "search/down.html"
        return render(request, t, {"q": cleaned["q"]}, status=503)

    items = [(k, v) for k in search_form.fields for v in r.getlist(k) if v and k != "a"]
    items.append(("a", "2"))

    fallback_results = None
    if num_results == 0:
        fallback_results = _fallback_results(language, cleaned["product"])

    product = Product.objects.filter(slug__in=cleaned["product"])
    if product:
        product_titles = [_(p.title, "DB: products.Product.title") for p in product]
    else:
        product_titles = [_("All Products")]

    product_titles = ", ".join(product_titles)

    data = {
        "num_results": num_results,
        "results": results,
        "fallback_results": fallback_results,
        "product_titles": product_titles,
        "q": cleaned["q"],
        "w": cleaned["w"],
        "lang_name": lang_name,
    }

    if is_json:
        # Models are not json serializable.
        for r in data["results"]:
            del r["object"]
        data["total"] = len(data["results"])

        data["products"] = [{"slug": p.slug, "title": p.title} for p in Product.objects.filter(visible=True)]

        if product:
            data["product"] = product[0].slug

        pages = Paginator(pages)
        data["pagination"] = dict(
            number=pages.pager.number,
            num_pages=pages.pager.paginator.num_pages,
            has_next=pages.pager.has_next(),
            has_previous=pages.pager.has_previous(),
            max=pages.max,
            span=pages.span,
            dotted_upper=pages.pager.dotted_upper,
            dotted_lower=pages.pager.dotted_lower,
            page_range=pages.pager.page_range,
            url=pages.pager.url,
        )
        if not results:
            data["message"] = _("No pages matched the search criteria")
        json_data = json.dumps(data)
        if callback:
            json_data = callback + "(" + json_data + ");"

        return HttpResponse(json_data, content_type=content_type)

    data.update(
        {
            "product": product,
            "products": Product.objects.filter(visible=True),
            "pages": pages,
            "search_form": search_form,
            "advanced": False,
        }
    )
    results_ = render(request, template, data)
    cache_period = settings.SEARCH_CACHE_PERIOD
    results_["Cache-Control"] = "max-age=%s" % (cache_period * 60)
    results_["Expires"] = (datetime.utcnow() + timedelta(minutes=cache_period)).strftime(EXPIRES_FMT)
    results_.set_cookie(settings.LAST_SEARCH_COOKIE, urlquote(cleaned["q"]), max_age=3600, secure=False, httponly=False)

    return results_
Exemple #55
0
def simple_search(request, template=None):
    """Elasticsearch-specific simple search view.

    This view is for end user searching of the Knowledge Base and
    Support Forum. Filtering options are limited to:

    * product (`product=firefox`, for example, for only Firefox results)
    * document type (`w=2`, for example, for Support Forum questions only)

    """
    # Redirect old Advanced Search URLs (?a={1,2}) to the new URL.
    if request.GET.get('a') in ['1', '2']:
        new_url = reverse('search.advanced') + '?' + request.GET.urlencode()
        return HttpResponseRedirect(new_url)

    # JSON-specific variables
    is_json = (request.GET.get('format') == 'json')
    callback = request.GET.get('callback', '').strip()
    content_type = 'application/x-javascript' if callback else 'application/json'

    # Check callback is valid
    if is_json and callback and not jsonp_is_valid(callback):
        return HttpResponse(
            json.dumps({'error': _('Invalid callback function.')}),
            content_type=content_type,
            status=400)

    search_form = SimpleSearchForm(request.GET, auto_id=False)

    if not search_form.is_valid():
        if is_json:
            return HttpResponse(
                json.dumps({'error': _('Invalid search data.')}),
                content_type=content_type,
                status=400)

        t = template if request.MOBILE else 'search/form.html'
        search_ = render(request, t, {
            'advanced': False,
            'request': request,
            'search_form': search_form})
        cache_period = settings.SEARCH_CACHE_PERIOD
        search_['Cache-Control'] = 'max-age=%s' % (cache_period * 60)
        search_['Expires'] = (
            (datetime.utcnow() + timedelta(minutes=cache_period))
            .strftime(EXPIRES_FMT))
        return search_

    cleaned = search_form.cleaned_data

    # On mobile, we default to just wiki results.
    if request.MOBILE and cleaned['w'] == constants.WHERE_BASIC:
        cleaned['w'] = constants.WHERE_WIKI

    language = locale_or_default(cleaned['language'] or request.LANGUAGE_CODE)
    lang = language.lower()
    lang_name = settings.LANGUAGES_DICT.get(lang) or ''

    searcher = generate_simple_search(search_form, language, with_highlights=True)
    searcher = searcher[:settings.SEARCH_MAX_RESULTS]
    fallback_results = None

    try:
        pages = paginate(request, searcher, settings.SEARCH_RESULTS_PER_PAGE)
        offset = pages.start_index()

        results = []
        if pages.paginator.count == 0:
            fallback_results = _fallback_results(language, cleaned['product'])

        else:
            for i, doc in enumerate(pages):
                rank = i + offset

                if doc['model'] == 'wiki_document':
                    summary = _build_es_excerpt(doc)
                    if not summary:
                        summary = doc['document_summary']
                    result = {
                        'title': doc['document_title'],
                        'type': 'document'}

                elif doc['model'] == 'questions_question':
                    summary = _build_es_excerpt(doc)
                    if not summary:
                        # We're excerpting only question_content, so if the query matched
                        # question_title or question_answer_content, then there won't be any
                        # question_content excerpts. In that case, just show the question--but
                        # only the first 500 characters.
                        summary = bleach.clean(doc['question_content'], strip=True)[:500]

                    result = {
                        'title': doc['question_title'],
                        'type': 'question',
                        'is_solved': doc['question_is_solved'],
                        'num_answers': doc['question_num_answers'],
                        'num_votes': doc['question_num_votes'],
                        'num_votes_past_week': doc['question_num_votes_past_week']}

                result['url'] = doc['url']
                result['object'] = doc
                result['search_summary'] = summary
                result['rank'] = rank
                result['score'] = doc.es_meta.score
                result['explanation'] = escape(format_explanation(
                    doc.es_meta.explanation))
                result['id'] = doc['id']
                results.append(result)

    except ES_EXCEPTIONS as exc:
        # Handle timeout and all those other transient errors with a
        # "Search Unavailable" rather than a Django error page.
        if is_json:
            return HttpResponse(json.dumps({'error': _('Search Unavailable')}),
                                content_type=content_type, status=503)

        # Cheating here: Convert from 'Timeout()' to 'timeout' so
        # we have less code, but still have good stats.
        exc_bucket = repr(exc).lower().strip('()')
        statsd.incr('search.esunified.{0}'.format(exc_bucket))

        log.exception(exc)

        t = 'search/mobile/down.html' if request.MOBILE else 'search/down.html'
        return render(request, t, {'q': cleaned['q']}, status=503)

    product = Product.objects.filter(slug__in=cleaned['product'])
    if product:
        product_titles = [_(p.title, 'DB: products.Product.title') for p in product]
    else:
        product_titles = [_('All Products')]

    # FIXME: This is probably bad l10n.
    product_titles = ', '.join(product_titles)

    data = {
        'num_results': pages.paginator.count,
        'results': results,
        'fallback_results': fallback_results,
        'product_titles': product_titles,
        'q': cleaned['q'],
        'w': cleaned['w'],
        'lang_name': lang_name}

    if is_json:
        # Models are not json serializable.
        for r in data['results']:
            del r['object']
        data['total'] = len(data['results'])

        data['products'] = [{'slug': p.slug, 'title': p.title}
                            for p in Product.objects.filter(visible=True)]

        if product:
            data['product'] = product[0].slug

        pages = Paginator(pages)
        data['pagination'] = dict(
            number=pages.pager.number,
            num_pages=pages.pager.paginator.num_pages,
            has_next=pages.pager.has_next(),
            has_previous=pages.pager.has_previous(),
            max=pages.max,
            span=pages.span,
            dotted_upper=pages.pager.dotted_upper,
            dotted_lower=pages.pager.dotted_lower,
            page_range=pages.pager.page_range,
            url=pages.pager.url,
        )
        if not results:
            data['message'] = _('No pages matched the search criteria')
        json_data = json.dumps(data)
        if callback:
            json_data = callback + '(' + json_data + ');'

        return HttpResponse(json_data, content_type=content_type)

    data.update({
        'product': product,
        'products': Product.objects.filter(visible=True),
        'pages': pages,
        'search_form': search_form,
        'advanced': False,
    })
    results_ = render(request, template, data)
    cache_period = settings.SEARCH_CACHE_PERIOD
    results_['Cache-Control'] = 'max-age=%s' % (cache_period * 60)
    results_['Expires'] = (
        (datetime.utcnow() + timedelta(minutes=cache_period))
        .strftime(EXPIRES_FMT))
    results_.set_cookie(settings.LAST_SEARCH_COOKIE, urlquote(cleaned['q']),
                        max_age=3600, secure=False, httponly=False)

    return results_
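The simple_search variants in examples #55 and #58 delegate query construction to a generate_simple_search() helper that is not reproduced in this listing. Assuming it simply factors out the inline filter/highlight/query logic shown in examples #54 and #56 (product auto-detection and boosts omitted for brevity), a condensed sketch could look like this; the F, AnalyzerS, settings, constants, es_utils, DocumentMappingType and QuestionMappingType names are the same ones the views above already use:

def generate_simple_search(search_form, language, with_highlights=False):
    """Hypothetical sketch of the helper used by examples #55 and #58."""
    cleaned = search_form.cleaned_data

    doctypes = []
    final_filter = F()

    if cleaned['w'] & constants.WHERE_WIKI:
        wiki_f = (F(model='wiki_document') &
                  F(document_category__in=settings.SEARCH_DEFAULT_CATEGORIES) &
                  F(document_locale=language) &
                  F(document_is_archived=False))
        for p in cleaned['product']:
            wiki_f &= F(product=p)
        doctypes.append(DocumentMappingType.get_mapping_type_name())
        final_filter |= wiki_f

    if cleaned['w'] & constants.WHERE_SUPPORT:
        # Basic search only wants unarchived questions with helpful
        # answers (see examples #54/#56).
        question_f = (F(model='questions_question') &
                      F(question_has_helpful=True) &
                      F(question_is_archived=False))
        for p in cleaned['product']:
            question_f &= F(product=p)
        doctypes.append(QuestionMappingType.get_mapping_type_name())
        final_filter |= question_f

    searcher = (AnalyzerS().es(urls=settings.ES_URLS)
                .indexes(es_utils.read_index('default'))
                .doctypes(*doctypes)
                .filter(final_filter))

    if with_highlights:
        searcher = searcher.highlight(
            'question_content',  # support forum
            'document_summary',  # kb
            pre_tags=['<b>'],
            post_tags=['</b>'],
            number_of_fragments=0)

    # match and match_phrase queries across all query fields, adjusted
    # for locale-aware analyzers.
    query = {}
    for cls in (DocumentMappingType, QuestionMappingType):
        for field in cls.get_query_fields():
            for query_type in ('match', 'match_phrase'):
                query['%s__%s' % (field, query_type)] = cleaned['q']
    query = es_utils.es_query_with_analyzer(query, language)

    return searcher.query(should=True, **query)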
Exemple #56
0
def simple_search(request, template=None):
    """ES-specific simple search view.

    This view is for end user searching of the Knowledge Base and
    Support Forum. Filtering options are limited to:
    * product (`product=firefox`, for example, for only Firefox results)
    * document type (`w=2`, for example, for Support Forum questions only)
    """

    # Redirect old Advanced Search URLs (?a={1,2}) to the new URL.
    a = request.GET.get('a')
    if a in ['1', '2']:
        new_url = reverse('search.advanced') + '?' + request.GET.urlencode()
        return HttpResponseRedirect(new_url)

    # JSON-specific variables
    is_json = (request.GET.get('format') == 'json')
    callback = request.GET.get('callback', '').strip()
    content_type = (
        'application/x-javascript' if callback else 'application/json')

    # Check callback is valid
    if is_json and callback and not jsonp_is_valid(callback):
        return HttpResponse(
            json.dumps({'error': _('Invalid callback function.')}),
            content_type=content_type, status=400)

    language = locale_or_default(
        request.GET.get('language', request.LANGUAGE_CODE))
    r = request.GET.copy()

    # TODO: Do we really need to add this to the URL if it isn't already there?
    r['w'] = r.get('w', constants.WHERE_BASIC)

    # TODO: Break out a separate simple search form.
    search_form = SimpleSearchForm(r, auto_id=False)

    if not search_form.is_valid():
        if is_json:
            return HttpResponse(
                json.dumps({'error': _('Invalid search data.')}),
                content_type=content_type,
                status=400)

        t = template if request.MOBILE else 'search/form.html'
        search_ = render(request, t, {
            'advanced': False, 'request': request,
            'search_form': search_form})
        cache_period = settings.SEARCH_CACHE_PERIOD
        search_['Cache-Control'] = 'max-age={0!s}'.format((cache_period * 60))
        search_['Expires'] = (
            (datetime.utcnow() + timedelta(minutes=cache_period))
            .strftime(EXPIRES_FMT))
        return search_

    cleaned = search_form.cleaned_data

    # On mobile, we default to just wiki results.
    if request.MOBILE and cleaned['w'] == constants.WHERE_BASIC:
        cleaned['w'] = constants.WHERE_WIKI

    page = max(smart_int(request.GET.get('page')), 1)
    offset = (page - 1) * settings.SEARCH_RESULTS_PER_PAGE

    lang = language.lower()
    if settings.LANGUAGES_DICT.get(lang):
        lang_name = settings.LANGUAGES_DICT[lang]
    else:
        lang_name = ''

    # We use a regular S here because we want to search across
    # multiple doctypes.
    searcher = (AnalyzerS().es(urls=settings.ES_URLS)
                .indexes(es_utils.read_index('default')))

    wiki_f = F(model='wiki_document')
    question_f = F(model='questions_question')

    cleaned_q = cleaned['q']
    products = cleaned['product']

    if not products and 'all_products' not in request.GET:
        lowered_q = cleaned_q.lower()

        if 'thunderbird' in lowered_q:
            products.append('thunderbird')
        elif 'android' in lowered_q:
            products.append('mobile')
        elif ('ios' in lowered_q or 'ipad' in lowered_q or 'ipod' in lowered_q or
              'iphone' in lowered_q):
            products.append('ios')
        elif 'firefox os' in lowered_q:
            products.append('firefox-os')
        elif 'firefox' in lowered_q:
            products.append('firefox')

    # Start - wiki filters

    if cleaned['w'] & constants.WHERE_WIKI:
        # Category filter
        wiki_f &= F(document_category__in=settings.SEARCH_DEFAULT_CATEGORIES)

        # Locale filter
        wiki_f &= F(document_locale=language)

        # Product filter
        for p in products:
            wiki_f &= F(product=p)

        # Archived bit
        wiki_f &= F(document_is_archived=False)

    # End - wiki filters

    # Start - support questions filters

    if cleaned['w'] & constants.WHERE_SUPPORT:
        # Has helpful answers is set by default if using basic search
        cleaned['has_helpful'] = constants.TERNARY_YES

        # No archived questions in default search.
        cleaned['is_archived'] = constants.TERNARY_NO

        # These filters are ternary: they can be YES, NO, or OFF
        ternary_filters = ('has_helpful', 'is_archived')
        d = dict(('question_{0!s}'.format(filter_name),
                  _ternary_filter(cleaned[filter_name]))
                 for filter_name in ternary_filters if cleaned[filter_name])
        if d:
            question_f &= F(**d)

        # Product filter
        for p in products:
            question_f &= F(product=p)

    # End - support questions filters

    # Done with all the filtery stuff--time to generate results

    # Combine all the filters and add to the searcher
    doctypes = []
    final_filter = F()
    if cleaned['w'] & constants.WHERE_WIKI:
        doctypes.append(DocumentMappingType.get_mapping_type_name())
        final_filter |= wiki_f

    if cleaned['w'] & constants.WHERE_SUPPORT:
        doctypes.append(QuestionMappingType.get_mapping_type_name())
        final_filter |= question_f

    searcher = searcher.doctypes(*doctypes)
    searcher = searcher.filter(final_filter)

    if 'explain' in request.GET and request.GET['explain'] == '1':
        searcher = searcher.explain()

    documents = ComposedList()

    try:
        # Set up the highlights. Show the entire field highlighted.
        searcher = searcher.highlight(
            'question_content',  # support forum
            'document_summary',  # kb
            pre_tags=['<b>'],
            post_tags=['</b>'],
            number_of_fragments=0)

        # Set up boosts
        searcher = searcher.boost(
            question_title=4.0,
            question_content=3.0,
            question_answer_content=3.0,
            document_title=6.0,
            document_content=1.0,
            document_keywords=8.0,
            document_summary=2.0,

            # Text phrases in document titles and content get an extra
            # boost.
            document_title__match_phrase=10.0,
            document_content__match_phrase=8.0)

        # Build the query
        query_fields = chain(*[
            cls.get_query_fields() for cls in [
                DocumentMappingType,
                QuestionMappingType
            ]
        ])
        query = {}
        # Create match and match_phrase queries for every field
        # we want to search.
        for field in query_fields:
            for query_type in ['match', 'match_phrase']:
                query['{0!s}__{1!s}'.format(field, query_type)] = cleaned_q

        # Transform the query to use locale aware analyzers.
        query = es_utils.es_query_with_analyzer(query, language)

        searcher = searcher.query(should=True, **query)

        num_results = min(searcher.count(), settings.SEARCH_MAX_RESULTS)

        # TODO - Can ditch the ComposedList here, but we need
        # something that paginate can use to figure out the paging.
        documents = ComposedList()
        documents.set_count(('results', searcher), num_results)

        results_per_page = settings.SEARCH_RESULTS_PER_PAGE
        pages = paginate(request, documents, results_per_page)

        # If we already know there aren't any results, skip hitting ES again.
        if num_results == 0:
            searcher = []
        else:
            # Get the slice of documents we want to show on this page.
            documents = documents[offset:offset + results_per_page]

            if len(documents) == 0:
                # If the user requested a page that's beyond the
                # pagination, then documents is an empty list and
                # there are no results to show.
                searcher = []
            else:
                bounds = documents[0][1]
                searcher = searcher[bounds[0]:bounds[1]]

        results = []
        for i, doc in enumerate(searcher):
            rank = i + offset

            if doc['model'] == 'wiki_document':
                summary = _build_es_excerpt(doc)
                if not summary:
                    summary = doc['document_summary']
                result = {
                    'title': doc['document_title'],
                    'type': 'document'}

            elif doc['model'] == 'questions_question':
                summary = _build_es_excerpt(doc)
                if not summary:
                    # We're excerpting only question_content, so if
                    # the query matched question_title or
                    # question_answer_content, then there won't be any
                    # question_content excerpts. In that case, just
                    # show the question--but only the first 500
                    # characters.
                    summary = bleach.clean(
                        doc['question_content'], strip=True)[:500]

                result = {
                    'title': doc['question_title'],
                    'type': 'question',
                    'is_solved': doc['question_is_solved'],
                    'num_answers': doc['question_num_answers'],
                    'num_votes': doc['question_num_votes'],
                    'num_votes_past_week': doc['question_num_votes_past_week']}

            result['url'] = doc['url']
            result['object'] = doc
            result['search_summary'] = summary
            result['rank'] = rank
            result['score'] = doc.es_meta.score
            result['explanation'] = escape(format_explanation(
                doc.es_meta.explanation))
            result['id'] = doc['id']
            results.append(result)

    except ES_EXCEPTIONS as exc:
        # Handle timeout and all those other transient errors with a
        # "Search Unavailable" rather than a Django error page.
        if is_json:
            return HttpResponse(json.dumps({'error': _('Search Unavailable')}),
                                content_type=content_type, status=503)

        # Cheating here: Convert from 'Timeout()' to 'timeout' so
        # we have less code, but still have good stats.
        exc_bucket = repr(exc).lower().strip('()')
        statsd.incr('search.esunified.{0}'.format(exc_bucket))

        log.exception(exc)

        t = 'search/mobile/down.html' if request.MOBILE else 'search/down.html'
        return render(request, t, {'q': cleaned['q']}, status=503)

    items = [(k, v) for k in search_form.fields for
             v in r.getlist(k) if v and k != 'a']
    items.append(('a', '2'))

    fallback_results = None
    if num_results == 0:
        fallback_results = _fallback_results(language, cleaned['product'])

    product = Product.objects.filter(slug__in=cleaned['product'])
    if product:
        product_titles = [_(p.title, 'DB: products.Product.title')
                          for p in product]
    else:
        product_titles = [_('All Products')]

    product_titles = ', '.join(product_titles)

    data = {
        'num_results': num_results,
        'results': results,
        'fallback_results': fallback_results,
        'product_titles': product_titles,
        'q': cleaned['q'],
        'w': cleaned['w'],
        'lang_name': lang_name, }

    if is_json:
        # Models are not json serializable.
        for r in data['results']:
            del r['object']
        data['total'] = len(data['results'])

        data['products'] = ([{'slug': p.slug, 'title': p.title}
                             for p in Product.objects.filter(visible=True)])

        if product:
            data['product'] = product[0].slug

        pages = Paginator(pages)
        data['pagination'] = dict(
            number=pages.pager.number,
            num_pages=pages.pager.paginator.num_pages,
            has_next=pages.pager.has_next(),
            has_previous=pages.pager.has_previous(),
            max=pages.max,
            span=pages.span,
            dotted_upper=pages.pager.dotted_upper,
            dotted_lower=pages.pager.dotted_lower,
            page_range=pages.pager.page_range,
            url=pages.pager.url,
        )
        if not results:
            data['message'] = _('No pages matched the search criteria')
        json_data = json.dumps(data)
        if callback:
            json_data = callback + '(' + json_data + ');'

        return HttpResponse(json_data, content_type=content_type)

    data.update({
        'product': product,
        'products': Product.objects.filter(visible=True),
        'pages': pages,
        'search_form': search_form,
        'advanced': False,
    })
    results_ = render(request, template, data)
    cache_period = settings.SEARCH_CACHE_PERIOD
    results_['Cache-Control'] = 'max-age={0!s}'.format((cache_period * 60))
    results_['Expires'] = (
        (datetime.utcnow() + timedelta(minutes=cache_period))
        .strftime(EXPIRES_FMT))
    results_.set_cookie(settings.LAST_SEARCH_COOKIE, urlquote(cleaned['q']),
                        max_age=3600, secure=False, httponly=False)

    return results_
Exemple #57
0
def simple_search(request, template=None):
    """Elasticsearch-specific simple search view.

    This view is for end user searching of the Knowledge Base and
    Support Forum. Filtering options are limited to:

    * product (`product=firefox`, for example, for only Firefox results)
    * document type (`w=2`, for example, for Support Forum questions only)

    """

    to_json = JSONRenderer().render

    # 1. Prep request.
    # Redirect old Advanced Search URLs (?a={1,2}) to the new URL.
    if request.GET.get('a') in ['1', '2']:
        new_url = reverse('search.advanced') + '?' + request.GET.urlencode()
        return HttpResponseRedirect(new_url)

    # 2. Build form.
    search_form = SimpleSearchForm(request.GET, auto_id=False)

    # 3. Validate request.
    if not search_form.is_valid():
        if request.IS_JSON:
            return HttpResponse(
                json.dumps({'error': _('Invalid search data.')}),
                content_type=request.CONTENT_TYPE,
                status=400)

        t = template if request.MOBILE else 'search/form.html'
        return cache_control(
            render(request, t, {
                'advanced': False,
                'request': request,
                'search_form': search_form}),
            settings.SEARCH_CACHE_PERIOD)

    # 4. Generate search.
    cleaned = search_form.cleaned_data

    # On mobile, we default to just wiki results.
    if request.MOBILE and cleaned['w'] == constants.WHERE_BASIC:
        cleaned['w'] = constants.WHERE_WIKI

    language = locale_or_default(cleaned['language'] or request.LANGUAGE_CODE)
    lang_name = settings.LANGUAGES_DICT.get(language.lower()) or ''

    searcher = generate_simple_search(search_form, language, with_highlights=True)
    searcher = searcher[:settings.SEARCH_MAX_RESULTS]

    # 5. Generate output.
    pages = paginate(request, searcher, settings.SEARCH_RESULTS_PER_PAGE)

    if pages.paginator.count == 0:
        fallback_results = _fallback_results(language, cleaned['product'])
        results = []

    else:
        fallback_results = None
        results = build_results_list(pages, request.IS_JSON)

    product = Product.objects.filter(slug__in=cleaned['product'])
    if product:
        product_titles = [pgettext('DB: products.Product.title', p.title) for p in product]
    else:
        product_titles = [_('All Products')]

    # FIXME: This is probably bad l10n.
    product_titles = ', '.join(product_titles)

    data = {
        'num_results': pages.paginator.count,
        'results': results,
        'fallback_results': fallback_results,
        'product_titles': product_titles,
        'q': cleaned['q'],
        'w': cleaned['w'],
        'lang_name': lang_name,
        'products': Product.objects.filter(visible=True)}

    if request.IS_JSON:
        data['total'] = len(data['results'])
        data['products'] = [{'slug': p.slug, 'title': p.title}
                            for p in data['products']]

        if product:
            data['product'] = product[0].slug

        pages = Paginator(pages)
        data['pagination'] = dict(
            number=pages.pager.number,
            num_pages=pages.pager.paginator.num_pages,
            has_next=pages.pager.has_next(),
            has_previous=pages.pager.has_previous(),
            max=pages.max,
            span=pages.span,
            dotted_upper=pages.pager.dotted_upper,
            dotted_lower=pages.pager.dotted_lower,
            page_range=pages.pager.page_range,
            url=pages.pager.url,
        )
        if not results:
            data['message'] = _('No pages matched the search criteria')

        json_data = to_json(data)
        if request.JSON_CALLBACK:
            json_data = request.JSON_CALLBACK + '(' + json_data + ');'
        return HttpResponse(json_data, content_type=request.CONTENT_TYPE)

    data.update({
        'product': product,
        'pages': pages,
        'search_form': search_form,
        'advanced': False,
    })
    resp = cache_control(render(request, template, data), settings.SEARCH_CACHE_PERIOD)
    resp.set_cookie(settings.LAST_SEARCH_COOKIE, urlquote(cleaned['q']),
                    max_age=3600, secure=False, httponly=False)
    return resp
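A minimal sketch of exercising this view with Django's test client. The /en-US/search path and the format=json parameter (assumed to make the site's middleware set request.IS_JSON) are illustrative guesses, not part of the example above.

# Illustrative only: the URL path and the JSON-format parameter are assumptions.
from django.test import Client

client = Client()

# HTML results page: free-text query restricted to the Firefox product.
html_resp = client.get('/en-US/search',
                       {'q': 'crash on startup', 'product': 'firefox'})

# JSON results: w=2 narrows to Support Forum questions, per the docstring above.
json_resp = client.get('/en-US/search',
                       {'q': 'crash on startup', 'w': 2, 'format': 'json'})

print(html_resp.status_code, json_resp.status_code)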
Example #58
0
def advanced_search(request, template=None):
    """Elasticsearch-specific Advanced search view"""

    to_json = JSONRenderer().render

    # 1. Prep request.
    r = request.GET.copy()
    # TODO: Figure out how to get rid of 'a' and do it.
    # It's basically used to switch between showing the form and showing results.
    a = request.GET.get('a', '2')
    # TODO: This is so the 'a=1' stays in the URL for pagination.
    r['a'] = 1

    language = locale_or_default(request.GET.get('language', request.LANGUAGE_CODE))
    r['language'] = language
    lang = language.lower()
    lang_name = settings.LANGUAGES_DICT.get(lang) or ''

    # 2. Build form.
    search_form = AdvancedSearchForm(r, auto_id=False)
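    # Restrict the form's forum choices to the forums this user may search.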
    search_form.set_allowed_forums(request.user)

    # 3. Validate request.
    # Note: a == 2 means "show the form"--that's all we use it for now.
    if a == '2' or not search_form.is_valid():
        if request.IS_JSON:
            return HttpResponse(
                json.dumps({'error': _('Invalid search data.')}),
                content_type=request.CONTENT_TYPE,
                status=400)

        t = template if request.MOBILE else 'search/form.html'
        data = {'advanced': True,
                'request': request,
                'search_form': search_form}
        # Get the value for the search input from the last search term.
        last_search = request.COOKIES.get(settings.LAST_SEARCH_COOKIE)
        # If there is any cached input from the last search, pass it to the template.
        if last_search and 'q' not in r:
            cached_field = urlquote(last_search)
            data.update({'cached_field': cached_field})

        return cache_control(
            render(request, t, data),
            settings.SEARCH_CACHE_PERIOD)

    # 4. Generate search.
    cleaned = search_form.cleaned_data

    # On mobile, we default to just wiki results.
    if request.MOBILE and cleaned['w'] == constants.WHERE_BASIC:
        cleaned['w'] = constants.WHERE_WIKI

    # We build the searcher directly (rather than through a single mapping type's
    # searcher) because we want to search across multiple doctypes.
    searcher = (AnalyzerS().es(urls=settings.ES_URLS)
                .indexes(es_utils.read_index('default')))

    doctypes = []
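    # F() filter objects combine with & (AND) and | (OR); each enabled section
    # below ORs its own doctype filter into final_filter.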
    final_filter = F()
    unix_now = int(time.time())
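    # Each entry: (indexed field name, BEFORE/AFTER interval choice, date from the
    # form as a Unix timestamp).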
    interval_filters = (
        ('created', cleaned['created'], cleaned['created_date']),
        ('updated', cleaned['updated'], cleaned['updated_date'])
    )

    # Start - wiki search configuration

    if cleaned['w'] & constants.WHERE_WIKI:
        wiki_f = F(model='wiki_document')

        # Category filter
        if cleaned['category']:
            wiki_f &= F(document_category__in=cleaned['category'])

        # Locale filter
        wiki_f &= F(document_locale=language)

        # Product filter
        products = cleaned['product']
        for p in products:
            wiki_f &= F(product=p)

        # Topics filter
        topics = cleaned['topics']
        for t in topics:
            wiki_f &= F(topic=t)

        # Archived bit
        if not cleaned['include_archived']:
            wiki_f &= F(document_is_archived=False)

        # Apply sortby
        sortby = cleaned['sortby_documents']
        try:
            searcher = searcher.order_by(*constants.SORT_DOCUMENTS[sortby])
        except IndexError:
            # Skip index errors because they imply the user is sending us sortby values
            # that aren't valid.
            pass

        doctypes.append(DocumentMappingType.get_mapping_type_name())
        final_filter |= wiki_f

    # End - wiki search configuration

    # Start - support questions configuration

    if cleaned['w'] & constants.WHERE_SUPPORT:
        question_f = F(model='questions_question')

        # These filters are ternary: each can be YES, NO, or OFF.
        ternary_filters = ('is_locked', 'is_solved', 'has_answers',
                           'has_helpful', 'is_archived')
        d = dict(('question_%s' % filter_name,
                  _ternary_filter(cleaned[filter_name]))
                 for filter_name in ternary_filters if cleaned[filter_name])
        if d:
            question_f &= F(**d)

        if cleaned['asked_by']:
            question_f &= F(question_creator=cleaned['asked_by'])

        if cleaned['answered_by']:
            question_f &= F(question_answer_creator=cleaned['answered_by'])

        q_tags = [t.strip() for t in cleaned['q_tags'].split(',')]
        for t in q_tags:
            if t:
                question_f &= F(question_tag=t)

        # Product filter
        products = cleaned['product']
        for p in products:
            question_f &= F(product=p)

        # Topics filter
        topics = cleaned['topics']
        for t in topics:
            question_f &= F(topic=t)

        # Note: num_voted (with a d) is a different field than num_votes
        # (with an s). The former is a dropdown and the latter is an
        # integer value.
        if cleaned['num_voted'] == constants.INTERVAL_BEFORE:
            question_f &= F(question_num_votes__lte=max(cleaned['num_votes'], 0))
        elif cleaned['num_voted'] == constants.INTERVAL_AFTER:
            question_f &= F(question_num_votes__gte=cleaned['num_votes'])

        # Apply sortby
        sortby = cleaned['sortby']
        try:
            searcher = searcher.order_by(*constants.SORT_QUESTIONS[sortby])
        except IndexError:
            # Skip index errors because they imply the user is sending us sortby values
            # that aren't valid.
            pass

        # Apply created and updated filters
        for filter_name, filter_option, filter_date in interval_filters:
            if filter_option == constants.INTERVAL_BEFORE:
                before = {filter_name + '__gte': 0,
                          filter_name + '__lte': max(filter_date, 0)}

                question_f &= F(**before)

            elif filter_option == constants.INTERVAL_AFTER:
                after = {filter_name + '__gte': min(filter_date, unix_now),
                         filter_name + '__lte': unix_now}

                question_f &= F(**after)

        doctypes.append(QuestionMappingType.get_mapping_type_name())
        final_filter |= question_f

    # End - support questions configuration

    # Start - discussion forum configuration

    if cleaned['w'] & constants.WHERE_DISCUSSION:
        discussion_f = F(model='forums_thread')

        if cleaned['author']:
            discussion_f &= F(post_author_ord=cleaned['author'])

        if cleaned['thread_type']:
            if constants.DISCUSSION_STICKY in cleaned['thread_type']:
                discussion_f &= F(post_is_sticky=1)

            if constants.DISCUSSION_LOCKED in cleaned['thread_type']:
                discussion_f &= F(post_is_locked=1)

        valid_forum_ids = [f.id for f in Forum.authorized_forums_for_user(request.user)]

        forum_ids = None
        if cleaned['forum']:
            forum_ids = [f for f in cleaned['forum'] if f in valid_forum_ids]

        # If we removed all the forums they wanted to look at or if
        # they didn't specify, then we filter on the list of all
        # forums they're authorized to look at.
        if not forum_ids:
            forum_ids = valid_forum_ids

        discussion_f &= F(post_forum_id__in=forum_ids)

        # Apply created and updated filters
        for filter_name, filter_option, filter_date in interval_filters:
            if filter_option == constants.INTERVAL_BEFORE:
                before = {filter_name + '__gte': 0,
                          filter_name + '__lte': max(filter_date, 0)}

                discussion_f &= F(**before)

            elif filter_option == constants.INTERVAL_AFTER:
                after = {filter_name + '__gte': min(filter_date, unix_now),
                         filter_name + '__lte': unix_now}

                discussion_f &= F(**after)

        doctypes.append(ThreadMappingType.get_mapping_type_name())
        final_filter |= discussion_f

    # End - discussion forum configuration

    # Done with all the filtery stuff--time to generate results

    searcher = searcher.doctypes(*doctypes)
    searcher = searcher.filter(final_filter)

    if 'explain' in request.GET and request.GET['explain'] == '1':
        searcher = searcher.explain()

    cleaned_q = cleaned['q']

    # Set up the highlights. Show the entire field highlighted.
    searcher = searcher.highlight(
        'question_content',  # support forum
        'document_summary',  # kb
        'post_content',  # contributor forum
        pre_tags=['<b>'],
        post_tags=['</b>'],
        number_of_fragments=0)

    searcher = apply_boosts(searcher)
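    # apply_boosts (presumably defined alongside this view) attaches per-field
    # boost weights to the query.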

    # Build the query
    if cleaned_q:
        query_fields = chain(*[
            cls.get_query_fields() for cls in [
                DocumentMappingType,
                ThreadMappingType,
                QuestionMappingType
            ]
        ])
        query = {}
        # Create a simple_query_string query for every field we want to search.
        for field in query_fields:
            query['%s__sqs' % field] = cleaned_q

        # Transform the query to use locale-aware analyzers.
        query = es_utils.es_query_with_analyzer(query, language)

        searcher = searcher.query(should=True, **query)
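        # should=True combines the per-field queries as a boolean OR: a match in
        # any one field is enough to qualify a document.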

    searcher = searcher[:settings.SEARCH_MAX_RESULTS]

    # 5. Generate output
    pages = paginate(request, searcher, settings.SEARCH_RESULTS_PER_PAGE)

    if pages.paginator.count == 0:
        # If we know there aren't any results, show fallback_results.
        fallback_results = _fallback_results(language, cleaned['product'])
        results = []
    else:
        fallback_results = None
        results = build_results_list(pages, request.IS_JSON)

    items = [(k, v) for k in search_form.fields for
             v in r.getlist(k) if v and k != 'a']
    items.append(('a', '2'))

    product = Product.objects.filter(slug__in=cleaned['product'])
    if product:
        product_titles = [pgettext('DB: products.Product.title', p.title) for p in product]
    else:
        product_titles = [_('All Products')]

    # FIXME: This is probably bad l10n.
    product_titles = ', '.join(product_titles)

    data = {
        'num_results': pages.paginator.count,
        'results': results,
        'fallback_results': fallback_results,
        'product_titles': product_titles,
        'q': cleaned['q'],
        'w': cleaned['w'],
        'lang_name': lang_name,
        'advanced': True,
        'products': Product.objects.filter(visible=True)
    }

    if request.IS_JSON:
        data['total'] = len(data['results'])
        data['products'] = [{'slug': p.slug, 'title': p.title}
                            for p in data['products']]

        if product:
            data['product'] = product[0].slug

        pages = Paginator(pages)
        data['pagination'] = dict(
            number=pages.pager.number,
            num_pages=pages.pager.paginator.num_pages,
            has_next=pages.pager.has_next(),
            has_previous=pages.pager.has_previous(),
            max=pages.max,
            span=pages.span,
            dotted_upper=pages.pager.dotted_upper,
            dotted_lower=pages.pager.dotted_lower,
            page_range=pages.pager.page_range,
            url=pages.pager.url,
        )
        if not results:
            data['message'] = _('No pages matched the search criteria')
        json_data = to_json(data)
        if request.JSON_CALLBACK:
            json_data = request.JSON_CALLBACK + '(' + json_data + ');'
        return HttpResponse(json_data, content_type=request.CONTENT_TYPE)

    data.update({
        'product': product,
        'pages': pages,
        'search_form': search_form
    })
    resp = cache_control(render(request, template, data), settings.SEARCH_CACHE_PERIOD)
    resp.set_cookie(settings.LAST_SEARCH_COOKIE, urlquote(cleaned['q']),
                    max_age=3600, secure=False, httponly=False)
    return resp
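The _ternary_filter helper used in the support-questions block above is not shown in this example. A plausible sketch, assuming the form's ternary choices are exposed as TERNARY_YES/TERNARY_NO in the same constants module used by the view:

# Sketch under assumptions: constants.TERNARY_YES / TERNARY_NO are the ternary
# form choices, and a falsey value means the filter is off (and is skipped above).
def _ternary_filter(ternary_value):
    """Map a ternary form value (YES or NO) to the boolean stored in the index."""
    return ternary_value == constants.TERNARY_YES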