Example #1
File: textwiki.py Project: antiface/ductus
def html5_pagelist_macro(macro_tag, fullpagesource):
    """ generate the html output for the pagelist macro"""

    from lxml import etree
    from ductus.resource.ductmodels import tag_value_attribute_validator
    from ductus.index import search_pages

    tags = macro_tag.get("data-tags", '')

    try:
        parsed_tags = tags.split(',')
        for tag in parsed_tags:
            tag_value_attribute_validator(tag)
    except Exception:
        # invalid tag: report the error in place and bail out,
        # so the error element is not clobbered below
        return macro_tag.append(etree.fromstring('<p>Invalid tag search</p>'))

    try:
        pages = search_pages(tags=parsed_tags)
    except Exception:
        # index failure: report the error in place and bail out,
        # otherwise `pages` would be undefined in the loop below
        return macro_tag.append(etree.fromstring('<p>Search failed</p>'))

    rv = etree.Element('ul')
    rv.set("class", "search_results")
    for page in pages:
        li = etree.SubElement(rv, 'li')
        a = etree.SubElement(li, 'a', href=page['path'])
        a.text = page['absolute_pagename']

    return macro_tag.append(rv)
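A minimal sketch of exercising this macro directly; it assumes a configured ductus install so search_pages can reach the index, and the element and tag values here are illustrative:

from lxml import etree

# stand-in for the macro element the HTML5 wiki renderer would hand over
macro_tag = etree.fromstring('<div data-tags="grammar,beginner"></div>')

# the macro mutates macro_tag in place (lxml's append returns None)
html5_pagelist_macro(macro_tag, fullpagesource=None)
print(etree.tostring(macro_tag, pretty_print=True).decode())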
Example #2
File: textwiki.py Project: antiface/ductus
def search_pages_macro(macro, environ, **kwargs):
    """
    A creole macro that lists wiki pages according to various criteria (tags only for now).
    Usage: <<PageList tags=tag1,tag2,tag3>>
    """
    from genshi import Markup
    from ductus.resource.ductmodels import tag_value_attribute_validator
    from ductus.index import search_pages

    tags = kwargs.get("tags", '')

    try:
        parsed_tags = tags.split(',')
        for tag in parsed_tags:
            tag_value_attribute_validator(tag)
    except Exception:
        return Markup('<p>Invalid tag search</p>')

    try:
        pages = search_pages(tags=parsed_tags)
    except Exception:
        return Markup('<p>Search failed</p>')

    html = ['<ul class="search_results">']
    for page in pages:
        # escape untrusted values before splicing them into raw markup
        html.append('<li><a href="{0}">{1}</a></li>'.format(
            Markup.escape(page['path']),
            Markup.escape(page['absolute_pagename'])))

    html.append('</ul>')
    # a <ul> may not be nested inside <p>, so the class goes on the list itself
    return Markup('\n'.join(html))
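For comparison, the creole variant returns genshi Markup instead of mutating a DOM node. A hedged sketch of a direct call, again assuming a configured ductus install; the macro and environ arguments are unused by the body, so None placeholders suffice here:

markup = search_pages_macro(None, None, tags='grammar,beginner')
print(markup)  # '<ul class="search_results">...' on success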
Example #3
def fsw_get_flashcard(request, extra_tags, prompt_side, answer_side):
    """return a JSON flashcard object
    extra_tags: a list of tags the flashcard deck must have
    prompt_side: the index (0 based) of the side to use as prompt (which cannot be empty)
    answer_side: the index (0 based) of the side that must be empty
    """
    if request.method != 'GET':
        raise ImmediateResponse(
            HttpTextResponseBadRequest('only GET is allowed'))

    # get the language to search for
    language = request.GET.get(
        'language', getattr(settings, "FIVE_SEC_WIDGET_DEFAULT_LANGUAGE",
                            'en'))
    search_tags = ['target-language:' + language] + extra_tags
    # get a list of pages tagged as we want
    url_list = search_pages(tags=search_tags)

    if not url_list:
        raise Http404('No material available for this language')

    #url_list = [url for url in url_list if url.split(':')[0] == language]
    # pick a randomly chosen flashcard that has no text transcript in side[0]
    resource_database = get_resource_database()
    while True:
        url = random.choice(url_list)
        try:
            page = WikiPage.objects.get(name=url['absolute_pagename'])
        except WikiPage.DoesNotExist:
            url_list.remove(url)
            if len(url_list) > 0:
                continue
            else:
                raise Http404('wikipage does not exist: ' + url['path'])

        revision = page.get_latest_revision()
        urn = 'urn:' + revision.urn
        fcd = resource_database.get_resource_object(urn)
        card_index = random.randint(0, len(fcd.cards.array) - 1)
        fc = fcd.cards.array[card_index].get()
        prompt = fc.sides.array[prompt_side].get()
        answer = fc.sides.array[answer_side].get()
        if prompt and not answer:
            break

    resource = resource_json(fc)
    # temporary hack for FSI: add the URL this flashcard is taken from
    tmp_resource = json.loads(resource)
    tmp_resource['fsi_url'] = url['absolute_pagename']
    tmp_resource['fsi_index'] = card_index
    return render_json_response(tmp_resource)
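A sketch of driving this view from a test, assuming Django's RequestFactory and a URLconf that forwards extra_tags, prompt_side, and answer_side as shown; the tag and language values are illustrative:

from django.test import RequestFactory

factory = RequestFactory()
request = factory.get('/flashcard/', {'language': 'fr'})

# side 0 must carry the prompt; side 1 must be empty on the chosen card
response = fsw_get_flashcard(request, extra_tags=['podcast'],
                             prompt_side=0, answer_side=1)
print(response.content)  # JSON flashcard with the fsi_url / fsi_index extras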
Example #4
File: views.py Project: antiface/ductus
def fsw_get_flashcard(request, extra_tags, prompt_side, answer_side):
    """return a JSON flashcard object
    extra_tags: a list of tags the flashcard deck must have
    prompt_side: the index (0 based) of the side to use as prompt (which cannot be empty)
    answer_side: the index (0 based) of the side that must be empty
    """
    if request.method == 'GET':
        # get the language to search for
        language = request.GET.get('language', getattr(settings, "FIVE_SEC_WIDGET_DEFAULT_LANGUAGE", 'en'))
        search_tags = ['target-language:' + language] + extra_tags
        # get a list of pages tagged as we want
        try:
            url_list = search_pages(tags=search_tags)
        except IndexingError:
            raise Http404('Indexing error, contact the site administrator')

        if url_list:
            #url_list = [url for url in url_list if url.split(':')[0] == language]
            # pick a randomly chosen flashcard that has no text transcript in side[0]
            resource_database = get_resource_database()
            while True:
                url = random.choice(url_list)
                try:
                    page = WikiPage.objects.get(name=url['absolute_pagename'])
                except WikiPage.DoesNotExist:
                    url_list.remove(url)
                    if len(url_list) > 0:
                        continue
                    else:
                        raise Http404('wikipage does not exist: ' + url['path'])

                revision = page.get_latest_revision()
                urn = 'urn:' + revision.urn
                fcd = resource_database.get_resource_object(urn)
                card_index = random.randint(0, len(fcd.cards.array) - 1)
                fc = fcd.cards.array[card_index].get()
                prompt = fc.sides.array[prompt_side].get()
                answer = fc.sides.array[answer_side].get()
                if prompt and not answer:
                    break

            resource = resource_json(fc)
            # temporary hack for FSI: add the URL this flashcard is taken from
            tmp_resource = json.loads(resource)
            tmp_resource['fsi_url'] = url['absolute_pagename']
            tmp_resource['fsi_index'] = card_index
            return render_json_response(tmp_resource)

        raise Http404('No material available for this language')

    # reject non-GET requests explicitly instead of silently returning None,
    # mirroring the guard in the early-return variant of this view
    raise ImmediateResponse(
        HttpTextResponseBadRequest('only GET is allowed'))
Example #5
File: views.py Project: wikiotics/ductus1
def ajax_search_pages(request, pagename):
    """return a JSON object containing the urls matching the query
    in the request, such that:
    TODO: document
    """
    # TODO: limit the number of results returned
    if request.method != 'GET':
        raise ImmediateResponse(HttpTextResponseBadRequest('only GET is allowed'))

    params = {}
    params['pagename'] = request.GET.get('pagename', '')
    params['tags'] = request.GET.getlist('tag')  # defaults to [] when absent

    # special search feature to report all pages without tags
    if 'notags' in request.GET:
        params['notags'] = 1
        del params['tags']  # just to be extra sure

    urls = search_pages(**params)

    return render_json_response(urls)
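Client-side, the view reads everything from the query string; a sketch of the requests it accepts, with a hypothetical endpoint URL:

import json
import urllib.parse
import urllib.request

BASE = 'https://example.org/ajax/search_pages'  # hypothetical endpoint

# pages matching a name fragment and carrying two tags ('tag' may repeat)
qs = urllib.parse.urlencode([('pagename', 'lesson'),
                             ('tag', 'target-language:fr'),
                             ('tag', 'grammar')])
urls = json.load(urllib.request.urlopen(BASE + '?' + qs))

# the special 'notags' switch lists every page that has no tags at all
untagged = json.load(urllib.request.urlopen(BASE + '?notags=1'))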
Example #6
File: views.py Project: jennspics/ductus
def ajax_search_pages(request, pagename):
    """return a JSON object containing the urls matching the query
    in the request, such that:
    TODO: document
    """
    # TODO: limit the number of results returned
    if request.method != 'GET':
        raise ImmediateResponse(
            HttpTextResponseBadRequest('only GET is allowed'))

    params = {}
    params['pagename'] = request.GET.get('pagename', '')
    params['tags'] = request.GET.getlist('tag')  # defaults to [] when absent

    # special search feature to report all pages without tags
    if 'notags' in request.GET:
        params['notags'] = 1
        del params['tags']  # just to be extra sure

    urls = search_pages(**params)

    return render_json_response(urls)
Example #7
File: views.py Project: antiface/ductus
def ajax_search_pages(request, pagename):
    """return a JSON object containing the urls matching the query
    in the request, such that:
    TODO: document
    """
    # TODO: limit the number of results returned
    if request.method == 'GET':
        params = {}
        params['pagename'] = request.GET.get('pagename', '')
        params['tags'] = request.GET.getlist('tag')  # defaults to [] when absent

        # special search feature to report all pages without tags
        if 'notags' in request.GET:
            params['notags'] = 1
            del params['tags']  # just to be extra sure

        try:
            urls = search_pages(**params)
        except IndexingError:
            raise Http404('indexing error')

        return render_json_response(urls)

    # reject non-GET requests explicitly instead of silently returning None
    raise ImmediateResponse(
        HttpTextResponseBadRequest('only GET is allowed'))