def preview_textwiki(request, pagename):
    """Return HTML rendered from Creole markup sent via AJAX request"""
    from ductus.utils.http import render_json_response
    if request.method == 'POST':
        from ductus.modules.textwiki.templatetags.textwiki import creole
        markup = request.POST.get('text', '')
        rv = creole(markup)
        return render_json_response({"html": rv})
    return render_json_response({"error": "Error previewing your changes"})

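# Usage sketch, not from the project: exercising preview_textwiki through
# Django's test client inside a configured project. The URL and the
# ?view=preview_textwiki routing spelling below are hypothetical.
from django.test import Client

def demo_preview_textwiki():
    resp = Client().post('/wiki/SomePage?view=preview_textwiki',
                         {'text': '**bold** //italic//'})
    return resp.content  # JSON: {"html": "..."} on a POST
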
def flickr_search_view(request):
    kw = {'page': request.GET.get("page", "1")}
    place_name = None

    if "place_id" in request.GET:
        kw["place_id"] = request.GET["place_id"]
    elif "place" in request.GET and request.GET["place"]:
        places = flickr.places_find(query=request.GET["place"])["places"]["place"]
        try:
            place = places[0]
        except IndexError:
            pass
        else:
            kw["place_id"] = place["place_id"]
            place_name = place["_content"]

    if "q" in request.GET:
        search_photos = partial(flickr.photos_search, per_page=100,
                                license=(','.join(license_map())),
                                safe_search=1, content_type=1, media="photos",
                                extras="license,owner_name,original_format")
        if request.GET.get("sort", None) in valid_sort_methods:
            kw["sort"] = request.GET["sort"]
        if request.GET.get("search_by", None) == 'tags':
            tags = [t for t in re.split(r'\s|"(.+)"', request.GET['q']) if t]
            kw['tags'] = ','.join(tags)
        else:
            kw['text'] = request.GET['q']
        if "group" in request.GET:
            kw['group_id'] = request.GET["group"]
        search_result = search_photos(**kw)["photos"]
        page = int(search_result["page"])
        pages = int(search_result["pages"])
        # if we're not searching in a specific group, only return results for
        # which we are allowed to download the original image.  see ductus
        # ticket #64 for explanation of why
        photos = [FlickrPhoto(p).dict for p in search_result['photo']
                  if 'group' in request.GET or 'originalsecret' in p]
        # if somebody entered a flickr url in the search box, return that image
        if FlickrUriHandler.handles(request.GET['q']):
            from django import forms
            handler = FlickrUriHandler(request.GET['q'])
            try:
                handler.validate()
            except forms.ValidationError:
                pass
            else:
                photos.insert(0, handler.photo.dict)
    else:
        photos = None
        page = 0
        pages = 0

    return render_json_response({
        'place': place_name,
        'photos': photos,
        'page': page,
        'pages': pages,
        'sort_method': kw.get('sort', 'date-posted-desc'),
    })

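# Standalone illustration (inputs are made up) of how the tag search above
# tokenizes the query: re.split with a capturing group keeps quoted phrases
# together as single tags, and the `if t` filter drops the empty strings
# and Nones that re.split interleaves into its result.
import re

def demo_tag_tokenization():
    q = 'sunset "golden gate" bridge'
    tags = [t for t in re.split(r'\s|"(.+)"', q) if t]
    assert tags == ['sunset', 'golden gate', 'bridge']
    return ','.join(tags)  # what ends up in kw['tags']
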
def process_response(self, request, response):
    "Handles successful ajax edits"
    if request.is_ajax() and isinstance(response, SuccessfulEditRedirect):
        d = {"urn": response.urn}
        if "Location" in response:
            d["page_url"] = response["Location"]
        response = render_json_response(d)
    return response

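# Sketch of the middleware contract above (the literal urn value is made
# up for illustration). An AJAX SuccessfulEditRedirect is flattened into
# JSON the editor JavaScript can consume instead of following a redirect:
#
#   redirect carrying urn "urn:sha384:abc..." and Location "/wiki/Foo"
#   -> client receives {"urn": "urn:sha384:abc...", "page_url": "/wiki/Foo"}
#
# Non-AJAX responses pass through untouched and redirect normally.
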
def fsw_get_flashcard(request, extra_tags, prompt_side, answer_side):
    """return a JSON flashcard object

    extra_tags: a list of tags the flashcard deck must have
    prompt_side: the index (0 based) of the side to use as prompt (which cannot be empty)
    answer_side: the index (0 based) of the side that must be empty
    """
    if request.method != 'GET':
        raise ImmediateResponse(
            HttpTextResponseBadRequest('only GET is allowed'))

    # get the language to search for
    language = request.GET.get(
        'language',
        getattr(settings, "FIVE_SEC_WIDGET_DEFAULT_LANGUAGE", 'en'))
    search_tags = ['target-language:' + language] + extra_tags

    # get a list of pages tagged as we want
    url_list = search_pages(tags=search_tags)
    if not url_list:
        raise Http404('No material available for this language')
    #url_list = [url for url in url_list if url.split(':')[0] == language]

    # pick a randomly chosen flashcard that has no text transcript in side[0]
    resource_database = get_resource_database()
    while True:
        url = url_list[random.randint(0, len(url_list) - 1)]
        try:
            page = WikiPage.objects.get(name=url['absolute_pagename'])
        except WikiPage.DoesNotExist:
            url_list.remove(url)
            if len(url_list) > 0:
                continue
            else:
                raise Http404('wikipage does not exist: ' + url['path'])
        revision = page.get_latest_revision()
        urn = 'urn:' + revision.urn
        fcd = resource_database.get_resource_object(urn)
        card_index = random.randint(0, len(fcd.cards.array) - 1)
        fc = fcd.cards.array[card_index].get()
        prompt = fc.sides.array[prompt_side].get()
        answer = fc.sides.array[answer_side].get()
        if prompt and not answer:
            break

    resource = resource_json(fc)
    # temporary hack for FSI: add the URL this flashcard is taken from
    tmp_resource = json.loads(resource)
    tmp_resource['fsi_url'] = url['absolute_pagename']
    tmp_resource['fsi_index'] = card_index
    return render_json_response(tmp_resource)

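# Request sketch (the URL is hypothetical; a configured Django project is
# assumed). The widget asks for a card whose prompt side is filled and
# whose answer side is still empty, e.g. for French material:
#
#   GET /five-sec-widget/flashcard?language=fr
#
# and receives the flashcard JSON from resource_json() plus the two fsi_*
# keys added above, e.g. {..., "fsi_url": "fr/lesson-1", "fsi_index": 3}.
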
def fsw_get_flashcard(request, extra_tags, prompt_side, answer_side):
    """return a JSON flashcard object

    extra_tags: a list of tags the flashcard deck must have
    prompt_side: the index (0 based) of the side to use as prompt (which cannot be empty)
    answer_side: the index (0 based) of the side that must be empty
    """
    if request.method == 'GET':
        # get the language to search for
        language = request.GET.get(
            'language',
            getattr(settings, "FIVE_SEC_WIDGET_DEFAULT_LANGUAGE", 'en'))
        search_tags = ['target-language:' + language] + extra_tags
        # get a list of pages tagged as we want
        try:
            url_list = search_pages(tags=search_tags)
        except IndexingError:
            raise Http404('Indexing error, contact the site administrator')
        if url_list != []:
            #url_list = [url for url in url_list if url.split(':')[0] == language]
            # pick a randomly chosen flashcard that has no text transcript in side[0]
            resource_database = get_resource_database()
            while True:
                url = url_list[random.randint(0, len(url_list) - 1)]
                try:
                    page = WikiPage.objects.get(name=url['absolute_pagename'])
                except WikiPage.DoesNotExist:
                    url_list.remove(url)
                    if len(url_list) > 0:
                        continue
                    else:
                        raise Http404('wikipage does not exist: ' + url['path'])
                revision = page.get_latest_revision()
                urn = 'urn:' + revision.urn
                fcd = resource_database.get_resource_object(urn)
                card_index = random.randint(0, len(fcd.cards.array) - 1)
                fc = fcd.cards.array[card_index].get()
                prompt = fc.sides.array[prompt_side].get()
                answer = fc.sides.array[answer_side].get()
                if prompt and not answer:
                    break
            resource = resource_json(fc)
            # temporary hack for FSI: add the URL this flashcard is taken from
            tmp_resource = json.loads(resource)
            tmp_resource['fsi_url'] = url['absolute_pagename']
            tmp_resource['fsi_index'] = card_index
            return render_json_response(tmp_resource)
    raise Http404('No material available for this language')

def ajax_language_tag_to_description(request, pagename):
    """return a JSON object containing the language name for a code passed in the request, such that:

    (url)?code=en returns {'en': u'English'}
    or
    (url)?code=xx returns {'error': 'invalid language code'}
    """
    if request.method == 'GET':
        code = request.GET.get('code', '')
        rv = {}
        try:
            rv[code] = language_tag_to_description(code)
        except KeyError:
            rv['error'] = 'invalid language code'
        return render_json_response(rv)

def new_audio(request):
    if request.method == 'POST':
        form = AudioImportForm(request.POST, request.FILES)
        if form.is_valid():
            save_context = BlueprintSaveContext.from_request(request)
            urn = form.save(save_context)
            return SuccessfulEditRedirect(urn)
        else:
            if request.is_ajax():
                return render_json_response({'errors': form.errors})
    else:
        form = AudioImportForm()

    return render_to_response('audio/audio_import_form.html', {
        'form': form,
    }, RequestContext(request))

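# Usage sketch (the route and form field name are hypothetical; a
# configured Django project is assumed). A valid POST yields a
# SuccessfulEditRedirect; an invalid AJAX POST yields {"errors": {...}}
# as JSON. new_picture below follows the same POST/GET split.
from django.test import Client

def demo_audio_upload(path):
    with open(path, 'rb') as f:
        return Client().post('/new/audio', {'file': f})
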
def ajax_search_pages(request, pagename):
    """return a JSON object containing the urls matching the query in the request, such that:

    TODO: document
    """
    # TODO: limit the number of results returned
    if request.method != 'GET':
        raise ImmediateResponse(
            HttpTextResponseBadRequest('only GET is allowed'))

    params = {}
    params['pagename'] = request.GET.get('pagename', '')
    params['tags'] = request.GET.getlist('tag', '')
    # special search feature to report all pages without tags
    if 'notags' in request.GET:
        params['notags'] = 1
        del params['tags']  # just to be extra sure

    urls = search_pages(**params)
    return render_json_response(urls)

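# Query sketch (the URL is hypothetical): repeated ?tag= parameters become
# the 'tags' list via getlist(), and ?notags=1 switches to the "pages
# without tags" report:
#
#   GET /ajax/search_pages?pagename=fr&tag=target-language:fr&tag=audio
#   GET /ajax/search_pages?notags=1
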
def new_picture(request):
    if request.GET.get('view') == 'flickr_search':
        return flickr_search_view(request)

    if request.method == 'POST':
        form = PictureImportForm(request.POST, request.FILES)
        if form.is_valid():
            save_context = BlueprintSaveContext.from_request(request)
            urn = form.save(save_context)
            return SuccessfulEditRedirect(urn)
        else:
            if request.is_ajax():
                return render_json_response({'errors': form.errors})
    else:
        form = PictureImportForm()

    verbose_descriptions = PictureImportForm.get_verbose_input_descriptions()
    return render_to_response('picture/picture_import_form.html', {
        'form': form,
        'verbose_descriptions': verbose_descriptions,
    }, RequestContext(request))

def ajax_search_pages(request, pagename):
    """return a JSON object containing the urls matching the query in the request, such that:

    TODO: document
    """
    # TODO: limit the number of results returned
    if request.method == 'GET':
        params = {}
        params['pagename'] = request.GET.get('pagename', '')
        params['tags'] = request.GET.getlist('tag', '')
        # special search feature to report all pages without tags
        if 'notags' in request.GET:
            params['notags'] = 1
            del params['tags']  # just to be extra sure
        try:
            urls = search_pages(**params)
        except IndexingError:
            raise Http404('indexing error')
        return render_json_response(urls)

def view_wikipage(request, prefix, pagename):
    """Used for pages represented by a WikiPage"""
    if not is_legal_wiki_pagename(prefix, pagename):
        raise Http404

    if request.method == 'POST' and not request.GET.get('view', None):
        return _fully_handle_blueprint_post(request, prefix, pagename)

    name = join_pagename(prefix, pagename)
    try:
        page = WikiPage.objects.get(name=name)
    except WikiPage.DoesNotExist:
        page = None

    if page:
        if "oldid" in request.GET:
            try:
                revision = WikiRevision.objects.get(id=request.GET["oldid"],
                                                    page=page)
            except (ValueError, WikiRevision.DoesNotExist):
                return query_string_not_found(request)
            if not revision.urn:
                return query_string_not_found(request)
        else:
            revision = page.get_latest_revision()
    else:
        revision = None

    if revision is None and getattr(settings, "DUCTUS_WIKI_REMOTE", None):
        # See if DUCTUS_WIKI_REMOTE has the page
        try:
            remote_url = "%s%s?view=urn" % (settings.DUCTUS_WIKI_REMOTE,
                                            iri_to_uri(urlquote(u'%s/%s' % (prefix, pagename))))
            remote_urn = json.loads(urlopen(remote_url).read(1000))["urn"]
            # we never actually save this WikiPage or WikiRevision to the database
            if page is None:
                page, page_created = WikiPage.objects.get_or_create(name=name)
            revision = WikiRevision(page=page, urn=remote_urn[4:])
        except urllib2_HTTPError:
            pass

    if revision and revision.urn:
        urn = 'urn:' + revision.urn
    else:
        urn = None

    if request.GET.get('view', None) == 'urn':
        if revision:
            return render_json_response({"urn": urn})

    response = None
    if urn:
        response = main_document_view(request, urn, page, revision)
        if isinstance(response, SuccessfulEditRedirect):
            return _handle_successful_wikiedit(request, response, page)
        response["X-Ductus-URN"] = urn
    else:
        requested_view = request.GET.get("view", None)
        request.ductus = DuctusRequestInfo(None, requested_view, page, revision)
        if requested_view:
            f = registered_views[None].get(requested_view, None)
            if f and f.meets_requirements(request.ductus):
                response = f(request)
    if response is None:
        response = new_wikipage(request, prefix, pagename)

    # wikipage urls expire immediately since they can frequently be edited
    patch_response_headers(response, cache_timeout=0)
    patch_cache_control(response, must_revalidate=True)
    return response

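# Sketch of the ?view=urn round trip used above (the remote host is
# hypothetical): view_wikipage both serves this view and consumes it when
# DUCTUS_WIKI_REMOTE is set.
#
#   GET http://remote.example.org/fr/Lesson-1?view=urn
#   -> {"urn": "urn:sha384:..."}
#
# The local server strips the leading "urn:" prefix (remote_urn[4:]) before
# wrapping the hash in an unsaved WikiRevision.
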